From 3f027da8ec796e64b28489f8ddcc4e82e3aa595a Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Wed, 19 Oct 2022 23:30:06 -0600 Subject: skinny paths: Don't have the cache be so big --- lib/btrfsprogs/btrfsutil/skinny_paths.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'lib/btrfsprogs/btrfsutil') diff --git a/lib/btrfsprogs/btrfsutil/skinny_paths.go b/lib/btrfsprogs/btrfsutil/skinny_paths.go index f7d1924..2f71968 100644 --- a/lib/btrfsprogs/btrfsutil/skinny_paths.go +++ b/lib/btrfsprogs/btrfsutil/skinny_paths.go @@ -39,8 +39,10 @@ func (a *SkinnyPathArena) init() { // item, that's about 16M items. But with overhead of the // LRUCache, it's actually a lot higher than that. So then I // cut it to .5M, and that cut my total memory use to ~8GB, - // which is a good number for me. - a.fatItems = containers.NewLRUCache[skinnyItem, btrfstree.TreePathElem](512 * 1024) + // which is a good number for me. Then I taught it to do a + // better job of recovering trees, and so the memory grew, and I + // cut it to 64K. Then to 8K. Then grew it to 128K. + a.fatItems = containers.NewLRUCache[skinnyItem, btrfstree.TreePathElem](128 * 1024) } } -- cgit v1.2.3-54-g00ecf From 53a2ce3b4dcbfd17ac15230da17130a2aeae42dc Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Mon, 17 Oct 2022 00:15:06 -0600 Subject: Add and improve comments --- lib/btrfs/btrfsitem/item_blockgroup.go | 2 +- lib/btrfs/btrfsitem/item_devextent.go | 6 +++--- lib/btrfs/btrfsitem/item_extent.go | 2 ++ lib/btrfs/btrfsitem/item_extentdataref.go | 10 ++++++---- lib/btrfs/btrfsitem/item_freespacebitmap.go | 2 ++ lib/btrfs/btrfsitem/item_freespaceinfo.go | 22 ++++++++++++++++++++-- lib/btrfs/btrfsitem/item_inode.go | 4 +++- lib/btrfs/btrfsitem/item_root.go | 14 +++++++++----- lib/btrfs/btrfsitem/item_rootref.go | 11 +++++++++-- lib/btrfs/btrfsitem/item_shareddataref.go | 6 +++++- lib/btrfsprogs/btrfsutil/broken_btree.go | 2 +- 11 files changed, 61 insertions(+), 20 deletions(-) (limited to 'lib/btrfsprogs/btrfsutil') diff --git a/lib/btrfs/btrfsitem/item_blockgroup.go b/lib/btrfs/btrfsitem/item_blockgroup.go index 96ce218..6fc09ac 100644 --- a/lib/btrfs/btrfsitem/item_blockgroup.go +++ b/lib/btrfs/btrfsitem/item_blockgroup.go @@ -14,7 +14,7 @@ import ( // key.offset = size of chunk type BlockGroup struct { // BLOCK_GROUP_ITEM=192 Used int64 `bin:"off=0, siz=8"` - ChunkObjectID btrfsprim.ObjID `bin:"off=8, siz=8"` // always BTRFS_FIRST_CHUNK_TREE_OBJECTID + ChunkObjectID btrfsprim.ObjID `bin:"off=8, siz=8"` // always FIRST_CHUNK_TREE_OBJECTID Flags btrfsvol.BlockGroupFlags `bin:"off=16, siz=8"` binstruct.End `bin:"off=24"` } diff --git a/lib/btrfs/btrfsitem/item_devextent.go b/lib/btrfs/btrfsitem/item_devextent.go index 8f5f05c..47bdbcf 100644 --- a/lib/btrfs/btrfsitem/item_devextent.go +++ b/lib/btrfs/btrfsitem/item_devextent.go @@ -13,9 +13,9 @@ import ( // key.objectid = device_id // key.offset = physical_addr type DevExtent struct { // DEV_EXTENT=204 - ChunkTree int64 `bin:"off=0, siz=8"` - ChunkObjectID btrfsprim.ObjID `bin:"off=8, siz=8"` - ChunkOffset btrfsvol.LogicalAddr `bin:"off=16, siz=8"` + ChunkTree btrfsprim.ObjID `bin:"off=0, siz=8"` // always CHUNK_TREE_OBJECTID + ChunkObjectID btrfsprim.ObjID `bin:"off=8, siz=8"` // which chunk within .ChunkTree owns this extent, always FIRST_CHUNK_TREE_OBJECTID + ChunkOffset btrfsvol.LogicalAddr `bin:"off=16, siz=8"` // offset of the CHUNK_ITEM that owns this extent, within the .ChunkObjectID Length btrfsvol.AddrDelta `bin:"off=24, siz=8"` ChunkTreeUUID
btrfsprim.UUID `bin:"off=32, siz=16"` binstruct.End `bin:"off=48"` diff --git a/lib/btrfs/btrfsitem/item_extent.go b/lib/btrfs/btrfsitem/item_extent.go index 97dc105..66aae1d 100644 --- a/lib/btrfs/btrfsitem/item_extent.go +++ b/lib/btrfs/btrfsitem/item_extent.go @@ -12,6 +12,8 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/fmtutil" ) +// key.objectid = laddr of the extent +// key.offset = length of the extent type Extent struct { // EXTENT_ITEM=168 Head ExtentHeader Info TreeBlockInfo // only if .Head.Flags.Has(EXTENT_FLAG_TREE_BLOCK) diff --git a/lib/btrfs/btrfsitem/item_extentdataref.go b/lib/btrfs/btrfsitem/item_extentdataref.go index 74ce799..8c856e2 100644 --- a/lib/btrfs/btrfsitem/item_extentdataref.go +++ b/lib/btrfs/btrfsitem/item_extentdataref.go @@ -9,10 +9,12 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" ) +// key.objectid = laddr of the extent being referenced +// key.offset = crc32c([root,objectid,offset]) type ExtentDataRef struct { // EXTENT_DATA_REF=178 - Root btrfsprim.ObjID `bin:"off=0, siz=8"` - ObjectID btrfsprim.ObjID `bin:"off=8, siz=8"` - Offset int64 `bin:"off=16, siz=8"` - Count int32 `bin:"off=24, siz=4"` + Root btrfsprim.ObjID `bin:"off=0, siz=8"` // subvolume tree ID that references this extent + ObjectID btrfsprim.ObjID `bin:"off=8, siz=8"` // inode number that references this extent within the .Root subvolume + Offset int64 `bin:"off=16, siz=8"` // byte offset for the extent within the file + Count int32 `bin:"off=24, siz=4"` // reference count binstruct.End `bin:"off=28"` } diff --git a/lib/btrfs/btrfsitem/item_freespacebitmap.go b/lib/btrfs/btrfsitem/item_freespacebitmap.go index 7842785..ad46204 100644 --- a/lib/btrfs/btrfsitem/item_freespacebitmap.go +++ b/lib/btrfs/btrfsitem/item_freespacebitmap.go @@ -4,6 +4,8 @@ package btrfsitem +// key.objectid = object ID of the FreeSpaceInfo (logical_addr) +// key.offset = offset of the FreeSpaceInfo (size) type FreeSpaceBitmap []byte // FREE_SPACE_BITMAP=200 func (o *FreeSpaceBitmap) UnmarshalBinary(dat []byte) (int, error) { diff --git a/lib/btrfs/btrfsitem/item_freespaceinfo.go b/lib/btrfs/btrfsitem/item_freespaceinfo.go index 844f664..b38da20 100644 --- a/lib/btrfs/btrfsitem/item_freespaceinfo.go +++ b/lib/btrfs/btrfsitem/item_freespaceinfo.go @@ -6,10 +6,28 @@ package btrfsitem import ( "git.lukeshu.com/btrfs-progs-ng/lib/binstruct" + "git.lukeshu.com/btrfs-progs-ng/lib/fmtutil" ) +// key.objectid = object ID of the BlockGroup (logical_addr) +// key.offset = offset of the BlockGroup (size) type FreeSpaceInfo struct { // FREE_SPACE_INFO=198 - ExtentCount int32 `bin:"off=0, siz=4"` - Flags uint32 `bin:"off=4, siz=4"` + ExtentCount int32 `bin:"off=0, siz=4"` + Flags FreeSpaceFlags `bin:"off=4, siz=4"` binstruct.End `bin:"off=8"` } + +type FreeSpaceFlags uint32 + +const ( + FREE_SPACE_USING_BITMAPS = FreeSpaceFlags(1 << iota) +) + +var freeSpaceFlagNames = []string{ + "USING_BITMAPS", +} + +func (f FreeSpaceFlags) Has(req FreeSpaceFlags) bool { return f&req == req } +func (f FreeSpaceFlags) String() string { + return fmtutil.BitfieldString(f, freeSpaceFlagNames, fmtutil.HexNone) +} diff --git a/lib/btrfs/btrfsitem/item_inode.go b/lib/btrfs/btrfsitem/item_inode.go index 8023156..704b56a 100644 --- a/lib/btrfs/btrfsitem/item_inode.go +++ b/lib/btrfs/btrfsitem/item_inode.go @@ -10,12 +10,14 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/fmtutil" ) +// key.objectid = inode number +// key.offset = 0 type Inode struct { // INODE_ITEM=1 Generation btrfsprim.Generation `bin:"off=0x00, siz=0x08"` 
TransID int64 `bin:"off=0x08, siz=0x08"` Size int64 `bin:"off=0x10, siz=0x08"` // stat NumBytes int64 `bin:"off=0x18, siz=0x08"` // allocated bytes, may be larger than size (or smaller if there are holes?) - BlockGroup int64 `bin:"off=0x20, siz=0x08"` + BlockGroup btrfsprim.ObjID `bin:"off=0x20, siz=0x08"` // only used for freespace inodes NLink int32 `bin:"off=0x28, siz=0x04"` // stat UID int32 `bin:"off=0x2C, siz=0x04"` // stat GID int32 `bin:"off=0x30, siz=0x04"` // stat diff --git a/lib/btrfs/btrfsitem/item_root.go b/lib/btrfs/btrfsitem/item_root.go index 2e0b327..ffbbf4d 100644 --- a/lib/btrfs/btrfsitem/item_root.go +++ b/lib/btrfs/btrfsitem/item_root.go @@ -11,12 +11,16 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/fmtutil" ) +// key.objectid = tree ID +// key.offset = either +// - 0 if objectid is one of the BTRFS_*_TREE_OBJECTID defines or a non-snapshot volume; or +// - transaction_id of when this snapshot was created type Root struct { // ROOT_ITEM=132 - Inode Inode `bin:"off=0x000, siz=0xa0"` + Inode Inode `bin:"off=0x000, siz=0xa0"` // ??? Generation btrfsprim.Generation `bin:"off=0x0a0, siz=0x08"` - RootDirID btrfsprim.ObjID `bin:"off=0x0a8, siz=0x08"` - ByteNr btrfsvol.LogicalAddr `bin:"off=0x0b0, siz=0x08"` - ByteLimit int64 `bin:"off=0x0b8, siz=0x08"` + RootDirID btrfsprim.ObjID `bin:"off=0x0a8, siz=0x08"` // inode number of the root inode + ByteNr btrfsvol.LogicalAddr `bin:"off=0x0b0, siz=0x08"` // root node + ByteLimit int64 `bin:"off=0x0b8, siz=0x08"` // always 0 (unused) BytesUsed int64 `bin:"off=0x0c0, siz=0x08"` LastSnapshot int64 `bin:"off=0x0c8, siz=0x08"` Flags RootFlags `bin:"off=0x0d0, siz=0x08"` @@ -36,7 +40,7 @@ type Root struct { // ROOT_ITEM=132 OTime btrfsprim.Time `bin:"off=0x153, siz=0x0c"` STime btrfsprim.Time `bin:"off=0x15f, siz=0x0c"` RTime btrfsprim.Time `bin:"off=0x16b, siz=0x0c"` - GlobalTreeID btrfsprim.ObjID `bin:"off=0x177, siz=0x08"` + GlobalTreeID btrfsprim.ObjID `bin:"off=0x177, siz=0x08"` // ??? Reserved [7]int64 `bin:"off=0x17f, siz=0x38"` binstruct.End `bin:"off=0x1b7"` } diff --git a/lib/btrfs/btrfsitem/item_rootref.go b/lib/btrfs/btrfsitem/item_rootref.go index d1dfd3b..b33883d 100644 --- a/lib/btrfs/btrfsitem/item_rootref.go +++ b/lib/btrfs/btrfsitem/item_rootref.go @@ -12,9 +12,16 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" ) +// RootRefs link subvolumes parent→child for normal subvolumes and +// base→snapshot for snapshot subvolumes. BACKREF items go the other +// direction; child→parent and snapshot→base. 
+// +// ROOT_REF | ROOT_BACKREF +// key.objectid = ID of the parent subvolume | ID of the child subvolume +// key.offset = ID of the child subvolume | ID of the parent subvolume type RootRef struct { // ROOT_REF=156 ROOT_BACKREF=144 - DirID btrfsprim.ObjID `bin:"off=0x00, siz=0x8"` - Sequence int64 `bin:"off=0x08, siz=0x8"` + DirID btrfsprim.ObjID `bin:"off=0x00, siz=0x8"` // inode of the parent directory of the dir entry + Sequence int64 `bin:"off=0x08, siz=0x8"` // index of that dir entry within the parent NameLen uint16 `bin:"off=0x10, siz=0x2"` // [ignored-when-writing] binstruct.End `bin:"off=0x12"` Name []byte `bin:"-"` diff --git a/lib/btrfs/btrfsitem/item_shareddataref.go b/lib/btrfs/btrfsitem/item_shareddataref.go index 5ce4198..d7765af 100644 --- a/lib/btrfs/btrfsitem/item_shareddataref.go +++ b/lib/btrfs/btrfsitem/item_shareddataref.go @@ -8,7 +8,11 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/binstruct" ) +// key.objectid = laddr of the extent being referenced +// +// key.offset = laddr of the leaf node containing the FileExtent +// (EXTENT_DATA_KEY) for this reference. type SharedDataRef struct { // SHARED_DATA_REF=184 - Count int32 `bin:"off=0, siz=4"` + Count int32 `bin:"off=0, siz=4"` // reference count binstruct.End `bin:"off=4"` } diff --git a/lib/btrfsprogs/btrfsutil/broken_btree.go b/lib/btrfsprogs/btrfsutil/broken_btree.go index 1fa84e7..0660a46 100644 --- a/lib/btrfsprogs/btrfsutil/broken_btree.go +++ b/lib/btrfsprogs/btrfsutil/broken_btree.go @@ -75,7 +75,7 @@ var _ btrfstree.TreeOperator = (*brokenTrees)(nil) // NewBrokenTrees wraps a *btrfs.FS to support looking up information // from broken trees. // -// Of the btrfs.FS.Tree{Verb}Trees methods: +// Of the btrfs.FS.Tree{Verb} methods: // // - TreeWalk works on broken trees // - TreeLookup relies on the tree being properly ordered (which a -- cgit v1.2.3-54-g00ecf From 6c03becc595c9938644e767c852a2627d4a265d3 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Mon, 17 Oct 2022 00:31:38 -0600 Subject: rebuildnodes: wip: New rebuild-nodes implementation --- cmd/btrfs-rec/inspect_rebuildnodes.go | 6 +- .../btrfsinspect/rebuildnodes/bak_rebuildnodes.go | 102 ------- .../btrfsinspect/rebuildnodes/bak_rebuilttrees.go | 89 ------ .../btrfsinspect/rebuildnodes/bak_s3_reinit.go | 139 --------- .../rebuildnodes/bak_s3_reinit_test.go | 82 ------ .../btrfsinspect/rebuildnodes/bak_s4_reattach.go | 161 ----------- .../btrfsinspect/rebuildnodes/orphans.go | 102 +++++++ .../btrfsinspect/rebuildnodes/rebuild.go | 107 +++++++ .../btrfsinspect/rebuildnodes/rebuild_graph.go | 322 +++++++++++++++++++++ .../btrfsinspect/rebuildnodes/treeancestors.go | 76 ----- lib/btrfsprogs/btrfsinspect/rebuildnodes/util.go | 30 +- lib/btrfsprogs/btrfsutil/broken_btree.go | 6 + scripts/main.sh | 6 +- 13 files changed, 553 insertions(+), 675 deletions(-) delete mode 100644 lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_rebuildnodes.go delete mode 100644 lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_rebuilttrees.go delete mode 100644 lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s3_reinit.go delete mode 100644 lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s3_reinit_test.go delete mode 100644 lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s4_reattach.go create mode 100644 lib/btrfsprogs/btrfsinspect/rebuildnodes/orphans.go create mode 100644 lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go create mode 100644 lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go delete mode 100644 lib/btrfsprogs/btrfsinspect/rebuildnodes/treeancestors.go (limited to 
'lib/btrfsprogs/btrfsutil') diff --git a/cmd/btrfs-rec/inspect_rebuildnodes.go b/cmd/btrfs-rec/inspect_rebuildnodes.go index 5d0a54e..5f6d9b5 100644 --- a/cmd/btrfs-rec/inspect_rebuildnodes.go +++ b/cmd/btrfs-rec/inspect_rebuildnodes.go @@ -4,7 +4,6 @@ package main -/* import ( "bufio" "io" @@ -16,8 +15,10 @@ import ( "github.com/spf13/cobra" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes" + "git.lukeshu.com/btrfs-progs-ng/lib/containers" ) func init() { @@ -52,7 +53,7 @@ func init() { }) } -func writeNodesJSON(w io.Writer, rebuiltNodes map[btrfsvol.LogicalAddr]*rebuildnodes.RebuiltNode) (err error) { +func writeNodesJSON(w io.Writer, rebuiltNodes map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr]) (err error) { buffer := bufio.NewWriter(w) defer func() { if _err := buffer.Flush(); err == nil && _err != nil { @@ -66,4 +67,3 @@ func writeNodesJSON(w io.Writer, rebuiltNodes map[btrfsvol.LogicalAddr]*rebuildn ForceTrailingNewlines: true, }, rebuiltNodes) } -*/ diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_rebuildnodes.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_rebuildnodes.go deleted file mode 100644 index 1a9f487..0000000 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_rebuildnodes.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright (C) 2022 Luke Shumaker -// -// SPDX-License-Identifier: GPL-2.0-or-later - -package rebuildnodes - -/* -import ( - "context" - "errors" - - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsutil" - "git.lukeshu.com/btrfs-progs-ng/lib/diskio" -) - -func RebuildNodes(ctx context.Context, fs *btrfs.FS, nodeScanResults btrfsinspect.ScanDevicesResult) (map[btrfsvol.LogicalAddr]*RebuiltNode, error) { - uuidMap, err := buildUUIDMap(ctx, fs, nodeScanResults) - if err != nil { - return nil, err - } - - nfs := &RebuiltTrees{ - inner: fs, - uuidMap: uuidMap, - } - - orphanedNodes, badNodes, treeAncestors, err := classifyNodes(ctx, nfs, nodeScanResults) - if err != nil { - return nil, err - } - - uuidMap.considerAncestors(ctx, treeAncestors) - - rebuiltNodes, err := reInitBrokenNodes(ctx, nfs, badNodes) - if err != nil { - return nil, err - } - - if err := reAttachNodes(ctx, nfs, orphanedNodes, rebuiltNodes); err != nil { - return nil, err - } - - return rebuiltNodes, nil -} - -func spanOfTreePath(fs _FS, path btrfstree.TreePath) (btrfsprim.Key, btrfsprim.Key) { - // tree root error - if len(path) == 0 { - return btrfsprim.Key{}, maxKey - } - - // item error - if path.Node(-1).ToNodeAddr == 0 { - // If we got an item error, then the node is readable - node, _ := fs.ReadNode(path.Parent()) - key := node.Data.BodyLeaf[path.Node(-1).FromItemIdx].Key - return key, key - } - - // node error - // - // assume that path.Node(-1).ToNodeAddr is not readable, but that path.Node(-2).ToNodeAddr is. 
- if len(path) == 1 { - return btrfsprim.Key{}, maxKey - } - parentNode, _ := fs.ReadNode(path.Parent()) - low := parentNode.Data.BodyInternal[path.Node(-1).FromItemIdx].Key - var high btrfsprim.Key - if path.Node(-1).FromItemIdx+1 < len(parentNode.Data.BodyInternal) { - high = keyMm(parentNode.Data.BodyInternal[path.Node(-1).FromItemIdx+1].Key) - } else { - parentPath := path.Parent().DeepCopy() - _, high = spanOfTreePath(fs, parentPath) - } - return low, high -} - -func getChunkTreeUUID(ctx context.Context, fs _FS) (btrfsprim.UUID, bool) { - ctx, cancel := context.WithCancel(ctx) - var ret btrfsprim.UUID - var retOK bool - btrfsutil.WalkAllTrees(ctx, fs, btrfsutil.WalkAllTreesHandler{ - TreeWalkHandler: btrfstree.TreeWalkHandler{ - Node: func(path btrfstree.TreePath, node *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) error { - ret = node.Data.Head.ChunkTreeUUID - retOK = true - cancel() - return nil - }, - }, - Err: func(err *btrfsutil.WalkError) { - // do nothing - }, - }) - return ret, retOK -} -*/ diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_rebuilttrees.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_rebuilttrees.go deleted file mode 100644 index 7e1a7e9..0000000 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_rebuilttrees.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (C) 2022 Luke Shumaker -// -// SPDX-License-Identifier: GPL-2.0-or-later - -package rebuildnodes - -/* -import ( - "context" - "fmt" - - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" - "git.lukeshu.com/btrfs-progs-ng/lib/diskio" -) - -type RebuiltTrees struct { - inner *btrfs.FS - uuidMap uuidMap - nodes map[btrfsvol.LogicalAddr]*RebuiltNode -} - -type _FS interface { - diskio.File[btrfsvol.LogicalAddr] - btrfstree.NodeFile - btrfstree.NodeSource - btrfstree.TreeOperator -} - -// diskio.File - -func (fs *RebuiltTrees) Name() string { return fs.inner.Name() } -func (fs *RebuiltTrees) Size() btrfsvol.LogicalAddr { return fs.inner.Size() } -func (fs *RebuiltTrees) Close() error { return fs.inner.Close() } -func (fs *RebuiltTrees) ReadAt(p []byte, off btrfsvol.LogicalAddr) (int, error) { - sb, err := fs.Superblock() - if err != nil { - return 0, err - } - if rebuilt, ok := fs.nodes[off]; ok && len(p) == int(sb.NodeSize) { - rebuilt.Node.Head.Checksum, err = rebuilt.Node.CalculateChecksum() - if err != nil { - panic(fmt.Errorf("should not happen: %w", err)) - } - bs, err := rebuilt.Node.MarshalBinary() - if err != nil { - panic(fmt.Errorf("should not happen: %w", err)) - } - if len(p) != len(bs) { - panic(fmt.Errorf("should not happen: sb.NodeSize=%v but node marshaled to %v", sb.NodeSize, len(bs))) - } - return copy(p, bs), nil - } - return fs.inner.ReadAt(p, off) -} -func (fs *RebuiltTrees) WriteAt(p []byte, off btrfsvol.LogicalAddr) (int, error) { - return fs.inner.WriteAt(p, off) -} - -// btrfstree.NodeFile - -func (fs *RebuiltTrees) ParentTree(tree btrfsprim.ObjID) (btrfsprim.ObjID, bool) { - return fs.uuidMap.ParentTree(tree) -} - -// btrfstree.NodeSource - -func (fs *RebuiltTrees) Superblock() (*btrfstree.Superblock, error) { return fs.inner.Superblock() } -func (fs *RebuiltTrees) ReadNode(path btrfstree.TreePath) (*diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node], error) { - return btrfstree.FSReadNode(fs, path) -} - -// btrfstree.TreeOperator - -func (fs *RebuiltTrees) TreeWalk(ctx context.Context, treeID btrfsprim.ObjID, errHandle 
func(*btrfstree.TreeError), cbs btrfstree.TreeWalkHandler) { - btrfstree.TreeOperatorImpl{NodeSource: fs}.TreeWalk(ctx, treeID, errHandle, cbs) -} -func (fs *RebuiltTrees) TreeLookup(treeID btrfsprim.ObjID, key btrfsprim.Key) (btrfstree.Item, error) { - return btrfstree.TreeOperatorImpl{NodeSource: fs}.TreeLookup(treeID, key) -} -func (fs *RebuiltTrees) TreeSearch(treeID btrfsprim.ObjID, fn func(key btrfsprim.Key, size uint32) int) (btrfstree.Item, error) { - return btrfstree.TreeOperatorImpl{NodeSource: fs}.TreeSearch(treeID, fn) -} -func (fs *RebuiltTrees) TreeSearchAll(treeID btrfsprim.ObjID, fn func(key btrfsprim.Key, size uint32) int) ([]btrfstree.Item, error) { - return btrfstree.TreeOperatorImpl{NodeSource: fs}.TreeSearchAll(treeID, fn) -} -*/ diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s3_reinit.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s3_reinit.go deleted file mode 100644 index 5983e2f..0000000 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s3_reinit.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (C) 2022 Luke Shumaker -// -// SPDX-License-Identifier: GPL-2.0-or-later - -package rebuildnodes - -/* -import ( - "fmt" - "reflect" - - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" - "git.lukeshu.com/btrfs-progs-ng/lib/containers" - "git.lukeshu.com/btrfs-progs-ng/lib/slices" -) - -type RebuiltNode struct { - Errs containers.Set[string] - MinKey, MaxKey btrfsprim.Key - InTrees containers.Set[btrfsprim.ObjID] - btrfstree.Node -} - -func (a RebuiltNode) Compat(b RebuiltNode) bool { - a.Node.Head.Generation = b.Node.Head.Generation - return reflect.DeepEqual(a.Node, b.Node) -} - -func (a RebuiltNode) Merge(b RebuiltNode) (RebuiltNode, error) { - if !a.Compat(b) { - switch { - case a.Node.Head.Generation > b.Node.Head.Generation: - return a, nil - case a.Node.Head.Generation < b.Node.Head.Generation: - return b, nil - default: - return a, fmt.Errorf("mismatch: %v != %v", a, b) - } - } - - // take the broadest region - if a.MinKey.Cmp(b.MinKey) > 0 { // if a.MinKey > b.MinKey { - a.MinKey = b.MinKey // take the min of the two - } - if a.MaxKey.Cmp(b.MaxKey) < 0 { // if a.MaxKey < b.MaxKey { - a.MaxKey = b.MaxKey // take the min of the two - } - - // take the highest generation - a.Node.Head.Generation = slices.Max(a.Node.Head.Generation, b.Node.Head.Generation) - - // take the union - a.InTrees.InsertFrom(b.InTrees) - a.Errs.InsertFrom(b.Errs) - - return a, nil -} - -func reInitBrokenNodes(ctx context.Context, fs _FS, badNodes []badNode) (map[btrfsvol.LogicalAddr]*RebuiltNode, error) { - dlog.Info(ctx, "Re-initializing bad nodes...") - - sb, err := fs.Superblock() - if err != nil { - return nil, err - } - chunkTreeUUID, ok := getChunkTreeUUID(ctx, fs) - if !ok { - return nil, fmt.Errorf("could not look up chunk tree UUID") - } - - sort.Slice(badNodes, func(i, j int) bool { - iGen := badNodes[i].Path.Node(-1).ToNodeGeneration - jGen := badNodes[j].Path.Node(-1).ToNodeGeneration - switch { - case iGen < jGen: - return true - case iGen > jGen: - return false - default: - iAddr := badNodes[i].Path.Node(-1).ToNodeAddr - jAddr := badNodes[j].Path.Node(-1).ToNodeAddr - return iAddr < jAddr - } - }) - - lastPct := -1 - progress := func(done int) { - pct := int(100 * float64(done) / float64(len(badNodes))) - if pct != lastPct || done == len(badNodes) { - dlog.Infof(ctx, "... 
%v%% (%v/%v)", - pct, done, len(badNodes)) - lastPct = pct - } - } - - rebuiltNodes := make(map[btrfsvol.LogicalAddr]*RebuiltNode) - for i, badNode := range badNodes { - progress(i) - path := badNode.Path - - min, max := spanOfTreePath(fs, path) - node := RebuiltNode{ - Errs: containers.NewSet[string]( - badNode.Err, - ), - MinKey: min, - MaxKey: max, - InTrees: containers.NewSet[btrfsprim.ObjID]( - path.Node(-1).FromTree, - ), - Node: btrfstree.Node{ - Size: sb.NodeSize, - ChecksumType: sb.ChecksumType, - Head: btrfstree.NodeHeader{ - MetadataUUID: sb.EffectiveMetadataUUID(), - Addr: path.Node(-1).ToNodeAddr, - ChunkTreeUUID: chunkTreeUUID, - //Owner: TBD, // see RebuiltNode.InTrees - Generation: path.Node(-1).ToNodeGeneration, - Level: path.Node(-1).ToNodeLevel, - }, - }, - } - if other, ok := rebuiltNodes[path.Node(-1).ToNodeAddr]; ok { - *other, err = other.Merge(node) - if err != nil { - dlog.Errorf(ctx, "... %v", err) - } - } else { - rebuiltNodes[path.Node(-1).ToNodeAddr] = &node - } - } - progress(len(badNodes)) - - dlog.Infof(ctx, "... initialized %d nodes", len(rebuiltNodes)) - return rebuiltNodes, nil -} -*/ diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s3_reinit_test.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s3_reinit_test.go deleted file mode 100644 index 865cd7e..0000000 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s3_reinit_test.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (C) 2022 Luke Shumaker -// -// SPDX-License-Identifier: GPL-2.0-or-later - -package rebuildnodes_test - -/* -import ( - "strings" - "testing" - - "git.lukeshu.com/go/lowmemjson" - "github.com/stretchr/testify/assert" - - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes" - "git.lukeshu.com/btrfs-progs-ng/lib/containers" -) - -func TestEncodeRebuiltNodes(t *testing.T) { - dat := map[btrfsvol.LogicalAddr]*rebuildnodes.RebuiltNode{ - 100007133184: { - Errs: containers.NewSet[string]( - "btrfs.ReadNode: node@0x0000001748e3c000: expected generation\u003c=6596014 but claims to be generation=6596025", - ), - MinKey: btrfsprim.Key{}, - MaxKey: btrfsprim.Key{}, - InTrees: containers.NewSet[btrfsprim.ObjID]( - 257, - ), - Node: btrfstree.Node{}, - }, - } - var buf strings.Builder - assert.NoError(t, lowmemjson.Encode(&lowmemjson.ReEncoder{ - Out: &buf, - - Indent: "\t", - ForceTrailingNewlines: true, - }, dat)) - assert.Equal(t, `{ - "100007133184": { - "Errs": [ - "btrfs.ReadNode: node@0x0000001748e3c000: expected generation\u003c=6596014 but claims to be generation=6596025" - ], - "MinKey": { - "ObjectID": 0, - "ItemType": 0, - "Offset": 0 - }, - "MaxKey": { - "ObjectID": 0, - "ItemType": 0, - "Offset": 0 - }, - "InTrees": [ - 257 - ], - "Size": 0, - "ChecksumType": 0, - "Head": { - "Checksum": "0000000000000000000000000000000000000000000000000000000000000000", - "MetadataUUID": "00000000-0000-0000-0000-000000000000", - "Addr": 0, - "Flags": 0, - "BackrefRev": 0, - "ChunkTreeUUID": "00000000-0000-0000-0000-000000000000", - "Generation": 0, - "Owner": 0, - "NumItems": 0, - "Level": 0 - }, - "BodyInternal": null, - "BodyLeaf": null, - "Padding": null - } -} -`, buf.String()) -} -*/ diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s4_reattach.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s4_reattach.go deleted file mode 100644 index a78d964..0000000 --- 
a/lib/btrfsprogs/btrfsinspect/rebuildnodes/bak_s4_reattach.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright (C) 2022 Luke Shumaker -// -// SPDX-License-Identifier: GPL-2.0-or-later - -package rebuildnodes - -/* -import ( - "context" - "sort" - - "github.com/datawire/dlib/dlog" - - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" - "git.lukeshu.com/btrfs-progs-ng/lib/containers" - "git.lukeshu.com/btrfs-progs-ng/lib/maps" -) - -func (a RebuiltNode) ContainsWholeRegion(min, max btrfsprim.Key) int { - switch { - case min.Cmp(a.MinKey) < 0: - // 'a' is too far right - return -1 - case max.Cmp(a.MaxKey) > 0: - // 'a' is too far left - return 1 - default: - // just right - return 0 - } -} - -func reAttachNodes(ctx context.Context, fs _FS, orphanedNodes containers.Set[btrfsvol.LogicalAddr], rebuiltNodes map[btrfsvol.LogicalAddr]*RebuiltNode) error { - dlog.Info(ctx, "Attaching orphaned nodes to rebuilt nodes...") - - sb, err := fs.Superblock() - if err != nil { - return err - } - - // Index 'rebuiltNodes' for fast lookups. - dlog.Info(ctx, "... indexing rebuilt nodes...") - var byLevel [][]*RebuiltNode - for _, node := range rebuiltNodes { - for int(node.Head.Level) >= len(byLevel) { - byLevel = append(byLevel, []*RebuiltNode(nil)) - } - byLevel[node.Head.Level] = append(byLevel[node.Head.Level], node) - } - for _, slice := range byLevel { - sort.Slice(slice, func(i, j int) bool { - return slice[i].MinKey.Cmp(slice[j].MinKey) < 0 - }) - } - dlog.Info(ctx, "... done indexing") - - // Attach orphanedNodes to the gaps. - dlog.Info(ctx, "... attaching nodes...") - lastPct := -1 - progress := func(done int) { - pct := int(100 * float64(done) / float64(len(orphanedNodes))) - if pct != lastPct || done == len(orphanedNodes) { - dlog.Infof(ctx, "... %v%% (%v/%v)", - pct, done, len(orphanedNodes)) - lastPct = pct - } - } - numAttached := 0 - for i, foundLAddr := range maps.SortedKeys(orphanedNodes) { - progress(i) - foundRef, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, *sb, foundLAddr, btrfstree.NodeExpectations{ - LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: foundLAddr}, - }) - if foundRef == nil { - return err - } - foundMinKey, ok := foundRef.Data.MinItem() - if !ok { - continue - } - foundMaxKey, ok := foundRef.Data.MaxItem() - if !ok { - continue - } - - // `trees` is the set of trees that the node may be - // placed in; '0' is a wildcard that means "any tree". - // We still keep track of the others, in order to try - // to avoid using the wildcard. 
- trees := make(containers.Set[btrfsprim.ObjID]) - tree := foundRef.Data.Head.Owner - for { - trees.Insert(tree) - var ok bool - tree, ok = fs.ParentTree(tree) - if !ok { - // error; accept anything - trees.Insert(0) - break - } - if tree == 0 { - // end of the line - break - } - } - attached := make(containers.Set[btrfsprim.ObjID]) - for level := int(foundRef.Data.Head.Level) + 1; level < len(byLevel) && len(attached) == 0; level++ { - for _, parent := range byLevel[level] { - if parent.ContainsWholeRegion(foundMinKey, foundMaxKey) != 0 { - continue - } - if parent.Node.Head.Generation < foundRef.Data.Head.Generation { - continue - } - if !trees.HasAny(parent.InTrees) { - continue - } - parent.BodyInternal = append(parent.BodyInternal, btrfstree.KeyPointer{ - Key: foundMinKey, - BlockPtr: foundLAddr, - Generation: foundRef.Data.Head.Generation, - }) - attached.InsertFrom(parent.InTrees) - } - } - if _, wildcard := trees[0]; wildcard && len(attached) == 0 { - for level := int(foundRef.Data.Head.Level) + 1; level < len(byLevel) && len(attached) == 0; level++ { - for _, parent := range byLevel[level] { - if parent.ContainsWholeRegion(foundMinKey, foundMaxKey) != 0 { - continue - } - if parent.Node.Head.Generation < foundRef.Data.Head.Generation { - continue - } - parent.BodyInternal = append(parent.BodyInternal, btrfstree.KeyPointer{ - Key: foundMinKey, - BlockPtr: foundLAddr, - Generation: foundRef.Data.Head.Generation, - }) - attached.InsertFrom(parent.InTrees) - } - } - } - - if len(attached) > 0 { - numAttached++ - } else { - dlog.Errorf(ctx, "could not find a broken node to attach node to reattach node@%v to", - foundRef.Addr) - } - } - progress(len(orphanedNodes)) - dlog.Info(ctx, "... ... done attaching") - - dlog.Infof(ctx, "... re-attached %d nodes (%v%% success rate)", - numAttached, int(100*float64(numAttached)/float64(len(orphanedNodes)))) - return nil -} -*/ diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/orphans.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/orphans.go new file mode 100644 index 0000000..3d375f0 --- /dev/null +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/orphans.go @@ -0,0 +1,102 @@ +// Copyright (C) 2022 Luke Shumaker +// +// SPDX-License-Identifier: GPL-2.0-or-later + +package rebuildnodes + +import ( + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" + "git.lukeshu.com/btrfs-progs-ng/lib/containers" + "git.lukeshu.com/btrfs-progs-ng/lib/diskio" +) + +func listRoots(graph nodeGraph, leaf btrfsvol.LogicalAddr) containers.Set[btrfsvol.LogicalAddr] { + kps := graph.EdgesTo[leaf] + if len(kps) == 0 { + return containers.NewSet(leaf) + } + ret := make(containers.Set[btrfsvol.LogicalAddr]) + for _, kp := range kps { + ret.InsertFrom(listRoots(graph, kp.FromNode)) + } + return ret +} + +func walk(graph nodeGraph, root btrfsvol.LogicalAddr, fn func(btrfsvol.LogicalAddr) bool) { + if _, ok := graph.Nodes[root]; !ok { + return + } + if !fn(root) { + return + } + for _, kp := range graph.EdgesFrom[root] { + walk(graph, kp.ToNode, fn) + } +} + +type keyAndTree struct { + btrfsprim.Key + TreeID btrfsprim.ObjID +} + +func (a keyAndTree) Cmp(b keyAndTree) int { + if d := a.Key.Cmp(b.Key); d != 0 { + return d + } + return containers.NativeCmp(a.TreeID, b.TreeID) +} + +func indexOrphans(fs diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock, graph nodeGraph) ( + orphans containers.Set[btrfsvol.LogicalAddr], + leaf2orphans 
map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr], + key2leaf *containers.SortedMap[keyAndTree, btrfsvol.LogicalAddr], + err error, +) { + orphans = make(containers.Set[btrfsvol.LogicalAddr]) + for node := range graph.Nodes { + if len(graph.EdgesTo[node]) == 0 { + orphans.Insert(node) + } + } + + leaf2orphans = make(map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]) + visited := make(containers.Set[btrfsvol.LogicalAddr]) + for orphan := range orphans { + walk(graph, orphan, func(node btrfsvol.LogicalAddr) bool { + if visited.Has(node) { + return false + } + visited.Insert(node) + if graph.Nodes[node].Level == 0 { + if roots := listRoots(graph, node); !roots.Has(0) { + leaf2orphans[node] = roots + } + } + return true + }) + } + + key2leaf = new(containers.SortedMap[keyAndTree, btrfsvol.LogicalAddr]) + for laddr := range leaf2orphans { + nodeRef, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, sb, laddr, btrfstree.NodeExpectations{ + LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: laddr}, + Level: containers.Optional[uint8]{OK: true, Val: 0}, + }) + if err != nil { + return nil, nil, nil, err + } + + for _, item := range nodeRef.Data.BodyLeaf { + k := keyAndTree{ + Key: item.Key, + TreeID: nodeRef.Data.Head.Owner, + } + if cur, ok := key2leaf.Load(k); !ok || graph.Nodes[cur].Generation < nodeRef.Data.Head.Generation { + key2leaf.Store(k, laddr) + } + } + } + return orphans, leaf2orphans, key2leaf, nil +} diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go new file mode 100644 index 0000000..536b61d --- /dev/null +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild.go @@ -0,0 +1,107 @@ +// Copyright (C) 2022 Luke Shumaker +// +// SPDX-License-Identifier: GPL-2.0-or-later + +package rebuildnodes + +import ( + "context" + + "github.com/datawire/dlib/dlog" + + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsutil" + "git.lukeshu.com/btrfs-progs-ng/lib/containers" +) + +type Rebuilder struct { + inner interface { + btrfstree.TreeOperator + Augment(treeID btrfsprim.ObjID, nodeAddr btrfsvol.LogicalAddr) ([]btrfsprim.Key, error) + } + + orphans containers.Set[btrfsvol.LogicalAddr] + leaf2orphans map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr] + key2leaf containers.SortedMap[keyAndTree, btrfsvol.LogicalAddr] + + augments map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr] +} + +func RebuildNodes(ctx context.Context, fs *btrfs.FS, nodeScanResults btrfsinspect.ScanDevicesResult) (map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr], error) { + scanData, err := ScanDevices(ctx, fs, nodeScanResults) // ScanDevices does its own logging + if err != nil { + return nil, err + } + + dlog.Info(ctx, "Reading superblock...") + sb, err := fs.Superblock() + if err != nil { + return nil, err + } + + dlog.Info(ctx, "Indexing orphans...") + orphans, leaf2orphans, key2leaf, err := indexOrphans(fs, *sb, scanData.nodeGraph) + if err != nil { + return nil, err + } + + dlog.Info(ctx, "Rebuilding node tree...") + o := &Rebuilder{ + inner: btrfsutil.NewBrokenTrees(ctx, fs), + + orphans: orphans, + leaf2orphans: leaf2orphans, + key2leaf: *key2leaf, + + augments: 
make(map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr]), + } + if err := o.rebuild(ctx); err != nil { + return nil, err + } + + return o.augments, nil +} + +func (o *Rebuilder) rebuild(ctx context.Context) error { + // TODO + //btrfsutil.WalkAllTrees(ctx, o.inner) + handleItem(o, ctx, 0, btrfstree.Item{}) + return nil +} + +// err implements rebuildCallbacks. +func (o *Rebuilder) err(ctx context.Context, e error) { + // TODO +} + +// want implements rebuildCallbacks. +func (o *Rebuilder) want(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) { + // TODO +} + +// wantOff implements rebuildCallbacks. +func (o *Rebuilder) wantOff(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) { + // TODO +} + +// wantFunc implements rebuildCallbacks. +func (o *Rebuilder) wantFunc(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, fn func(btrfsitem.Item) bool) { + // TODO +} + +// func implements rebuildCallbacks. +// +// interval is [beg, end) +func (o *Rebuilder) wantCSum(ctx context.Context, beg, end btrfsvol.LogicalAddr) { + // TODO +} + +// wantFileExt implements rebuildCallbacks. +func (o *Rebuilder) wantFileExt(ctx context.Context, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) { + // TODO +} diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go new file mode 100644 index 0000000..8247651 --- /dev/null +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/rebuild_graph.go @@ -0,0 +1,322 @@ +// Copyright (C) 2022 Luke Shumaker +// +// SPDX-License-Identifier: GPL-2.0-or-later + +package rebuildnodes + +import ( + "context" + "fmt" + "reflect" + + "github.com/datawire/dlib/dlog" + + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" +) + +type rebuildCallbacks interface { + err(ctx context.Context, e error) + want(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) + wantOff(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) + wantFunc(ctx context.Context, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, fn func(btrfsitem.Item) bool) + wantCSum(ctx context.Context, beg, end btrfsvol.LogicalAddr) // interval is [beg, end) + wantFileExt(ctx context.Context, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) +} + +func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, item btrfstree.Item) { + ctx = dlog.WithField(ctx, "tree", treeID) + ctx = dlog.WithField(ctx, "key", item.Key) + + // Notionally, just express the relationships shown in + // https://btrfs.wiki.kernel.org/index.php/File:References.png (from the page + // https://btrfs.wiki.kernel.org/index.php/Data_Structures ) + switch body := item.Body.(type) { + case btrfsitem.BlockGroup: + o.want(dlog.WithField(ctx, "wants", "Chunk"), + btrfsprim.CHUNK_TREE_OBJECTID, + body.ChunkObjectID, + btrfsitem.CHUNK_ITEM_KEY) + o.wantOff(dlog.WithField(ctx, "wants", "FreeSpaceInfo"), + btrfsprim.FREE_SPACE_TREE_OBJECTID, + item.Key.ObjectID, + btrfsitem.FREE_SPACE_INFO_KEY, + item.Key.Offset) + case btrfsitem.Chunk: + o.want(dlog.WithField(ctx, "wants", "owning Root"), + 
btrfsprim.ROOT_TREE_OBJECTID, + body.Head.Owner, + btrfsitem.ROOT_ITEM_KEY) + case btrfsitem.Dev: + // nothing + case btrfsitem.DevExtent: + o.wantOff(dlog.WithField(ctx, "wants", "Chunk"), + body.ChunkTree, + body.ChunkObjectID, + btrfsitem.CHUNK_ITEM_KEY, + uint64(body.ChunkOffset)) + case btrfsitem.DevStats: + // nothing + case btrfsitem.DirEntry: + // containing-directory + o.wantOff(dlog.WithField(ctx, "wants", "containing dir inode"), + treeID, + item.Key.ObjectID, + btrfsitem.INODE_ITEM_KEY, + 0) + // siblings + switch item.Key.ItemType { + case btrfsitem.DIR_ITEM_KEY: + o.wantFunc(dlog.WithField(ctx, "wants", "corresponding DIR_INDEX"), + treeID, + item.Key.ObjectID, + btrfsitem.DIR_INDEX_KEY, + func(peerItem btrfsitem.Item) bool { + return reflect.DeepEqual(item, peerItem) + }) + case btrfsitem.DIR_INDEX_KEY: + o.wantOff(dlog.WithField(ctx, "wants", "corresponding DIR_ITEM"), + treeID, + item.Key.ObjectID, + btrfsitem.DIR_ITEM_KEY, + btrfsitem.NameHash(body.Name)) + case btrfsitem.XATTR_ITEM_KEY: + // nothing + } + // item-within-directory + if body.Location != (btrfsprim.Key{}) { + switch body.Location.ItemType { + case btrfsitem.INODE_ITEM_KEY: + o.wantOff(dlog.WithField(ctx, "wants", "item being pointed to"), + treeID, + body.Location.ObjectID, + body.Location.ItemType, + body.Location.Offset) + o.wantOff(dlog.WithField(ctx, "wants", "backref from item being pointed to"), + treeID, + body.Location.ObjectID, + btrfsitem.INODE_REF_KEY, + uint64(item.Key.ObjectID)) + case btrfsitem.ROOT_ITEM_KEY: + o.want(dlog.WithField(ctx, "wants", "Root of subvolume being pointed to"), + btrfsprim.ROOT_TREE_OBJECTID, + body.Location.ObjectID, + body.Location.ItemType) + default: + o.err(ctx, fmt.Errorf("DirEntry: unexpected .Location.ItemType: %v", body.Location.ItemType)) + } + } + case btrfsitem.Empty: + // nothing + case btrfsitem.Extent: + //if body.Head.Flags.Has(btrfsitem.EXTENT_FLAG_TREE_BLOCK) { + // // Supposedly this flag indicates that that + // // body.Info.Key identifies a node by the + // // first key in the node. But nothing in the + // // kernel ever reads this, so who knows if it + // // always gets updated correctly? 
+ //} + for _, ref := range body.Refs { + switch refBody := ref.Body.(type) { + case nil: + // nothing + case btrfsitem.ExtentDataRef: + o.wantOff(dlog.WithField(ctx, "wants", "referencing Inode"), + refBody.Root, + refBody.ObjectID, + btrfsitem.INODE_ITEM_KEY, + 0) + o.wantOff(dlog.WithField(ctx, "wants", "referencing FileExtent"), + refBody.Root, + refBody.ObjectID, + btrfsitem.EXTENT_DATA_KEY, + uint64(refBody.Offset)) + case btrfsitem.SharedDataRef: + // nothing + default: + panic("should not happen") + } + } + case btrfsitem.ExtentCSum: + // nothing + case btrfsitem.ExtentDataRef: + o.want(dlog.WithField(ctx, "wants", "Extent being referenced"), + btrfsprim.EXTENT_TREE_OBJECTID, + item.Key.ObjectID, + btrfsitem.EXTENT_ITEM_KEY) + o.wantOff(dlog.WithField(ctx, "wants", "referencing Inode"), + body.Root, + body.ObjectID, + btrfsitem.INODE_ITEM_KEY, + 0) + o.wantOff(dlog.WithField(ctx, "wants", "referencing FileExtent"), + body.Root, + body.ObjectID, + btrfsitem.EXTENT_DATA_KEY, + uint64(body.Offset)) + case btrfsitem.FileExtent: + o.wantOff(dlog.WithField(ctx, "wants", "containing Inode"), + treeID, + item.Key.ObjectID, + btrfsitem.INODE_ITEM_KEY, + 0) + switch body.Type { + case btrfsitem.FILE_EXTENT_INLINE: + // nothing + case btrfsitem.FILE_EXTENT_REG, btrfsitem.FILE_EXTENT_PREALLOC: + // TODO: Check if inodeBody.Flags.Has(btrfsitem.INODE_NODATASUM) + o.wantCSum(dlog.WithField(ctx, "wants", "data sum"), + roundDown(body.BodyExtent.DiskByteNr, btrfssum.BlockSize), + roundUp(body.BodyExtent.DiskByteNr.Add(body.BodyExtent.DiskNumBytes), btrfssum.BlockSize)) + default: + o.err(ctx, fmt.Errorf("FileExtent: unexpected body.Type: %v", body.Type)) + } + case btrfsitem.FreeSpaceBitmap: + o.wantOff(dlog.WithField(ctx, "wants", "FreeSpaceInfo"), + treeID, + item.Key.ObjectID, + btrfsitem.FREE_SPACE_INFO_KEY, + item.Key.Offset) + case btrfsitem.FreeSpaceHeader: + o.wantOff(dlog.WithField(ctx, "wants", ".Location"), + treeID, + body.Location.ObjectID, + body.Location.ItemType, + body.Location.Offset) + case btrfsitem.FreeSpaceInfo: + if body.Flags.Has(btrfsitem.FREE_SPACE_USING_BITMAPS) { + o.wantOff(dlog.WithField(ctx, "wants", "FreeSpaceBitmap"), + treeID, + item.Key.ObjectID, + btrfsitem.FREE_SPACE_BITMAP_KEY, + item.Key.Offset) + } + case btrfsitem.Inode: + o.want(dlog.WithField(ctx, "wants", "backrefs"), + treeID, // TODO: validate the number of these against body.NLink + item.Key.ObjectID, + btrfsitem.INODE_REF_KEY) + o.wantFileExt(dlog.WithField(ctx, "wants", "FileExtents"), + treeID, item.Key.ObjectID, body.Size) + if body.BlockGroup != 0 { + o.want(dlog.WithField(ctx, "wants", "BlockGroup"), + btrfsprim.EXTENT_TREE_OBJECTID, + body.BlockGroup, + btrfsitem.BLOCK_GROUP_ITEM_KEY) + } + case btrfsitem.InodeRefs: + o.wantOff(dlog.WithField(ctx, "wants", "child Inode"), + treeID, + item.Key.ObjectID, + btrfsitem.INODE_ITEM_KEY, + 0) + o.wantOff(dlog.WithField(ctx, "wants", "parent Inode"), + treeID, + btrfsprim.ObjID(item.Key.Offset), + btrfsitem.INODE_ITEM_KEY, + 0) + for _, ref := range body { + o.wantOff(dlog.WithField(ctx, "wants", "DIR_ITEM"), + treeID, + btrfsprim.ObjID(item.Key.Offset), + btrfsitem.DIR_ITEM_KEY, + btrfsitem.NameHash(ref.Name)) + o.wantOff(dlog.WithField(ctx, "wants", "DIR_INDEX"), + treeID, + btrfsprim.ObjID(item.Key.Offset), + btrfsitem.DIR_INDEX_KEY, + uint64(ref.Index)) + } + case btrfsitem.Metadata: + for _, ref := range body.Refs { + switch refBody := ref.Body.(type) { + case nil: + // nothing + case btrfsitem.ExtentDataRef: + o.wantOff(dlog.WithField(ctx, "wants", 
"referencing INode"), + refBody.Root, + refBody.ObjectID, + btrfsitem.INODE_ITEM_KEY, + 0) + o.wantOff(dlog.WithField(ctx, "wants", "referencing FileExtent"), + refBody.Root, + refBody.ObjectID, + btrfsitem.EXTENT_DATA_KEY, + uint64(refBody.Offset)) + case btrfsitem.SharedDataRef: + // nothing + default: + panic("should not happen") + } + } + case btrfsitem.Root: + if body.RootDirID != 0 { + o.wantOff(dlog.WithField(ctx, "wants", "root directory"), + item.Key.ObjectID, + body.RootDirID, + btrfsitem.INODE_ITEM_KEY, + 0) + } + case btrfsitem.RootRef: + var otherType btrfsprim.ItemType + var parent, child btrfsprim.ObjID + switch item.Key.ItemType { + case btrfsitem.ROOT_REF_KEY: + otherType = btrfsitem.ROOT_BACKREF_KEY + parent = item.Key.ObjectID + child = btrfsprim.ObjID(item.Key.Offset) + case btrfsitem.ROOT_BACKREF_KEY: + otherType = btrfsitem.ROOT_REF_KEY + parent = btrfsprim.ObjID(item.Key.Offset) + child = item.Key.ObjectID + default: + panic("should not happen") + } + // sibling + o.wantOff(dlog.WithField(ctx, "wants", fmt.Sprintf("corresponding %v", otherType)), + treeID, + btrfsprim.ObjID(item.Key.Offset), + otherType, + uint64(item.Key.ObjectID)) + // parent + o.want(dlog.WithField(ctx, "wants", "parent subvolume: Root"), + treeID, + parent, + btrfsitem.ROOT_ITEM_KEY) + o.wantOff(dlog.WithField(ctx, "wants", "parent subvolume: Inode of parent dir"), + parent, + body.DirID, + btrfsitem.INODE_ITEM_KEY, + 0) + o.wantOff(dlog.WithField(ctx, "wants", "parent subvolume: DIR_ITEM in parent dir"), + parent, + body.DirID, + btrfsitem.DIR_ITEM_KEY, + btrfsitem.NameHash(body.Name)) + o.wantOff(dlog.WithField(ctx, "wants", "parent subvolume: DIR_INDEX in parent dir"), + parent, + body.DirID, + btrfsitem.DIR_INDEX_KEY, + uint64(body.Sequence)) + // child + o.want(dlog.WithField(ctx, "wants", "child subvolume: Root"), + treeID, + child, + btrfsitem.ROOT_ITEM_KEY) + case btrfsitem.SharedDataRef: + o.want(dlog.WithField(ctx, "wants", "Extent"), + btrfsprim.EXTENT_TREE_OBJECTID, + item.Key.ObjectID, + btrfsitem.EXTENT_ITEM_KEY) + case btrfsitem.UUIDMap: + o.want(dlog.WithField(ctx, "wants", "subvolume Root"), + btrfsprim.ROOT_TREE_OBJECTID, + body.ObjID, + btrfsitem.ROOT_ITEM_KEY) + default: + panic(fmt.Errorf("unsupported item type: %T", body)) + } +} diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/treeancestors.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/treeancestors.go deleted file mode 100644 index 42228ae..0000000 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/treeancestors.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (C) 2022 Luke Shumaker -// -// SPDX-License-Identifier: GPL-2.0-or-later - -package rebuildnodes - -/* -import ( - "context" - - //"github.com/datawire/dlib/dlog" - - "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" - "git.lukeshu.com/btrfs-progs-ng/lib/containers" -) - -func getTreeAncestors(ctx context.Context, scanData scanResult) map[btrfsprim.ObjID]containers.Set[btrfsprim.ObjID] { - treeAncestors := make(map[btrfsprim.ObjID]containers.Set[btrfsprim.ObjID]) - - for laddr, node := range scanData.Nodes { - if _, ok := treeAncestors[node.Owner]; !ok { - treeAncestors[node.Owner] = make(containers.Set[btrfsprim.ObjID]) - } - for _, edge := range scanData.EdgesTo[laddr] { - if edge.FromTree != node.Owner { - if err := checkNodeExpectations(*edge, node); err != nil { - //dlog.Errorf(ctx, "... 
ignoring keypointer %v because %v", edge.String(), err) - } else { - treeAncestors[node.Owner].Insert(edge.FromTree) - } - } - } - } - - return treeAncestors -} - -func (m uuidMap) considerAncestors(ctx context.Context, treeAncestors map[btrfsprim.ObjID]containers.Set[btrfsprim.ObjID]) { - if missing := m.missingRootItems(); len(missing) == 0 { - return - } else { - dlog.Infof(ctx, "Rebuilding %d root items...", len(missing)) - } - - fa := newFullAncestorLister(m, treeAncestors) - - for _, missingRoot := range maps.SortedKeys(m.missingRootItems()) { - if _, ok := m.TreeParent[missingRoot]; ok { - // This one is incomplete because it doesn't have a UUID, not - // because it doesn't have a parent. - continue - } - potentialParents := make(containers.Set[btrfsprim.ObjID]) - potentialParents.InsertFrom(fa.GetFullAncestors(missingRoot)) - for _, ancestor := range maps.SortedKeys(fa.GetFullAncestors(missingRoot)) { - potentialParents.DeleteFrom(fa.GetFullAncestors(ancestor)) - } - if len(potentialParents) == 1 { - parent := potentialParents.TakeOne() - dlog.Infof(ctx, "... the parent of %v is %v", missingRoot, parent) - parentUUID, ok := m.ObjID2UUID[parent] - if !ok { - dlog.Errorf(ctx, "... but can't synthesize a root item because UUID of %v is unknown", parent) - continue - } - m.TreeParent[missingRoot] = parentUUID - } - } - - if missing := m.missingRootItems(); len(missing) > 0 { - dlog.Errorf(ctx, "... could not rebuild root items for %d trees: %v", len(missing), maps.SortedKeys(missing)) - } else { - dlog.Info(ctx, "... rebuilt all root items") - } -} -*/ diff --git a/lib/btrfsprogs/btrfsinspect/rebuildnodes/util.go b/lib/btrfsprogs/btrfsinspect/rebuildnodes/util.go index f1033e6..bdaa0c4 100644 --- a/lib/btrfsprogs/btrfsinspect/rebuildnodes/util.go +++ b/lib/btrfsprogs/btrfsinspect/rebuildnodes/util.go @@ -7,6 +7,8 @@ package rebuildnodes import ( "fmt" + "golang.org/x/exp/constraints" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect" ) @@ -18,26 +20,6 @@ func maybeSet[K, V comparable](name string, m map[K]V, k K, v V) error { return nil } -/* -var maxKey = btrfsprim.Key{ - ObjectID: math.MaxUint64, - ItemType: math.MaxUint8, - Offset: math.MaxUint64, -} - -func keyMm(key btrfsprim.Key) btrfsprim.Key { - switch { - case key.Offset > 0: - key.Offset-- - case key.ItemType > 0: - key.ItemType-- - case key.ObjectID > 0: - key.ObjectID-- - } - return key -} -*/ - func countNodes(nodeScanResults btrfsinspect.ScanDevicesResult) int { var cnt int for _, devResults := range nodeScanResults { @@ -45,3 +27,11 @@ func countNodes(nodeScanResults btrfsinspect.ScanDevicesResult) int { } return cnt } + +func roundDown[T constraints.Integer](n, d T) T { + return (n / d) * d +} + +func roundUp[T constraints.Integer](n, d T) T { + return ((n + d - 1) / d) * d +} diff --git a/lib/btrfsprogs/btrfsutil/broken_btree.go b/lib/btrfsprogs/btrfsutil/broken_btree.go index 0660a46..8833937 100644 --- a/lib/btrfsprogs/btrfsutil/broken_btree.go +++ b/lib/btrfsprogs/btrfsutil/broken_btree.go @@ -94,6 +94,7 @@ func NewBrokenTrees(ctx context.Context, inner *btrfs.FS) interface { btrfstree.TreeOperator Superblock() (*btrfstree.Superblock, error) ReadAt(p []byte, off btrfsvol.LogicalAddr) (int, error) + Augment(treeID btrfsprim.ObjID, nodeAddr btrfsvol.LogicalAddr) ([]btrfsprim.Key, error) } { return &brokenTrees{ ctx: ctx, @@ -315,3 +316,8 @@ func (bt *brokenTrees) Superblock() (*btrfstree.Superblock, error) { func (bt *brokenTrees) ReadAt(p []byte, off btrfsvol.LogicalAddr) (int, error) { return 
bt.inner.ReadAt(p, off) } + +func (bt *brokenTrees) Augment(treeID btrfsprim.ObjID, nodeAddr btrfsvol.LogicalAddr) ([]btrfsprim.Key, error) { + // TODO + return nil, nil +} diff --git a/scripts/main.sh b/scripts/main.sh index 383a9c8..2866baa 100755 --- a/scripts/main.sh +++ b/scripts/main.sh @@ -51,9 +51,9 @@ gen $b.gen/2.mappings.json \ # gen $b.gen/2.loops.txt \ # ./btrfs-rec --pv=$b.img --mappings=$b.gen/2.mappings.json \ # inspect show-loops $b.gen/0.scandevices.json -# gen $b.gen/2.nodes.json \ -# ./btrfs-rec --pv=$b.img --mappings=$b.gen/2.mappings.json \ -# inspect rebuild-nodes $b.gen/0.scandevices.json +gen $b.gen/3.nodes.json \ + ./btrfs-rec --pv=$b.img --mappings=$b.gen/2.mappings.json \ + inspect rebuild-nodes $b.gen/0.scandevices.json gen $b.gen/4.ls-files.txt \ ./btrfs-rec --pv=$b.img --mappings=$b.gen/2.mappings.json \ -- cgit v1.2.3-54-g00ecf From 168e5afbf903fb17ca4680deae2b2716d11f1d03 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Sat, 26 Nov 2022 11:56:32 -0700 Subject: broken_btree: Implement .Augment() --- lib/btrfsprogs/btrfsutil/broken_btree.go | 91 ++++++++++++++++++++------------ 1 file changed, 58 insertions(+), 33 deletions(-) (limited to 'lib/btrfsprogs/btrfsutil') diff --git a/lib/btrfsprogs/btrfsutil/broken_btree.go b/lib/btrfsprogs/btrfsutil/broken_btree.go index 8833937..e44c9e1 100644 --- a/lib/btrfsprogs/btrfsutil/broken_btree.go +++ b/lib/btrfsprogs/btrfsutil/broken_btree.go @@ -145,37 +145,7 @@ func (bt *brokenTrees) treeIndex(treeID btrfsprim.ObjID) treeIndex { cacheEntry.TreeRootErr = err } else { dlog.Infof(bt.ctx, "indexing tree %v...", treeID) - btrfstree.TreeOperatorImpl{NodeSource: bt.inner}.RawTreeWalk( - bt.ctx, - *treeRoot, - func(err *btrfstree.TreeError) { - if len(err.Path) > 0 && err.Path.Node(-1).ToNodeAddr == 0 { - // This is a panic because on the filesystems I'm working with it more likely - // indicates a bug in my item parser than a problem with the filesystem. - panic(fmt.Errorf("TODO: error parsing item: %w", err)) - } - cacheEntry.Errors.Insert(treeIndexError{ - Path: bt.arena.Deflate(err.Path), - Err: err.Err, - }) - }, - btrfstree.TreeWalkHandler{ - Item: func(path btrfstree.TreePath, item btrfstree.Item) error { - if cacheEntry.Items.Lookup(item.Key) != nil { - // This is a panic because I'm not really sure what the best way to - // handle this is, and so if this happens I want the program to crash - // and force me to figure out how to handle it. - panic(fmt.Errorf("dup key=%v in tree=%v", item.Key, treeID)) - } - cacheEntry.Items.Insert(treeIndexValue{ - Path: bt.arena.Deflate(path), - Key: item.Key, - ItemSize: item.BodySize, - }) - return nil - }, - }, - ) + bt.rawTreeWalk(*treeRoot, cacheEntry, nil) dlog.Infof(bt.ctx, "... done indexing tree %v", treeID) } if treeID == btrfsprim.ROOT_TREE_OBJECTID { @@ -186,6 +156,43 @@ func (bt *brokenTrees) treeIndex(treeID btrfsprim.ObjID) treeIndex { return cacheEntry } +func (bt *brokenTrees) rawTreeWalk(root btrfstree.TreeRoot, cacheEntry treeIndex, walked *[]btrfsprim.Key) { + btrfstree.TreeOperatorImpl{NodeSource: bt.inner}.RawTreeWalk( + bt.ctx, + root, + func(err *btrfstree.TreeError) { + if len(err.Path) > 0 && err.Path.Node(-1).ToNodeAddr == 0 { + // This is a panic because on the filesystems I'm working with it more likely + // indicates a bug in my item parser than a problem with the filesystem. 
+ panic(fmt.Errorf("TODO: error parsing item: %w", err)) + } + cacheEntry.Errors.Insert(treeIndexError{ + Path: bt.arena.Deflate(err.Path), + Err: err.Err, + }) + }, + btrfstree.TreeWalkHandler{ + Item: func(path btrfstree.TreePath, item btrfstree.Item) error { + if cacheEntry.Items.Lookup(item.Key) != nil { + // This is a panic because I'm not really sure what the best way to + // handle this is, and so if this happens I want the program to crash + // and force me to figure out how to handle it. + panic(fmt.Errorf("dup key=%v in tree=%v", item.Key, root.TreeID)) + } + cacheEntry.Items.Insert(treeIndexValue{ + Path: bt.arena.Deflate(path), + Key: item.Key, + ItemSize: item.BodySize, + }) + if walked != nil { + *walked = append(*walked, item.Key) + } + return nil + }, + }, + ) +} + func (bt *brokenTrees) TreeLookup(treeID btrfsprim.ObjID, key btrfsprim.Key) (btrfstree.Item, error) { item, err := bt.TreeSearch(treeID, btrfstree.KeySearch(key.Cmp)) if err != nil { @@ -318,6 +325,24 @@ func (bt *brokenTrees) ReadAt(p []byte, off btrfsvol.LogicalAddr) (int, error) { } func (bt *brokenTrees) Augment(treeID btrfsprim.ObjID, nodeAddr btrfsvol.LogicalAddr) ([]btrfsprim.Key, error) { - // TODO - return nil, nil + sb, err := bt.Superblock() + if err != nil { + return nil, err + } + index := bt.treeIndex(treeID) + if index.TreeRootErr != nil { + return nil, index.TreeRootErr + } + nodeRef, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](bt.inner, *sb, nodeAddr, btrfstree.NodeExpectations{}) + if err != nil { + return nil, err + } + var ret []btrfsprim.Key + bt.rawTreeWalk(btrfstree.TreeRoot{ + TreeID: treeID, + RootNode: nodeAddr, + Level: nodeRef.Data.Head.Level, + Generation: nodeRef.Data.Head.Generation, + }, index, &ret) + return ret, nil } -- cgit v1.2.3-54-g00ecf