author    Luke Shumaker <lukeshu@lukeshu.com>  2023-03-17 23:54:56 -0400
committer Luke Shumaker <lukeshu@lukeshu.com>  2023-03-17 23:54:56 -0400
commit    0f96c9ce920875babd4cd23819a2fb2960dc0cc6 (patch)
tree      f50d5a547f354413f45b9a9d497af77a31a7d10b /cmd
parent    0f85e72d1331b49b52925d6cc5ad083a0376104c (diff)
parent    3fea600da8e033abb7e415694e53aaf0787ed95c (diff)
Merge branch 'lukeshu/api-cleanup'
Diffstat (limited to 'cmd')
-rw-r--r--  cmd/btrfs-rec/inspect/dumptrees/print_tree.go                      |  15
-rw-r--r--  cmd/btrfs-rec/inspect/lsfiles/lsfiles.go                           | 249
-rw-r--r--  cmd/btrfs-rec/inspect/mount/mount.go                               |  35
-rw-r--r--  cmd/btrfs-rec/inspect/rebuildmappings/process.go                   |  15
-rw-r--r--  cmd/btrfs-rec/inspect/rebuildmappings/process_blockgroups.go       |  10
-rw-r--r--  cmd/btrfs-rec/inspect/rebuildmappings/process_matchsums_exact.go   |  15
-rw-r--r--  cmd/btrfs-rec/inspect/rebuildmappings/process_matchsums_fuzzy.go   |  15
-rw-r--r--  cmd/btrfs-rec/inspect/rebuildmappings/process_sums_logical.go      |  24
-rw-r--r--  cmd/btrfs-rec/inspect/rebuildmappings/process_sums_physical.go     |  16
-rw-r--r--  cmd/btrfs-rec/inspect/rebuildmappings/scan.go                      |  51
-rw-r--r--  cmd/btrfs-rec/inspect/rebuildmappings/sumrunwithgaps.go            |  24
-rw-r--r--  cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go                      |  62
-rw-r--r--  cmd/btrfs-rec/inspect/rebuildtrees/rebuild_treecb.go               |  26
-rw-r--r--  cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wantcb.go               |  86
-rw-r--r--  cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wanttyp.go              |  22
-rw-r--r--  cmd/btrfs-rec/inspect/rebuildtrees/scan.go                         | 105
-rw-r--r--  cmd/btrfs-rec/inspect_lsfiles.go                                   | 228
-rw-r--r--  cmd/btrfs-rec/inspect_lstrees.go                                   |  13
-rw-r--r--  cmd/btrfs-rec/inspect_spewitems.go                                 |   4
19 files changed, 558 insertions(+), 457 deletions(-)
diff --git a/cmd/btrfs-rec/inspect/dumptrees/print_tree.go b/cmd/btrfs-rec/inspect/dumptrees/print_tree.go
index a8c2adf..60303e9 100644
--- a/cmd/btrfs-rec/inspect/dumptrees/print_tree.go
+++ b/cmd/btrfs-rec/inspect/dumptrees/print_tree.go
@@ -19,7 +19,6 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
- "git.lukeshu.com/btrfs-progs-ng/lib/diskio"
"git.lukeshu.com/btrfs-progs-ng/lib/slices"
"git.lukeshu.com/btrfs-progs-ng/lib/textui"
)
@@ -54,7 +53,7 @@ func DumpTrees(ctx context.Context, out io.Writer, fs *btrfs.FS) {
dlog.Error(ctx, err)
},
btrfstree.TreeWalkHandler{
- Item: func(_ btrfstree.TreePath, item btrfstree.Item) error {
+ Item: func(_ btrfstree.Path, item btrfstree.Item) error {
if item.Key.ItemType != btrfsitem.ROOT_ITEM_KEY {
return nil
}
@@ -100,12 +99,12 @@ var nodeHeaderSize = binstruct.StaticSize(btrfstree.NodeHeader{})
func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfsprim.ObjID) {
var itemOffset uint32
handlers := btrfstree.TreeWalkHandler{
- Node: func(path btrfstree.TreePath, nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) error {
- printHeaderInfo(out, nodeRef.Data)
- itemOffset = nodeRef.Data.Size - uint32(nodeHeaderSize)
+ Node: func(path btrfstree.Path, node *btrfstree.Node) error {
+ printHeaderInfo(out, node)
+ itemOffset = node.Size - uint32(nodeHeaderSize)
return nil
},
- PreKeyPointer: func(path btrfstree.TreePath, item btrfstree.KeyPointer) error {
+ PreKeyPointer: func(path btrfstree.Path, item btrfstree.KeyPointer) error {
treeID := path[0].FromTree
textui.Fprintf(out, "\tkey %v block %v gen %v\n",
item.Key.Format(treeID),
@@ -113,7 +112,7 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
item.Generation)
return nil
},
- Item: func(path btrfstree.TreePath, item btrfstree.Item) error {
+ Item: func(path btrfstree.Path, item btrfstree.Item) error {
treeID := path[0].FromTree
i := path.Node(-1).FromItemSlot
bs, _ := binstruct.Marshal(item.Body)
@@ -375,7 +374,7 @@ func printTree(ctx context.Context, out io.Writer, fs *btrfs.FS, treeID btrfspri
}
// printHeaderInfo mimics btrfs-progs kernel-shared/print-tree.c:print_header_info()
-func printHeaderInfo(out io.Writer, node btrfstree.Node) {
+func printHeaderInfo(out io.Writer, node *btrfstree.Node) {
var typename string
if node.Head.Level > 0 { // interior node
typename = "node"
diff --git a/cmd/btrfs-rec/inspect/lsfiles/lsfiles.go b/cmd/btrfs-rec/inspect/lsfiles/lsfiles.go
new file mode 100644
index 0000000..a713b8a
--- /dev/null
+++ b/cmd/btrfs-rec/inspect/lsfiles/lsfiles.go
@@ -0,0 +1,249 @@
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+// Package lsfiles is the guts of the `btrfs-rec inspect ls-files`
+// command, which prints a tree-listing of all files in the
+// filesystem.
+package lsfiles
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "path"
+ "strings"
+
+ "github.com/datawire/dlib/derror"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/diskio"
+ "git.lukeshu.com/btrfs-progs-ng/lib/maps"
+ "git.lukeshu.com/btrfs-progs-ng/lib/textui"
+)
+
+func LsFiles(
+ out io.Writer,
+ fs interface {
+ btrfstree.TreeOperator
+ Superblock() (*btrfstree.Superblock, error)
+ diskio.ReaderAt[btrfsvol.LogicalAddr]
+ },
+) (err error) {
+ defer func() {
+ if _err := derror.PanicToError(recover()); _err != nil {
+ textui.Fprintf(out, "\n\n%+v\n", _err)
+ err = _err
+ }
+ }()
+
+ printSubvol(out, "", true, "/", btrfs.NewSubvolume(
+ fs,
+ btrfsprim.FS_TREE_OBJECTID,
+ false,
+ ))
+
+ return nil
+}
+
+const (
+ tS = "    "
+ tl = "│   "
+ tT = "├── "
+ tL = "└── "
+)
+
+func printText(out io.Writer, prefix string, isLast bool, name, text string) {
+ first, rest := tT, tl
+ if isLast {
+ first, rest = tL, tS
+ }
+ for i, line := range strings.Split(textui.Sprintf("%q %s", name, text), "\n") {
+ _, _ = io.WriteString(out, prefix)
+ if i == 0 {
+ _, _ = io.WriteString(out, first)
+ } else {
+ _, _ = io.WriteString(out, rest)
+ }
+ _, _ = io.WriteString(out, line)
+ _, _ = io.WriteString(out, "\n")
+ }
+}
+
+func printSubvol(out io.Writer, prefix string, isLast bool, name string, subvol *btrfs.Subvolume) {
+ rootInode, err := subvol.GetRootInode()
+ if err != nil {
+ printText(out, prefix, isLast, name+"/", textui.Sprintf("subvol_id=%v err=%v",
+ subvol.TreeID, fmtErr(err)))
+ return
+ }
+ dir, err := subvol.LoadDir(rootInode)
+ if err != nil {
+ printText(out, prefix, isLast, name+"/", textui.Sprintf("subvol_id=%v err=%v",
+ subvol.TreeID, fmtErr(err)))
+ return
+ }
+ if name == "/" {
+ printDir(out, prefix, isLast, name, dir)
+ return
+ }
+ printText(out, prefix, isLast, name+"/", textui.Sprintf("subvol_id=%v", subvol.TreeID))
+ if isLast {
+ prefix += tS
+ } else {
+ prefix += tl
+ }
+ printDir(out, prefix, true, name, dir)
+}
+
+func fmtErr(err error) string {
+ errStr := err.Error()
+ if strings.Contains(errStr, "\n") {
+ errStr = "\\\n" + errStr
+ }
+ return errStr
+}
+
+func fmtInode(inode btrfs.BareInode) string {
+ var mode btrfsitem.StatMode
+ if inode.InodeItem == nil {
+ inode.Errs = append(inode.Errs, errors.New("missing INODE_ITEM"))
+ } else {
+ mode = inode.InodeItem.Mode
+ }
+ ret := textui.Sprintf("ino=%v mode=%v", inode.Inode, mode)
+ if len(inode.Errs) > 0 {
+ ret += " err=" + fmtErr(inode.Errs)
+ }
+ return ret
+}
+
+func printDir(out io.Writer, prefix string, isLast bool, name string, dir *btrfs.Dir) {
+ printText(out, prefix, isLast, name+"/", fmtInode(dir.BareInode))
+ if isLast {
+ prefix += tS
+ } else {
+ prefix += tl
+ }
+ for i, childName := range maps.SortedKeys(dir.ChildrenByName) {
+ printDirEntry(
+ out,
+ prefix,
+ i == len(dir.ChildrenByName)-1,
+ dir.SV,
+ path.Join(name, childName),
+ dir.ChildrenByName[childName])
+ }
+}
+
+func printDirEntry(out io.Writer, prefix string, isLast bool, subvol *btrfs.Subvolume, name string, entry btrfsitem.DirEntry) {
+ if len(entry.Data) != 0 {
+ panic(fmt.Errorf("TODO: I don't know how to handle dirent.data: %q", name))
+ }
+ switch entry.Type {
+ case btrfsitem.FT_DIR:
+ switch entry.Location.ItemType {
+ case btrfsitem.INODE_ITEM_KEY:
+ dir, err := subvol.LoadDir(entry.Location.ObjectID)
+ if err != nil {
+ printText(out, prefix, isLast, name, textui.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
+ return
+ }
+ printDir(out, prefix, isLast, name, dir)
+ case btrfsitem.ROOT_ITEM_KEY:
+ printSubvol(out, prefix, isLast, name, subvol.NewChildSubvolume(entry.Location.ObjectID))
+ default:
+ panic(fmt.Errorf("TODO: I don't know how to handle an FT_DIR with location.ItemType=%v: %q",
+ entry.Location.ItemType, name))
+ }
+ case btrfsitem.FT_SYMLINK:
+ if entry.Location.ItemType != btrfsitem.INODE_ITEM_KEY {
+ panic(fmt.Errorf("TODO: I don't know how to handle an FT_SYMLINK with location.ItemType=%v: %q",
+ entry.Location.ItemType, name))
+ }
+ file, err := subvol.LoadFile(entry.Location.ObjectID)
+ if err != nil {
+ printText(out, prefix, isLast, name, textui.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
+ return
+ }
+ printSymlink(out, prefix, isLast, name, file)
+ case btrfsitem.FT_REG_FILE:
+ if entry.Location.ItemType != btrfsitem.INODE_ITEM_KEY {
+ panic(fmt.Errorf("TODO: I don't know how to handle an FT_REG_FILE with location.ItemType=%v: %q",
+ entry.Location.ItemType, name))
+ }
+ file, err := subvol.LoadFile(entry.Location.ObjectID)
+ if err != nil {
+ printText(out, prefix, isLast, name, textui.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
+ return
+ }
+ printFile(out, prefix, isLast, name, file)
+ case btrfsitem.FT_SOCK:
+ if entry.Location.ItemType != btrfsitem.INODE_ITEM_KEY {
+ panic(fmt.Errorf("TODO: I don't know how to handle an FT_SOCK with location.ItemType=%v: %q",
+ entry.Location.ItemType, name))
+ }
+ file, err := subvol.LoadFile(entry.Location.ObjectID)
+ if err != nil {
+ printText(out, prefix, isLast, name, textui.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
+ return
+ }
+ printSocket(out, prefix, isLast, name, file)
+ case btrfsitem.FT_FIFO:
+ if entry.Location.ItemType != btrfsitem.INODE_ITEM_KEY {
+ panic(fmt.Errorf("TODO: I don't know how to handle an FT_FIFO with location.ItemType=%v: %q",
+ entry.Location.ItemType, name))
+ }
+ file, err := subvol.LoadFile(entry.Location.ObjectID)
+ if err != nil {
+ printText(out, prefix, isLast, name, textui.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
+ return
+ }
+ printPipe(out, prefix, isLast, name, file)
+ default:
+ panic(fmt.Errorf("TODO: I don't know how to handle a fileType=%v: %q",
+ entry.Type, name))
+ }
+}
+
+func printSymlink(out io.Writer, prefix string, isLast bool, name string, file *btrfs.File) {
+ var tgt []byte
+ if file.InodeItem != nil {
+ var err error
+ tgt, err = io.ReadAll(io.NewSectionReader(file, 0, file.InodeItem.Size))
+ if err != nil {
+ file.Errs = append(file.Errs, err)
+ }
+ }
+ printText(out, prefix, isLast, name, textui.Sprintf(
+ "-> %q : %s",
+ tgt,
+ fmtInode(file.BareInode)))
+}
+
+func printFile(out io.Writer, prefix string, isLast bool, name string, file *btrfs.File) {
+ if file.InodeItem != nil {
+ if _, err := io.Copy(io.Discard, io.NewSectionReader(file, 0, file.InodeItem.Size)); err != nil {
+ file.Errs = append(file.Errs, err)
+ }
+ }
+ printText(out, prefix, isLast, name, fmtInode(file.BareInode))
+}
+
+func printSocket(out io.Writer, prefix string, isLast bool, name string, file *btrfs.File) {
+ if file.InodeItem != nil && file.InodeItem.Size > 0 {
+ panic(fmt.Errorf("TODO: I don't know how to handle a socket with size>0: %q", name))
+ }
+ printText(out, prefix, isLast, name, fmtInode(file.BareInode))
+}
+
+func printPipe(out io.Writer, prefix string, isLast bool, name string, file *btrfs.File) {
+ if file.InodeItem != nil && file.InodeItem.Size > 0 {
+ panic(fmt.Errorf("TODO: I don't know how to handle a pipe with size>0: %q", name))
+ }
+ printText(out, prefix, isLast, name, fmtInode(file.BareInode))
+}
diff --git a/cmd/btrfs-rec/inspect/mount/mount.go b/cmd/btrfs-rec/inspect/mount/mount.go
index 0e8faf1..4049393 100644
--- a/cmd/btrfs-rec/inspect/mount/mount.go
+++ b/cmd/btrfs-rec/inspect/mount/mount.go
@@ -28,6 +28,7 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfsutil"
"git.lukeshu.com/btrfs-progs-ng/lib/containers"
"git.lukeshu.com/btrfs-progs-ng/lib/maps"
@@ -45,14 +46,21 @@ func MountRO(ctx context.Context, fs *btrfs.FS, mountpoint string, noChecksums b
deviceName = abs
}
+ sb, err := fs.Superblock()
+ if err != nil {
+ return err
+ }
+
rootSubvol := &subvolume{
- Subvolume: btrfs.Subvolume{
- FS: btrfsutil.NewOldRebuiltForrest(ctx, fs),
- TreeID: btrfsprim.FS_TREE_OBJECTID,
- NoChecksums: noChecksums,
- },
+ Subvolume: btrfs.NewSubvolume(
+ btrfsutil.NewOldRebuiltForrest(ctx, fs),
+ btrfsprim.FS_TREE_OBJECTID,
+ noChecksums,
+ ),
DeviceName: deviceName,
Mountpoint: mountpoint,
+
+ sb: sb,
}
return rootSubvol.Run(ctx)
}
@@ -107,10 +115,12 @@ type fileState struct {
}
type subvolume struct {
- btrfs.Subvolume
+ *btrfs.Subvolume
DeviceName string
Mountpoint string
+ sb *btrfstree.Superblock
+
fuseutil.NotImplementedFileSystem
lastHandle uint64
dirHandles typedsync.Map[fuseops.HandleID, *dirState]
@@ -189,11 +199,8 @@ func (sv *subvolume) LoadDir(inode btrfsprim.ObjID) (val *btrfs.Dir, err error)
workerName := fmt.Sprintf("%d-%s", val.Inode, filepath.Base(subMountpoint))
sv.grp.Go(workerName, func(ctx context.Context) error {
subSv := &subvolume{
- Subvolume: btrfs.Subvolume{
- FS: sv.FS,
- TreeID: entry.Location.ObjectID,
- NoChecksums: sv.NoChecksums,
- },
+ sb: sv.sb,
+ Subvolume: sv.NewChildSubvolume(entry.Location.ObjectID),
DeviceName: sv.DeviceName,
Mountpoint: filepath.Join(sv.Mountpoint, subMountpoint[1:]),
}
@@ -208,11 +215,9 @@ func (sv *subvolume) LoadDir(inode btrfsprim.ObjID) (val *btrfs.Dir, err error)
}
func (sv *subvolume) StatFS(_ context.Context, op *fuseops.StatFSOp) error {
+ sb := sv.sb
+
// See linux.git/fs/btrfs/super.c:btrfs_statfs()
- sb, err := sv.FS.Superblock()
- if err != nil {
- return err
- }
op.IoSize = sb.SectorSize
op.BlockSize = sb.SectorSize
diff --git a/cmd/btrfs-rec/inspect/rebuildmappings/process.go b/cmd/btrfs-rec/inspect/rebuildmappings/process.go
index a93b697..7a49cc6 100644
--- a/cmd/btrfs-rec/inspect/rebuildmappings/process.go
+++ b/cmd/btrfs-rec/inspect/rebuildmappings/process.go
@@ -117,7 +117,7 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults ScanDevicesR
// First dedup them, because they change for allocations and
// CoW means that they'll bounce around a lot, so you likely
// have oodles of duplicates?
- bgs, err := DedupBlockGroups(scanResults)
+ bgs, err := dedupedBlockGroups(scanResults)
if err != nil {
return err
}
@@ -137,10 +137,7 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults ScanDevicesR
PAddr: otherPAddr.Add(-offsetWithinChunk),
Size: bg.Size,
SizeLocked: true,
- Flags: containers.Optional[btrfsvol.BlockGroupFlags]{
- OK: true,
- Val: bg.Flags,
- },
+ Flags: containers.OptionalValue(bg.Flags),
}
if err := fs.LV.AddMapping(mapping); err != nil {
dlog.Errorf(ctx, "error: adding flags from blockgroup: %v", err)
@@ -162,8 +159,8 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults ScanDevicesR
// slower.
ctx = dlog.WithField(_ctx, "btrfs.inspect.rebuild-mappings.process.step", "5/6")
dlog.Infof(_ctx, "5/6: Searching for %d block groups in checksum map (exact)...", len(bgs))
- physicalSums := ExtractPhysicalSums(scanResults)
- logicalSums := ExtractLogicalSums(ctx, scanResults)
+ physicalSums := extractPhysicalSums(scanResults)
+ logicalSums := extractLogicalSums(ctx, scanResults)
if err := matchBlockGroupSumsExact(ctx, fs, bgs, physicalSums, logicalSums); err != nil {
return err
}
@@ -179,7 +176,7 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults ScanDevicesR
ctx = dlog.WithField(_ctx, "btrfs.inspect.rebuild-mappings.process.step", "report")
dlog.Info(_ctx, "report:")
- unmappedPhysicalRegions := ListUnmappedPhysicalRegions(fs)
+ unmappedPhysicalRegions := listUnmappedPhysicalRegions(fs)
var unmappedPhysical btrfsvol.AddrDelta
var numUnmappedPhysical int
for _, devRegions := range unmappedPhysicalRegions {
@@ -190,7 +187,7 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults ScanDevicesR
}
dlog.Infof(ctx, "... %d of unmapped physical space (across %d regions)", textui.IEC(unmappedPhysical, "B"), numUnmappedPhysical)
- unmappedLogicalRegions := ListUnmappedLogicalRegions(fs, logicalSums)
+ unmappedLogicalRegions := listUnmappedLogicalRegions(fs, logicalSums)
var unmappedLogical btrfsvol.AddrDelta
for _, region := range unmappedLogicalRegions {
unmappedLogical += region.Size()
diff --git a/cmd/btrfs-rec/inspect/rebuildmappings/process_blockgroups.go b/cmd/btrfs-rec/inspect/rebuildmappings/process_blockgroups.go
index f8d2337..e7cdf0e 100644
--- a/cmd/btrfs-rec/inspect/rebuildmappings/process_blockgroups.go
+++ b/cmd/btrfs-rec/inspect/rebuildmappings/process_blockgroups.go
@@ -13,18 +13,18 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/maps"
)
-type BlockGroup struct {
+type blockGroup struct {
LAddr btrfsvol.LogicalAddr
Size btrfsvol.AddrDelta
Flags btrfsvol.BlockGroupFlags
}
-func DedupBlockGroups(scanResults ScanDevicesResult) (map[btrfsvol.LogicalAddr]BlockGroup, error) {
+func dedupedBlockGroups(scanResults ScanDevicesResult) (map[btrfsvol.LogicalAddr]blockGroup, error) {
// Dedup
- bgsSet := make(containers.Set[BlockGroup])
+ bgsSet := make(containers.Set[blockGroup])
for _, devResults := range scanResults {
for _, bg := range devResults.FoundBlockGroups {
- bgsSet.Insert(BlockGroup{
+ bgsSet.Insert(blockGroup{
LAddr: btrfsvol.LogicalAddr(bg.Key.ObjectID),
Size: btrfsvol.AddrDelta(bg.Key.Offset),
Flags: bg.BG.Flags,
@@ -49,7 +49,7 @@ func DedupBlockGroups(scanResults ScanDevicesResult) (map[btrfsvol.LogicalAddr]B
// Return. We return a map instead of a slice in order to
// facilitate easy deletes.
- bgsMap := make(map[btrfsvol.LogicalAddr]BlockGroup, len(bgsSet))
+ bgsMap := make(map[btrfsvol.LogicalAddr]blockGroup, len(bgsSet))
for bg := range bgsSet {
bgsMap[bg.LAddr] = bg
}
diff --git a/cmd/btrfs-rec/inspect/rebuildmappings/process_matchsums_exact.go b/cmd/btrfs-rec/inspect/rebuildmappings/process_matchsums_exact.go
index 533ae67..5148e5c 100644
--- a/cmd/btrfs-rec/inspect/rebuildmappings/process_matchsums_exact.go
+++ b/cmd/btrfs-rec/inspect/rebuildmappings/process_matchsums_exact.go
@@ -19,15 +19,15 @@ import (
func matchBlockGroupSumsExact(ctx context.Context,
fs *btrfs.FS,
- blockgroups map[btrfsvol.LogicalAddr]BlockGroup,
+ blockgroups map[btrfsvol.LogicalAddr]blockGroup,
physicalSums map[btrfsvol.DeviceID]btrfssum.SumRun[btrfsvol.PhysicalAddr],
- logicalSums SumRunWithGaps[btrfsvol.LogicalAddr],
+ logicalSums sumRunWithGaps[btrfsvol.LogicalAddr],
) error {
- regions := ListUnmappedPhysicalRegions(fs)
+ regions := listUnmappedPhysicalRegions(fs)
numBlockgroups := len(blockgroups)
for i, bgLAddr := range maps.SortedKeys(blockgroups) {
blockgroup := blockgroups[bgLAddr]
- bgRun := SumsForLogicalRegion(logicalSums, blockgroup.LAddr, blockgroup.Size)
+ bgRun := sumsForLogicalRegion(logicalSums, blockgroup.LAddr, blockgroup.Size)
if len(bgRun.Runs) == 0 {
dlog.Errorf(ctx, "(%v/%v) blockgroup[laddr=%v] can't be matched because it has 0 runs",
i+1, numBlockgroups, bgLAddr)
@@ -35,7 +35,7 @@ func matchBlockGroupSumsExact(ctx context.Context,
}
var matches []btrfsvol.QualifiedPhysicalAddr
- if err := WalkUnmappedPhysicalRegions(ctx, physicalSums, regions, func(devID btrfsvol.DeviceID, region btrfssum.SumRun[btrfsvol.PhysicalAddr]) error {
+ if err := walkUnmappedPhysicalRegions(ctx, physicalSums, regions, func(devID btrfsvol.DeviceID, region btrfssum.SumRun[btrfsvol.PhysicalAddr]) error {
rawMatches := indexAll[int, btrfssum.ShortSum](region, bgRun)
for _, match := range rawMatches {
matches = append(matches, btrfsvol.QualifiedPhysicalAddr{
@@ -63,10 +63,7 @@ func matchBlockGroupSumsExact(ctx context.Context,
PAddr: matches[0],
Size: blockgroup.Size,
SizeLocked: true,
- Flags: containers.Optional[btrfsvol.BlockGroupFlags]{
- OK: true,
- Val: blockgroup.Flags,
- },
+ Flags: containers.OptionalValue(blockgroup.Flags),
}
if err := fs.LV.AddMapping(mapping); err != nil {
dlog.Errorf(ctx, "error: %v", err)
diff --git a/cmd/btrfs-rec/inspect/rebuildmappings/process_matchsums_fuzzy.go b/cmd/btrfs-rec/inspect/rebuildmappings/process_matchsums_fuzzy.go
index 00f367f..f3557cd 100644
--- a/cmd/btrfs-rec/inspect/rebuildmappings/process_matchsums_fuzzy.go
+++ b/cmd/btrfs-rec/inspect/rebuildmappings/process_matchsums_fuzzy.go
@@ -39,17 +39,17 @@ func (a fuzzyRecord) Compare(b fuzzyRecord) int {
func matchBlockGroupSumsFuzzy(ctx context.Context,
fs *btrfs.FS,
- blockgroups map[btrfsvol.LogicalAddr]BlockGroup,
+ blockgroups map[btrfsvol.LogicalAddr]blockGroup,
physicalSums map[btrfsvol.DeviceID]btrfssum.SumRun[btrfsvol.PhysicalAddr],
- logicalSums SumRunWithGaps[btrfsvol.LogicalAddr],
+ logicalSums sumRunWithGaps[btrfsvol.LogicalAddr],
) error {
_ctx := ctx
ctx = dlog.WithField(_ctx, "btrfs.inspect.rebuild-mappings.process.substep", "indexing")
dlog.Info(ctx, "Indexing physical regions...") // O(m)
- regions := ListUnmappedPhysicalRegions(fs)
+ regions := listUnmappedPhysicalRegions(fs)
physicalIndex := make(map[btrfssum.ShortSum][]btrfsvol.QualifiedPhysicalAddr)
- if err := WalkUnmappedPhysicalRegions(ctx, physicalSums, regions, func(devID btrfsvol.DeviceID, region btrfssum.SumRun[btrfsvol.PhysicalAddr]) error {
+ if err := walkUnmappedPhysicalRegions(ctx, physicalSums, regions, func(devID btrfsvol.DeviceID, region btrfssum.SumRun[btrfsvol.PhysicalAddr]) error {
return region.Walk(ctx, func(paddr btrfsvol.PhysicalAddr, sum btrfssum.ShortSum) error {
physicalIndex[sum] = append(physicalIndex[sum], btrfsvol.QualifiedPhysicalAddr{
Dev: devID,
@@ -67,7 +67,7 @@ func matchBlockGroupSumsFuzzy(ctx context.Context,
numBlockgroups := len(blockgroups)
for i, bgLAddr := range maps.SortedKeys(blockgroups) {
blockgroup := blockgroups[bgLAddr]
- bgRun := SumsForLogicalRegion(logicalSums, blockgroup.LAddr, blockgroup.Size)
+ bgRun := sumsForLogicalRegion(logicalSums, blockgroup.LAddr, blockgroup.Size)
d := bgRun.PatLen()
matches := make(map[btrfsvol.QualifiedPhysicalAddr]int)
@@ -123,10 +123,7 @@ func matchBlockGroupSumsFuzzy(ctx context.Context,
PAddr: best.Dat[0].PAddr,
Size: blockgroup.Size,
SizeLocked: true,
- Flags: containers.Optional[btrfsvol.BlockGroupFlags]{
- OK: true,
- Val: blockgroup.Flags,
- },
+ Flags: containers.OptionalValue(blockgroup.Flags),
}
if err := fs.LV.AddMapping(mapping); err != nil {
dlog.Errorf(ctx, "error: %v", err)
diff --git a/cmd/btrfs-rec/inspect/rebuildmappings/process_sums_logical.go b/cmd/btrfs-rec/inspect/rebuildmappings/process_sums_logical.go
index 2cdabb7..52f8252 100644
--- a/cmd/btrfs-rec/inspect/rebuildmappings/process_sums_logical.go
+++ b/cmd/btrfs-rec/inspect/rebuildmappings/process_sums_logical.go
@@ -19,8 +19,8 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/slices"
)
-func ExtractLogicalSums(ctx context.Context, scanResults ScanDevicesResult) SumRunWithGaps[btrfsvol.LogicalAddr] {
- var records []SysExtentCSum
+func extractLogicalSums(ctx context.Context, scanResults ScanDevicesResult) sumRunWithGaps[btrfsvol.LogicalAddr] {
+ var records []FoundExtentCSum
for _, devResults := range scanResults {
records = append(records, devResults.FoundExtentCSums...)
}
@@ -37,7 +37,7 @@ func ExtractLogicalSums(ctx context.Context, scanResults ScanDevicesResult) SumR
}
})
if len(records) == 0 {
- return SumRunWithGaps[btrfsvol.LogicalAddr]{}
+ return sumRunWithGaps[btrfsvol.LogicalAddr]{}
}
sumSize := records[0].Sums.ChecksumSize
@@ -52,10 +52,10 @@ func ExtractLogicalSums(ctx context.Context, scanResults ScanDevicesResult) SumR
// "AAAAAAA" shouldn't be present, and if we just discard "BBBBBBBB"
// because it conflicts with "CCCCCCC", then we would erroneously
// include "AAAAAAA".
- addrspace := new(containers.RBTree[SysExtentCSum])
+ addrspace := new(containers.RBTree[FoundExtentCSum])
for _, newRecord := range records {
for {
- conflict := addrspace.Search(func(oldRecord SysExtentCSum) int {
+ conflict := addrspace.Search(func(oldRecord FoundExtentCSum) int {
switch {
case newRecord.Sums.Addr.Add(newRecord.Sums.Size()) <= oldRecord.Sums.Addr:
// 'newRecord' is wholly to the left of 'oldRecord'.
@@ -127,7 +127,7 @@ func ExtractLogicalSums(ctx context.Context, scanResults ScanDevicesResult) SumR
case newRecord.Sums.Addr.Add(newRecord.Sums.Size()) > overlapEnd:
suffix = newRecord.Sums.Sums[newOverlapEnd:]
}
- unionRecord := SysExtentCSum{
+ unionRecord := FoundExtentCSum{
Generation: oldRecord.Generation,
Sums: btrfsitem.ExtentCSum{
SumRun: btrfssum.SumRun[btrfsvol.LogicalAddr]{
@@ -143,11 +143,11 @@ func ExtractLogicalSums(ctx context.Context, scanResults ScanDevicesResult) SumR
}
}
- // Now flatten that RBTree in to a SumRunWithGaps.
- var flattened SumRunWithGaps[btrfsvol.LogicalAddr]
+ // Now flatten that RBTree in to a sumRunWithGaps.
+ var flattened sumRunWithGaps[btrfsvol.LogicalAddr]
var curAddr btrfsvol.LogicalAddr
var curSums strings.Builder
- addrspace.Range(func(node *containers.RBNode[SysExtentCSum]) bool {
+ addrspace.Range(func(node *containers.RBNode[FoundExtentCSum]) bool {
curEnd := curAddr + (btrfsvol.LogicalAddr(curSums.Len()/sumSize) * btrfssum.BlockSize)
if node.Value.Sums.Addr != curEnd {
if curSums.Len() > 0 {
@@ -178,7 +178,7 @@ func ExtractLogicalSums(ctx context.Context, scanResults ScanDevicesResult) SumR
return flattened
}
-func ListUnmappedLogicalRegions(fs *btrfs.FS, logicalSums SumRunWithGaps[btrfsvol.LogicalAddr]) []btrfssum.SumRun[btrfsvol.LogicalAddr] {
+func listUnmappedLogicalRegions(fs *btrfs.FS, logicalSums sumRunWithGaps[btrfsvol.LogicalAddr]) []btrfssum.SumRun[btrfsvol.LogicalAddr] {
// There are a lot of ways this algorithm could be made
// faster.
var ret []btrfssum.SumRun[btrfsvol.LogicalAddr]
@@ -221,8 +221,8 @@ func ListUnmappedLogicalRegions(fs *btrfs.FS, logicalSums SumRunWithGaps[btrfsvo
return ret
}
-func SumsForLogicalRegion(sums SumRunWithGaps[btrfsvol.LogicalAddr], beg btrfsvol.LogicalAddr, size btrfsvol.AddrDelta) SumRunWithGaps[btrfsvol.LogicalAddr] {
- runs := SumRunWithGaps[btrfsvol.LogicalAddr]{
+func sumsForLogicalRegion(sums sumRunWithGaps[btrfsvol.LogicalAddr], beg btrfsvol.LogicalAddr, size btrfsvol.AddrDelta) sumRunWithGaps[btrfsvol.LogicalAddr] {
+ runs := sumRunWithGaps[btrfsvol.LogicalAddr]{
Addr: beg,
Size: size,
}
diff --git a/cmd/btrfs-rec/inspect/rebuildmappings/process_sums_physical.go b/cmd/btrfs-rec/inspect/rebuildmappings/process_sums_physical.go
index 392ded9..5f8d932 100644
--- a/cmd/btrfs-rec/inspect/rebuildmappings/process_sums_physical.go
+++ b/cmd/btrfs-rec/inspect/rebuildmappings/process_sums_physical.go
@@ -16,7 +16,7 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/maps"
)
-func ExtractPhysicalSums(scanResults ScanDevicesResult) map[btrfsvol.DeviceID]btrfssum.SumRun[btrfsvol.PhysicalAddr] {
+func extractPhysicalSums(scanResults ScanDevicesResult) map[btrfsvol.DeviceID]btrfssum.SumRun[btrfsvol.PhysicalAddr] {
ret := make(map[btrfsvol.DeviceID]btrfssum.SumRun[btrfsvol.PhysicalAddr], len(scanResults))
for devID, devResults := range scanResults {
ret[devID] = devResults.Checksums
@@ -24,12 +24,12 @@ func ExtractPhysicalSums(scanResults ScanDevicesResult) map[btrfsvol.DeviceID]bt
return ret
}
-type PhysicalRegion struct {
+type physicalRegion struct {
Beg, End btrfsvol.PhysicalAddr
}
-func ListUnmappedPhysicalRegions(fs *btrfs.FS) map[btrfsvol.DeviceID][]PhysicalRegion {
- regions := make(map[btrfsvol.DeviceID][]PhysicalRegion)
+func listUnmappedPhysicalRegions(fs *btrfs.FS) map[btrfsvol.DeviceID][]physicalRegion {
+ regions := make(map[btrfsvol.DeviceID][]physicalRegion)
pos := make(map[btrfsvol.DeviceID]btrfsvol.PhysicalAddr)
mappings := fs.LV.Mappings()
sort.Slice(mappings, func(i, j int) bool {
@@ -37,7 +37,7 @@ func ListUnmappedPhysicalRegions(fs *btrfs.FS) map[btrfsvol.DeviceID][]PhysicalR
})
for _, mapping := range mappings {
if pos[mapping.PAddr.Dev] < mapping.PAddr.Addr {
- regions[mapping.PAddr.Dev] = append(regions[mapping.PAddr.Dev], PhysicalRegion{
+ regions[mapping.PAddr.Dev] = append(regions[mapping.PAddr.Dev], physicalRegion{
Beg: pos[mapping.PAddr.Dev],
End: mapping.PAddr.Addr,
})
@@ -49,7 +49,7 @@ func ListUnmappedPhysicalRegions(fs *btrfs.FS) map[btrfsvol.DeviceID][]PhysicalR
for devID, dev := range fs.LV.PhysicalVolumes() {
devSize := dev.Size()
if pos[devID] < devSize {
- regions[devID] = append(regions[devID], PhysicalRegion{
+ regions[devID] = append(regions[devID], physicalRegion{
Beg: pos[devID],
End: devSize,
})
@@ -62,9 +62,9 @@ func roundUp[T constraints.Integer](x, multiple T) T {
return ((x + multiple - 1) / multiple) * multiple
}
-func WalkUnmappedPhysicalRegions(ctx context.Context,
+func walkUnmappedPhysicalRegions(ctx context.Context,
physicalSums map[btrfsvol.DeviceID]btrfssum.SumRun[btrfsvol.PhysicalAddr],
- gaps map[btrfsvol.DeviceID][]PhysicalRegion,
+ gaps map[btrfsvol.DeviceID][]physicalRegion,
fn func(btrfsvol.DeviceID, btrfssum.SumRun[btrfsvol.PhysicalAddr]) error,
) error {
for _, devID := range maps.SortedKeys(gaps) {
diff --git a/cmd/btrfs-rec/inspect/rebuildmappings/scan.go b/cmd/btrfs-rec/inspect/rebuildmappings/scan.go
index b88f01c..76d8a75 100644
--- a/cmd/btrfs-rec/inspect/rebuildmappings/scan.go
+++ b/cmd/btrfs-rec/inspect/rebuildmappings/scan.go
@@ -19,7 +19,6 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfsutil"
"git.lukeshu.com/btrfs-progs-ng/lib/containers"
- "git.lukeshu.com/btrfs-progs-ng/lib/diskio"
"git.lukeshu.com/btrfs-progs-ng/lib/textui"
)
@@ -30,29 +29,31 @@ type ScanDevicesResult = map[btrfsvol.DeviceID]ScanOneDeviceResult
type ScanOneDeviceResult struct {
Checksums btrfssum.SumRun[btrfsvol.PhysicalAddr]
FoundNodes map[btrfsvol.LogicalAddr][]btrfsvol.PhysicalAddr
- FoundChunks []btrfstree.SysChunk
- FoundBlockGroups []SysBlockGroup
- FoundDevExtents []SysDevExtent
- FoundExtentCSums []SysExtentCSum
+ FoundChunks []FoundChunk
+ FoundBlockGroups []FoundBlockGroup
+ FoundDevExtents []FoundDevExtent
+ FoundExtentCSums []FoundExtentCSum
}
-type SysBlockGroup struct {
+type FoundChunk = btrfstree.SysChunk
+
+type FoundBlockGroup struct {
Key btrfsprim.Key
BG btrfsitem.BlockGroup
}
-type SysDevExtent struct {
+type FoundDevExtent struct {
Key btrfsprim.Key
DevExt btrfsitem.DevExtent
}
-type SysExtentCSum struct {
+type FoundExtentCSum struct {
Generation btrfsprim.Generation
Sums btrfsitem.ExtentCSum
}
// Compare implements containers.Ordered.
-func (a SysExtentCSum) Compare(b SysExtentCSum) int {
+func (a FoundExtentCSum) Compare(b FoundExtentCSum) int {
return containers.NativeCompare(a.Sums.Addr, b.Sums.Addr)
}
@@ -121,22 +122,22 @@ func (scanner *deviceScanner) ScanSector(_ context.Context, dev *btrfs.Device, p
return nil
}
-func (scanner *deviceScanner) ScanNode(ctx context.Context, nodeRef *diskio.Ref[btrfsvol.PhysicalAddr, btrfstree.Node]) error {
- scanner.result.FoundNodes[nodeRef.Data.Head.Addr] = append(scanner.result.FoundNodes[nodeRef.Data.Head.Addr], nodeRef.Addr)
- for i, item := range nodeRef.Data.BodyLeaf {
+func (scanner *deviceScanner) ScanNode(ctx context.Context, addr btrfsvol.PhysicalAddr, node *btrfstree.Node) error {
+ scanner.result.FoundNodes[node.Head.Addr] = append(scanner.result.FoundNodes[node.Head.Addr], addr)
+ for i, item := range node.BodyLeaf {
switch item.Key.ItemType {
case btrfsitem.CHUNK_ITEM_KEY:
switch itemBody := item.Body.(type) {
case *btrfsitem.Chunk:
dlog.Tracef(ctx, "node@%v: item %v: found chunk",
- nodeRef.Addr, i)
- scanner.result.FoundChunks = append(scanner.result.FoundChunks, btrfstree.SysChunk{
+ addr, i)
+ scanner.result.FoundChunks = append(scanner.result.FoundChunks, FoundChunk{
Key: item.Key,
Chunk: *itemBody,
})
case *btrfsitem.Error:
dlog.Errorf(ctx, "node@%v: item %v: error: malformed CHUNK_ITEM: %v",
- nodeRef.Addr, i, itemBody.Err)
+ addr, i, itemBody.Err)
default:
panic(fmt.Errorf("should not happen: CHUNK_ITEM has unexpected item type: %T", itemBody))
}
@@ -144,14 +145,14 @@ func (scanner *deviceScanner) ScanNode(ctx context.Context, nodeRef *diskio.Ref[
switch itemBody := item.Body.(type) {
case *btrfsitem.BlockGroup:
dlog.Tracef(ctx, "node@%v: item %v: found block group",
- nodeRef.Addr, i)
- scanner.result.FoundBlockGroups = append(scanner.result.FoundBlockGroups, SysBlockGroup{
+ addr, i)
+ scanner.result.FoundBlockGroups = append(scanner.result.FoundBlockGroups, FoundBlockGroup{
Key: item.Key,
BG: *itemBody,
})
case *btrfsitem.Error:
dlog.Errorf(ctx, "node@%v: item %v: error: malformed BLOCK_GROUP_ITEM: %v",
- nodeRef.Addr, i, itemBody.Err)
+ addr, i, itemBody.Err)
default:
panic(fmt.Errorf("should not happen: BLOCK_GROUP_ITEM has unexpected item type: %T", itemBody))
}
@@ -159,14 +160,14 @@ func (scanner *deviceScanner) ScanNode(ctx context.Context, nodeRef *diskio.Ref[
switch itemBody := item.Body.(type) {
case *btrfsitem.DevExtent:
dlog.Tracef(ctx, "node@%v: item %v: found dev extent",
- nodeRef.Addr, i)
- scanner.result.FoundDevExtents = append(scanner.result.FoundDevExtents, SysDevExtent{
+ addr, i)
+ scanner.result.FoundDevExtents = append(scanner.result.FoundDevExtents, FoundDevExtent{
Key: item.Key,
DevExt: *itemBody,
})
case *btrfsitem.Error:
dlog.Errorf(ctx, "node@%v: item %v: error: malformed DEV_EXTENT: %v",
- nodeRef.Addr, i, itemBody.Err)
+ addr, i, itemBody.Err)
default:
panic(fmt.Errorf("should not happen: DEV_EXTENT has unexpected item type: %T", itemBody))
}
@@ -174,14 +175,14 @@ func (scanner *deviceScanner) ScanNode(ctx context.Context, nodeRef *diskio.Ref[
switch itemBody := item.Body.(type) {
case *btrfsitem.ExtentCSum:
dlog.Tracef(ctx, "node@%v: item %v: found csums",
- nodeRef.Addr, i)
- scanner.result.FoundExtentCSums = append(scanner.result.FoundExtentCSums, SysExtentCSum{
- Generation: nodeRef.Data.Head.Generation,
+ addr, i)
+ scanner.result.FoundExtentCSums = append(scanner.result.FoundExtentCSums, FoundExtentCSum{
+ Generation: node.Head.Generation,
Sums: *itemBody,
})
case *btrfsitem.Error:
dlog.Errorf(ctx, "node@%v: item %v: error: malformed is EXTENT_CSUM: %v",
- nodeRef.Addr, i, itemBody.Err)
+ addr, i, itemBody.Err)
default:
panic(fmt.Errorf("should not happen: EXTENT_CSUM has unexpected item type: %T", itemBody))
}
diff --git a/cmd/btrfs-rec/inspect/rebuildmappings/sumrunwithgaps.go b/cmd/btrfs-rec/inspect/rebuildmappings/sumrunwithgaps.go
index f79e2be..3522b3e 100644
--- a/cmd/btrfs-rec/inspect/rebuildmappings/sumrunwithgaps.go
+++ b/cmd/btrfs-rec/inspect/rebuildmappings/sumrunwithgaps.go
@@ -17,7 +17,7 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/slices"
)
-type SumRunWithGaps[Addr btrfsvol.IntAddr[Addr]] struct {
+type sumRunWithGaps[Addr btrfsvol.IntAddr[Addr]] struct {
// Store the start address and size, in order to facilitate
// leading and trailing gaps.
Addr Addr
@@ -27,16 +27,16 @@ type SumRunWithGaps[Addr btrfsvol.IntAddr[Addr]] struct {
}
var (
- _ lowmemjson.Encodable = SumRunWithGaps[btrfsvol.LogicalAddr]{}
- _ lowmemjson.Decodable = (*SumRunWithGaps[btrfsvol.LogicalAddr])(nil)
+ _ lowmemjson.Encodable = sumRunWithGaps[btrfsvol.LogicalAddr]{}
+ _ lowmemjson.Decodable = (*sumRunWithGaps[btrfsvol.LogicalAddr])(nil)
)
// PatLen implements kmpPattern[int, ShortSum].
-func (sg SumRunWithGaps[Addr]) PatLen() int {
+func (sg sumRunWithGaps[Addr]) PatLen() int {
return int(sg.Size / btrfssum.BlockSize)
}
-func (sg SumRunWithGaps[Addr]) PctFull() float64 {
+func (sg sumRunWithGaps[Addr]) PctFull() float64 {
total := sg.PatLen()
var full int
for _, run := range sg.Runs {
@@ -45,7 +45,7 @@ func (sg SumRunWithGaps[Addr]) PctFull() float64 {
return float64(full) / float64(total)
}
-func (sg SumRunWithGaps[Addr]) RunForAddr(addr Addr) (btrfssum.SumRun[Addr], Addr, bool) {
+func (sg sumRunWithGaps[Addr]) RunForAddr(addr Addr) (btrfssum.SumRun[Addr], Addr, bool) {
for _, run := range sg.Runs {
if run.Addr > addr {
return btrfssum.SumRun[Addr]{}, run.Addr, false
@@ -58,7 +58,7 @@ func (sg SumRunWithGaps[Addr]) RunForAddr(addr Addr) (btrfssum.SumRun[Addr], Add
return btrfssum.SumRun[Addr]{}, math.MaxInt64, false
}
-func (sg SumRunWithGaps[Addr]) SumForAddr(addr Addr) (btrfssum.ShortSum, bool) {
+func (sg sumRunWithGaps[Addr]) SumForAddr(addr Addr) (btrfssum.ShortSum, bool) {
if addr < sg.Addr || addr >= sg.Addr.Add(sg.Size) {
return "", false
}
@@ -80,7 +80,7 @@ func (sg SumRunWithGaps[Addr]) SumForAddr(addr Addr) (btrfssum.ShortSum, bool) {
return run.Sums[off : off+run.ChecksumSize], true
}
-func (sg SumRunWithGaps[Addr]) Walk(ctx context.Context, fn func(Addr, btrfssum.ShortSum) error) error {
+func (sg sumRunWithGaps[Addr]) Walk(ctx context.Context, fn func(Addr, btrfssum.ShortSum) error) error {
for _, run := range sg.Runs {
if err := run.Walk(ctx, fn); err != nil {
return err
@@ -90,12 +90,12 @@ func (sg SumRunWithGaps[Addr]) Walk(ctx context.Context, fn func(Addr, btrfssum.
}
// PatGet implements kmpPattern[int, ShortSum].
-func (sg SumRunWithGaps[Addr]) PatGet(sumIdx int) (btrfssum.ShortSum, bool) {
+func (sg sumRunWithGaps[Addr]) PatGet(sumIdx int) (btrfssum.ShortSum, bool) {
addr := sg.Addr.Add(btrfsvol.AddrDelta(sumIdx) * btrfssum.BlockSize)
return sg.SumForAddr(addr)
}
-func (sg SumRunWithGaps[Addr]) EncodeJSON(w io.Writer) error {
+func (sg sumRunWithGaps[Addr]) EncodeJSON(w io.Writer) error {
if _, err := fmt.Fprintf(w, `{"Addr":%d,"Size":%d,"Runs":[`, sg.Addr, sg.Size); err != nil {
return err
}
@@ -136,8 +136,8 @@ func (sg SumRunWithGaps[Addr]) EncodeJSON(w io.Writer) error {
return nil
}
-func (sg *SumRunWithGaps[Addr]) DecodeJSON(r io.RuneScanner) error {
- *sg = SumRunWithGaps[Addr]{}
+func (sg *sumRunWithGaps[Addr]) DecodeJSON(r io.RuneScanner) error {
+ *sg = sumRunWithGaps[Addr]{}
var name string
return lowmemjson.DecodeObject(r,
func(r io.RuneScanner) error {
diff --git a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go
index ca1ce8c..0d25ac3 100644
--- a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go
+++ b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go
@@ -47,9 +47,7 @@ func (o keyAndTree) String() string {
}
type rebuilder struct {
- sb btrfstree.Superblock
- graph btrfsutil.Graph
- keyIO *btrfsutil.KeyIO
+ scan ScanDevicesResult
rebuilt *btrfsutil.RebuiltForrest
@@ -67,9 +65,9 @@ type rebuilder struct {
}
type treeAugmentQueue struct {
- zero map[Want]struct{}
- single map[Want]btrfsvol.LogicalAddr
- multi map[Want]containers.Set[btrfsvol.LogicalAddr]
+ zero map[want]struct{}
+ single map[want]btrfsvol.LogicalAddr
+ multi map[want]containers.Set[btrfsvol.LogicalAddr]
}
type Rebuilder interface {
@@ -79,22 +77,20 @@ type Rebuilder interface {
func NewRebuilder(ctx context.Context, fs *btrfs.FS, nodeList []btrfsvol.LogicalAddr) (Rebuilder, error) {
ctx = dlog.WithField(ctx, "btrfs.inspect.rebuild-trees.step", "read-fs-data")
- sb, nodeGraph, keyIO, err := ScanDevices(ctx, fs, nodeList) // ScanDevices does its own logging
+ scanData, err := ScanDevices(ctx, fs, nodeList) // ScanDevices does its own logging
if err != nil {
return nil, err
}
o := &rebuilder{
- sb: sb,
- graph: nodeGraph,
- keyIO: keyIO,
+ scan: scanData,
}
- o.rebuilt = btrfsutil.NewRebuiltForrest(sb, nodeGraph, keyIO, o)
+ o.rebuilt = btrfsutil.NewRebuiltForrest(fs, scanData.Superblock, scanData.Graph, forrestCallbacks{o})
return o, nil
}
func (o *rebuilder) ListRoots(ctx context.Context) map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr] {
- return o.rebuilt.ListRoots(ctx)
+ return o.rebuilt.RebuiltListRoots(ctx)
}
func (o *rebuilder) Rebuild(ctx context.Context) error {
@@ -163,7 +159,7 @@ func (o *rebuilder) processTreeQueue(ctx context.Context) error {
}
// This will call o.AddedItem as nescessary, which
// inserts to o.addedItemQueue.
- _ = o.rebuilt.Tree(ctx, o.curKey.TreeID)
+ _ = o.rebuilt.RebuiltTree(ctx, o.curKey.TreeID)
}
return nil
@@ -201,18 +197,18 @@ func (o *rebuilder) processAddedItemQueue(ctx context.Context) error {
progressWriter.Set(progress)
ctx := dlog.WithField(ctx, "btrfs.inspect.rebuild-trees.rebuild.settle.item", key)
- tree := o.rebuilt.Tree(ctx, key.TreeID)
- incPtr, ok := tree.Items(ctx).Load(key.Key)
+ tree := o.rebuilt.RebuiltTree(ctx, key.TreeID)
+ incPtr, ok := tree.RebuiltItems(ctx).Load(key.Key)
if !ok {
panic(fmt.Errorf("should not happen: failed to load already-added item: %v", key))
}
- excPtr, ok := tree.PotentialItems(ctx).Load(key.Key)
- if ok && tree.ShouldReplace(incPtr.Node, excPtr.Node) {
- wantKey := WantWithTree{
+ excPtr, ok := tree.RebuiltPotentialItems(ctx).Load(key.Key)
+ if ok && tree.RebuiltShouldReplace(incPtr.Node, excPtr.Node) {
+ wantKey := wantWithTree{
TreeID: key.TreeID,
Key: wantFromKey(key.Key),
}
- o.wantAugment(ctx, wantKey, tree.LeafToRoots(ctx, excPtr.Node))
+ o.wantAugment(ctx, wantKey, tree.RebuiltLeafToRoots(ctx, excPtr.Node))
progress.NumAugments = o.numAugments
progress.NumAugmentTrees = len(o.augmentQueue)
progressWriter.Set(progress)
@@ -270,7 +266,7 @@ func (o *rebuilder) processSettledItemQueue(ctx context.Context) error {
ctx := dlog.WithField(ctx, "btrfs.inspect.rebuild-trees.rebuild.process.item", key)
item := keyAndBody{
keyAndTree: key,
- Body: o.rebuilt.Tree(ctx, key.TreeID).ReadItem(ctx, key.Key),
+ Body: o.rebuilt.RebuiltTree(ctx, key.TreeID).ReadItem(ctx, key.Key),
}
select {
case itemChan <- item:
@@ -286,7 +282,7 @@ func (o *rebuilder) processSettledItemQueue(ctx context.Context) error {
ctx := dlog.WithField(ctx, "btrfs.inspect.rebuild-trees.rebuild.process.item", item.keyAndTree)
o.curKey.TreeID = item.TreeID
o.curKey.Key.Val = item.Key
- btrfscheck.HandleItem(ctx, o, item.TreeID, btrfstree.Item{
+ btrfscheck.HandleItem(ctx, graphCallbacks{o}, item.TreeID, btrfstree.Item{
Key: item.Key,
Body: item.Body,
})
@@ -337,7 +333,7 @@ func (o *rebuilder) processAugmentQueue(ctx context.Context) error {
progressWriter.Set(progress)
// This will call o.AddedItem as nescessary, which
// inserts to o.addedItemQueue.
- o.rebuilt.Tree(ctx, treeID).AddRoot(ctx, nodeAddr)
+ o.rebuilt.RebuiltTree(ctx, treeID).RebuiltAddRoot(ctx, nodeAddr)
progress.N++
}
}
@@ -385,8 +381,8 @@ func (o *rebuilder) resolveTreeAugments(ctx context.Context, treeID btrfsprim.Ob
} else {
choices[choice] = ChoiceInfo{
Count: 1,
- Distance: discardOK(o.rebuilt.Tree(ctx, treeID).COWDistance(o.graph.Nodes[choice].Owner)),
- Generation: o.graph.Nodes[choice].Generation,
+ Distance: discardOK(o.rebuilt.RebuiltTree(ctx, treeID).RebuiltCOWDistance(o.scan.Graph.Nodes[choice].Owner)),
+ Generation: o.scan.Graph.Nodes[choice].Generation,
}
}
}
@@ -399,8 +395,8 @@ func (o *rebuilder) resolveTreeAugments(ctx context.Context, treeID btrfsprim.Ob
} else {
choices[choice] = ChoiceInfo{
Count: 1,
- Distance: discardOK(o.rebuilt.Tree(ctx, treeID).COWDistance(o.graph.Nodes[choice].Owner)),
- Generation: o.graph.Nodes[choice].Generation,
+ Distance: discardOK(o.rebuilt.RebuiltTree(ctx, treeID).RebuiltCOWDistance(o.scan.Graph.Nodes[choice].Owner)),
+ Generation: o.scan.Graph.Nodes[choice].Generation,
}
}
}
@@ -520,7 +516,7 @@ func (o *rebuilder) resolveTreeAugments(ctx context.Context, treeID btrfsprim.Ob
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-func (queue *treeAugmentQueue) has(wantKey Want) bool {
+func (queue *treeAugmentQueue) has(wantKey want) bool {
if queue != nil {
if queue.zero != nil {
if _, ok := queue.zero[wantKey]; ok {
@@ -541,7 +537,7 @@ func (queue *treeAugmentQueue) has(wantKey Want) bool {
return false
}
-func (queue *treeAugmentQueue) store(wantKey Want, choices containers.Set[btrfsvol.LogicalAddr]) {
+func (queue *treeAugmentQueue) store(wantKey want, choices containers.Set[btrfsvol.LogicalAddr]) {
if len(choices) == 0 && wantKey.OffsetType > offsetExact {
// This wantKey is unlikely to come up again, so it's
// not worth the RAM of storing a negative result.
@@ -550,27 +546,27 @@ func (queue *treeAugmentQueue) store(wantKey Want, choices containers.Set[btrfsv
switch len(choices) {
case 0:
if queue.zero == nil {
- queue.zero = make(map[Want]struct{})
+ queue.zero = make(map[want]struct{})
}
queue.zero[wantKey] = struct{}{}
case 1:
if queue.single == nil {
- queue.single = make(map[Want]btrfsvol.LogicalAddr)
+ queue.single = make(map[want]btrfsvol.LogicalAddr)
}
queue.single[wantKey] = choices.TakeOne()
default:
if queue.multi == nil {
- queue.multi = make(map[Want]containers.Set[btrfsvol.LogicalAddr])
+ queue.multi = make(map[want]containers.Set[btrfsvol.LogicalAddr])
}
queue.multi[wantKey] = choices
}
}
-func (o *rebuilder) hasAugment(wantKey WantWithTree) bool {
+func (o *rebuilder) hasAugment(wantKey wantWithTree) bool {
return o.augmentQueue[wantKey.TreeID].has(wantKey.Key)
}
-func (o *rebuilder) wantAugment(ctx context.Context, wantKey WantWithTree, choices containers.Set[btrfsvol.LogicalAddr]) {
+func (o *rebuilder) wantAugment(ctx context.Context, wantKey wantWithTree, choices containers.Set[btrfsvol.LogicalAddr]) {
if o.augmentQueue[wantKey.TreeID] == nil {
o.augmentQueue[wantKey.TreeID] = new(treeAugmentQueue)
}
diff --git a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_treecb.go b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_treecb.go
index a422a47..92b5ee5 100644
--- a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_treecb.go
+++ b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_treecb.go
@@ -13,8 +13,12 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
)
+type forrestCallbacks struct {
+ *rebuilder
+}
+
// AddedItem implements btrfsutil.RebuiltForrestCallbacks.
-func (o *rebuilder) AddedItem(_ context.Context, tree btrfsprim.ObjID, key btrfsprim.Key) {
+func (o forrestCallbacks) AddedItem(_ context.Context, tree btrfsprim.ObjID, key btrfsprim.Key) {
o.addedItemQueue.Insert(keyAndTree{
TreeID: tree,
Key: key,
@@ -22,17 +26,17 @@ func (o *rebuilder) AddedItem(_ context.Context, tree btrfsprim.ObjID, key btrfs
}
// AddedRoot implements btrfsutil.RebuiltForrestCallbacks.
-func (o *rebuilder) AddedRoot(_ context.Context, tree btrfsprim.ObjID, _ btrfsvol.LogicalAddr) {
+func (o forrestCallbacks) AddedRoot(_ context.Context, tree btrfsprim.ObjID, _ btrfsvol.LogicalAddr) {
if retries := o.retryItemQueue[tree]; retries != nil {
o.addedItemQueue.InsertFrom(retries)
}
}
// LookupRoot implements btrfsutil.RebuiltForrestCallbacks.
-func (o *rebuilder) LookupRoot(ctx context.Context, tree btrfsprim.ObjID) (offset btrfsprim.Generation, item btrfsitem.Root, ok bool) {
- wantKey := WantWithTree{
+func (o forrestCallbacks) LookupRoot(ctx context.Context, tree btrfsprim.ObjID) (offset btrfsprim.Generation, item btrfsitem.Root, ok bool) {
+ wantKey := wantWithTree{
TreeID: btrfsprim.ROOT_TREE_OBJECTID,
- Key: Want{
+ Key: want{
ObjectID: tree,
ItemType: btrfsitem.ROOT_ITEM_KEY,
OffsetType: offsetAny,
@@ -44,13 +48,13 @@ func (o *rebuilder) LookupRoot(ctx context.Context, tree btrfsprim.ObjID) (offse
o.enqueueRetry(btrfsprim.ROOT_TREE_OBJECTID)
return 0, btrfsitem.Root{}, false
}
- itemBody := o.rebuilt.Tree(ctx, wantKey.TreeID).ReadItem(ctx, foundKey)
+ itemBody := o.rebuilt.RebuiltTree(ctx, wantKey.TreeID).ReadItem(ctx, foundKey)
defer itemBody.Free()
switch itemBody := itemBody.(type) {
case *btrfsitem.Root:
return btrfsprim.Generation(foundKey.Offset), *itemBody, true
case *btrfsitem.Error:
- o.FSErr(ctx, fmt.Errorf("error decoding item: %v: %w", foundKey, itemBody.Err))
+ graphCallbacks(o).FSErr(ctx, fmt.Errorf("error decoding item: %v: %w", foundKey, itemBody.Err))
return 0, btrfsitem.Root{}, false
default:
// This is a panic because the item decoder should not emit ROOT_ITEM items as anything but
@@ -60,8 +64,8 @@ func (o *rebuilder) LookupRoot(ctx context.Context, tree btrfsprim.ObjID) (offse
}
// LookupUUID implements btrfsutil.RebuiltForrestCallbacks.
-func (o *rebuilder) LookupUUID(ctx context.Context, uuid btrfsprim.UUID) (id btrfsprim.ObjID, ok bool) {
- wantKey := WantWithTree{
+func (o forrestCallbacks) LookupUUID(ctx context.Context, uuid btrfsprim.UUID) (id btrfsprim.ObjID, ok bool) {
+ wantKey := wantWithTree{
TreeID: btrfsprim.UUID_TREE_OBJECTID,
Key: wantFromKey(btrfsitem.UUIDToKey(uuid)),
}
@@ -70,13 +74,13 @@ func (o *rebuilder) LookupUUID(ctx context.Context, uuid btrfsprim.UUID) (id btr
o.enqueueRetry(btrfsprim.UUID_TREE_OBJECTID)
return 0, false
}
- itemBody := o.rebuilt.Tree(ctx, wantKey.TreeID).ReadItem(ctx, wantKey.Key.Key())
+ itemBody := o.rebuilt.RebuiltTree(ctx, wantKey.TreeID).ReadItem(ctx, wantKey.Key.Key())
defer itemBody.Free()
switch itemBody := itemBody.(type) {
case *btrfsitem.UUIDMap:
return itemBody.ObjID, true
case *btrfsitem.Error:
- o.FSErr(ctx, fmt.Errorf("error decoding item: %v: %w", wantKey, itemBody.Err))
+ graphCallbacks(o).FSErr(ctx, fmt.Errorf("error decoding item: %v: %w", wantKey, itemBody.Err))
return 0, false
default:
// This is a panic because the item decoder should not emit UUID_SUBVOL items as anything but
diff --git a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wantcb.go b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wantcb.go
index 704f4ee..eff2a83 100644
--- a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wantcb.go
+++ b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wantcb.go
@@ -19,16 +19,20 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/containers"
)
+type graphCallbacks struct {
+ *rebuilder
+}
+
// FSErr implements btrfscheck.GraphCallbacks.
-func (*rebuilder) FSErr(ctx context.Context, e error) {
+func (graphCallbacks) FSErr(ctx context.Context, e error) {
dlog.Errorf(ctx, "filesystem error: %v", e)
}
// Want implements btrfscheck.GraphCallbacks.
-func (o *rebuilder) Want(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) {
- wantKey := WantWithTree{
+func (o graphCallbacks) Want(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) {
+ wantKey := wantWithTree{
TreeID: treeID,
- Key: Want{
+ Key: want{
ObjectID: objID,
ItemType: typ,
OffsetType: offsetAny,
@@ -38,8 +42,8 @@ func (o *rebuilder) Want(ctx context.Context, reason string, treeID btrfsprim.Ob
o._want(ctx, wantKey)
}
-func (o *rebuilder) _want(ctx context.Context, wantKey WantWithTree) (key btrfsprim.Key, ok bool) {
- if o.rebuilt.Tree(ctx, wantKey.TreeID) == nil {
+func (o *rebuilder) _want(ctx context.Context, wantKey wantWithTree) (key btrfsprim.Key, ok bool) {
+ if o.rebuilt.RebuiltTree(ctx, wantKey.TreeID) == nil {
o.enqueueRetry(wantKey.TreeID)
return btrfsprim.Key{}, false
}
@@ -47,7 +51,7 @@ func (o *rebuilder) _want(ctx context.Context, wantKey WantWithTree) (key btrfsp
// check if we already have it
tgt := wantKey.Key.Key()
- if key, _, ok := o.rebuilt.Tree(ctx, wantKey.TreeID).Items(ctx).Search(func(key btrfsprim.Key, _ btrfsutil.ItemPtr) int {
+ if key, _, ok := o.rebuilt.RebuiltTree(ctx, wantKey.TreeID).RebuiltItems(ctx).Search(func(key btrfsprim.Key, _ btrfsutil.ItemPtr) int {
key.Offset = 0
return tgt.Compare(key)
}); ok {
@@ -60,13 +64,13 @@ func (o *rebuilder) _want(ctx context.Context, wantKey WantWithTree) (key btrfsp
return btrfsprim.Key{}, false
}
wants := make(containers.Set[btrfsvol.LogicalAddr])
- o.rebuilt.Tree(ctx, wantKey.TreeID).PotentialItems(ctx).Subrange(
+ o.rebuilt.RebuiltTree(ctx, wantKey.TreeID).RebuiltPotentialItems(ctx).Subrange(
func(k btrfsprim.Key, _ btrfsutil.ItemPtr) int {
k.Offset = 0
return tgt.Compare(k)
},
func(_ btrfsprim.Key, v btrfsutil.ItemPtr) bool {
- wants.InsertFrom(o.rebuilt.Tree(ctx, wantKey.TreeID).LeafToRoots(ctx, v.Node))
+ wants.InsertFrom(o.rebuilt.RebuiltTree(ctx, wantKey.TreeID).RebuiltLeafToRoots(ctx, v.Node))
return true
})
o.wantAugment(ctx, wantKey, wants)
@@ -74,10 +78,10 @@ func (o *rebuilder) _want(ctx context.Context, wantKey WantWithTree) (key btrfsp
}
// WantOff implements btrfscheck.GraphCallbacks.
-func (o *rebuilder) WantOff(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) {
- wantKey := WantWithTree{
+func (o graphCallbacks) WantOff(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) {
+ wantKey := wantWithTree{
TreeID: treeID,
- Key: Want{
+ Key: want{
ObjectID: objID,
ItemType: typ,
OffsetType: offsetExact,
@@ -88,8 +92,8 @@ func (o *rebuilder) WantOff(ctx context.Context, reason string, treeID btrfsprim
o._wantOff(ctx, wantKey)
}
-func (o *rebuilder) _wantOff(ctx context.Context, wantKey WantWithTree) (ok bool) {
- if o.rebuilt.Tree(ctx, wantKey.TreeID) == nil {
+func (o *rebuilder) _wantOff(ctx context.Context, wantKey wantWithTree) (ok bool) {
+ if o.rebuilt.RebuiltTree(ctx, wantKey.TreeID) == nil {
o.enqueueRetry(wantKey.TreeID)
return false
}
@@ -97,7 +101,7 @@ func (o *rebuilder) _wantOff(ctx context.Context, wantKey WantWithTree) (ok bool
// check if we already have it
tgt := wantKey.Key.Key()
- if _, ok := o.rebuilt.Tree(ctx, wantKey.TreeID).Items(ctx).Load(tgt); ok {
+ if _, ok := o.rebuilt.RebuiltTree(ctx, wantKey.TreeID).RebuiltItems(ctx).Load(tgt); ok {
return true
}
@@ -107,10 +111,10 @@ func (o *rebuilder) _wantOff(ctx context.Context, wantKey WantWithTree) (ok bool
return false
}
wants := make(containers.Set[btrfsvol.LogicalAddr])
- o.rebuilt.Tree(ctx, wantKey.TreeID).PotentialItems(ctx).Subrange(
+ o.rebuilt.RebuiltTree(ctx, wantKey.TreeID).RebuiltPotentialItems(ctx).Subrange(
func(k btrfsprim.Key, _ btrfsutil.ItemPtr) int { return tgt.Compare(k) },
func(_ btrfsprim.Key, v btrfsutil.ItemPtr) bool {
- wants.InsertFrom(o.rebuilt.Tree(ctx, wantKey.TreeID).LeafToRoots(ctx, v.Node))
+ wants.InsertFrom(o.rebuilt.RebuiltTree(ctx, wantKey.TreeID).RebuiltLeafToRoots(ctx, v.Node))
return true
})
o.wantAugment(ctx, wantKey, wants)
@@ -118,10 +122,10 @@ func (o *rebuilder) _wantOff(ctx context.Context, wantKey WantWithTree) (ok bool
}
// WantDirIndex implements btrfscheck.GraphCallbacks.
-func (o *rebuilder) WantDirIndex(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, name []byte) {
- wantKey := WantWithTree{
+func (o graphCallbacks) WantDirIndex(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, name []byte) {
+ wantKey := wantWithTree{
TreeID: treeID,
- Key: Want{
+ Key: want{
ObjectID: objID,
ItemType: btrfsitem.DIR_INDEX_KEY,
OffsetType: offsetName,
@@ -130,7 +134,7 @@ func (o *rebuilder) WantDirIndex(ctx context.Context, reason string, treeID btrf
}
ctx = withWant(ctx, logFieldItemWant, reason, wantKey)
- if o.rebuilt.Tree(ctx, treeID) == nil {
+ if o.rebuilt.RebuiltTree(ctx, treeID) == nil {
o.enqueueRetry(treeID)
return
}
@@ -139,13 +143,13 @@ func (o *rebuilder) WantDirIndex(ctx context.Context, reason string, treeID btrf
tgt := wantKey.Key.Key()
found := false
- o.rebuilt.Tree(ctx, treeID).Items(ctx).Subrange(
+ o.rebuilt.RebuiltTree(ctx, treeID).RebuiltItems(ctx).Subrange(
func(key btrfsprim.Key, _ btrfsutil.ItemPtr) int {
key.Offset = 0
return tgt.Compare(key)
},
func(_ btrfsprim.Key, ptr btrfsutil.ItemPtr) bool {
- if itemName, ok := o.keyIO.Names[ptr]; ok && bytes.Equal(itemName, name) {
+ if itemName, ok := o.scan.Names[ptr]; ok && bytes.Equal(itemName, name) {
found = true
}
return !found
@@ -160,21 +164,21 @@ func (o *rebuilder) WantDirIndex(ctx context.Context, reason string, treeID btrf
return
}
wants := make(containers.Set[btrfsvol.LogicalAddr])
- o.rebuilt.Tree(ctx, treeID).PotentialItems(ctx).Subrange(
+ o.rebuilt.RebuiltTree(ctx, treeID).RebuiltPotentialItems(ctx).Subrange(
func(key btrfsprim.Key, _ btrfsutil.ItemPtr) int {
key.Offset = 0
return tgt.Compare(key)
},
func(_ btrfsprim.Key, ptr btrfsutil.ItemPtr) bool {
- if itemName, ok := o.keyIO.Names[ptr]; ok && bytes.Equal(itemName, name) {
- wants.InsertFrom(o.rebuilt.Tree(ctx, treeID).LeafToRoots(ctx, ptr.Node))
+ if itemName, ok := o.scan.Names[ptr]; ok && bytes.Equal(itemName, name) {
+ wants.InsertFrom(o.rebuilt.RebuiltTree(ctx, treeID).RebuiltLeafToRoots(ctx, ptr.Node))
}
return true
})
o.wantAugment(ctx, wantKey, wants)
}
-func (o *rebuilder) _walkRange(
+func (o graphCallbacks) _walkRange(
ctx context.Context,
items *containers.SortedMap[btrfsprim.Key, btrfsutil.ItemPtr],
treeID, objID btrfsprim.ObjID, typ btrfsprim.ItemType,
@@ -203,7 +207,7 @@ func (o *rebuilder) _walkRange(
}
},
func(runKey btrfsprim.Key, runPtr btrfsutil.ItemPtr) bool {
- runSizeAndErr, ok := o.keyIO.Sizes[runPtr]
+ runSizeAndErr, ok := o.scan.Sizes[runPtr]
if !ok {
panic(fmt.Errorf("should not happen: %v (%v) did not have a size recorded",
runPtr, keyAndTree{TreeID: treeID, Key: runKey}))
@@ -239,14 +243,14 @@ func (a gap) Compare(b gap) int {
return containers.NativeCompare(a.Beg, b.Beg)
}
-func (o *rebuilder) _wantRange(
+func (o graphCallbacks) _wantRange(
ctx context.Context, reason string,
treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType,
beg, end uint64,
) {
- wantKey := WantWithTree{
+ wantKey := wantWithTree{
TreeID: treeID,
- Key: Want{
+ Key: want{
ObjectID: objID,
ItemType: typ,
OffsetType: offsetAny,
@@ -255,7 +259,7 @@ func (o *rebuilder) _wantRange(
ctx = withWant(ctx, logFieldItemWant, reason, wantKey)
wantKey.Key.OffsetType = offsetRange
- if o.rebuilt.Tree(ctx, treeID) == nil {
+ if o.rebuilt.RebuiltTree(ctx, treeID) == nil {
o.enqueueRetry(treeID)
return
}
@@ -271,7 +275,7 @@ func (o *rebuilder) _wantRange(
})
o._walkRange(
ctx,
- o.rebuilt.Tree(ctx, treeID).Items(ctx),
+ o.rebuilt.RebuiltTree(ctx, treeID).RebuiltItems(ctx),
treeID, objID, typ, beg, end,
func(runKey btrfsprim.Key, _ btrfsutil.ItemPtr, runBeg, runEnd uint64) {
var overlappingGaps []*containers.RBNode[gap]
@@ -316,7 +320,7 @@ func (o *rebuilder) _wantRange(
if gaps.Len() == 0 {
return
}
- potentialItems := o.rebuilt.Tree(ctx, treeID).PotentialItems(ctx)
+ potentialItems := o.rebuilt.RebuiltTree(ctx, treeID).RebuiltPotentialItems(ctx)
gaps.Range(func(rbNode *containers.RBNode[gap]) bool {
gap := rbNode.Value
last := gap.Beg
@@ -336,7 +340,7 @@ func (o *rebuilder) _wantRange(
wantKey.Key.OffsetLow = gap.Beg
wantKey.Key.OffsetHigh = gap.End
wantCtx := withWant(ctx, logFieldItemWant, reason, wantKey)
- o.wantAugment(wantCtx, wantKey, o.rebuilt.Tree(wantCtx, treeID).LeafToRoots(wantCtx, v.Node))
+ o.wantAugment(wantCtx, wantKey, o.rebuilt.RebuiltTree(wantCtx, treeID).RebuiltLeafToRoots(wantCtx, v.Node))
last = runEnd
})
if last < gap.End {
@@ -353,10 +357,10 @@ func (o *rebuilder) _wantRange(
// WantCSum implements btrfscheck.GraphCallbacks.
//
// interval is [beg, end)
-func (o *rebuilder) WantCSum(ctx context.Context, reason string, inodeTree, inode btrfsprim.ObjID, beg, end btrfsvol.LogicalAddr) {
- inodeWant := WantWithTree{
+func (o graphCallbacks) WantCSum(ctx context.Context, reason string, inodeTree, inode btrfsprim.ObjID, beg, end btrfsvol.LogicalAddr) {
+ inodeWant := wantWithTree{
TreeID: inodeTree,
- Key: Want{
+ Key: want{
ObjectID: inode,
ItemType: btrfsitem.INODE_ITEM_KEY,
OffsetType: offsetExact,
@@ -368,11 +372,11 @@ func (o *rebuilder) WantCSum(ctx context.Context, reason string, inodeTree, inod
o.enqueueRetry(inodeTree)
return
}
- inodePtr, ok := o.rebuilt.Tree(inodeCtx, inodeTree).Items(inodeCtx).Load(inodeWant.Key.Key())
+ inodePtr, ok := o.rebuilt.RebuiltTree(inodeCtx, inodeTree).RebuiltItems(inodeCtx).Load(inodeWant.Key.Key())
if !ok {
panic(fmt.Errorf("should not happen: could not load key: %v", inodeWant))
}
- inodeFlags, ok := o.keyIO.Flags[inodePtr]
+ inodeFlags, ok := o.scan.Flags[inodePtr]
if !ok {
panic(fmt.Errorf("should not happen: INODE_ITEM did not have flags recorded"))
}
@@ -392,7 +396,7 @@ func (o *rebuilder) WantCSum(ctx context.Context, reason string, inodeTree, inod
}
// WantFileExt implements btrfscheck.GraphCallbacks.
-func (o *rebuilder) WantFileExt(ctx context.Context, reason string, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) {
+func (o graphCallbacks) WantFileExt(ctx context.Context, reason string, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) {
o._wantRange(
ctx, reason,
treeID, ino, btrfsprim.EXTENT_DATA_KEY,
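
Note: every rebuild_wantcb.go hunk above applies the same mechanical rename: the GraphCallbacks methods move from the *rebuilder receiver to the graphCallbacks type, the rebuilt-forrest accessors gain a "Rebuilt" prefix (Tree→RebuiltTree, Items→RebuiltItems, PotentialItems→RebuiltPotentialItems, LeafToRoots→RebuiltLeafToRoots), and per-item metadata is read from o.scan instead of o.keyIO. A minimal sketch of the resulting call shape (a hypothetical helper, not part of the patch; identifiers and signatures as they appear in the hunks above):

// hypothetical helper illustrating the renamed accessors; not part of this commit
func (o graphCallbacks) leafRootsForKey(ctx context.Context, treeID btrfsprim.ObjID, tgt btrfsprim.Key) containers.Set[btrfsvol.LogicalAddr] {
	wants := make(containers.Set[btrfsvol.LogicalAddr])
	o.rebuilt.RebuiltTree(ctx, treeID).RebuiltPotentialItems(ctx).Subrange(
		func(k btrfsprim.Key, _ btrfsutil.ItemPtr) int { return tgt.Compare(k) },
		func(_ btrfsprim.Key, v btrfsutil.ItemPtr) bool {
			wants.InsertFrom(o.rebuilt.RebuiltTree(ctx, treeID).RebuiltLeafToRoots(ctx, v.Node))
			return true
		})
	return wants
}
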
diff --git a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wanttyp.go b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wanttyp.go
index 8fe8a49..6c9d72b 100644
--- a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wanttyp.go
+++ b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wanttyp.go
@@ -23,8 +23,8 @@ const (
offsetName
)
-type Want struct {
- // TODO(lukeshu): Delete the 'Want' type in favor of
+type want struct {
+ // TODO(lukeshu): Delete the 'want' type in favor of
// btrfstree.Search.
ObjectID btrfsprim.ObjID
ItemType btrfsprim.ItemType
@@ -34,7 +34,7 @@ type Want struct {
OffsetName string
}
-func (a Want) Compare(b Want) int {
+func (a want) Compare(b want) int {
if d := containers.NativeCompare(a.ObjectID, b.ObjectID); d != 0 {
return d
}
@@ -56,7 +56,7 @@ func (a Want) Compare(b Want) int {
return 0
}
-func (o Want) Key() btrfsprim.Key {
+func (o want) Key() btrfsprim.Key {
return btrfsprim.Key{
ObjectID: o.ObjectID,
ItemType: o.ItemType,
@@ -64,8 +64,8 @@ func (o Want) Key() btrfsprim.Key {
}
}
-func wantFromKey(k btrfsprim.Key) Want {
- return Want{
+func wantFromKey(k btrfsprim.Key) want {
+ return want{
ObjectID: k.ObjectID,
ItemType: k.ItemType,
OffsetType: offsetExact,
@@ -73,7 +73,7 @@ func wantFromKey(k btrfsprim.Key) Want {
}
}
-func (o Want) String() string {
+func (o want) String() string {
switch o.OffsetType {
case offsetAny:
return fmt.Sprintf("{%v %v ?}", o.ObjectID, o.ItemType)
@@ -88,12 +88,12 @@ func (o Want) String() string {
}
}
-type WantWithTree struct {
+type wantWithTree struct {
TreeID btrfsprim.ObjID
- Key Want
+ Key want
}
-func (o WantWithTree) String() string {
+func (o wantWithTree) String() string {
return fmt.Sprintf("tree=%v key=%v", o.TreeID, o.Key)
}
@@ -102,7 +102,7 @@ const (
logFieldTreeWant = "btrfs.util.rebuilt-forrest.add-tree.want"
)
-func withWant(ctx context.Context, logField, reason string, wantKey WantWithTree) context.Context {
+func withWant(ctx context.Context, logField, reason string, wantKey wantWithTree) context.Context {
ctx = dlog.WithField(ctx, logField+".reason", reason)
ctx = dlog.WithField(ctx, logField+".key", wantKey)
return ctx
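
The Want/WantWithTree key types become unexported but otherwise behave as before; inside the package, a lookup key is still built the same way. A sketch (hypothetical usage; objID, treeID, and ctx are assumed to be in scope, mirroring the WantDirIndex hunk above):

// hypothetical in-package usage of the now-unexported key types
k := btrfsprim.Key{ObjectID: objID, ItemType: btrfsitem.INODE_ITEM_KEY}
wantKey := wantWithTree{TreeID: treeID, Key: wantFromKey(k)}
ctx = withWant(ctx, logFieldItemWant, "example reason", wantKey)
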
diff --git a/cmd/btrfs-rec/inspect/rebuildtrees/scan.go b/cmd/btrfs-rec/inspect/rebuildtrees/scan.go
index ba56c5b..ada9f6f 100644
--- a/cmd/btrfs-rec/inspect/rebuildtrees/scan.go
+++ b/cmd/btrfs-rec/inspect/rebuildtrees/scan.go
@@ -6,11 +6,14 @@ package rebuildtrees
import (
"context"
+ "fmt"
"time"
"github.com/datawire/dlib/dlog"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfsutil"
@@ -18,11 +21,31 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/textui"
)
-func ScanDevices(ctx context.Context, fs *btrfs.FS, nodeList []btrfsvol.LogicalAddr) (btrfstree.Superblock, btrfsutil.Graph, *btrfsutil.KeyIO, error) {
+type SizeAndErr struct {
+ Size uint64
+ Err error
+}
+
+type FlagsAndErr struct {
+ NoDataSum bool
+ Err error
+}
+
+type ScanDevicesResult struct {
+ Superblock btrfstree.Superblock
+
+ Graph btrfsutil.Graph
+
+ Flags map[btrfsutil.ItemPtr]FlagsAndErr // INODE_ITEM
+ Names map[btrfsutil.ItemPtr][]byte // DIR_INDEX
+ Sizes map[btrfsutil.ItemPtr]SizeAndErr // EXTENT_CSUM and EXTENT_DATA
+}
+
+func ScanDevices(ctx context.Context, fs *btrfs.FS, nodeList []btrfsvol.LogicalAddr) (ScanDevicesResult, error) {
dlog.Info(ctx, "Reading superblock...")
sb, err := fs.Superblock()
if err != nil {
- return btrfstree.Superblock{}, btrfsutil.Graph{}, nil, err
+ return ScanDevicesResult{}, err
}
dlog.Infof(ctx, "Reading node data from FS...")
@@ -33,26 +56,32 @@ func ScanDevices(ctx context.Context, fs *btrfs.FS, nodeList []btrfsvol.LogicalA
dlog.WithField(ctx, "btrfs.inspect.rebuild-trees.read.substep", "read-nodes"),
dlog.LogLevelInfo, textui.Tunable(1*time.Second))
- nodeGraph := btrfsutil.NewGraph(*sb)
- keyIO := btrfsutil.NewKeyIO(fs, *sb)
+ ret := ScanDevicesResult{
+ Superblock: *sb,
+
+ Graph: btrfsutil.NewGraph(*sb),
+
+ Flags: make(map[btrfsutil.ItemPtr]FlagsAndErr),
+ Names: make(map[btrfsutil.ItemPtr][]byte),
+ Sizes: make(map[btrfsutil.ItemPtr]SizeAndErr),
+ }
progressWriter.Set(stats)
for _, laddr := range nodeList {
if err := ctx.Err(); err != nil {
- return btrfstree.Superblock{}, btrfsutil.Graph{}, nil, err
+ return ScanDevicesResult{}, err
}
- nodeRef, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, *sb, laddr, btrfstree.NodeExpectations{
- LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: laddr},
+ node, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, *sb, laddr, btrfstree.NodeExpectations{
+ LAddr: containers.OptionalValue(laddr),
})
if err != nil {
- btrfstree.FreeNodeRef(nodeRef)
- return btrfstree.Superblock{}, btrfsutil.Graph{}, nil, err
+ node.Free()
+ return ScanDevicesResult{}, err
}
- nodeGraph.InsertNode(nodeRef)
- keyIO.InsertNode(nodeRef)
+ ret.insertNode(node)
- btrfstree.FreeNodeRef(nodeRef)
+ node.Free()
stats.N++
progressWriter.Set(stats)
@@ -64,10 +93,54 @@ func ScanDevices(ctx context.Context, fs *btrfs.FS, nodeList []btrfsvol.LogicalA
dlog.Info(ctx, "... done reading node data")
ctx = dlog.WithField(ctx, "btrfs.inspect.rebuild-trees.read.substep", "check")
- if err := nodeGraph.FinalCheck(ctx, fs, *sb); err != nil {
- return btrfstree.Superblock{}, btrfsutil.Graph{}, nil, err
+ if err := ret.Graph.FinalCheck(ctx, fs, *sb); err != nil {
+ return ScanDevicesResult{}, err
}
- keyIO.SetGraph(*nodeGraph)
- return *sb, *nodeGraph, keyIO, nil
+ return ret, nil
+}
+
+func (o *ScanDevicesResult) insertNode(node *btrfstree.Node) {
+ o.Graph.InsertNode(node)
+ for i, item := range node.BodyLeaf {
+ ptr := btrfsutil.ItemPtr{
+ Node: node.Head.Addr,
+ Slot: i,
+ }
+ switch itemBody := item.Body.(type) {
+ case *btrfsitem.Inode:
+ o.Flags[ptr] = FlagsAndErr{
+ NoDataSum: itemBody.Flags.Has(btrfsitem.INODE_NODATASUM),
+ Err: nil,
+ }
+ case *btrfsitem.DirEntry:
+ if item.Key.ItemType == btrfsprim.DIR_INDEX_KEY {
+ o.Names[ptr] = append([]byte(nil), itemBody.Name...)
+ }
+ case *btrfsitem.ExtentCSum:
+ o.Sizes[ptr] = SizeAndErr{
+ Size: uint64(itemBody.Size()),
+ Err: nil,
+ }
+ case *btrfsitem.FileExtent:
+ size, err := itemBody.Size()
+ o.Sizes[ptr] = SizeAndErr{
+ Size: uint64(size),
+ Err: err,
+ }
+ case *btrfsitem.Error:
+ switch item.Key.ItemType {
+ case btrfsprim.INODE_ITEM_KEY:
+ o.Flags[ptr] = FlagsAndErr{
+ Err: fmt.Errorf("error decoding item: ptr=%v (tree=%v key=%v): %w",
+ ptr, node.Head.Owner, item.Key, itemBody.Err),
+ }
+ case btrfsprim.EXTENT_CSUM_KEY, btrfsprim.EXTENT_DATA_KEY:
+ o.Sizes[ptr] = SizeAndErr{
+ Err: fmt.Errorf("error decoding item: ptr=%v (tree=%v key=%v): %w",
+ ptr, node.Head.Owner, item.Key, itemBody.Err),
+ }
+ }
+ }
+ }
}
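
With this change ScanDevices returns a single ScanDevicesResult instead of four values, so callers no longer juggle a separate Graph and KeyIO. A caller-side sketch inside the rebuildtrees package (a hypothetical helper, not part of the patch; the real wiring lives in rebuild.go, which is not shown in this section):

// hypothetical helper showing how a caller consumes ScanDevicesResult
func summarizeScan(ctx context.Context, fs *btrfs.FS, nodeList []btrfsvol.LogicalAddr) error {
	scanData, err := ScanDevices(ctx, fs, nodeList)
	if err != nil {
		return err
	}
	dlog.Infof(ctx, "scan: %d dir-entry names, %d extent sizes, %d inode flag records",
		len(scanData.Names), len(scanData.Sizes), len(scanData.Flags))
	return nil
}
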
diff --git a/cmd/btrfs-rec/inspect_lsfiles.go b/cmd/btrfs-rec/inspect_lsfiles.go
index a2b46ab..04b5ec5 100644
--- a/cmd/btrfs-rec/inspect_lsfiles.go
+++ b/cmd/btrfs-rec/inspect_lsfiles.go
@@ -6,23 +6,14 @@ package main
import (
"bufio"
- "errors"
- "fmt"
- "io"
"os"
- "path"
- "strings"
- "github.com/datawire/dlib/derror"
"github.com/datawire/ocibuild/pkg/cliutil"
"github.com/spf13/cobra"
+ "git.lukeshu.com/btrfs-progs-ng/cmd/btrfs-rec/inspect/lsfiles"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
- "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfsutil"
- "git.lukeshu.com/btrfs-progs-ng/lib/maps"
- "git.lukeshu.com/btrfs-progs-ng/lib/textui"
)
func init() {
@@ -37,221 +28,10 @@ func init() {
err = _err
}
}()
- defer func() {
- if _err := derror.PanicToError(recover()); _err != nil {
- textui.Fprintf(out, "\n\n%+v\n", _err)
- err = _err
- }
- }()
- ctx := cmd.Context()
- printSubvol(out, "", true, "/", &btrfs.Subvolume{
- FS: btrfsutil.NewOldRebuiltForrest(ctx, fs),
- TreeID: btrfsprim.FS_TREE_OBJECTID,
- })
-
- return nil
+ return lsfiles.LsFiles(
+ out,
+ btrfsutil.NewOldRebuiltForrest(cmd.Context(), fs))
}),
})
}
-
-const (
- tS = "    "
- tl = "│   "
- tT = "├── "
- tL = "└── "
-)
-
-func printText(out io.Writer, prefix string, isLast bool, name, text string) {
- first, rest := tT, tl
- if isLast {
- first, rest = tL, tS
- }
- for i, line := range strings.Split(textui.Sprintf("%q %s", name, text), "\n") {
- _, _ = io.WriteString(out, prefix)
- if i == 0 {
- _, _ = io.WriteString(out, first)
- } else {
- _, _ = io.WriteString(out, rest)
- }
- _, _ = io.WriteString(out, line)
- _, _ = io.WriteString(out, "\n")
- }
-}
-
-func printSubvol(out io.Writer, prefix string, isLast bool, name string, subvol *btrfs.Subvolume) {
- rootInode, err := subvol.GetRootInode()
- if err != nil {
- printText(out, prefix, isLast, name+"/", textui.Sprintf("subvol_id=%v err=%v",
- subvol.TreeID, fmtErr(err)))
- return
- }
- dir, err := subvol.LoadDir(rootInode)
- if err != nil {
- printText(out, prefix, isLast, name+"/", textui.Sprintf("subvol_id=%v err=%v",
- subvol.TreeID, fmtErr(err)))
- return
- }
- if name == "/" {
- printDir(out, prefix, isLast, name, dir)
- return
- }
- printText(out, prefix, isLast, name+"/", textui.Sprintf("subvol_id=%v", subvol.TreeID))
- if isLast {
- prefix += tS
- } else {
- prefix += tl
- }
- printDir(out, prefix, true, name, dir)
-}
-
-func fmtErr(err error) string {
- errStr := err.Error()
- if strings.Contains(errStr, "\n") {
- errStr = "\\\n" + errStr
- }
- return errStr
-}
-
-func fmtInode(inode btrfs.BareInode) string {
- var mode btrfsitem.StatMode
- if inode.InodeItem == nil {
- inode.Errs = append(inode.Errs, errors.New("missing INODE_ITEM"))
- } else {
- mode = inode.InodeItem.Mode
- }
- ret := textui.Sprintf("ino=%v mode=%v", inode.Inode, mode)
- if len(inode.Errs) > 0 {
- ret += " err=" + fmtErr(inode.Errs)
- }
- return ret
-}
-
-func printDir(out io.Writer, prefix string, isLast bool, name string, dir *btrfs.Dir) {
- printText(out, prefix, isLast, name+"/", fmtInode(dir.BareInode))
- if isLast {
- prefix += tS
- } else {
- prefix += tl
- }
- for i, childName := range maps.SortedKeys(dir.ChildrenByName) {
- printDirEntry(
- out,
- prefix,
- i == len(dir.ChildrenByName)-1,
- dir.SV,
- path.Join(name, childName),
- dir.ChildrenByName[childName])
- }
-}
-
-func printDirEntry(out io.Writer, prefix string, isLast bool, subvol *btrfs.Subvolume, name string, entry btrfsitem.DirEntry) {
- if len(entry.Data) != 0 {
- panic(fmt.Errorf("TODO: I don't know how to handle dirent.data: %q", name))
- }
- switch entry.Type {
- case btrfsitem.FT_DIR:
- switch entry.Location.ItemType {
- case btrfsitem.INODE_ITEM_KEY:
- dir, err := subvol.LoadDir(entry.Location.ObjectID)
- if err != nil {
- printText(out, prefix, isLast, name, textui.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
- return
- }
- printDir(out, prefix, isLast, name, dir)
- case btrfsitem.ROOT_ITEM_KEY:
- printSubvol(out, prefix, isLast, name, &btrfs.Subvolume{
- FS: subvol.FS,
- TreeID: entry.Location.ObjectID,
- })
- default:
- panic(fmt.Errorf("TODO: I don't know how to handle an FT_DIR with location.ItemType=%v: %q",
- entry.Location.ItemType, name))
- }
- case btrfsitem.FT_SYMLINK:
- if entry.Location.ItemType != btrfsitem.INODE_ITEM_KEY {
- panic(fmt.Errorf("TODO: I don't know how to handle an FT_SYMLINK with location.ItemType=%v: %q",
- entry.Location.ItemType, name))
- }
- file, err := subvol.LoadFile(entry.Location.ObjectID)
- if err != nil {
- printText(out, prefix, isLast, name, textui.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
- return
- }
- printSymlink(out, prefix, isLast, name, file)
- case btrfsitem.FT_REG_FILE:
- if entry.Location.ItemType != btrfsitem.INODE_ITEM_KEY {
- panic(fmt.Errorf("TODO: I don't know how to handle an FT_REG_FILE with location.ItemType=%v: %q",
- entry.Location.ItemType, name))
- }
- file, err := subvol.LoadFile(entry.Location.ObjectID)
- if err != nil {
- printText(out, prefix, isLast, name, textui.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
- return
- }
- printFile(out, prefix, isLast, name, file)
- case btrfsitem.FT_SOCK:
- if entry.Location.ItemType != btrfsitem.INODE_ITEM_KEY {
- panic(fmt.Errorf("TODO: I don't know how to handle an FT_SOCK with location.ItemType=%v: %q",
- entry.Location.ItemType, name))
- }
- file, err := subvol.LoadFile(entry.Location.ObjectID)
- if err != nil {
- printText(out, prefix, isLast, name, textui.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
- return
- }
- printSocket(out, prefix, isLast, name, file)
- case btrfsitem.FT_FIFO:
- if entry.Location.ItemType != btrfsitem.INODE_ITEM_KEY {
- panic(fmt.Errorf("TODO: I don't know how to handle an FT_FIFO with location.ItemType=%v: %q",
- entry.Location.ItemType, name))
- }
- file, err := subvol.LoadFile(entry.Location.ObjectID)
- if err != nil {
- printText(out, prefix, isLast, name, textui.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
- return
- }
- printPipe(out, prefix, isLast, name, file)
- default:
- panic(fmt.Errorf("TODO: I don't know how to handle a fileType=%v: %q",
- entry.Type, name))
- }
-}
-
-func printSymlink(out io.Writer, prefix string, isLast bool, name string, file *btrfs.File) {
- var tgt []byte
- if file.InodeItem != nil {
- var err error
- tgt, err = io.ReadAll(io.NewSectionReader(file, 0, file.InodeItem.Size))
- if err != nil {
- file.Errs = append(file.Errs, err)
- }
- }
- printText(out, prefix, isLast, name, textui.Sprintf(
- "-> %q : %s",
- tgt,
- fmtInode(file.BareInode)))
-}
-
-func printFile(out io.Writer, prefix string, isLast bool, name string, file *btrfs.File) {
- if file.InodeItem != nil {
- if _, err := io.Copy(io.Discard, io.NewSectionReader(file, 0, file.InodeItem.Size)); err != nil {
- file.Errs = append(file.Errs, err)
- }
- }
- printText(out, prefix, isLast, name, fmtInode(file.BareInode))
-}
-
-func printSocket(out io.Writer, prefix string, isLast bool, name string, file *btrfs.File) {
- if file.InodeItem != nil && file.InodeItem.Size > 0 {
- panic(fmt.Errorf("TODO: I don't know how to handle a socket with size>0: %q", name))
- }
- printText(out, prefix, isLast, name, fmtInode(file.BareInode))
-}
-
-func printPipe(out io.Writer, prefix string, isLast bool, name string, file *btrfs.File) {
- if file.InodeItem != nil && file.InodeItem.Size > 0 {
- panic(fmt.Errorf("TODO: I don't know how to handle a pipe with size>0: %q", name))
- }
- printText(out, prefix, isLast, name, fmtInode(file.BareInode))
-}
diff --git a/cmd/btrfs-rec/inspect_lstrees.go b/cmd/btrfs-rec/inspect_lstrees.go
index 05c3a57..cad1a37 100644
--- a/cmd/btrfs-rec/inspect_lstrees.go
+++ b/cmd/btrfs-rec/inspect_lstrees.go
@@ -19,7 +19,6 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfsutil"
"git.lukeshu.com/btrfs-progs-ng/lib/containers"
- "git.lukeshu.com/btrfs-progs-ng/lib/diskio"
"git.lukeshu.com/btrfs-progs-ng/lib/maps"
"git.lukeshu.com/btrfs-progs-ng/lib/slices"
"git.lukeshu.com/btrfs-progs-ng/lib/textui"
@@ -76,16 +75,16 @@ func init() {
treeErrCnt++
},
TreeWalkHandler: btrfstree.TreeWalkHandler{
- Node: func(_ btrfstree.TreePath, ref *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) error {
- visitedNodes.Insert(ref.Addr)
+ Node: func(path btrfstree.Path, node *btrfstree.Node) error {
+ visitedNodes.Insert(path.Node(-1).ToNodeAddr)
return nil
},
- Item: func(_ btrfstree.TreePath, item btrfstree.Item) error {
+ Item: func(_ btrfstree.Path, item btrfstree.Item) error {
typ := item.Key.ItemType
treeItemCnt[typ]++
return nil
},
- BadItem: func(_ btrfstree.TreePath, item btrfstree.Item) error {
+ BadItem: func(_ btrfstree.Path, item btrfstree.Item) error {
typ := item.Key.ItemType
treeItemCnt[typ]++
return nil
@@ -107,13 +106,13 @@ func init() {
}
visitedNodes.Insert(laddr)
node, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, *sb, laddr, btrfstree.NodeExpectations{
- LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: laddr},
+ LAddr: containers.OptionalValue(laddr),
})
if err != nil {
treeErrCnt++
continue
}
- for _, item := range node.Data.BodyLeaf {
+ for _, item := range node.BodyLeaf {
typ := item.Key.ItemType
treeItemCnt[typ]++
}
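
containers.OptionalValue(laddr) replaces the literal containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: laddr} here and in scan.go above; presumably it is a small generic constructor along these lines (a sketch of the assumed helper, not the library's actual source, which lives in lib/containers and is outside this diff):

// assumed shape of the constructor used above
func OptionalValue[T any](val T) Optional[T] {
	return Optional[T]{OK: true, Val: val}
}
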
diff --git a/cmd/btrfs-rec/inspect_spewitems.go b/cmd/btrfs-rec/inspect_spewitems.go
index d8a65ae..b83e989 100644
--- a/cmd/btrfs-rec/inspect_spewitems.go
+++ b/cmd/btrfs-rec/inspect_spewitems.go
@@ -34,13 +34,13 @@ func init() {
dlog.Error(ctx, err)
},
TreeWalkHandler: btrfstree.TreeWalkHandler{
- Item: func(path btrfstree.TreePath, item btrfstree.Item) error {
+ Item: func(path btrfstree.Path, item btrfstree.Item) error {
textui.Fprintf(os.Stdout, "%s = ", path)
spew.Dump(item)
_, _ = os.Stdout.WriteString("\n")
return nil
},
- BadItem: func(path btrfstree.TreePath, item btrfstree.Item) error {
+ BadItem: func(path btrfstree.Path, item btrfstree.Item) error {
textui.Fprintf(os.Stdout, "%s = ", path)
spew.Dump(item)
_, _ = os.Stdout.WriteString("\n")