From 8c8c6c27552f8554ba014c34d684cb90538ef65b Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Tue, 28 Feb 2023 14:05:27 -0700 Subject: Move files around [ci-skip] --- lib/btrfsutil/graph.go | 265 +++++++++++++++++++++++++ lib/btrfsutil/graph_loops.go | 133 +++++++++++++ lib/btrfsutil/nestedlock.go | 45 +++++ lib/btrfsutil/old_rebuilt_forrest.go | 363 +++++++++++++++++++++++++++++++++++ lib/btrfsutil/open.go | 46 +++++ lib/btrfsutil/print_addrspace.go | 73 +++++++ lib/btrfsutil/rebuilt_forrest.go | 208 ++++++++++++++++++++ lib/btrfsutil/rebuilt_readitem.go | 172 +++++++++++++++++ lib/btrfsutil/rebuilt_tree.go | 357 ++++++++++++++++++++++++++++++++++ lib/btrfsutil/skinny_paths.go | 146 ++++++++++++++ lib/btrfsutil/walk.go | 97 ++++++++++ 11 files changed, 1905 insertions(+) create mode 100644 lib/btrfsutil/graph.go create mode 100644 lib/btrfsutil/graph_loops.go create mode 100644 lib/btrfsutil/nestedlock.go create mode 100644 lib/btrfsutil/old_rebuilt_forrest.go create mode 100644 lib/btrfsutil/open.go create mode 100644 lib/btrfsutil/print_addrspace.go create mode 100644 lib/btrfsutil/rebuilt_forrest.go create mode 100644 lib/btrfsutil/rebuilt_readitem.go create mode 100644 lib/btrfsutil/rebuilt_tree.go create mode 100644 lib/btrfsutil/skinny_paths.go create mode 100644 lib/btrfsutil/walk.go (limited to 'lib/btrfsutil') diff --git a/lib/btrfsutil/graph.go b/lib/btrfsutil/graph.go new file mode 100644 index 0000000..2a97ec8 --- /dev/null +++ b/lib/btrfsutil/graph.go @@ -0,0 +1,265 @@ +// Copyright (C) 2022-2023 Luke Shumaker +// +// SPDX-License-Identifier: GPL-2.0-or-later + +package graph + +import ( + "context" + "fmt" + "reflect" + "time" + + "github.com/datawire/dlib/dlog" + + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" + "git.lukeshu.com/btrfs-progs-ng/lib/containers" + "git.lukeshu.com/btrfs-progs-ng/lib/diskio" + "git.lukeshu.com/btrfs-progs-ng/lib/maps" + "git.lukeshu.com/btrfs-progs-ng/lib/slices" + "git.lukeshu.com/btrfs-progs-ng/lib/textui" +) + +type Node struct { + Level uint8 + Generation btrfsprim.Generation + Owner btrfsprim.ObjID + NumItems uint32 + MinItem btrfsprim.Key + MaxItem btrfsprim.Key + Items []btrfsprim.Key +} + +func (n Node) String() string { + if reflect.ValueOf(n).IsZero() { + return "{}" + } + return fmt.Sprintf(`{lvl:%v, gen:%v, tree:%v, cnt:%v, min:(%v,%v,%v), max:(%v,%v,%v)}`, + n.Level, n.Generation, n.Owner, n.NumItems, + n.MinItem.ObjectID, n.MinItem.ItemType, n.MinItem.Offset, + n.MaxItem.ObjectID, n.MaxItem.ItemType, n.MaxItem.Offset) +} + +type Edge struct { + // It is invalid for both 'FromRoot' and 'FromNode' to be + // non-zero. If both are zero, then the Edge is from the + // superblock. 
+ FromRoot btrfsvol.LogicalAddr + FromNode btrfsvol.LogicalAddr + FromItem int // only valid if one of FromRoot or FromNode is non-zero + + FromTree btrfsprim.ObjID + + ToNode btrfsvol.LogicalAddr + ToLevel uint8 + ToKey btrfsprim.Key + ToGeneration btrfsprim.Generation +} + +func (kp Edge) String() string { + var from string + switch { + case kp.FromRoot != 0: + from = fmt.Sprintf("root@%v[%d]:%v", + kp.FromRoot, kp.FromItem, kp.FromTree) + case kp.FromNode != 0: + from = fmt.Sprintf("{node:%v, tree:%v}[%d]", + kp.FromNode, kp.FromTree, kp.FromItem) + default: + from = fmt.Sprintf("superblock:%v", kp.FromTree) + } + return fmt.Sprintf(`%s -> {n:%v,l:%v,g:%v,k:(%v,%v,%v)}`, + from, + kp.ToNode, kp.ToLevel, kp.ToGeneration, + kp.ToKey.ObjectID, + kp.ToKey.ItemType, + kp.ToKey.Offset) +} + +type Graph struct { + Nodes map[btrfsvol.LogicalAddr]Node + BadNodes map[btrfsvol.LogicalAddr]error + EdgesFrom map[btrfsvol.LogicalAddr][]*Edge + EdgesTo map[btrfsvol.LogicalAddr][]*Edge +} + +func (g Graph) insertEdge(ptr *Edge) { + if ptr.ToNode == 0 { + panic("kp.ToNode should not be zero") + } + if ptr.FromRoot != 0 && ptr.FromNode != 0 { + panic("kp.FromRoot and kp.FromNode should not both be set") + } + if (ptr.FromRoot == 0 && ptr.FromNode == 0) && ptr.FromItem != 0 { + panic("kp.FromItem should only be set if either kp.FromRoot or kp.FromItem is set") + } + g.EdgesFrom[ptr.FromNode] = append(g.EdgesFrom[ptr.FromNode], ptr) + g.EdgesTo[ptr.ToNode] = append(g.EdgesTo[ptr.ToNode], ptr) +} + +func (g Graph) insertTreeRoot(sb btrfstree.Superblock, treeID btrfsprim.ObjID) { + treeInfo, err := btrfstree.LookupTreeRoot(nil, sb, treeID) + if err != nil { + // This shouldn't ever happen for treeIDs that are + // mentioned directly in the superblock; which are the + // only trees for which we should call + // .insertTreeRoot(). + panic(fmt.Errorf("LookupTreeRoot(%v): %w", treeID, err)) + } + if treeInfo.RootNode == 0 { + return + } + g.insertEdge(&Edge{ + FromTree: treeID, + ToNode: treeInfo.RootNode, + ToLevel: treeInfo.Level, + ToGeneration: treeInfo.Generation, + }) +} + +func New(sb btrfstree.Superblock) *Graph { + g := &Graph{ + Nodes: make(map[btrfsvol.LogicalAddr]Node), + BadNodes: make(map[btrfsvol.LogicalAddr]error), + EdgesFrom: make(map[btrfsvol.LogicalAddr][]*Edge), + EdgesTo: make(map[btrfsvol.LogicalAddr][]*Edge), + } + + // These 4 trees are mentioned directly in the superblock, so + // they are always seen. 
+ g.insertTreeRoot(sb, btrfsprim.ROOT_TREE_OBJECTID) + g.insertTreeRoot(sb, btrfsprim.CHUNK_TREE_OBJECTID) + g.insertTreeRoot(sb, btrfsprim.TREE_LOG_OBJECTID) + g.insertTreeRoot(sb, btrfsprim.BLOCK_GROUP_TREE_OBJECTID) + + return g +} + +func (g Graph) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) { + nodeData := Node{ + Level: nodeRef.Data.Head.Level, + Generation: nodeRef.Data.Head.Generation, + Owner: nodeRef.Data.Head.Owner, + NumItems: nodeRef.Data.Head.NumItems, + MinItem: discardOK(nodeRef.Data.MinItem()), + MaxItem: discardOK(nodeRef.Data.MaxItem()), + } + + if nodeRef.Data.Head.Level == 0 { + cnt := 0 + for _, item := range nodeRef.Data.BodyLeaf { + if _, ok := item.Body.(*btrfsitem.Root); ok { + cnt++ + } + } + kps := make([]Edge, 0, cnt) + keys := make([]btrfsprim.Key, len(nodeRef.Data.BodyLeaf)) + nodeData.Items = keys + g.Nodes[nodeRef.Addr] = nodeData + for i, item := range nodeRef.Data.BodyLeaf { + keys[i] = item.Key + if itemBody, ok := item.Body.(*btrfsitem.Root); ok { + kps = append(kps, Edge{ + FromRoot: nodeRef.Addr, + FromItem: i, + FromTree: item.Key.ObjectID, + ToNode: itemBody.ByteNr, + ToLevel: itemBody.Level, + ToGeneration: itemBody.Generation, + }) + g.insertEdge(&kps[len(kps)-1]) + } + } + } else { + g.Nodes[nodeRef.Addr] = nodeData + kps := make([]Edge, len(nodeRef.Data.BodyInternal)) + for i, kp := range nodeRef.Data.BodyInternal { + kps[i] = Edge{ + FromNode: nodeRef.Addr, + FromItem: i, + FromTree: nodeRef.Data.Head.Owner, + ToNode: kp.BlockPtr, + ToLevel: nodeRef.Data.Head.Level - 1, + ToKey: kp.Key, + ToGeneration: kp.Generation, + } + g.insertEdge(&kps[i]) + } + } +} + +func (g Graph) FinalCheck(ctx context.Context, fs diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock) error { + var stats textui.Portion[int] + _ctx := ctx + + ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-nodes.read.substep", "check-keypointers") + dlog.Info(_ctx, "Checking keypointers for dead-ends...") + progressWriter := textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) + stats.D = len(g.EdgesTo) + progressWriter.Set(stats) + for laddr := range g.EdgesTo { + if _, ok := g.Nodes[laddr]; !ok { + _, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, sb, laddr, btrfstree.NodeExpectations{ + LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: laddr}, + }) + if err == nil { + progressWriter.Done() + return fmt.Errorf("node@%v exists but was not in node scan results", laddr) + } + g.BadNodes[laddr] = err + } + stats.N++ + progressWriter.Set(stats) + } + progressWriter.Done() + dlog.Info(ctx, "... 
done checking keypointers") + + ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-nodes.read.substep", "check-for-loops") + dlog.Info(_ctx, "Checking for btree loops...") + stats.D = len(g.Nodes) + stats.N = 0 + progressWriter = textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) + progressWriter.Set(stats) + visited := make(containers.Set[btrfsvol.LogicalAddr], len(g.Nodes)) + numLoops := 0 + var checkNode func(node btrfsvol.LogicalAddr, stack []btrfsvol.LogicalAddr) + checkNode = func(node btrfsvol.LogicalAddr, stack []btrfsvol.LogicalAddr) { + defer func() { + stats.N = len(visited) + progressWriter.Set(stats) + }() + if visited.Has(node) { + return + } + if slices.Contains(node, stack) { + numLoops++ + dlog.Error(ctx, "loop:") + for _, line := range g.renderLoop(append(stack, node)) { + dlog.Errorf(ctx, " %s", line) + } + return + } + stack = append(stack, node) + for _, kp := range g.EdgesTo[node] { + checkNode(kp.FromNode, stack) + } + visited.Insert(node) + } + for _, node := range maps.SortedKeys(g.Nodes) { + checkNode(node, nil) + } + progressWriter.Done() + if numLoops > 0 { + return fmt.Errorf("%d btree loops", numLoops) + } + dlog.Info(_ctx, "... done checking for loops") + + return nil +} + +func discardOK[T any](val T, _ bool) T { + return val +} diff --git a/lib/btrfsutil/graph_loops.go b/lib/btrfsutil/graph_loops.go new file mode 100644 index 0000000..0e51805 --- /dev/null +++ b/lib/btrfsutil/graph_loops.go @@ -0,0 +1,133 @@ +// Copyright (C) 2022 Luke Shumaker +// +// SPDX-License-Identifier: GPL-2.0-or-later + +package graph + +import ( + "fmt" + "strings" + + "github.com/datawire/dlib/derror" + + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" +) + +func (g Graph) renderNode(node btrfsvol.LogicalAddr) []string { + if node == 0 { + return []string{"root"} + } else if nodeData, ok := g.Nodes[node]; ok { + return []string{ + fmt.Sprintf("{addr: %v,", node), + fmt.Sprintf(" level: %v,", nodeData.Level), + fmt.Sprintf(" gen: %v,", nodeData.Generation), + fmt.Sprintf(" num_items: %v,", nodeData.NumItems), + fmt.Sprintf(" min_item: {%d,%v,%d},", + nodeData.MinItem.ObjectID, + nodeData.MinItem.ItemType, + nodeData.MinItem.Offset), + fmt.Sprintf(" max_item: {%d,%v,%d}}", + nodeData.MaxItem.ObjectID, + nodeData.MaxItem.ItemType, + nodeData.MaxItem.Offset), + } + } else if nodeErr, ok := g.BadNodes[node]; ok { + return []string{ + fmt.Sprintf("{addr:%v,", node), + fmt.Sprintf(" err:%q}", nodeErr.Error()), + } + } else { + panic("should not happen") + } +} + +func (g Graph) renderEdge(kp Edge) []string { + a := fmt.Sprintf("[%d]={", kp.FromItem) + b := strings.Repeat(" ", len(a)) + ret := []string{ + a + fmt.Sprintf("ToNode: %v,", kp.ToNode), + b + fmt.Sprintf("ToLevel: %v,", kp.ToLevel), + b + fmt.Sprintf("ToGen: %v,", kp.ToGeneration), + b + fmt.Sprintf("ToKey: {%d,%v,%d}}", + kp.ToKey.ObjectID, + kp.ToKey.ItemType, + kp.ToKey.Offset), + } + + var err error + if toNode, ok := g.Nodes[kp.ToNode]; !ok { + err = g.BadNodes[kp.ToNode] + } else { + err = checkNodeExpectations(kp, toNode) + } + if err != nil { + c := strings.Repeat(" ", len(a)-1) + ret = append(ret, + c+"^", + c+"`-err="+strings.ReplaceAll(err.Error(), "\n", "\n"+c+" "), + ) + } + return ret +} + +func (g Graph) renderLoop(stack []btrfsvol.LogicalAddr) []string { + var lines []string + add := func(suffixes []string) { + curLen := 0 + for _, line := range lines { + if len(line) > curLen { + curLen = len(line) + } + } + 
for i, suffix := range suffixes { + if len(lines) <= i { + lines = append(lines, "") + } + if len(lines[i]) < curLen { + if i == 0 { + lines[i] += strings.Repeat("-", curLen-len(lines[i])-1) + ">" + } else { + lines[i] += strings.Repeat(" ", curLen-len(lines[i])) + } + } + lines[i] += suffix + } + } + + for i, node := range stack { + if i > 0 { + for _, kp := range g.EdgesTo[node] { + if kp.FromNode == stack[i-1] { + add(g.renderEdge(*kp)) + break + } + } + } + add(g.renderNode(node)) + } + + return lines +} + +func checkNodeExpectations(kp Edge, toNode Node) error { + var errs derror.MultiError + if toNode.Level != kp.ToLevel { + errs = append(errs, fmt.Errorf("kp.level=%v != node.level=%v", + kp.ToLevel, toNode.Level)) + } + if toNode.Generation != kp.ToGeneration { + errs = append(errs, fmt.Errorf("kp.generation=%v != node.generation=%v", + kp.ToGeneration, toNode.Generation)) + } + if toNode.NumItems == 0 { + errs = append(errs, fmt.Errorf("node.num_items=0")) + } else if kp.ToKey != (btrfsprim.Key{}) && toNode.MinItem != kp.ToKey { + errs = append(errs, fmt.Errorf("kp.key=%v != node.items[0].key=%v", + kp.ToKey, toNode.MinItem)) + } + if len(errs) > 0 { + return errs + } + return nil +} diff --git a/lib/btrfsutil/nestedlock.go b/lib/btrfsutil/nestedlock.go new file mode 100644 index 0000000..c1ffa18 --- /dev/null +++ b/lib/btrfsutil/nestedlock.go @@ -0,0 +1,45 @@ +// Copyright (C) 2023 Luke Shumaker +// +// SPDX-License-Identifier: GPL-2.0-or-later + +package btrees + +import ( + "context" + "sync" +) + +// A nestedMutex is like a sync.Mutex, but while it is locked by call +// 'A', may be simultaneously locked by subsequent calls if the +// subsequent calls use a Context descended from the one returned by +// the 'A' call to .Lock(). +type nestedMutex struct { + inner sync.Mutex + depth int +} + +type nestedMutexCtxKey struct{} + +// Lock locks the mutex. It is invalid to use a Context returned from +// Lock in a different goroutine than the one it was created in. It +// is invalid to use a Context returned from Lock after the mutex has +// subsequently become unlocked. +func (m *nestedMutex) Lock(ctx context.Context) context.Context { + if other, ok := ctx.Value(nestedMutexCtxKey{}).(*nestedMutex); ok && other == m { + m.depth++ + return ctx + } + m.inner.Lock() + return context.WithValue(ctx, nestedMutexCtxKey{}, m) +} + +// Unlock unlocks the mutex. It is invalid to call Unlock if the +// mutex is not already locked. It is invalid to call Unlock from +// multiple goroutines simultaneously. 
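+//
+// A rough usage sketch (illustrative only; the owner type and its
+// methods below are placeholders, not part of this package): an outer
+// call locks the mutex and passes the returned Context down, so a
+// nested call may "re-lock" without deadlocking, as long as every
+// Lock is paired with exactly one Unlock:
+//
+//	func (o *owner) outer(ctx context.Context) {
+//		ctx = o.mu.Lock(ctx) // acquires the inner sync.Mutex
+//		defer o.mu.Unlock()
+//		o.inner(ctx)
+//	}
+//
+//	func (o *owner) inner(ctx context.Context) {
+//		ctx = o.mu.Lock(ctx) // nested: just bumps the depth counter
+//		defer o.mu.Unlock()
+//		// ... work while holding the lock ...
+//	}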
+func (m *nestedMutex) Unlock() { + if m.depth > 0 { + m.depth-- + } else { + m.inner.Unlock() + } +} diff --git a/lib/btrfsutil/old_rebuilt_forrest.go b/lib/btrfsutil/old_rebuilt_forrest.go new file mode 100644 index 0000000..b7663fa --- /dev/null +++ b/lib/btrfsutil/old_rebuilt_forrest.go @@ -0,0 +1,363 @@ +// Copyright (C) 2022-2023 Luke Shumaker +// +// SPDX-License-Identifier: GPL-2.0-or-later + +package btrfsutil + +import ( + "context" + "fmt" + iofs "io/fs" + "sync" + + "github.com/datawire/dlib/derror" + "github.com/datawire/dlib/dlog" + + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" + "git.lukeshu.com/btrfs-progs-ng/lib/containers" + "git.lukeshu.com/btrfs-progs-ng/lib/diskio" +) + +type treeIndex struct { + TreeRootErr error + Items *containers.RBTree[treeIndexValue] + Errors *containers.IntervalTree[btrfsprim.Key, treeIndexError] +} + +type treeIndexError struct { + Path SkinnyPath + Err error +} + +type treeIndexValue struct { + Path SkinnyPath + Key btrfsprim.Key + ItemSize uint32 +} + +// Compare implements containers.Ordered. +func (a treeIndexValue) Compare(b treeIndexValue) int { + return a.Key.Compare(b.Key) +} + +func newTreeIndex(arena *SkinnyPathArena) treeIndex { + return treeIndex{ + Items: new(containers.RBTree[treeIndexValue]), + Errors: &containers.IntervalTree[btrfsprim.Key, treeIndexError]{ + MinFn: func(err treeIndexError) btrfsprim.Key { + return arena.Inflate(err.Path).Node(-1).ToKey + }, + MaxFn: func(err treeIndexError) btrfsprim.Key { + return arena.Inflate(err.Path).Node(-1).ToMaxKey + }, + }, + } +} + +type brokenTrees struct { + ctx context.Context //nolint:containedctx // don't have an option while keeping the same API + inner *btrfs.FS + + arena *SkinnyPathArena + + // btrfsprim.ROOT_TREE_OBJECTID + rootTreeMu sync.Mutex + rootTreeIndex *treeIndex + // for all other trees + treeMu sync.Mutex + treeIndexes map[btrfsprim.ObjID]treeIndex +} + +var _ btrfstree.TreeOperator = (*brokenTrees)(nil) + +// NewBrokenTrees wraps a *btrfs.FS to support looking up information +// from broken trees. +// +// Of the btrfstree.TreeOperator methods: +// +// - TreeWalk works on broken trees +// - TreeLookup relies on the tree being properly ordered (which a +// broken tree might not be). +// - TreeSearch relies on the tree being properly ordered (which a +// broken tree might not be). +// - TreeSearchAll relies on the tree being properly ordered (which a +// broken tree might not be), and a bad node may cause it to not +// return a truncated list of results. +// +// NewBrokenTrees attempts to remedy these deficiencies by using +// .TreeWalk to build an out-of-FS index of all of the items in the +// tree, and re-implements TreeLookup, TreeSearch, and TreeSearchAll +// using that index. 
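+//
+// A rough usage sketch (illustrative only; ctx, key, and the device
+// path are placeholders): wrap an opened filesystem, then use the
+// usual TreeOperator methods:
+//
+//	fs, err := Open(ctx, os.O_RDONLY, "/dev/sdb1")
+//	if err != nil {
+//		return err
+//	}
+//	bt := NewBrokenTrees(ctx, fs)
+//	item, err := bt.TreeLookup(btrfsprim.ROOT_TREE_OBJECTID, key)
+//
+// The first lookup/search against a given tree pays for the full
+// TreeWalk that builds the in-memory index; subsequent calls against
+// that tree are answered from the index.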
+func NewBrokenTrees(ctx context.Context, inner *btrfs.FS) interface { + btrfstree.TreeOperator + Superblock() (*btrfstree.Superblock, error) + ReadAt(p []byte, off btrfsvol.LogicalAddr) (int, error) + Augment(treeID btrfsprim.ObjID, nodeAddr btrfsvol.LogicalAddr) ([]btrfsprim.Key, error) +} { + return &brokenTrees{ + ctx: ctx, + inner: inner, + } +} + +func (bt *brokenTrees) treeIndex(treeID btrfsprim.ObjID) treeIndex { + var treeRoot *btrfstree.TreeRoot + var sb *btrfstree.Superblock + var err error + if treeID == btrfsprim.ROOT_TREE_OBJECTID { + bt.rootTreeMu.Lock() + defer bt.rootTreeMu.Unlock() + if bt.rootTreeIndex != nil { + return *bt.rootTreeIndex + } + sb, err = bt.inner.Superblock() + if err == nil { + treeRoot, err = btrfstree.LookupTreeRoot(bt.inner, *sb, treeID) + } + } else { + bt.treeMu.Lock() + defer bt.treeMu.Unlock() + if bt.treeIndexes == nil { + bt.treeIndexes = make(map[btrfsprim.ObjID]treeIndex) + } + if cacheEntry, exists := bt.treeIndexes[treeID]; exists { + return cacheEntry + } + sb, err = bt.inner.Superblock() + if err == nil { + treeRoot, err = btrfstree.LookupTreeRoot(bt, *sb, treeID) + } + } + if bt.arena == nil { + var _sb btrfstree.Superblock + if sb != nil { + _sb = *sb + } + bt.arena = &SkinnyPathArena{ + FS: bt.inner, + SB: _sb, + } + } + cacheEntry := newTreeIndex(bt.arena) + if err != nil { + cacheEntry.TreeRootErr = err + } else { + dlog.Infof(bt.ctx, "indexing tree %v...", treeID) + bt.rawTreeWalk(*treeRoot, cacheEntry, nil) + dlog.Infof(bt.ctx, "... done indexing tree %v", treeID) + } + if treeID == btrfsprim.ROOT_TREE_OBJECTID { + bt.rootTreeIndex = &cacheEntry + } else { + bt.treeIndexes[treeID] = cacheEntry + } + return cacheEntry +} + +func (bt *brokenTrees) rawTreeWalk(root btrfstree.TreeRoot, cacheEntry treeIndex, walked *[]btrfsprim.Key) { + btrfstree.TreeOperatorImpl{NodeSource: bt.inner}.RawTreeWalk( + bt.ctx, + root, + func(err *btrfstree.TreeError) { + if len(err.Path) > 0 && err.Path.Node(-1).ToNodeAddr == 0 { + // This is a panic because on the filesystems I'm working with it more likely + // indicates a bug in my item parser than a problem with the filesystem. + panic(fmt.Errorf("TODO: error parsing item: %w", err)) + } + cacheEntry.Errors.Insert(treeIndexError{ + Path: bt.arena.Deflate(err.Path), + Err: err.Err, + }) + }, + btrfstree.TreeWalkHandler{ + Item: func(path btrfstree.TreePath, item btrfstree.Item) error { + if cacheEntry.Items.Search(func(v treeIndexValue) int { return item.Key.Compare(v.Key) }) != nil { + // This is a panic because I'm not really sure what the best way to + // handle this is, and so if this happens I want the program to crash + // and force me to figure out how to handle it. 
+ panic(fmt.Errorf("dup key=%v in tree=%v", item.Key, root.TreeID)) + } + cacheEntry.Items.Insert(treeIndexValue{ + Path: bt.arena.Deflate(path), + Key: item.Key, + ItemSize: item.BodySize, + }) + if walked != nil { + *walked = append(*walked, item.Key) + } + return nil + }, + }, + ) +} + +func (bt *brokenTrees) TreeLookup(treeID btrfsprim.ObjID, key btrfsprim.Key) (btrfstree.Item, error) { + item, err := bt.TreeSearch(treeID, btrfstree.KeySearch(key.Compare)) + if err != nil { + err = fmt.Errorf("item with key=%v: %w", key, err) + } + return item, err +} + +func (bt *brokenTrees) addErrs(index treeIndex, fn func(btrfsprim.Key, uint32) int, err error) error { + var errs derror.MultiError + index.Errors.Subrange( + func(k btrfsprim.Key) int { return fn(k, 0) }, + func(v treeIndexError) bool { + errs = append(errs, &btrfstree.TreeError{ + Path: bt.arena.Inflate(v.Path), + Err: v.Err, + }) + return true + }) + if len(errs) == 0 { + return err + } + if err != nil { + errs = append(errs, err) + } + return errs +} + +func (bt *brokenTrees) TreeSearch(treeID btrfsprim.ObjID, fn func(btrfsprim.Key, uint32) int) (btrfstree.Item, error) { + index := bt.treeIndex(treeID) + if index.TreeRootErr != nil { + return btrfstree.Item{}, index.TreeRootErr + } + + indexItem := index.Items.Search(func(indexItem treeIndexValue) int { + return fn(indexItem.Key, indexItem.ItemSize) + }) + if indexItem == nil { + return btrfstree.Item{}, bt.addErrs(index, fn, iofs.ErrNotExist) + } + + itemPath := bt.arena.Inflate(indexItem.Value.Path) + node, err := bt.inner.ReadNode(itemPath.Parent()) + defer btrfstree.FreeNodeRef(node) + if err != nil { + return btrfstree.Item{}, bt.addErrs(index, fn, err) + } + + item := node.Data.BodyLeaf[itemPath.Node(-1).FromItemIdx] + item.Body = item.Body.CloneItem() + + // Since we were only asked to return 1 item, it isn't + // necessary to augment this `nil` with bt.addErrs(). 
+ return item, nil +} + +func (bt *brokenTrees) TreeSearchAll(treeID btrfsprim.ObjID, fn func(btrfsprim.Key, uint32) int) ([]btrfstree.Item, error) { + index := bt.treeIndex(treeID) + if index.TreeRootErr != nil { + return nil, index.TreeRootErr + } + + var indexItems []treeIndexValue + index.Items.Subrange( + func(indexItem treeIndexValue) int { return fn(indexItem.Key, indexItem.ItemSize) }, + func(node *containers.RBNode[treeIndexValue]) bool { + indexItems = append(indexItems, node.Value) + return true + }) + if len(indexItems) == 0 { + return nil, bt.addErrs(index, fn, iofs.ErrNotExist) + } + + ret := make([]btrfstree.Item, len(indexItems)) + var node *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node] + for i := range indexItems { + itemPath := bt.arena.Inflate(indexItems[i].Path) + if node == nil || node.Addr != itemPath.Node(-2).ToNodeAddr { + var err error + btrfstree.FreeNodeRef(node) + node, err = bt.inner.ReadNode(itemPath.Parent()) + if err != nil { + btrfstree.FreeNodeRef(node) + return nil, bt.addErrs(index, fn, err) + } + } + ret[i] = node.Data.BodyLeaf[itemPath.Node(-1).FromItemIdx] + ret[i].Body = ret[i].Body.CloneItem() + } + btrfstree.FreeNodeRef(node) + + return ret, bt.addErrs(index, fn, nil) +} + +func (bt *brokenTrees) TreeWalk(ctx context.Context, treeID btrfsprim.ObjID, errHandle func(*btrfstree.TreeError), cbs btrfstree.TreeWalkHandler) { + index := bt.treeIndex(treeID) + if index.TreeRootErr != nil { + errHandle(&btrfstree.TreeError{ + Path: btrfstree.TreePath{{ + FromTree: treeID, + ToMaxKey: btrfsprim.MaxKey, + }}, + Err: index.TreeRootErr, + }) + return + } + var node *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node] + index.Items.Range(func(indexItem *containers.RBNode[treeIndexValue]) bool { + if ctx.Err() != nil { + return false + } + if bt.ctx.Err() != nil { + return false + } + if cbs.Item != nil { + itemPath := bt.arena.Inflate(indexItem.Value.Path) + if node == nil || node.Addr != itemPath.Node(-2).ToNodeAddr { + var err error + btrfstree.FreeNodeRef(node) + node, err = bt.inner.ReadNode(itemPath.Parent()) + if err != nil { + btrfstree.FreeNodeRef(node) + errHandle(&btrfstree.TreeError{Path: itemPath, Err: err}) + return true + } + } + item := node.Data.BodyLeaf[itemPath.Node(-1).FromItemIdx] + if err := cbs.Item(itemPath, item); err != nil { + errHandle(&btrfstree.TreeError{Path: itemPath, Err: err}) + } + } + return true + }) + btrfstree.FreeNodeRef(node) +} + +func (bt *brokenTrees) Superblock() (*btrfstree.Superblock, error) { + return bt.inner.Superblock() +} + +func (bt *brokenTrees) ReadAt(p []byte, off btrfsvol.LogicalAddr) (int, error) { + return bt.inner.ReadAt(p, off) +} + +func (bt *brokenTrees) Augment(treeID btrfsprim.ObjID, nodeAddr btrfsvol.LogicalAddr) ([]btrfsprim.Key, error) { + sb, err := bt.Superblock() + if err != nil { + return nil, err + } + index := bt.treeIndex(treeID) + if index.TreeRootErr != nil { + return nil, index.TreeRootErr + } + nodeRef, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](bt.inner, *sb, nodeAddr, btrfstree.NodeExpectations{}) + defer btrfstree.FreeNodeRef(nodeRef) + if err != nil { + return nil, err + } + var ret []btrfsprim.Key + bt.rawTreeWalk(btrfstree.TreeRoot{ + TreeID: treeID, + RootNode: nodeAddr, + Level: nodeRef.Data.Head.Level, + Generation: nodeRef.Data.Head.Generation, + }, index, &ret) + return ret, nil +} diff --git a/lib/btrfsutil/open.go b/lib/btrfsutil/open.go new file mode 100644 index 0000000..c5ee314 --- /dev/null +++ b/lib/btrfsutil/open.go @@ -0,0 +1,46 @@ +// Copyright (C) 2022-2023 
Luke Shumaker +// +// SPDX-License-Identifier: GPL-2.0-or-later + +package btrfsutil + +import ( + "context" + "fmt" + "os" + + "github.com/datawire/dlib/dlog" + + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" + "git.lukeshu.com/btrfs-progs-ng/lib/diskio" + "git.lukeshu.com/btrfs-progs-ng/lib/textui" +) + +func Open(ctx context.Context, flag int, filenames ...string) (*btrfs.FS, error) { + fs := new(btrfs.FS) + for i, filename := range filenames { + dlog.Debugf(ctx, "Adding device file %d/%d %q...", i, len(filenames), filename) + osFile, err := os.OpenFile(filename, flag, 0) + if err != nil { + _ = fs.Close() + return nil, fmt.Errorf("device file %q: %w", filename, err) + } + typedFile := &diskio.OSFile[btrfsvol.PhysicalAddr]{ + File: osFile, + } + bufFile := diskio.NewBufferedFile[btrfsvol.PhysicalAddr]( + typedFile, + //nolint:gomnd // False positive: gomnd.ignored-functions=[textui.Tunable] doesn't support type params. + textui.Tunable[btrfsvol.PhysicalAddr](16*1024), // block size: 16KiB + textui.Tunable(1024), // number of blocks to buffer; total of 16MiB + ) + devFile := &btrfs.Device{ + File: bufFile, + } + if err := fs.AddDevice(ctx, devFile); err != nil { + return nil, fmt.Errorf("device file %q: %w", filename, err) + } + } + return fs, nil +} diff --git a/lib/btrfsutil/print_addrspace.go b/lib/btrfsutil/print_addrspace.go new file mode 100644 index 0000000..e85e055 --- /dev/null +++ b/lib/btrfsutil/print_addrspace.go @@ -0,0 +1,73 @@ +// Copyright (C) 2022-2023 Luke Shumaker +// +// SPDX-License-Identifier: GPL-2.0-or-later + +package btrfsinspect + +import ( + "io" + "sort" + + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" + "git.lukeshu.com/btrfs-progs-ng/lib/textui" +) + +func PrintLogicalSpace(out io.Writer, fs *btrfs.FS) { + mappings := fs.LV.Mappings() + var prevBeg, prevEnd btrfsvol.LogicalAddr + var sumHole, sumChunk btrfsvol.AddrDelta + for _, mapping := range mappings { + if mapping.LAddr > prevEnd { + size := mapping.LAddr.Sub(prevEnd) + textui.Fprintf(out, "logical_hole laddr=%v size=%v\n", prevEnd, size) + sumHole += size + } + if mapping.LAddr != prevBeg { + if !mapping.Flags.OK { + textui.Fprintf(out, "chunk laddr=%v size=%v flags=(missing)\n", + mapping.LAddr, mapping.Size) + } else { + textui.Fprintf(out, "chunk laddr=%v size=%v flags=%v\n", + mapping.LAddr, mapping.Size, mapping.Flags.Val) + } + } + textui.Fprintf(out, "\tstripe dev_id=%v paddr=%v\n", + mapping.PAddr.Dev, mapping.PAddr.Addr) + sumChunk += mapping.Size + prevBeg = mapping.LAddr + prevEnd = mapping.LAddr.Add(mapping.Size) + } + textui.Fprintf(out, "total logical holes = %v (%d)\n", sumHole, int64(sumHole)) + textui.Fprintf(out, "total logical chunks = %v (%d)\n", sumChunk, int64(sumChunk)) + textui.Fprintf(out, "total logical addr space = %v (%d)\n", prevEnd, int64(prevEnd)) +} + +func PrintPhysicalSpace(out io.Writer, fs *btrfs.FS) { + mappings := fs.LV.Mappings() + sort.Slice(mappings, func(i, j int) bool { + return mappings[i].PAddr.Compare(mappings[j].PAddr) < 0 + }) + + var prevDev btrfsvol.DeviceID = 0 + var prevEnd btrfsvol.PhysicalAddr + var sumHole, sumExt btrfsvol.AddrDelta + for _, mapping := range mappings { + if mapping.PAddr.Dev != prevDev { + prevDev = mapping.PAddr.Dev + prevEnd = 0 + } + if mapping.PAddr.Addr > prevEnd { + size := mapping.PAddr.Addr.Sub(prevEnd) + textui.Fprintf(out, "physical_hole paddr=%v size=%v\n", prevEnd, size) + sumHole += size + } + textui.Fprintf(out, 
"devext dev=%v paddr=%v size=%v laddr=%v\n", + mapping.PAddr.Dev, mapping.PAddr.Addr, mapping.Size, mapping.LAddr) + sumExt += mapping.Size + prevEnd = mapping.PAddr.Addr.Add(mapping.Size) + } + textui.Fprintf(out, "total physical holes = %v (%d)\n", sumHole, int64(sumHole)) + textui.Fprintf(out, "total physical extents = %v (%d)\n", sumExt, int64(sumExt)) + textui.Fprintf(out, "total physical addr space = %v (%d)\n", prevEnd, int64(prevEnd)) +} diff --git a/lib/btrfsutil/rebuilt_forrest.go b/lib/btrfsutil/rebuilt_forrest.go new file mode 100644 index 0000000..dbbc6eb --- /dev/null +++ b/lib/btrfsutil/rebuilt_forrest.go @@ -0,0 +1,208 @@ +// Copyright (C) 2022-2023 Luke Shumaker +// +// SPDX-License-Identifier: GPL-2.0-or-later + +package btrees + +import ( + "context" + + "github.com/datawire/dlib/dlog" + + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" + pkggraph "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio" + "git.lukeshu.com/btrfs-progs-ng/lib/containers" + "git.lukeshu.com/btrfs-progs-ng/lib/slices" + "git.lukeshu.com/btrfs-progs-ng/lib/textui" +) + +type Callbacks interface { + AddedItem(ctx context.Context, tree btrfsprim.ObjID, key btrfsprim.Key) + AddedRoot(ctx context.Context, tree btrfsprim.ObjID, root btrfsvol.LogicalAddr) + LookupRoot(ctx context.Context, tree btrfsprim.ObjID) (offset btrfsprim.Generation, item btrfsitem.Root, ok bool) + LookupUUID(ctx context.Context, uuid btrfsprim.UUID) (id btrfsprim.ObjID, ok bool) +} + +// RebuiltForrest is an abstraction for rebuilding and accessing +// potentially broken btrees. +// +// It is conceptually a btrfstree.TreeOperator, and adds similar +// broken-tree handling to btrfsutil.BrokenForrest. However, the API +// is different thant btrfstree.TreeOperator, and is much more +// efficient than btrfsutil.BrokenForrest. +// +// The efficiency improvements are possible because of the API +// differences, which are necessary for how it is used in +// rebuildnodes: +// +// - it consumes an already-read graph.Graph instead of reading the +// graph itself +// +// - it does not use `btrfstree.TreePath` +// +// - it does not keep track of errors encountered in a tree +// +// Additionally, it provides some functionality that +// btrfsutil.BrokenForrest does not: +// +// - it provides a .LeafToRoots() method to advise on what +// additional roots should be added +// +// - it provides a .COWDistance() method to compare how related two +// trees are +// +// A zero RebuiltForrest is invalid; it must be initialized with +// NewRebuiltForrest(). +type RebuiltForrest struct { + // static + sb btrfstree.Superblock + graph pkggraph.Graph + keyIO *keyio.Handle + cb Callbacks + + // mutable + treesMu nestedMutex + trees map[btrfsprim.ObjID]*RebuiltTree // must hold .treesMu to access + leafs containers.ARCache[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]] + incItems containers.ARCache[btrfsprim.ObjID, *itemIndex] + excItems containers.ARCache[btrfsprim.ObjID, *itemIndex] +} + +// NewRebuiltForrest returns a new RebuiltForrest instance. All of +// the callbacks must be non-nil. 
+func NewRebuiltForrest(sb btrfstree.Superblock, graph pkggraph.Graph, keyIO *keyio.Handle, cb Callbacks) *RebuiltForrest { + return &RebuiltForrest{ + sb: sb, + graph: graph, + keyIO: keyIO, + cb: cb, + + trees: make(map[btrfsprim.ObjID]*RebuiltTree), + leafs: containers.ARCache[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]]{ + MaxLen: textui.Tunable(8), + }, + incItems: containers.ARCache[btrfsprim.ObjID, *itemIndex]{ + MaxLen: textui.Tunable(8), + }, + excItems: containers.ARCache[btrfsprim.ObjID, *itemIndex]{ + MaxLen: textui.Tunable(8), + }, + } +} + +// Tree returns a given tree, initializing it if nescessary. If it is +// unable to initialize the tree, then nil is returned, and nothing is +// done to the forrest. +// +// The tree is initialized with the normal root node of the tree. +func (ts *RebuiltForrest) Tree(ctx context.Context, treeID btrfsprim.ObjID) *RebuiltTree { + ctx = ts.treesMu.Lock(ctx) + defer ts.treesMu.Unlock() + if !ts.addTree(ctx, treeID, nil) { + return nil + } + return ts.trees[treeID] +} + +func (ts *RebuiltForrest) addTree(ctx context.Context, treeID btrfsprim.ObjID, stack []btrfsprim.ObjID) (ok bool) { + if tree, ok := ts.trees[treeID]; ok { + return tree != nil + } + defer func() { + if !ok { + // Store a negative cache of this. tree.AddRoot() for the ROOT or UUID + // trees will call .flushNegativeCache(). + ts.trees[treeID] = nil + } + }() + stack = append(stack, treeID) + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree", stack) + dlog.Info(ctx, "adding tree...") + if slices.Contains(treeID, stack[:len(stack)-1]) { + dlog.Errorf(ctx, "failed to add tree: loop detected: %v", stack) + return false + } + + tree := &RebuiltTree{ + ID: treeID, + Roots: make(containers.Set[btrfsvol.LogicalAddr]), + forrest: ts, + } + var root btrfsvol.LogicalAddr + switch treeID { + case btrfsprim.ROOT_TREE_OBJECTID: + root = ts.sb.RootTree + case btrfsprim.CHUNK_TREE_OBJECTID: + root = ts.sb.ChunkTree + case btrfsprim.TREE_LOG_OBJECTID: + root = ts.sb.LogTree + case btrfsprim.BLOCK_GROUP_TREE_OBJECTID: + root = ts.sb.BlockGroupRoot + default: + if !ts.addTree(ctx, btrfsprim.ROOT_TREE_OBJECTID, stack) { + dlog.Error(ctx, "failed to add tree: add ROOT_TREE") + return false + } + rootOff, rootItem, ok := ts.cb.LookupRoot(ctx, treeID) + if !ok { + dlog.Error(ctx, "failed to add tree: lookup ROOT_ITEM") + return false + } + root = rootItem.ByteNr + tree.UUID = rootItem.UUID + if rootItem.ParentUUID != (btrfsprim.UUID{}) { + tree.ParentGen = rootOff + if !ts.addTree(ctx, btrfsprim.UUID_TREE_OBJECTID, stack) { + return false + } + parentID, ok := ts.cb.LookupUUID(ctx, rootItem.ParentUUID) + if !ok { + dlog.Error(ctx, "failed to add tree: lookup UUID") + return false + } + if !ts.addTree(ctx, parentID, stack) { + dlog.Error(ctx, "failed to add tree: add parent tree") + return false + } + tree.Parent = ts.trees[parentID] + } + } + + ts.trees[treeID] = tree + if root != 0 { + tree.AddRoot(ctx, root) + } + + return true +} + +func (ts *RebuiltForrest) flushNegativeCache(ctx context.Context) { + _ = ts.treesMu.Lock(ctx) + defer ts.treesMu.Unlock() + for treeID, tree := range ts.trees { + if tree == nil { + delete(ts.trees, treeID) + } + } +} + +// ListRoots returns a listing of all initialized trees and their root +// nodes. +// +// Do not mutate the set of roots for a tree; it is a pointer to the +// RebuiltForrest's internal set! 
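+//
+// A rough usage sketch (illustrative only; rts is a *RebuiltForrest
+// and the logging is a placeholder):
+//
+//	for treeID, roots := range rts.ListRoots(ctx) {
+//		for root := range roots {
+//			dlog.Infof(ctx, "tree %v has root node %v", treeID, root)
+//		}
+//	}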
+func (ts *RebuiltForrest) ListRoots(ctx context.Context) map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr] { + _ = ts.treesMu.Lock(ctx) + defer ts.treesMu.Unlock() + ret := make(map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr]) + for treeID, tree := range ts.trees { + if tree != nil { + ret[treeID] = tree.Roots + } + } + return ret +} diff --git a/lib/btrfsutil/rebuilt_readitem.go b/lib/btrfsutil/rebuilt_readitem.go new file mode 100644 index 0000000..56da32d --- /dev/null +++ b/lib/btrfsutil/rebuilt_readitem.go @@ -0,0 +1,172 @@ +// Copyright (C) 2022-2023 Luke Shumaker +// +// SPDX-License-Identifier: GPL-2.0-or-later + +package keyio + +import ( + "context" + "fmt" + "sync" + + "github.com/datawire/dlib/dlog" + + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph" + "git.lukeshu.com/btrfs-progs-ng/lib/containers" + "git.lukeshu.com/btrfs-progs-ng/lib/diskio" + "git.lukeshu.com/btrfs-progs-ng/lib/textui" +) + +type ItemPtr struct { + Node btrfsvol.LogicalAddr + Idx int +} + +func (ptr ItemPtr) String() string { + return fmt.Sprintf("node@%v[%v]", ptr.Node, ptr.Idx) +} + +type SizeAndErr struct { + Size uint64 + Err error +} + +type FlagsAndErr struct { + NoDataSum bool + Err error +} + +type Handle struct { + rawFile diskio.File[btrfsvol.LogicalAddr] + sb btrfstree.Superblock + graph graph.Graph + + Flags map[ItemPtr]FlagsAndErr // INODE_ITEM + Names map[ItemPtr][]byte // DIR_INDEX + Sizes map[ItemPtr]SizeAndErr // EXTENT_CSUM and EXTENT_DATA + + mu sync.Mutex + cache containers.ARCache[btrfsvol.LogicalAddr, *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]] +} + +func NewHandle(file diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock) *Handle { + return &Handle{ + rawFile: file, + sb: sb, + + Flags: make(map[ItemPtr]FlagsAndErr), + Names: make(map[ItemPtr][]byte), + Sizes: make(map[ItemPtr]SizeAndErr), + + cache: containers.ARCache[btrfsvol.LogicalAddr, *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]]{ + MaxLen: textui.Tunable(8), + OnRemove: func(_ btrfsvol.LogicalAddr, nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) { + btrfstree.FreeNodeRef(nodeRef) + }, + }, + } +} + +func (o *Handle) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) { + for i, item := range nodeRef.Data.BodyLeaf { + ptr := ItemPtr{ + Node: nodeRef.Addr, + Idx: i, + } + switch itemBody := item.Body.(type) { + case *btrfsitem.Inode: + o.Flags[ptr] = FlagsAndErr{ + NoDataSum: itemBody.Flags.Has(btrfsitem.INODE_NODATASUM), + Err: nil, + } + case *btrfsitem.DirEntry: + if item.Key.ItemType == btrfsprim.DIR_INDEX_KEY { + o.Names[ptr] = append([]byte(nil), itemBody.Name...) 
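+				// Appending to a nil slice copies Name, so the
+				// stored bytes do not alias the buffer owned by
+				// nodeRef, which may be freed after this call.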
+ } + case *btrfsitem.ExtentCSum: + o.Sizes[ptr] = SizeAndErr{ + Size: uint64(itemBody.Size()), + Err: nil, + } + case *btrfsitem.FileExtent: + size, err := itemBody.Size() + o.Sizes[ptr] = SizeAndErr{ + Size: uint64(size), + Err: err, + } + case *btrfsitem.Error: + switch item.Key.ItemType { + case btrfsprim.INODE_ITEM_KEY: + o.Flags[ptr] = FlagsAndErr{ + Err: fmt.Errorf("error decoding item: ptr=%v (tree=%v key=%v): %w", + ptr, nodeRef.Data.Head.Owner, item.Key, itemBody.Err), + } + case btrfsprim.EXTENT_CSUM_KEY, btrfsprim.EXTENT_DATA_KEY: + o.Sizes[ptr] = SizeAndErr{ + Err: fmt.Errorf("error decoding item: ptr=%v (tree=%v key=%v): %w", + ptr, nodeRef.Data.Head.Owner, item.Key, itemBody.Err), + } + } + } + } +} + +func (o *Handle) SetGraph(graph graph.Graph) { + o.graph = graph +} + +func (o *Handle) readNode(ctx context.Context, laddr btrfsvol.LogicalAddr) *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node] { + if cached, ok := o.cache.Load(laddr); ok { + dlog.Tracef(ctx, "cache-hit node@%v", laddr) + return cached + } + + graphInfo, ok := o.graph.Nodes[laddr] + if !ok { + panic(fmt.Errorf("should not happen: node@%v is not mentioned in the in-memory graph", laddr)) + } + + dlog.Debugf(ctx, "cache-miss node@%v, reading...", laddr) + ref, err := btrfstree.ReadNode(o.rawFile, o.sb, laddr, btrfstree.NodeExpectations{ + LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: laddr}, + Level: containers.Optional[uint8]{OK: true, Val: graphInfo.Level}, + Generation: containers.Optional[btrfsprim.Generation]{OK: true, Val: graphInfo.Generation}, + Owner: func(treeID btrfsprim.ObjID) error { + if treeID != graphInfo.Owner { + return fmt.Errorf("expected owner=%v but claims to have owner=%v", + graphInfo.Owner, treeID) + } + return nil + }, + MinItem: containers.Optional[btrfsprim.Key]{OK: true, Val: graphInfo.MinItem}, + MaxItem: containers.Optional[btrfsprim.Key]{OK: true, Val: graphInfo.MaxItem}, + }) + if err != nil { + panic(fmt.Errorf("should not happen: i/o error: %w", err)) + } + + o.cache.Store(laddr, ref) + + return ref +} + +func (o *Handle) ReadItem(ctx context.Context, ptr ItemPtr) btrfsitem.Item { + o.mu.Lock() + defer o.mu.Unlock() + if o.graph.Nodes[ptr.Node].Level != 0 { + panic(fmt.Errorf("should not happen: keyio.Handle.ReadItem called for non-leaf node@%v", ptr.Node)) + } + if ptr.Idx < 0 { + panic(fmt.Errorf("should not happen: keyio.Handle.ReadItem called for negative item index: %v", ptr.Idx)) + } + items := o.readNode(ctx, ptr.Node).Data.BodyLeaf + if ptr.Idx >= len(items) { + panic(fmt.Errorf("should not happen: keyio.Handle.ReadItem called for out-of-bounds item index: index=%v len=%v", + ptr.Idx, len(items))) + } + return items[ptr.Idx].Body.CloneItem() +} diff --git a/lib/btrfsutil/rebuilt_tree.go b/lib/btrfsutil/rebuilt_tree.go new file mode 100644 index 0000000..39d8871 --- /dev/null +++ b/lib/btrfsutil/rebuilt_tree.go @@ -0,0 +1,357 @@ +// Copyright (C) 2022-2023 Luke Shumaker +// +// SPDX-License-Identifier: GPL-2.0-or-later + +package btrees + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/datawire/dlib/dlog" + + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio" + "git.lukeshu.com/btrfs-progs-ng/lib/containers" + "git.lukeshu.com/btrfs-progs-ng/lib/maps" + "git.lukeshu.com/btrfs-progs-ng/lib/slices" + "git.lukeshu.com/btrfs-progs-ng/lib/textui" +) + 
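+// A RebuiltTree is a single tree within a RebuiltForrest, as returned
+// by RebuiltForrest.Tree().  Roots are attached to it incrementally
+// with .AddRoot(), and its contents are read with .Items(),
+// .PotentialItems(), and .ReadItem().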
+type RebuiltTree struct { + // static + ID btrfsprim.ObjID + UUID btrfsprim.UUID + Parent *RebuiltTree + ParentGen btrfsprim.Generation // offset of this tree's root item + forrest *RebuiltForrest + + // mutable + mu sync.RWMutex + Roots containers.Set[btrfsvol.LogicalAddr] + // There are 3 more mutable "members" that are protected by + // `mu`; but they live in a shared LRUcache. They are all + // derived from tree.Roots, which is why it's OK if they get + // evicted. + // + // 1. tree.leafToRoots() = tree.forrest.leafs.Load(tree.ID) + // 2. tree.Items() = tree.forrest.incItems.Load(tree.ID) + // 3. tree.PotentialItems() = tree.forrest.excItems.Load(tree.ID) +} + +// LRU member 1: .leafToRoots() //////////////////////////////////////////////////////////////////////////////////////// + +// leafToRoots returns all leafs (lvl=0) in the filesystem that pass +// .isOwnerOK, whether or not they're in the tree. +func (tree *RebuiltTree) leafToRoots(ctx context.Context) map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr] { + return containers.LoadOrElse[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]](&tree.forrest.leafs, tree.ID, func(btrfsprim.ObjID) map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr] { + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.index-nodes", fmt.Sprintf("tree=%v", tree.ID)) + + nodeToRoots := make(map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]) + + var stats textui.Portion[int] + stats.D = len(tree.forrest.graph.Nodes) + progressWriter := textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) + progress := func() { + stats.N = len(nodeToRoots) + progressWriter.Set(stats) + } + + progress() + for _, node := range maps.SortedKeys(tree.forrest.graph.Nodes) { + tree.indexNode(ctx, node, nodeToRoots, progress, nil) + } + progressWriter.Done() + + ret := make(map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]) + for node, roots := range nodeToRoots { + if tree.forrest.graph.Nodes[node].Level == 0 && len(roots) > 0 { + ret[node] = roots + } + } + return ret + }) +} + +func (tree *RebuiltTree) indexNode(ctx context.Context, node btrfsvol.LogicalAddr, index map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr], progress func(), stack []btrfsvol.LogicalAddr) { + defer progress() + if err := ctx.Err(); err != nil { + return + } + if _, done := index[node]; done { + return + } + if slices.Contains(node, stack) { + // This is a panic because tree.forrest.graph.FinalCheck() should + // have already checked for loops. + panic("loop") + } + if !tree.isOwnerOK(tree.forrest.graph.Nodes[node].Owner, tree.forrest.graph.Nodes[node].Generation) { + index[node] = nil + return + } + + // tree.leafToRoots + stack = append(stack, node) + var roots containers.Set[btrfsvol.LogicalAddr] + for _, kp := range tree.forrest.graph.EdgesTo[node] { + if !tree.isOwnerOK(tree.forrest.graph.Nodes[kp.FromNode].Owner, tree.forrest.graph.Nodes[kp.FromNode].Generation) { + continue + } + tree.indexNode(ctx, kp.FromNode, index, progress, stack) + if len(index[kp.FromNode]) > 0 { + if roots == nil { + roots = make(containers.Set[btrfsvol.LogicalAddr]) + } + roots.InsertFrom(index[kp.FromNode]) + } + } + if roots == nil { + roots = containers.NewSet[btrfsvol.LogicalAddr](node) + } + index[node] = roots +} + +// isOwnerOK returns whether it is permissible for a node with +// .Head.Owner=owner to be in this tree. 
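+//
+// For example (illustrative numbers): if this tree was snapshotted
+// off of its Parent at ParentGen=1000, then a node claiming to be
+// owned by the Parent's ID is acceptable here only if its generation
+// is < 1000; nodes written into the parent at generation >= 1000
+// post-date the snapshot and so cannot belong to this tree.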
+func (tree *RebuiltTree) isOwnerOK(owner btrfsprim.ObjID, gen btrfsprim.Generation) bool { + for { + if owner == tree.ID { + return true + } + if tree.Parent == nil || gen >= tree.ParentGen { + return false + } + tree = tree.Parent + } +} + +// LRU members 2 and 3: .Items() and .PotentialItems() ///////////////////////////////////////////////////////////////// + +// Items returns a map of the items contained in this tree. +// +// Do not mutate the returned map; it is a pointer to the +// RebuiltTree's internal map! +func (tree *RebuiltTree) Items(ctx context.Context) *containers.SortedMap[btrfsprim.Key, keyio.ItemPtr] { + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.index-inc-items", fmt.Sprintf("tree=%v", tree.ID)) + return tree.items(ctx, &tree.forrest.incItems, tree.Roots.HasAny) +} + +// PotentialItems returns a map of items that could be added to this +// tree with .AddRoot(). +// +// Do not mutate the returned map; it is a pointer to the +// RebuiltTree's internal map! +func (tree *RebuiltTree) PotentialItems(ctx context.Context) *containers.SortedMap[btrfsprim.Key, keyio.ItemPtr] { + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.index-exc-items", fmt.Sprintf("tree=%v", tree.ID)) + return tree.items(ctx, &tree.forrest.excItems, + func(roots containers.Set[btrfsvol.LogicalAddr]) bool { + return !tree.Roots.HasAny(roots) + }) +} + +type itemIndex = containers.SortedMap[btrfsprim.Key, keyio.ItemPtr] + +type itemStats struct { + Leafs textui.Portion[int] + NumItems int + NumDups int +} + +func (s itemStats) String() string { + return textui.Sprintf("%v (%v items, %v dups)", + s.Leafs, s.NumItems, s.NumDups) +} + +func (tree *RebuiltTree) items(ctx context.Context, cache containers.Map[btrfsprim.ObjID, *itemIndex], + leafFn func(roots containers.Set[btrfsvol.LogicalAddr]) bool, +) *containers.SortedMap[btrfsprim.Key, keyio.ItemPtr] { + tree.mu.RLock() + defer tree.mu.RUnlock() + + return containers.LoadOrElse(cache, tree.ID, func(btrfsprim.ObjID) *itemIndex { + var leafs []btrfsvol.LogicalAddr + for leaf, roots := range tree.leafToRoots(ctx) { + if leafFn(roots) { + leafs = append(leafs, leaf) + } + } + slices.Sort(leafs) + + var stats itemStats + stats.Leafs.D = len(leafs) + progressWriter := textui.NewProgress[itemStats](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) + + index := new(containers.SortedMap[btrfsprim.Key, keyio.ItemPtr]) + for i, leaf := range leafs { + stats.Leafs.N = i + progressWriter.Set(stats) + for j, itemKey := range tree.forrest.graph.Nodes[leaf].Items { + newPtr := keyio.ItemPtr{ + Node: leaf, + Idx: j, + } + if oldPtr, exists := index.Load(itemKey); !exists { + index.Store(itemKey, newPtr) + stats.NumItems++ + } else { + if tree.ShouldReplace(oldPtr.Node, newPtr.Node) { + index.Store(itemKey, newPtr) + } + stats.NumDups++ + } + progressWriter.Set(stats) + } + } + if stats.Leafs.N > 0 { + stats.Leafs.N = len(leafs) + progressWriter.Set(stats) + progressWriter.Done() + } + + return index + }) +} + +func (tree *RebuiltTree) ShouldReplace(oldNode, newNode btrfsvol.LogicalAddr) bool { + oldDist, _ := tree.COWDistance(tree.forrest.graph.Nodes[oldNode].Owner) + newDist, _ := tree.COWDistance(tree.forrest.graph.Nodes[newNode].Owner) + switch { + case newDist < oldDist: + // Replace the old one with the new lower-dist one. + return true + case newDist > oldDist: + // Retain the old lower-dist one. 
+ return false + default: + oldGen := tree.forrest.graph.Nodes[oldNode].Generation + newGen := tree.forrest.graph.Nodes[newNode].Generation + switch { + case newGen > oldGen: + // Replace the old one with the new higher-gen one. + return true + case newGen < oldGen: + // Retain the old higher-gen one. + return false + default: + // TODO: This is a panic because I'm not really sure what the + // best way to handle this is, and so if this happens I want the + // program to crash and force me to figure out how to handle it. + panic(fmt.Errorf("dup nodes in tree=%v: old=%v=%v ; new=%v=%v", + tree.ID, + oldNode, tree.forrest.graph.Nodes[oldNode], + newNode, tree.forrest.graph.Nodes[newNode])) + } + } +} + +// .AddRoot() ////////////////////////////////////////////////////////////////////////////////////////////////////////// + +type rootStats struct { + Leafs textui.Portion[int] + AddedLeafs int + AddedItems int +} + +func (s rootStats) String() string { + return textui.Sprintf("%v (added %v leafs, added %v items)", + s.Leafs, s.AddedLeafs, s.AddedItems) +} + +// AddRoot adds an additional root node to the tree. It is useful to +// call .AddRoot() to re-attach part of the tree that has been broken +// off. +func (tree *RebuiltTree) AddRoot(ctx context.Context, rootNode btrfsvol.LogicalAddr) { + tree.mu.Lock() + defer tree.mu.Unlock() + ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-root", fmt.Sprintf("tree=%v rootNode=%v", tree.ID, rootNode)) + dlog.Info(ctx, "adding root...") + + leafToRoots := tree.leafToRoots(ctx) + + var stats rootStats + stats.Leafs.D = len(leafToRoots) + progressWriter := textui.NewProgress[rootStats](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) + for i, leaf := range maps.SortedKeys(leafToRoots) { + stats.Leafs.N = i + progressWriter.Set(stats) + + if tree.Roots.HasAny(leafToRoots[leaf]) || !leafToRoots[leaf].Has(rootNode) { + continue + } + + stats.AddedLeafs++ + progressWriter.Set(stats) + + for _, itemKey := range tree.forrest.graph.Nodes[leaf].Items { + tree.forrest.cb.AddedItem(ctx, tree.ID, itemKey) + stats.AddedItems++ + progressWriter.Set(stats) + } + } + stats.Leafs.N = len(leafToRoots) + progressWriter.Set(stats) + progressWriter.Done() + + tree.Roots.Insert(rootNode) + tree.forrest.incItems.Delete(tree.ID) // force re-gen + tree.forrest.excItems.Delete(tree.ID) // force re-gen + + if (tree.ID == btrfsprim.ROOT_TREE_OBJECTID || tree.ID == btrfsprim.UUID_TREE_OBJECTID) && stats.AddedItems > 0 { + tree.forrest.flushNegativeCache(ctx) + } + tree.forrest.cb.AddedRoot(ctx, tree.ID, rootNode) +} + +// main public API ///////////////////////////////////////////////////////////////////////////////////////////////////// + +// COWDistance returns how many COW-snapshots down the 'tree' is from +// the 'parent'. +func (tree *RebuiltTree) COWDistance(parentID btrfsprim.ObjID) (dist int, ok bool) { + for { + if parentID == tree.ID { + return dist, true + } + if tree.Parent == nil { + return 0, false + } + tree = tree.Parent + dist++ + } +} + +// ReadItem reads an item from a tree. +func (tree *RebuiltTree) ReadItem(ctx context.Context, key btrfsprim.Key) btrfsitem.Item { + ptr, ok := tree.Items(ctx).Load(key) + if !ok { + panic(fmt.Errorf("should not happen: btrees.RebuiltTree.ReadItem called for not-included key: %v", key)) + } + return tree.forrest.keyIO.ReadItem(ctx, ptr) +} + +// LeafToRoots returns the list of potential roots (to pass to +// .AddRoot) that include a given leaf-node. 
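+//
+// A rough usage sketch (illustrative only): any single root from the
+// returned set is enough to make 'leaf' reachable, though a real
+// caller would want to choose among the candidates more carefully:
+//
+//	if roots := tree.LeafToRoots(ctx, leaf); len(roots) > 0 {
+//		for root := range roots {
+//			tree.AddRoot(ctx, root)
+//			break
+//		}
+//	}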
+func (tree *RebuiltTree) LeafToRoots(ctx context.Context, leaf btrfsvol.LogicalAddr) containers.Set[btrfsvol.LogicalAddr] { + if tree.forrest.graph.Nodes[leaf].Level != 0 { + panic(fmt.Errorf("should not happen: (tree=%v).LeafToRoots(leaf=%v): not a leaf", + tree.ID, leaf)) + } + tree.mu.RLock() + defer tree.mu.RUnlock() + ret := make(containers.Set[btrfsvol.LogicalAddr]) + for root := range tree.leafToRoots(ctx)[leaf] { + if tree.Roots.Has(root) { + panic(fmt.Errorf("should not happen: (tree=%v).LeafToRoots(leaf=%v): tree contains root=%v but not leaf", + tree.ID, leaf, root)) + } + ret.Insert(root) + } + if len(ret) == 0 { + return nil + } + return ret +} diff --git a/lib/btrfsutil/skinny_paths.go b/lib/btrfsutil/skinny_paths.go new file mode 100644 index 0000000..1695990 --- /dev/null +++ b/lib/btrfsutil/skinny_paths.go @@ -0,0 +1,146 @@ +// Copyright (C) 2022-2023 Luke Shumaker +// +// SPDX-License-Identifier: GPL-2.0-or-later + +package btrfsutil + +import ( + "fmt" + + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" + "git.lukeshu.com/btrfs-progs-ng/lib/containers" + "git.lukeshu.com/btrfs-progs-ng/lib/diskio" + "git.lukeshu.com/btrfs-progs-ng/lib/textui" +) + +type skinnyItem struct { + Node btrfsvol.LogicalAddr + Item int +} + +type SkinnyPath struct { + Root btrfsvol.LogicalAddr + Items []int +} + +type SkinnyPathArena struct { + FS diskio.File[btrfsvol.LogicalAddr] + SB btrfstree.Superblock + + fatRoots map[btrfsvol.LogicalAddr]btrfstree.TreePathElem + fatItems containers.ARCache[skinnyItem, btrfstree.TreePathElem] +} + +func (a *SkinnyPathArena) init() { + if a.fatRoots == nil { + a.fatRoots = make(map[btrfsvol.LogicalAddr]btrfstree.TreePathElem) + a.fatItems.MaxLen = textui.Tunable(128 * 1024) + } +} + +func (a *SkinnyPathArena) getItem(parent btrfstree.TreePath, itemIdx int) (btrfstree.TreePathElem, error) { + if itemIdx < 0 { + panic("should not happen") + } + + a.init() + + ret, ok := a.fatItems.Load(skinnyItem{ + Node: parent.Node(-1).ToNodeAddr, + Item: itemIdx, + }) + if ok { + return ret, nil + } + + node, err := btrfstree.ReadNode(a.FS, a.SB, parent.Node(-1).ToNodeAddr, btrfstree.NodeExpectations{}) + defer btrfstree.FreeNodeRef(node) + if err != nil { + return btrfstree.TreePathElem{}, err + } + if node.Data.Head.Level > 0 { + if itemIdx >= len(node.Data.BodyInternal) { + panic("should not happen") + } + for i, item := range node.Data.BodyInternal { + toMaxKey := parent.Node(-1).ToMaxKey + if i+1 < len(node.Data.BodyInternal) { + toMaxKey = node.Data.BodyInternal[i+1].Key.Mm() + } + elem := btrfstree.TreePathElem{ + FromTree: node.Data.Head.Owner, + FromItemIdx: i, + ToNodeAddr: item.BlockPtr, + ToNodeGeneration: item.Generation, + ToNodeLevel: node.Data.Head.Level - 1, + ToKey: item.Key, + ToMaxKey: toMaxKey, + } + a.fatItems.Store(skinnyItem{Node: parent.Node(-1).ToNodeAddr, Item: i}, elem) + if i == itemIdx { + ret = elem + } + } + } else { + if itemIdx >= len(node.Data.BodyLeaf) { + panic("should not happen") + } + for i, item := range node.Data.BodyLeaf { + elem := btrfstree.TreePathElem{ + FromTree: node.Data.Head.Owner, + FromItemIdx: i, + ToKey: item.Key, + ToMaxKey: item.Key, + } + a.fatItems.Store(skinnyItem{Node: parent.Node(-1).ToNodeAddr, Item: i}, elem) + if i == itemIdx { + ret = elem + } + } + } + + return ret, nil +} + +func (a *SkinnyPathArena) Deflate(fat btrfstree.TreePath) SkinnyPath { + a.init() + + var ret SkinnyPath + + var prevNode btrfsvol.LogicalAddr + for i, elem := range fat { + if 
i == 0 { + a.fatRoots[elem.ToNodeAddr] = elem + ret.Root = elem.ToNodeAddr + } else { + a.fatItems.Store(skinnyItem{Node: prevNode, Item: elem.FromItemIdx}, elem) + ret.Items = append(ret.Items, elem.FromItemIdx) + } + prevNode = elem.ToNodeAddr + } + return ret +} + +func (a *SkinnyPathArena) Inflate(skinny SkinnyPath) btrfstree.TreePath { + a.init() + + ret := make(btrfstree.TreePath, 0, 1+len(skinny.Items)) + + root, ok := a.fatRoots[skinny.Root] + if !ok { + panic(fmt.Errorf("SkinnyPathArena.Inflate: no stored TreePathElem for root->%v", + skinny.Root)) + } + ret = append(ret, root) + + for _, itemIdx := range skinny.Items { + elem, err := a.getItem(ret, itemIdx) + if err != nil { + panic(err) + } + ret = append(ret, elem) + } + + return ret +} diff --git a/lib/btrfsutil/walk.go b/lib/btrfsutil/walk.go new file mode 100644 index 0000000..355976a --- /dev/null +++ b/lib/btrfsutil/walk.go @@ -0,0 +1,97 @@ +// Copyright (C) 2022 Luke Shumaker +// +// SPDX-License-Identifier: GPL-2.0-or-later + +package btrfsutil + +import ( + "context" + "fmt" + + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" +) + +type WalkError struct { + TreeName string + Err *btrfstree.TreeError +} + +func (e *WalkError) Unwrap() error { return e.Err } + +func (e *WalkError) Error() string { + return fmt.Sprintf("%v: %v", e.TreeName, e.Err) +} + +type WalkAllTreesHandler struct { + Err func(*WalkError) + // Callbacks for entire trees + PreTree func(name string, id btrfsprim.ObjID) + PostTree func(name string, id btrfsprim.ObjID) + // Callbacks for nodes or smaller + btrfstree.TreeWalkHandler +} + +// WalkAllTrees walks all trees in a *btrfs.FS. Rather than returning +// an error, it calls errCb each time an error is encountered. The +// error will always be of type WalkError. 
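+//
+// A rough usage sketch (illustrative only; fs may be any
+// btrfstree.TreeOperator, such as the wrapper returned by
+// NewBrokenTrees): count the items and errors in every tree:
+//
+//	var itemCnt, errCnt int
+//	WalkAllTrees(ctx, fs, WalkAllTreesHandler{
+//		Err: func(*WalkError) { errCnt++ },
+//		TreeWalkHandler: btrfstree.TreeWalkHandler{
+//			Item: func(btrfstree.TreePath, btrfstree.Item) error {
+//				itemCnt++
+//				return nil
+//			},
+//		},
+//	})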
+func WalkAllTrees(ctx context.Context, fs btrfstree.TreeOperator, cbs WalkAllTreesHandler) { + var treeName string + + trees := []struct { + Name string + ID btrfsprim.ObjID + }{ + { + Name: "root tree", + ID: btrfsprim.ROOT_TREE_OBJECTID, + }, + { + Name: "chunk tree", + ID: btrfsprim.CHUNK_TREE_OBJECTID, + }, + { + Name: "log tree", + ID: btrfsprim.TREE_LOG_OBJECTID, + }, + { + Name: "block group tree", + ID: btrfsprim.BLOCK_GROUP_TREE_OBJECTID, + }, + } + origItem := cbs.Item + cbs.Item = func(path btrfstree.TreePath, item btrfstree.Item) error { + if item.Key.ItemType == btrfsitem.ROOT_ITEM_KEY { + trees = append(trees, struct { + Name string + ID btrfsprim.ObjID + }{ + Name: fmt.Sprintf("tree %v (via %v %v)", + item.Key.ObjectID.Format(0), treeName, path), + ID: item.Key.ObjectID, + }) + } + if origItem != nil { + return origItem(path, item) + } + return nil + } + + for i := 0; i < len(trees); i++ { + tree := trees[i] + treeName = tree.Name + if cbs.PreTree != nil { + cbs.PreTree(treeName, tree.ID) + } + fs.TreeWalk( + ctx, + tree.ID, + func(err *btrfstree.TreeError) { cbs.Err(&WalkError{TreeName: treeName, Err: err}) }, + cbs.TreeWalkHandler, + ) + if cbs.PostTree != nil { + cbs.PostTree(treeName, tree.ID) + } + } +} -- cgit v1.2.3-54-g00ecf From 1be85fecedebe9ea37b910a15a5c45dd2e57649d Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Sun, 12 Mar 2023 15:17:39 -0600 Subject: Get it to compile with the renamed files #!/bin/bash set -e git ls-files :*.go :!tools/| while read -r file; do pkgname=${file%/*.go} pkgname=${pkgname##*/} if [[ "$pkgname" == btrfs-rec ]]; then pkgname=main fi sed -i "s/^package [^_]*/package ${pkgname}/" "$file" done # btrfsutil #################################################################### gofmt -w -r 'rebuildnodes -> rebuildtrees' cmd lib gofmt -w -r 'btrees -> btrfsutil' cmd lib gofmt -w -r 'keyio -> btrfsutil' cmd lib sed -i 's/func New/func NewGraph/' lib/btrfsutil/graph.go gofmt -w -r 'graph.New -> btrfsutil.NewGraph' cmd lib gofmt -w -r 'graph.Graph -> btrfsutil.Graph' cmd lib sed -i -e '/\/graph"/d' -e 's/pkggraph\.//' lib/btrfsutil/rebuilt_forrest.go gofmt -w -r 'btrfsutil.BrokenForrest -> BrokenForrest ' lib/btrfsutil gofmt -w -r 'btrfsutil.Handle -> Handle ' lib/btrfsutil gofmt -w -r 'btrfsutil.Graph -> Graph ' lib/btrfsutil gofmt -w -r 'btrfsutil.ItemPtr -> ItemPtr ' lib/btrfsutil gofmt -w -r 'Handle -> KeyIO' lib/btrfsutil gofmt -w -r 'btrfsutil.Handle -> btrfsutil.KeyIO' cmd/btrfs-rec/inspect/rebuildtrees/ gofmt -w -r 'NewHandle -> NewKeyIO' cmd lib # rebuildmappings ############################################################## gofmt -w -r 'btrfsinspect.DumpTrees -> dumptrees.DumpTrees' cmd lib gofmt -w -r 'btrfsinspect.MountRO -> mount.MountRO' cmd lib gofmt -w -r 'btrfsinspect.ScanDevices -> rebuildmappings.ScanDevices' cmd lib gofmt -w -r 'btrfsinspect.ScanDevicesResult -> rebuildmappings.ScanDevicesResult' cmd lib gofmt -w -r 'btrfsinspect.SysExtentCSum -> rebuildmappings.SysExtentCSum' cmd lib gofmt -w -r 'rebuildmappings.IndexAll -> IndexAll ' cmd/btrfs-rec/inspect/rebuildmappings gofmt -w -r 'rebuildmappings.ScanDevicesResult -> ScanDevicesResult ' cmd/btrfs-rec/inspect/rebuildmappings gofmt -w -r 'rebuildmappings.SysExtentCSum -> SysExtentCSum ' cmd/btrfs-rec/inspect/rebuildmappings # btrfscheck ################################################################### sed -i -e 's/func handle/func Handle/' lib/btrfscheck/graph.go sed -i 's/handle/btrfscheck.Handle/g' cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go gofmt -w -r 'fsErr -> 
FSErr ' lib/btrfscheck cmd/btrfs-rec/inspect/rebuildtrees gofmt -w -r 'want -> Want ' lib/btrfscheck cmd/btrfs-rec/inspect/rebuildtrees gofmt -w -r 'wantOff -> WantOff ' lib/btrfscheck cmd/btrfs-rec/inspect/rebuildtrees gofmt -w -r 'wantDirIndex -> WantDirIndex ' lib/btrfscheck cmd/btrfs-rec/inspect/rebuildtrees gofmt -w -r 'wantCSum -> WantCSum ' lib/btrfscheck cmd/btrfs-rec/inspect/rebuildtrees gofmt -w -r 'wantFileExt -> WantFileExt ' lib/btrfscheck cmd/btrfs-rec/inspect/rebuildtrees # generic imports ############################################################## replace() { git grep -l "$1"|xargs -r sed -i "s,$1,$2,g" } replace 'lib/btrfsprogs/btrfsinspect/rebuildmappings"' 'cmd/btrfs-rec/inspect/rebuildmappings"' replace 'lib/btrfsprogs/btrfsinspect/rebuildnodes"' 'cmd/btrfs-rec/inspect/rebuildtrees"' replace 'lib/btrfsprogs/btrfsutil"' 'lib/btrfsutil"' goimports -w cmd lib ./tools/bin/golangci-lint run --fix ./... And then touch-up copyright statements by hand. --- cmd/btrfs-rec/inspect/dumptrees/print_tree.go | 2 +- cmd/btrfs-rec/inspect/mount/mount.go | 4 +- cmd/btrfs-rec/inspect/rebuildmappings/process.go | 3 +- .../inspect/rebuildmappings/process_blockgroups.go | 5 +- .../rebuildmappings/process_sums_logical.go | 13 ++- .../rebuildmappings/process_sums_physical.go | 3 +- cmd/btrfs-rec/inspect/rebuildmappings/scan.go | 2 +- cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go | 23 +++-- .../inspect/rebuildtrees/rebuild_treecb.go | 6 +- .../inspect/rebuildtrees/rebuild_wantcb.go | 50 +++++----- .../inspect/rebuildtrees/rebuild_wanttyp.go | 2 +- cmd/btrfs-rec/inspect/rebuildtrees/scan.go | 21 ++--- cmd/btrfs-rec/inspect/rebuildtrees/util.go | 8 +- cmd/btrfs-rec/inspect_dumptrees.go | 6 +- cmd/btrfs-rec/inspect_lsfiles.go | 2 +- cmd/btrfs-rec/inspect_lstrees.go | 8 +- cmd/btrfs-rec/inspect_mount.go | 6 +- cmd/btrfs-rec/inspect_rebuildmappings.go | 5 +- cmd/btrfs-rec/inspect_rebuildtrees.go | 10 +- cmd/btrfs-rec/inspect_scandevices.go | 4 +- cmd/btrfs-rec/inspect_spewitems.go | 2 +- cmd/btrfs-rec/main.go | 2 +- lib/btrfscheck/graph.go | 104 ++++++++++----------- lib/btrfsutil/graph.go | 4 +- lib/btrfsutil/graph_loops.go | 4 +- lib/btrfsutil/nestedlock.go | 2 +- lib/btrfsutil/print_addrspace.go | 2 +- lib/btrfsutil/rebuilt_forrest.go | 10 +- lib/btrfsutil/rebuilt_readitem.go | 19 ++-- lib/btrfsutil/rebuilt_tree.go | 15 ++- 30 files changed, 168 insertions(+), 179 deletions(-) (limited to 'lib/btrfsutil') diff --git a/cmd/btrfs-rec/inspect/dumptrees/print_tree.go b/cmd/btrfs-rec/inspect/dumptrees/print_tree.go index 240c72f..676306a 100644 --- a/cmd/btrfs-rec/inspect/dumptrees/print_tree.go +++ b/cmd/btrfs-rec/inspect/dumptrees/print_tree.go @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: GPL-2.0-or-later -package btrfsinspect +package dumptrees import ( "context" diff --git a/cmd/btrfs-rec/inspect/mount/mount.go b/cmd/btrfs-rec/inspect/mount/mount.go index 0ac8497..7e68ae6 100644 --- a/cmd/btrfs-rec/inspect/mount/mount.go +++ b/cmd/btrfs-rec/inspect/mount/mount.go @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: GPL-2.0-or-later -package btrfsinspect +package mount import ( "context" @@ -25,7 +25,7 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsutil" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfsutil" "git.lukeshu.com/btrfs-progs-ng/lib/containers" "git.lukeshu.com/btrfs-progs-ng/lib/maps" "git.lukeshu.com/btrfs-progs-ng/lib/slices" 
diff --git a/cmd/btrfs-rec/inspect/rebuildmappings/process.go b/cmd/btrfs-rec/inspect/rebuildmappings/process.go index cdf5e5a..5f41f15 100644 --- a/cmd/btrfs-rec/inspect/rebuildmappings/process.go +++ b/cmd/btrfs-rec/inspect/rebuildmappings/process.go @@ -12,7 +12,6 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect" "git.lukeshu.com/btrfs-progs-ng/lib/containers" "git.lukeshu.com/btrfs-progs-ng/lib/maps" "git.lukeshu.com/btrfs-progs-ng/lib/textui" @@ -26,7 +25,7 @@ func getNodeSize(fs *btrfs.FS) (btrfsvol.AddrDelta, error) { return btrfsvol.AddrDelta(sb.NodeSize), nil } -func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.ScanDevicesResult) error { +func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults ScanDevicesResult) error { nodeSize, err := getNodeSize(fs) if err != nil { return err diff --git a/cmd/btrfs-rec/inspect/rebuildmappings/process_blockgroups.go b/cmd/btrfs-rec/inspect/rebuildmappings/process_blockgroups.go index 0e2d5a0..f8d2337 100644 --- a/cmd/btrfs-rec/inspect/rebuildmappings/process_blockgroups.go +++ b/cmd/btrfs-rec/inspect/rebuildmappings/process_blockgroups.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 Luke Shumaker +// Copyright (C) 2022-2023 Luke Shumaker // // SPDX-License-Identifier: GPL-2.0-or-later @@ -9,7 +9,6 @@ import ( "sort" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect" "git.lukeshu.com/btrfs-progs-ng/lib/containers" "git.lukeshu.com/btrfs-progs-ng/lib/maps" ) @@ -20,7 +19,7 @@ type BlockGroup struct { Flags btrfsvol.BlockGroupFlags } -func DedupBlockGroups(scanResults btrfsinspect.ScanDevicesResult) (map[btrfsvol.LogicalAddr]BlockGroup, error) { +func DedupBlockGroups(scanResults ScanDevicesResult) (map[btrfsvol.LogicalAddr]BlockGroup, error) { // Dedup bgsSet := make(containers.Set[BlockGroup]) for _, devResults := range scanResults { diff --git a/cmd/btrfs-rec/inspect/rebuildmappings/process_sums_logical.go b/cmd/btrfs-rec/inspect/rebuildmappings/process_sums_logical.go index 7c02d05..2cdabb7 100644 --- a/cmd/btrfs-rec/inspect/rebuildmappings/process_sums_logical.go +++ b/cmd/btrfs-rec/inspect/rebuildmappings/process_sums_logical.go @@ -15,13 +15,12 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect" "git.lukeshu.com/btrfs-progs-ng/lib/containers" "git.lukeshu.com/btrfs-progs-ng/lib/slices" ) -func ExtractLogicalSums(ctx context.Context, scanResults btrfsinspect.ScanDevicesResult) SumRunWithGaps[btrfsvol.LogicalAddr] { - var records []btrfsinspect.SysExtentCSum +func ExtractLogicalSums(ctx context.Context, scanResults ScanDevicesResult) SumRunWithGaps[btrfsvol.LogicalAddr] { + var records []SysExtentCSum for _, devResults := range scanResults { records = append(records, devResults.FoundExtentCSums...) } @@ -53,10 +52,10 @@ func ExtractLogicalSums(ctx context.Context, scanResults btrfsinspect.ScanDevice // "AAAAAAA" shouldn't be present, and if we just discard "BBBBBBBB" // because it conflicts with "CCCCCCC", then we would erroneously // include "AAAAAAA". 
- addrspace := new(containers.RBTree[btrfsinspect.SysExtentCSum]) + addrspace := new(containers.RBTree[SysExtentCSum]) for _, newRecord := range records { for { - conflict := addrspace.Search(func(oldRecord btrfsinspect.SysExtentCSum) int { + conflict := addrspace.Search(func(oldRecord SysExtentCSum) int { switch { case newRecord.Sums.Addr.Add(newRecord.Sums.Size()) <= oldRecord.Sums.Addr: // 'newRecord' is wholly to the left of 'oldRecord'. @@ -128,7 +127,7 @@ func ExtractLogicalSums(ctx context.Context, scanResults btrfsinspect.ScanDevice case newRecord.Sums.Addr.Add(newRecord.Sums.Size()) > overlapEnd: suffix = newRecord.Sums.Sums[newOverlapEnd:] } - unionRecord := btrfsinspect.SysExtentCSum{ + unionRecord := SysExtentCSum{ Generation: oldRecord.Generation, Sums: btrfsitem.ExtentCSum{ SumRun: btrfssum.SumRun[btrfsvol.LogicalAddr]{ @@ -148,7 +147,7 @@ func ExtractLogicalSums(ctx context.Context, scanResults btrfsinspect.ScanDevice var flattened SumRunWithGaps[btrfsvol.LogicalAddr] var curAddr btrfsvol.LogicalAddr var curSums strings.Builder - addrspace.Range(func(node *containers.RBNode[btrfsinspect.SysExtentCSum]) bool { + addrspace.Range(func(node *containers.RBNode[SysExtentCSum]) bool { curEnd := curAddr + (btrfsvol.LogicalAddr(curSums.Len()/sumSize) * btrfssum.BlockSize) if node.Value.Sums.Addr != curEnd { if curSums.Len() > 0 { diff --git a/cmd/btrfs-rec/inspect/rebuildmappings/process_sums_physical.go b/cmd/btrfs-rec/inspect/rebuildmappings/process_sums_physical.go index da22fbf..392ded9 100644 --- a/cmd/btrfs-rec/inspect/rebuildmappings/process_sums_physical.go +++ b/cmd/btrfs-rec/inspect/rebuildmappings/process_sums_physical.go @@ -13,11 +13,10 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect" "git.lukeshu.com/btrfs-progs-ng/lib/maps" ) -func ExtractPhysicalSums(scanResults btrfsinspect.ScanDevicesResult) map[btrfsvol.DeviceID]btrfssum.SumRun[btrfsvol.PhysicalAddr] { +func ExtractPhysicalSums(scanResults ScanDevicesResult) map[btrfsvol.DeviceID]btrfssum.SumRun[btrfsvol.PhysicalAddr] { ret := make(map[btrfsvol.DeviceID]btrfssum.SumRun[btrfsvol.PhysicalAddr], len(scanResults)) for devID, devResults := range scanResults { ret[devID] = devResults.Checksums diff --git a/cmd/btrfs-rec/inspect/rebuildmappings/scan.go b/cmd/btrfs-rec/inspect/rebuildmappings/scan.go index d54be71..b67beaf 100644 --- a/cmd/btrfs-rec/inspect/rebuildmappings/scan.go +++ b/cmd/btrfs-rec/inspect/rebuildmappings/scan.go @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: GPL-2.0-or-later -package btrfsinspect +package rebuildmappings import ( "context" diff --git a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go index cf334a0..6492d9c 100644 --- a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go +++ b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: GPL-2.0-or-later -package rebuildnodes +package rebuildtrees import ( "context" @@ -14,15 +14,14 @@ import ( "github.com/datawire/dlib/dgroup" "github.com/datawire/dlib/dlog" + "git.lukeshu.com/btrfs-progs-ng/cmd/btrfs-rec/inspect/rebuildmappings" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" - 
"git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/btrees" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfscheck" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfsutil" "git.lukeshu.com/btrfs-progs-ng/lib/containers" "git.lukeshu.com/btrfs-progs-ng/lib/maps" "git.lukeshu.com/btrfs-progs-ng/lib/textui" @@ -46,10 +45,10 @@ func (o keyAndTree) String() string { type rebuilder struct { sb btrfstree.Superblock - graph graph.Graph - keyIO *keyio.Handle + graph btrfsutil.Graph + keyIO *btrfsutil.KeyIO - rebuilt *btrees.RebuiltForrest + rebuilt *btrfsutil.RebuiltForrest curKey struct { TreeID btrfsprim.ObjID @@ -75,7 +74,7 @@ type Rebuilder interface { ListRoots(context.Context) map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr] } -func NewRebuilder(ctx context.Context, fs *btrfs.FS, nodeScanResults btrfsinspect.ScanDevicesResult) (Rebuilder, error) { +func NewRebuilder(ctx context.Context, fs *btrfs.FS, nodeScanResults rebuildmappings.ScanDevicesResult) (Rebuilder, error) { ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.step", "read-fs-data") sb, nodeGraph, keyIO, err := ScanDevices(ctx, fs, nodeScanResults) // ScanDevices does its own logging if err != nil { @@ -87,7 +86,7 @@ func NewRebuilder(ctx context.Context, fs *btrfs.FS, nodeScanResults btrfsinspec graph: nodeGraph, keyIO: keyIO, } - o.rebuilt = btrees.NewRebuiltForrest(sb, nodeGraph, keyIO, o) + o.rebuilt = btrfsutil.NewRebuiltForrest(sb, nodeGraph, keyIO, o) return o, nil } @@ -214,7 +213,7 @@ func (o *rebuilder) processAddedItemQueue(ctx context.Context) error { progress.NumAugments = o.numAugments progress.NumAugmentTrees = len(o.augmentQueue) progressWriter.Set(progress) - } else if !handleWouldBeNoOp(key.ItemType) { + } else if !btrfscheck.HandleWouldBeNoOp(key.ItemType) { o.settledItemQueue.Insert(key) } } @@ -284,7 +283,7 @@ func (o *rebuilder) processSettledItemQueue(ctx context.Context) error { ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.process.item", item.keyAndTree) o.curKey.TreeID = item.TreeID o.curKey.Key.Val = item.Key - handleItem(o, ctx, item.TreeID, btrfstree.Item{ + btrfscheck.HandleItem(o, ctx, item.TreeID, btrfstree.Item{ Key: item.Key, Body: item.Body, }) diff --git a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_treecb.go b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_treecb.go index 492436b..d58689c 100644 --- a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_treecb.go +++ b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_treecb.go @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: GPL-2.0-or-later -package rebuildnodes +package rebuildtrees import ( "context" @@ -50,7 +50,7 @@ func (o *rebuilder) LookupRoot(ctx context.Context, tree btrfsprim.ObjID) (offse case *btrfsitem.Root: return btrfsprim.Generation(foundKey.Offset), *itemBody, true case *btrfsitem.Error: - o.fsErr(ctx, fmt.Errorf("error decoding item: %v: %w", foundKey, itemBody.Err)) + o.FSErr(ctx, fmt.Errorf("error decoding item: %v: %w", foundKey, itemBody.Err)) return 0, btrfsitem.Root{}, false default: // This is a panic because the item decoder should not emit ROOT_ITEM items as anything but @@ -76,7 +76,7 @@ func (o *rebuilder) LookupUUID(ctx context.Context, uuid btrfsprim.UUID) (id btr case *btrfsitem.UUIDMap: return itemBody.ObjID, true case *btrfsitem.Error: - o.fsErr(ctx, fmt.Errorf("error 
decoding item: %v: %w", wantKey, itemBody.Err)) + o.FSErr(ctx, fmt.Errorf("error decoding item: %v: %w", wantKey, itemBody.Err)) return 0, false default: // This is a panic because the item decoder should not emit UUID_SUBVOL items as anything but diff --git a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wantcb.go b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wantcb.go index adf3cff..06be630 100644 --- a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wantcb.go +++ b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wantcb.go @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: GPL-2.0-or-later -package rebuildnodes +package rebuildtrees import ( "bytes" @@ -15,17 +15,17 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfsutil" "git.lukeshu.com/btrfs-progs-ng/lib/containers" ) // fsErr implements rebuildCallbacks. -func (o *rebuilder) fsErr(ctx context.Context, e error) { +func (o *rebuilder) FSErr(ctx context.Context, e error) { dlog.Errorf(ctx, "filesystem error: %v", e) } // want implements rebuildCallbacks. -func (o *rebuilder) want(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) { +func (o *rebuilder) Want(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) { wantKey := WantWithTree{ TreeID: treeID, Key: Want{ @@ -47,7 +47,7 @@ func (o *rebuilder) _want(ctx context.Context, wantKey WantWithTree) (key btrfsp // check if we already have it tgt := wantKey.Key.Key() - if key, _, ok := o.rebuilt.Tree(ctx, wantKey.TreeID).Items(ctx).Search(func(key btrfsprim.Key, _ keyio.ItemPtr) int { + if key, _, ok := o.rebuilt.Tree(ctx, wantKey.TreeID).Items(ctx).Search(func(key btrfsprim.Key, _ btrfsutil.ItemPtr) int { key.Offset = 0 return tgt.Compare(key) }); ok { @@ -61,11 +61,11 @@ func (o *rebuilder) _want(ctx context.Context, wantKey WantWithTree) (key btrfsp } wants := make(containers.Set[btrfsvol.LogicalAddr]) o.rebuilt.Tree(ctx, wantKey.TreeID).PotentialItems(ctx).Subrange( - func(k btrfsprim.Key, _ keyio.ItemPtr) int { + func(k btrfsprim.Key, _ btrfsutil.ItemPtr) int { k.Offset = 0 return tgt.Compare(k) }, - func(_ btrfsprim.Key, v keyio.ItemPtr) bool { + func(_ btrfsprim.Key, v btrfsutil.ItemPtr) bool { wants.InsertFrom(o.rebuilt.Tree(ctx, wantKey.TreeID).LeafToRoots(ctx, v.Node)) return true }) @@ -74,7 +74,7 @@ func (o *rebuilder) _want(ctx context.Context, wantKey WantWithTree) (key btrfsp } // wantOff implements rebuildCallbacks. 
-func (o *rebuilder) wantOff(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) { +func (o *rebuilder) WantOff(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) { wantKey := WantWithTree{ TreeID: treeID, Key: Want{ @@ -108,8 +108,8 @@ func (o *rebuilder) _wantOff(ctx context.Context, wantKey WantWithTree) (ok bool } wants := make(containers.Set[btrfsvol.LogicalAddr]) o.rebuilt.Tree(ctx, wantKey.TreeID).PotentialItems(ctx).Subrange( - func(k btrfsprim.Key, _ keyio.ItemPtr) int { return tgt.Compare(k) }, - func(_ btrfsprim.Key, v keyio.ItemPtr) bool { + func(k btrfsprim.Key, _ btrfsutil.ItemPtr) int { return tgt.Compare(k) }, + func(_ btrfsprim.Key, v btrfsutil.ItemPtr) bool { wants.InsertFrom(o.rebuilt.Tree(ctx, wantKey.TreeID).LeafToRoots(ctx, v.Node)) return true }) @@ -118,7 +118,7 @@ func (o *rebuilder) _wantOff(ctx context.Context, wantKey WantWithTree) (ok bool } // wantDirIndex implements rebuildCallbacks. -func (o *rebuilder) wantDirIndex(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, name []byte) { +func (o *rebuilder) WantDirIndex(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, name []byte) { wantKey := WantWithTree{ TreeID: treeID, Key: Want{ @@ -140,11 +140,11 @@ func (o *rebuilder) wantDirIndex(ctx context.Context, reason string, treeID btrf tgt := wantKey.Key.Key() found := false o.rebuilt.Tree(ctx, treeID).Items(ctx).Subrange( - func(key btrfsprim.Key, _ keyio.ItemPtr) int { + func(key btrfsprim.Key, _ btrfsutil.ItemPtr) int { key.Offset = 0 return tgt.Compare(key) }, - func(_ btrfsprim.Key, ptr keyio.ItemPtr) bool { + func(_ btrfsprim.Key, ptr btrfsutil.ItemPtr) bool { if itemName, ok := o.keyIO.Names[ptr]; ok && bytes.Equal(itemName, name) { found = true } @@ -161,11 +161,11 @@ func (o *rebuilder) wantDirIndex(ctx context.Context, reason string, treeID btrf } wants := make(containers.Set[btrfsvol.LogicalAddr]) o.rebuilt.Tree(ctx, treeID).PotentialItems(ctx).Subrange( - func(key btrfsprim.Key, _ keyio.ItemPtr) int { + func(key btrfsprim.Key, _ btrfsutil.ItemPtr) int { key.Offset = 0 return tgt.Compare(key) }, - func(_ btrfsprim.Key, ptr keyio.ItemPtr) bool { + func(_ btrfsprim.Key, ptr btrfsutil.ItemPtr) bool { if itemName, ok := o.keyIO.Names[ptr]; ok && bytes.Equal(itemName, name) { wants.InsertFrom(o.rebuilt.Tree(ctx, treeID).LeafToRoots(ctx, ptr.Node)) } @@ -176,10 +176,10 @@ func (o *rebuilder) wantDirIndex(ctx context.Context, reason string, treeID btrf func (o *rebuilder) _walkRange( ctx context.Context, - items *containers.SortedMap[btrfsprim.Key, keyio.ItemPtr], + items *containers.SortedMap[btrfsprim.Key, btrfsutil.ItemPtr], treeID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, beg, end uint64, - fn func(key btrfsprim.Key, ptr keyio.ItemPtr, beg, end uint64), + fn func(key btrfsprim.Key, ptr btrfsutil.ItemPtr, beg, end uint64), ) { min := btrfsprim.Key{ ObjectID: objID, @@ -192,7 +192,7 @@ func (o *rebuilder) _walkRange( Offset: end - 1, } items.Subrange( - func(runKey btrfsprim.Key, _ keyio.ItemPtr) int { + func(runKey btrfsprim.Key, _ btrfsutil.ItemPtr) int { switch { case min.Compare(runKey) < 0: return 1 @@ -202,14 +202,14 @@ func (o *rebuilder) _walkRange( return 0 } }, - func(runKey btrfsprim.Key, runPtr keyio.ItemPtr) bool { + func(runKey btrfsprim.Key, runPtr btrfsutil.ItemPtr) bool { runSizeAndErr, ok := o.keyIO.Sizes[runPtr] if !ok { 
panic(fmt.Errorf("should not happen: %v (%v) did not have a size recorded", runPtr, keyAndTree{TreeID: treeID, Key: runKey})) } if runSizeAndErr.Err != nil { - o.fsErr(ctx, fmt.Errorf("get size: %v (%v): %w", + o.FSErr(ctx, fmt.Errorf("get size: %v (%v): %w", runPtr, keyAndTree{TreeID: treeID, Key: runKey}, runSizeAndErr.Err)) return true @@ -273,7 +273,7 @@ func (o *rebuilder) _wantRange( ctx, o.rebuilt.Tree(ctx, treeID).Items(ctx), treeID, objID, typ, beg, end, - func(runKey btrfsprim.Key, _ keyio.ItemPtr, runBeg, runEnd uint64) { + func(runKey btrfsprim.Key, _ btrfsutil.ItemPtr, runBeg, runEnd uint64) { var overlappingGaps []*containers.RBNode[gap] gaps.Subrange( func(gap gap) int { @@ -324,7 +324,7 @@ func (o *rebuilder) _wantRange( ctx, potentialItems, treeID, objID, typ, gap.Beg, gap.End, - func(k btrfsprim.Key, v keyio.ItemPtr, runBeg, runEnd uint64) { + func(k btrfsprim.Key, v btrfsutil.ItemPtr, runBeg, runEnd uint64) { // TODO: This is dumb and greedy. if last < runBeg { // log an error @@ -353,7 +353,7 @@ func (o *rebuilder) _wantRange( // func implements rebuildCallbacks. // // interval is [beg, end) -func (o *rebuilder) wantCSum(ctx context.Context, reason string, inodeTree, inode btrfsprim.ObjID, beg, end btrfsvol.LogicalAddr) { +func (o *rebuilder) WantCSum(ctx context.Context, reason string, inodeTree, inode btrfsprim.ObjID, beg, end btrfsvol.LogicalAddr) { inodeWant := WantWithTree{ TreeID: inodeTree, Key: Want{ @@ -377,7 +377,7 @@ func (o *rebuilder) wantCSum(ctx context.Context, reason string, inodeTree, inod panic(fmt.Errorf("should not happen: INODE_ITEM did not have flags recorded")) } if inodeFlags.Err != nil { - o.fsErr(inodeCtx, inodeFlags.Err) + o.FSErr(inodeCtx, inodeFlags.Err) return } @@ -392,7 +392,7 @@ func (o *rebuilder) wantCSum(ctx context.Context, reason string, inodeTree, inod } // wantFileExt implements rebuildCallbacks. 
-func (o *rebuilder) wantFileExt(ctx context.Context, reason string, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) { +func (o *rebuilder) WantFileExt(ctx context.Context, reason string, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) { o._wantRange( ctx, reason, treeID, ino, btrfsprim.EXTENT_DATA_KEY, diff --git a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wanttyp.go b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wanttyp.go index 2b471fe..fa1ba98 100644 --- a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wanttyp.go +++ b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wanttyp.go @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: GPL-2.0-or-later -package rebuildnodes +package rebuildtrees import ( "context" diff --git a/cmd/btrfs-rec/inspect/rebuildtrees/scan.go b/cmd/btrfs-rec/inspect/rebuildtrees/scan.go index 17949ab..03261e0 100644 --- a/cmd/btrfs-rec/inspect/rebuildtrees/scan.go +++ b/cmd/btrfs-rec/inspect/rebuildtrees/scan.go @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: GPL-2.0-or-later -package rebuildnodes +package rebuildtrees import ( "context" @@ -10,22 +10,21 @@ import ( "github.com/datawire/dlib/dlog" + "git.lukeshu.com/btrfs-progs-ng/cmd/btrfs-rec/inspect/rebuildmappings" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfsutil" "git.lukeshu.com/btrfs-progs-ng/lib/containers" "git.lukeshu.com/btrfs-progs-ng/lib/maps" "git.lukeshu.com/btrfs-progs-ng/lib/textui" ) -func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.ScanDevicesResult) (btrfstree.Superblock, graph.Graph, *keyio.Handle, error) { +func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults rebuildmappings.ScanDevicesResult) (btrfstree.Superblock, btrfsutil.Graph, *btrfsutil.KeyIO, error) { dlog.Info(ctx, "Reading superblock...") sb, err := fs.Superblock() if err != nil { - return btrfstree.Superblock{}, graph.Graph{}, nil, err + return btrfstree.Superblock{}, btrfsutil.Graph{}, nil, err } dlog.Infof(ctx, "Reading node data from FS...") @@ -36,21 +35,21 @@ func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.Sca dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.read.substep", "read-nodes"), dlog.LogLevelInfo, textui.Tunable(1*time.Second)) - nodeGraph := graph.New(*sb) - keyIO := keyio.NewHandle(fs, *sb) + nodeGraph := btrfsutil.NewGraph(*sb) + keyIO := btrfsutil.NewKeyIO(fs, *sb) progressWriter.Set(stats) for _, devResults := range scanResults { for _, laddr := range maps.SortedKeys(devResults.FoundNodes) { if err := ctx.Err(); err != nil { - return btrfstree.Superblock{}, graph.Graph{}, nil, err + return btrfstree.Superblock{}, btrfsutil.Graph{}, nil, err } nodeRef, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](fs, *sb, laddr, btrfstree.NodeExpectations{ LAddr: containers.Optional[btrfsvol.LogicalAddr]{OK: true, Val: laddr}, }) if err != nil { btrfstree.FreeNodeRef(nodeRef) - return btrfstree.Superblock{}, graph.Graph{}, nil, err + return btrfstree.Superblock{}, btrfsutil.Graph{}, nil, err } nodeGraph.InsertNode(nodeRef) @@ -69,7 +68,7 @@ func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults btrfsinspect.Sca dlog.Info(ctx, "... 
done reading node data") if err := nodeGraph.FinalCheck(ctx, fs, *sb); err != nil { - return btrfstree.Superblock{}, graph.Graph{}, nil, err + return btrfstree.Superblock{}, btrfsutil.Graph{}, nil, err } keyIO.SetGraph(*nodeGraph) diff --git a/cmd/btrfs-rec/inspect/rebuildtrees/util.go b/cmd/btrfs-rec/inspect/rebuildtrees/util.go index 9d91f23..71caee0 100644 --- a/cmd/btrfs-rec/inspect/rebuildtrees/util.go +++ b/cmd/btrfs-rec/inspect/rebuildtrees/util.go @@ -1,16 +1,16 @@ -// Copyright (C) 2022 Luke Shumaker +// Copyright (C) 2022-2023 Luke Shumaker // // SPDX-License-Identifier: GPL-2.0-or-later -package rebuildnodes +package rebuildtrees import ( "golang.org/x/exp/constraints" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect" + "git.lukeshu.com/btrfs-progs-ng/cmd/btrfs-rec/inspect/rebuildmappings" ) -func countNodes(nodeScanResults btrfsinspect.ScanDevicesResult) int { +func countNodes(nodeScanResults rebuildmappings.ScanDevicesResult) int { var cnt int for _, devResults := range nodeScanResults { cnt += len(devResults.FoundNodes) diff --git a/cmd/btrfs-rec/inspect_dumptrees.go b/cmd/btrfs-rec/inspect_dumptrees.go index 4dc4bb2..efdc380 100644 --- a/cmd/btrfs-rec/inspect_dumptrees.go +++ b/cmd/btrfs-rec/inspect_dumptrees.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 Luke Shumaker +// Copyright (C) 2022-2023 Luke Shumaker // // SPDX-License-Identifier: GPL-2.0-or-later @@ -10,8 +10,8 @@ import ( "github.com/datawire/ocibuild/pkg/cliutil" "github.com/spf13/cobra" + "git.lukeshu.com/btrfs-progs-ng/cmd/btrfs-rec/inspect/dumptrees" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect" "git.lukeshu.com/btrfs-progs-ng/lib/textui" ) @@ -26,7 +26,7 @@ func init() { const version = "6.1.3" out := os.Stdout textui.Fprintf(out, "btrfs-progs v%v\n", version) - btrfsinspect.DumpTrees(cmd.Context(), out, fs) + dumptrees.DumpTrees(cmd.Context(), out, fs) return nil }, }) diff --git a/cmd/btrfs-rec/inspect_lsfiles.go b/cmd/btrfs-rec/inspect_lsfiles.go index 7ecaaf0..d8da481 100644 --- a/cmd/btrfs-rec/inspect_lsfiles.go +++ b/cmd/btrfs-rec/inspect_lsfiles.go @@ -20,7 +20,7 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsutil" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfsutil" "git.lukeshu.com/btrfs-progs-ng/lib/maps" "git.lukeshu.com/btrfs-progs-ng/lib/textui" ) diff --git a/cmd/btrfs-rec/inspect_lstrees.go b/cmd/btrfs-rec/inspect_lstrees.go index 69295f0..df2473c 100644 --- a/cmd/btrfs-rec/inspect_lstrees.go +++ b/cmd/btrfs-rec/inspect_lstrees.go @@ -12,13 +12,13 @@ import ( "github.com/datawire/ocibuild/pkg/cliutil" "github.com/spf13/cobra" + "git.lukeshu.com/btrfs-progs-ng/cmd/btrfs-rec/inspect/rebuildmappings" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsutil" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfsutil" "git.lukeshu.com/btrfs-progs-ng/lib/containers" "git.lukeshu.com/btrfs-progs-ng/lib/diskio" "git.lukeshu.com/btrfs-progs-ng/lib/maps" @@ -36,10 +36,10 @@ func init() { }, RunE: func(fs *btrfs.FS, cmd *cobra.Command, _ []string) error { ctx := 
cmd.Context() - var scanResults btrfsinspect.ScanDevicesResult + var scanResults rebuildmappings.ScanDevicesResult if scandevicesFilename != "" { var err error - scanResults, err = readJSONFile[btrfsinspect.ScanDevicesResult](ctx, scandevicesFilename) + scanResults, err = readJSONFile[rebuildmappings.ScanDevicesResult](ctx, scandevicesFilename) if err != nil { return err } diff --git a/cmd/btrfs-rec/inspect_mount.go b/cmd/btrfs-rec/inspect_mount.go index cff33b6..f3fda34 100644 --- a/cmd/btrfs-rec/inspect_mount.go +++ b/cmd/btrfs-rec/inspect_mount.go @@ -1,4 +1,4 @@ -// Copyright (C) 2022 Luke Shumaker +// Copyright (C) 2022-2023 Luke Shumaker // // SPDX-License-Identifier: GPL-2.0-or-later @@ -8,8 +8,8 @@ import ( "github.com/datawire/ocibuild/pkg/cliutil" "github.com/spf13/cobra" + "git.lukeshu.com/btrfs-progs-ng/cmd/btrfs-rec/inspect/mount" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect" ) func init() { @@ -21,7 +21,7 @@ func init() { Args: cliutil.WrapPositionalArgs(cobra.ExactArgs(1)), }, RunE: func(fs *btrfs.FS, cmd *cobra.Command, args []string) error { - return btrfsinspect.MountRO(cmd.Context(), fs, args[0], skipFileSums) + return mount.MountRO(cmd.Context(), fs, args[0], skipFileSums) }, } cmd.Command.Flags().BoolVar(&skipFileSums, "skip-filesums", false, diff --git a/cmd/btrfs-rec/inspect_rebuildmappings.go b/cmd/btrfs-rec/inspect_rebuildmappings.go index 51f0327..005fd5d 100644 --- a/cmd/btrfs-rec/inspect_rebuildmappings.go +++ b/cmd/btrfs-rec/inspect_rebuildmappings.go @@ -12,9 +12,8 @@ import ( "github.com/datawire/ocibuild/pkg/cliutil" "github.com/spf13/cobra" + "git.lukeshu.com/btrfs-progs-ng/cmd/btrfs-rec/inspect/rebuildmappings" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildmappings" ) func init() { @@ -36,7 +35,7 @@ func init() { ctx := cmd.Context() dlog.Infof(ctx, "Reading %q...", args[0]) - scanResults, err := readJSONFile[btrfsinspect.ScanDevicesResult](ctx, args[0]) + scanResults, err := readJSONFile[rebuildmappings.ScanDevicesResult](ctx, args[0]) if err != nil { return err } diff --git a/cmd/btrfs-rec/inspect_rebuildtrees.go b/cmd/btrfs-rec/inspect_rebuildtrees.go index ba1dcab..0b41dd9 100644 --- a/cmd/btrfs-rec/inspect_rebuildtrees.go +++ b/cmd/btrfs-rec/inspect_rebuildtrees.go @@ -15,9 +15,9 @@ import ( "github.com/datawire/ocibuild/pkg/cliutil" "github.com/spf13/cobra" + "git.lukeshu.com/btrfs-progs-ng/cmd/btrfs-rec/inspect/rebuildmappings" + "git.lukeshu.com/btrfs-progs-ng/cmd/btrfs-rec/inspect/rebuildtrees" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes" "git.lukeshu.com/btrfs-progs-ng/lib/textui" ) @@ -32,15 +32,15 @@ func init() { // This is wrapped in a func in order to *ensure* that `nodeScanResults` goes out of scope once // `rebuilder` has been created. - rebuilder, err := func(ctx context.Context) (rebuildnodes.Rebuilder, error) { + rebuilder, err := func(ctx context.Context) (rebuildtrees.Rebuilder, error) { dlog.Infof(ctx, "Reading %q...", args[0]) - nodeScanResults, err := readJSONFile[btrfsinspect.ScanDevicesResult](ctx, args[0]) + nodeScanResults, err := readJSONFile[rebuildmappings.ScanDevicesResult](ctx, args[0]) if err != nil { return nil, err } dlog.Infof(ctx, "... 
done reading %q", args[0]) - return rebuildnodes.NewRebuilder(ctx, fs, nodeScanResults) + return rebuildtrees.NewRebuilder(ctx, fs, nodeScanResults) }(ctx) if err != nil { return err diff --git a/cmd/btrfs-rec/inspect_scandevices.go b/cmd/btrfs-rec/inspect_scandevices.go index 3de35d0..0542d6a 100644 --- a/cmd/btrfs-rec/inspect_scandevices.go +++ b/cmd/btrfs-rec/inspect_scandevices.go @@ -12,8 +12,8 @@ import ( "github.com/datawire/ocibuild/pkg/cliutil" "github.com/spf13/cobra" + "git.lukeshu.com/btrfs-progs-ng/cmd/btrfs-rec/inspect/rebuildmappings" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect" ) func init() { @@ -25,7 +25,7 @@ func init() { RunE: func(fs *btrfs.FS, cmd *cobra.Command, _ []string) (err error) { ctx := cmd.Context() - results, err := btrfsinspect.ScanDevices(ctx, fs) + results, err := rebuildmappings.ScanDevices(ctx, fs) if err != nil { return err } diff --git a/cmd/btrfs-rec/inspect_spewitems.go b/cmd/btrfs-rec/inspect_spewitems.go index 3b79e8b..4abb2b0 100644 --- a/cmd/btrfs-rec/inspect_spewitems.go +++ b/cmd/btrfs-rec/inspect_spewitems.go @@ -14,7 +14,7 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsutil" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfsutil" "git.lukeshu.com/btrfs-progs-ng/lib/textui" ) diff --git a/cmd/btrfs-rec/main.go b/cmd/btrfs-rec/main.go index dc00dab..bf89fbc 100644 --- a/cmd/btrfs-rec/main.go +++ b/cmd/btrfs-rec/main.go @@ -15,7 +15,7 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/btrfs" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsutil" + "git.lukeshu.com/btrfs-progs-ng/lib/btrfsutil" "git.lukeshu.com/btrfs-progs-ng/lib/profile" "git.lukeshu.com/btrfs-progs-ng/lib/textui" ) diff --git a/lib/btrfscheck/graph.go b/lib/btrfscheck/graph.go index 710030c..c35baf8 100644 --- a/lib/btrfscheck/graph.go +++ b/lib/btrfscheck/graph.go @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: GPL-2.0-or-later -package rebuildnodes +package btrfscheck import ( "context" @@ -15,17 +15,17 @@ import ( ) type rebuildCallbacks interface { - fsErr(ctx context.Context, e error) - want(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) - wantOff(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) - wantDirIndex(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, name []byte) - wantCSum(ctx context.Context, reason string, inodeTree, inodeItem btrfsprim.ObjID, beg, end btrfsvol.LogicalAddr) // interval is [beg, end) - wantFileExt(ctx context.Context, reason string, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) + FSErr(ctx context.Context, e error) + Want(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) + WantOff(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) + WantDirIndex(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, name []byte) + WantCSum(ctx context.Context, reason string, inodeTree, inodeItem btrfsprim.ObjID, beg, end btrfsvol.LogicalAddr) // interval is [beg, end) + WantFileExt(ctx context.Context, reason string, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) } // handleWouldBeNoOp returns whether or not a 
call to handleItem for a // given item type would be a no-op. -func handleWouldBeNoOp(typ btrfsprim.ItemType) bool { +func HandleWouldBeNoOp(typ btrfsprim.ItemType) bool { switch typ { case // btrfsitem.Dev btrfsprim.DEV_ITEM_KEY, @@ -45,30 +45,30 @@ func handleWouldBeNoOp(typ btrfsprim.ItemType) bool { } } -func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, item btrfstree.Item) { +func HandleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, item btrfstree.Item) { // Notionally, just express the relationships shown in // https://btrfs.wiki.kernel.org/index.php/File:References.png (from the page // https://btrfs.wiki.kernel.org/index.php/Data_Structures ) switch body := item.Body.(type) { case *btrfsitem.BlockGroup: - o.want(ctx, "Chunk", + o.Want(ctx, "Chunk", btrfsprim.CHUNK_TREE_OBJECTID, body.ChunkObjectID, btrfsitem.CHUNK_ITEM_KEY) - o.wantOff(ctx, "FreeSpaceInfo", + o.WantOff(ctx, "FreeSpaceInfo", btrfsprim.FREE_SPACE_TREE_OBJECTID, item.Key.ObjectID, btrfsitem.FREE_SPACE_INFO_KEY, item.Key.Offset) case *btrfsitem.Chunk: - o.want(ctx, "owning Root", + o.Want(ctx, "owning Root", btrfsprim.ROOT_TREE_OBJECTID, body.Head.Owner, btrfsitem.ROOT_ITEM_KEY) case *btrfsitem.Dev: // nothing case *btrfsitem.DevExtent: - o.wantOff(ctx, "Chunk", + o.WantOff(ctx, "Chunk", body.ChunkTree, body.ChunkObjectID, btrfsitem.CHUNK_ITEM_KEY, @@ -77,7 +77,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, // nothing case *btrfsitem.DirEntry: // containing-directory - o.wantOff(ctx, "containing dir inode", + o.WantOff(ctx, "containing dir inode", treeID, item.Key.ObjectID, btrfsitem.INODE_ITEM_KEY, @@ -85,12 +85,12 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, // siblings switch item.Key.ItemType { case btrfsitem.DIR_ITEM_KEY: - o.wantDirIndex(ctx, "corresponding DIR_INDEX", + o.WantDirIndex(ctx, "corresponding DIR_INDEX", treeID, item.Key.ObjectID, body.Name) case btrfsitem.DIR_INDEX_KEY: - o.wantOff(ctx, "corresponding DIR_ITEM", + o.WantOff(ctx, "corresponding DIR_ITEM", treeID, item.Key.ObjectID, btrfsitem.DIR_ITEM_KEY, @@ -107,23 +107,23 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, if body.Location != (btrfsprim.Key{}) { switch body.Location.ItemType { case btrfsitem.INODE_ITEM_KEY: - o.wantOff(ctx, "item being pointed to", + o.WantOff(ctx, "item being pointed to", treeID, body.Location.ObjectID, body.Location.ItemType, body.Location.Offset) - o.wantOff(ctx, "backref from item being pointed to", + o.WantOff(ctx, "backref from item being pointed to", treeID, body.Location.ObjectID, btrfsitem.INODE_REF_KEY, uint64(item.Key.ObjectID)) case btrfsitem.ROOT_ITEM_KEY: - o.want(ctx, "Root of subvolume being pointed to", + o.Want(ctx, "Root of subvolume being pointed to", btrfsprim.ROOT_TREE_OBJECTID, body.Location.ObjectID, body.Location.ItemType) default: - o.fsErr(ctx, fmt.Errorf("DirEntry: unexpected .Location.ItemType=%v", body.Location.ItemType)) + o.FSErr(ctx, fmt.Errorf("DirEntry: unexpected .Location.ItemType=%v", body.Location.ItemType)) } } case *btrfsitem.Empty: @@ -141,12 +141,12 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, case nil: // nothing case *btrfsitem.ExtentDataRef: - o.wantOff(ctx, "referencing Inode", + o.WantOff(ctx, "referencing Inode", refBody.Root, refBody.ObjectID, btrfsitem.INODE_ITEM_KEY, 0) - o.wantOff(ctx, "referencing FileExtent", + o.WantOff(ctx, "referencing FileExtent", 
refBody.Root, refBody.ObjectID, btrfsitem.EXTENT_DATA_KEY, @@ -162,22 +162,22 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, case *btrfsitem.ExtentCSum: // nothing case *btrfsitem.ExtentDataRef: - o.want(ctx, "Extent being referenced", + o.Want(ctx, "Extent being referenced", btrfsprim.EXTENT_TREE_OBJECTID, item.Key.ObjectID, btrfsitem.EXTENT_ITEM_KEY) - o.wantOff(ctx, "referencing Inode", + o.WantOff(ctx, "referencing Inode", body.Root, body.ObjectID, btrfsitem.INODE_ITEM_KEY, 0) - o.wantOff(ctx, "referencing FileExtent", + o.WantOff(ctx, "referencing FileExtent", body.Root, body.ObjectID, btrfsitem.EXTENT_DATA_KEY, uint64(body.Offset)) case *btrfsitem.FileExtent: - o.wantOff(ctx, "containing Inode", + o.WantOff(ctx, "containing Inode", treeID, item.Key.ObjectID, btrfsitem.INODE_ITEM_KEY, @@ -187,64 +187,64 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, // nothing case btrfsitem.FILE_EXTENT_REG, btrfsitem.FILE_EXTENT_PREALLOC: // NB: o.wantCSum checks inodeBody.Flags.Has(btrfsitem.INODE_NODATASUM) for us. - o.wantCSum(ctx, "data sum", + o.WantCSum(ctx, "data sum", treeID, item.Key.ObjectID, body.BodyExtent.DiskByteNr, body.BodyExtent.DiskByteNr.Add(body.BodyExtent.DiskNumBytes)) default: - o.fsErr(ctx, fmt.Errorf("FileExtent: unexpected body.Type=%v", body.Type)) + o.FSErr(ctx, fmt.Errorf("FileExtent: unexpected body.Type=%v", body.Type)) } case *btrfsitem.FreeSpaceBitmap: - o.wantOff(ctx, "FreeSpaceInfo", + o.WantOff(ctx, "FreeSpaceInfo", treeID, item.Key.ObjectID, btrfsitem.FREE_SPACE_INFO_KEY, item.Key.Offset) case *btrfsitem.FreeSpaceHeader: - o.wantOff(ctx, ".Location", + o.WantOff(ctx, ".Location", treeID, body.Location.ObjectID, body.Location.ItemType, body.Location.Offset) case *btrfsitem.FreeSpaceInfo: if body.Flags.Has(btrfsitem.FREE_SPACE_USING_BITMAPS) { - o.wantOff(ctx, "FreeSpaceBitmap", + o.WantOff(ctx, "FreeSpaceBitmap", treeID, item.Key.ObjectID, btrfsitem.FREE_SPACE_BITMAP_KEY, item.Key.Offset) } case *btrfsitem.Inode: - o.want(ctx, "backrefs", + o.Want(ctx, "backrefs", treeID, // TODO: validate the number of these against body.NLink item.Key.ObjectID, btrfsitem.INODE_REF_KEY) - o.wantFileExt(ctx, "FileExtents", + o.WantFileExt(ctx, "FileExtents", treeID, item.Key.ObjectID, body.Size) if body.BlockGroup != 0 { - o.want(ctx, "BlockGroup", + o.Want(ctx, "BlockGroup", btrfsprim.EXTENT_TREE_OBJECTID, body.BlockGroup, btrfsitem.BLOCK_GROUP_ITEM_KEY) } case *btrfsitem.InodeRefs: - o.wantOff(ctx, "child Inode", + o.WantOff(ctx, "child Inode", treeID, item.Key.ObjectID, btrfsitem.INODE_ITEM_KEY, 0) - o.wantOff(ctx, "parent Inode", + o.WantOff(ctx, "parent Inode", treeID, btrfsprim.ObjID(item.Key.Offset), btrfsitem.INODE_ITEM_KEY, 0) for _, ref := range body.Refs { - o.wantOff(ctx, "DIR_ITEM", + o.WantOff(ctx, "DIR_ITEM", treeID, btrfsprim.ObjID(item.Key.Offset), btrfsitem.DIR_ITEM_KEY, btrfsitem.NameHash(ref.Name)) - o.wantOff(ctx, "DIR_INDEX", + o.WantOff(ctx, "DIR_INDEX", treeID, btrfsprim.ObjID(item.Key.Offset), btrfsitem.DIR_INDEX_KEY, @@ -256,12 +256,12 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, case nil: // nothing case *btrfsitem.ExtentDataRef: - o.wantOff(ctx, "referencing INode", + o.WantOff(ctx, "referencing INode", refBody.Root, refBody.ObjectID, btrfsitem.INODE_ITEM_KEY, 0) - o.wantOff(ctx, "referencing FileExtent", + o.WantOff(ctx, "referencing FileExtent", refBody.Root, refBody.ObjectID, btrfsitem.EXTENT_DATA_KEY, @@ -276,7 +276,7 @@ func handleItem(o 
rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, } case *btrfsitem.Root: if body.RootDirID != 0 { - o.wantOff(ctx, "root directory", + o.WantOff(ctx, "root directory", item.Key.ObjectID, body.RootDirID, btrfsitem.INODE_ITEM_KEY, @@ -284,7 +284,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, } if body.UUID != (btrfsprim.UUID{}) { key := btrfsitem.UUIDToKey(body.UUID) - o.wantOff(ctx, "uuid", + o.WantOff(ctx, "uuid", btrfsprim.UUID_TREE_OBJECTID, key.ObjectID, key.ItemType, @@ -292,7 +292,7 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, } if body.ParentUUID != (btrfsprim.UUID{}) { key := btrfsitem.UUIDToKey(body.ParentUUID) - o.wantOff(ctx, "parent uuid", + o.WantOff(ctx, "parent uuid", btrfsprim.UUID_TREE_OBJECTID, key.ObjectID, key.ItemType, @@ -317,48 +317,48 @@ func handleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, panic(fmt.Errorf("should not happen: RootRef: unexpected ItemType=%v", item.Key.ItemType)) } // sibling - o.wantOff(ctx, fmt.Sprintf("corresponding %v", otherType), + o.WantOff(ctx, fmt.Sprintf("corresponding %v", otherType), treeID, btrfsprim.ObjID(item.Key.Offset), otherType, uint64(item.Key.ObjectID)) // parent - o.want(ctx, "parent subvolume: Root", + o.Want(ctx, "parent subvolume: Root", treeID, parent, btrfsitem.ROOT_ITEM_KEY) - o.wantOff(ctx, "parent subvolume: Inode of parent dir", + o.WantOff(ctx, "parent subvolume: Inode of parent dir", parent, body.DirID, btrfsitem.INODE_ITEM_KEY, 0) - o.wantOff(ctx, "parent subvolume: DIR_ITEM in parent dir", + o.WantOff(ctx, "parent subvolume: DIR_ITEM in parent dir", parent, body.DirID, btrfsitem.DIR_ITEM_KEY, btrfsitem.NameHash(body.Name)) - o.wantOff(ctx, "parent subvolume: DIR_INDEX in parent dir", + o.WantOff(ctx, "parent subvolume: DIR_INDEX in parent dir", parent, body.DirID, btrfsitem.DIR_INDEX_KEY, uint64(body.Sequence)) // child - o.want(ctx, "child subvolume: Root", + o.Want(ctx, "child subvolume: Root", treeID, child, btrfsitem.ROOT_ITEM_KEY) case *btrfsitem.SharedDataRef: - o.want(ctx, "Extent", + o.Want(ctx, "Extent", btrfsprim.EXTENT_TREE_OBJECTID, item.Key.ObjectID, btrfsitem.EXTENT_ITEM_KEY) case *btrfsitem.UUIDMap: - o.want(ctx, "subvolume Root", + o.Want(ctx, "subvolume Root", btrfsprim.ROOT_TREE_OBJECTID, body.ObjID, btrfsitem.ROOT_ITEM_KEY) case *btrfsitem.Error: - o.fsErr(ctx, fmt.Errorf("error decoding item: %w", body.Err)) + o.FSErr(ctx, fmt.Errorf("error decoding item: %w", body.Err)) default: // This is a panic because the item decoder should not emit new types without this // code also being updated. 
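The lib/btrfscheck/graph.go hunk above is the heart of this rename: handleItem and its callback set become exported API (HandleItem, HandleWouldBeNoOp, FSErr, Want, WantOff, WantDirIndex, WantCSum, WantFileExt) so that cmd/btrfs-rec/inspect/rebuildtrees can drive the checker from outside the package. A rough, hypothetical sketch of a minimal consumer against this commit follows; loggingCallbacks and checkItem are made-up names for illustration only, while the btrfscheck entry points and callback signatures are taken from the diff above. Note that the interface itself is still the unexported rebuildCallbacks at this point; the next patch in the series refers to it in comments as btrfscheck.GraphCallbacks and renames HandleWouldBeNoOp to HandleItemWouldBeNoOp.

package btrfscheckexample

import (
	"context"
	"log"

	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfscheck"
)

// loggingCallbacks is a hypothetical stand-in for the rebuilder: it
// satisfies the callback interface that HandleItem takes (all of its
// methods are exported, so an outside package may implement it), and
// simply logs each relationship instead of queueing augments.
type loggingCallbacks struct{}

func (loggingCallbacks) FSErr(ctx context.Context, e error) {
	log.Printf("filesystem error: %v", e)
}

func (loggingCallbacks) Want(ctx context.Context, reason string, treeID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) {
	log.Printf("want (%s): tree=%v obj=%v typ=%v", reason, treeID, objID, typ)
}

func (loggingCallbacks) WantOff(ctx context.Context, reason string, treeID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) {
	log.Printf("want-off (%s): tree=%v obj=%v typ=%v off=%v", reason, treeID, objID, typ, off)
}

func (loggingCallbacks) WantDirIndex(ctx context.Context, reason string, treeID, objID btrfsprim.ObjID, name []byte) {
	log.Printf("want-dir-index (%s): tree=%v obj=%v name=%q", reason, treeID, objID, name)
}

func (loggingCallbacks) WantCSum(ctx context.Context, reason string, inodeTree, inodeItem btrfsprim.ObjID, beg, end btrfsvol.LogicalAddr) {
	log.Printf("want-csum (%s): tree=%v inode=%v range=[%v,%v)", reason, inodeTree, inodeItem, beg, end)
}

func (loggingCallbacks) WantFileExt(ctx context.Context, reason string, treeID, ino btrfsprim.ObjID, size int64) {
	log.Printf("want-file-ext (%s): tree=%v ino=%v size=%v", reason, treeID, ino, size)
}

// checkItem mirrors what rebuildtrees does per settled item: skip item
// types that HandleItem would ignore anyway, otherwise let HandleItem
// emit the Want*/FSErr callbacks for that item's relationships.
func checkItem(ctx context.Context, treeID btrfsprim.ObjID, item btrfstree.Item) {
	// Renamed to btrfscheck.HandleItemWouldBeNoOp by the following patch.
	if btrfscheck.HandleWouldBeNoOp(item.Key.ItemType) {
		return
	}
	btrfscheck.HandleItem(loggingCallbacks{}, ctx, treeID, item)
}
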
diff --git a/lib/btrfsutil/graph.go b/lib/btrfsutil/graph.go index 2a97ec8..f294a28 100644 --- a/lib/btrfsutil/graph.go +++ b/lib/btrfsutil/graph.go @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: GPL-2.0-or-later -package graph +package btrfsutil import ( "context" @@ -120,7 +120,7 @@ func (g Graph) insertTreeRoot(sb btrfstree.Superblock, treeID btrfsprim.ObjID) { }) } -func New(sb btrfstree.Superblock) *Graph { +func NewGraph(sb btrfstree.Superblock) *Graph { g := &Graph{ Nodes: make(map[btrfsvol.LogicalAddr]Node), BadNodes: make(map[btrfsvol.LogicalAddr]error), diff --git a/lib/btrfsutil/graph_loops.go b/lib/btrfsutil/graph_loops.go index 0e51805..9563d19 100644 --- a/lib/btrfsutil/graph_loops.go +++ b/lib/btrfsutil/graph_loops.go @@ -1,8 +1,8 @@ -// Copyright (C) 2022 Luke Shumaker +// Copyright (C) 2022-2023 Luke Shumaker // // SPDX-License-Identifier: GPL-2.0-or-later -package graph +package btrfsutil import ( "fmt" diff --git a/lib/btrfsutil/nestedlock.go b/lib/btrfsutil/nestedlock.go index c1ffa18..917b4cd 100644 --- a/lib/btrfsutil/nestedlock.go +++ b/lib/btrfsutil/nestedlock.go @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: GPL-2.0-or-later -package btrees +package btrfsutil import ( "context" diff --git a/lib/btrfsutil/print_addrspace.go b/lib/btrfsutil/print_addrspace.go index e85e055..c9c51f0 100644 --- a/lib/btrfsutil/print_addrspace.go +++ b/lib/btrfsutil/print_addrspace.go @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: GPL-2.0-or-later -package btrfsinspect +package btrfsutil import ( "io" diff --git a/lib/btrfsutil/rebuilt_forrest.go b/lib/btrfsutil/rebuilt_forrest.go index dbbc6eb..8d4b810 100644 --- a/lib/btrfsutil/rebuilt_forrest.go +++ b/lib/btrfsutil/rebuilt_forrest.go @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: GPL-2.0-or-later -package btrees +package btrfsutil import ( "context" @@ -13,8 +13,6 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" - pkggraph "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio" "git.lukeshu.com/btrfs-progs-ng/lib/containers" "git.lukeshu.com/btrfs-progs-ng/lib/slices" "git.lukeshu.com/btrfs-progs-ng/lib/textui" @@ -60,8 +58,8 @@ type Callbacks interface { type RebuiltForrest struct { // static sb btrfstree.Superblock - graph pkggraph.Graph - keyIO *keyio.Handle + graph Graph + keyIO *KeyIO cb Callbacks // mutable @@ -74,7 +72,7 @@ type RebuiltForrest struct { // NewRebuiltForrest returns a new RebuiltForrest instance. All of // the callbacks must be non-nil. 
-func NewRebuiltForrest(sb btrfstree.Superblock, graph pkggraph.Graph, keyIO *keyio.Handle, cb Callbacks) *RebuiltForrest { +func NewRebuiltForrest(sb btrfstree.Superblock, graph Graph, keyIO *KeyIO, cb Callbacks) *RebuiltForrest { return &RebuiltForrest{ sb: sb, graph: graph, diff --git a/lib/btrfsutil/rebuilt_readitem.go b/lib/btrfsutil/rebuilt_readitem.go index 56da32d..aa432bb 100644 --- a/lib/btrfsutil/rebuilt_readitem.go +++ b/lib/btrfsutil/rebuilt_readitem.go @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: GPL-2.0-or-later -package keyio +package btrfsutil import ( "context" @@ -15,7 +15,6 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/graph" "git.lukeshu.com/btrfs-progs-ng/lib/containers" "git.lukeshu.com/btrfs-progs-ng/lib/diskio" "git.lukeshu.com/btrfs-progs-ng/lib/textui" @@ -40,10 +39,10 @@ type FlagsAndErr struct { Err error } -type Handle struct { +type KeyIO struct { rawFile diskio.File[btrfsvol.LogicalAddr] sb btrfstree.Superblock - graph graph.Graph + graph Graph Flags map[ItemPtr]FlagsAndErr // INODE_ITEM Names map[ItemPtr][]byte // DIR_INDEX @@ -53,8 +52,8 @@ type Handle struct { cache containers.ARCache[btrfsvol.LogicalAddr, *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]] } -func NewHandle(file diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock) *Handle { - return &Handle{ +func NewKeyIO(file diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock) *KeyIO { + return &KeyIO{ rawFile: file, sb: sb, @@ -71,7 +70,7 @@ func NewHandle(file diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock) } } -func (o *Handle) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) { +func (o *KeyIO) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) { for i, item := range nodeRef.Data.BodyLeaf { ptr := ItemPtr{ Node: nodeRef.Addr, @@ -115,11 +114,11 @@ func (o *Handle) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree. 
} } -func (o *Handle) SetGraph(graph graph.Graph) { +func (o *KeyIO) SetGraph(graph Graph) { o.graph = graph } -func (o *Handle) readNode(ctx context.Context, laddr btrfsvol.LogicalAddr) *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node] { +func (o *KeyIO) readNode(ctx context.Context, laddr btrfsvol.LogicalAddr) *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node] { if cached, ok := o.cache.Load(laddr); ok { dlog.Tracef(ctx, "cache-hit node@%v", laddr) return cached @@ -154,7 +153,7 @@ func (o *Handle) readNode(ctx context.Context, laddr btrfsvol.LogicalAddr) *disk return ref } -func (o *Handle) ReadItem(ctx context.Context, ptr ItemPtr) btrfsitem.Item { +func (o *KeyIO) ReadItem(ctx context.Context, ptr ItemPtr) btrfsitem.Item { o.mu.Lock() defer o.mu.Unlock() if o.graph.Nodes[ptr.Node].Level != 0 { diff --git a/lib/btrfsutil/rebuilt_tree.go b/lib/btrfsutil/rebuilt_tree.go index 39d8871..e61b45b 100644 --- a/lib/btrfsutil/rebuilt_tree.go +++ b/lib/btrfsutil/rebuilt_tree.go @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: GPL-2.0-or-later -package btrees +package btrfsutil import ( "context" @@ -15,7 +15,6 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim" "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" - "git.lukeshu.com/btrfs-progs-ng/lib/btrfsprogs/btrfsinspect/rebuildnodes/keyio" "git.lukeshu.com/btrfs-progs-ng/lib/containers" "git.lukeshu.com/btrfs-progs-ng/lib/maps" "git.lukeshu.com/btrfs-progs-ng/lib/slices" @@ -136,7 +135,7 @@ func (tree *RebuiltTree) isOwnerOK(owner btrfsprim.ObjID, gen btrfsprim.Generati // // Do not mutate the returned map; it is a pointer to the // RebuiltTree's internal map! -func (tree *RebuiltTree) Items(ctx context.Context) *containers.SortedMap[btrfsprim.Key, keyio.ItemPtr] { +func (tree *RebuiltTree) Items(ctx context.Context) *containers.SortedMap[btrfsprim.Key, ItemPtr] { ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.index-inc-items", fmt.Sprintf("tree=%v", tree.ID)) return tree.items(ctx, &tree.forrest.incItems, tree.Roots.HasAny) } @@ -146,7 +145,7 @@ func (tree *RebuiltTree) Items(ctx context.Context) *containers.SortedMap[btrfsp // // Do not mutate the returned map; it is a pointer to the // RebuiltTree's internal map! 
-func (tree *RebuiltTree) PotentialItems(ctx context.Context) *containers.SortedMap[btrfsprim.Key, keyio.ItemPtr] { +func (tree *RebuiltTree) PotentialItems(ctx context.Context) *containers.SortedMap[btrfsprim.Key, ItemPtr] { ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.index-exc-items", fmt.Sprintf("tree=%v", tree.ID)) return tree.items(ctx, &tree.forrest.excItems, func(roots containers.Set[btrfsvol.LogicalAddr]) bool { @@ -154,7 +153,7 @@ func (tree *RebuiltTree) PotentialItems(ctx context.Context) *containers.SortedM }) } -type itemIndex = containers.SortedMap[btrfsprim.Key, keyio.ItemPtr] +type itemIndex = containers.SortedMap[btrfsprim.Key, ItemPtr] type itemStats struct { Leafs textui.Portion[int] @@ -169,7 +168,7 @@ func (s itemStats) String() string { func (tree *RebuiltTree) items(ctx context.Context, cache containers.Map[btrfsprim.ObjID, *itemIndex], leafFn func(roots containers.Set[btrfsvol.LogicalAddr]) bool, -) *containers.SortedMap[btrfsprim.Key, keyio.ItemPtr] { +) *containers.SortedMap[btrfsprim.Key, ItemPtr] { tree.mu.RLock() defer tree.mu.RUnlock() @@ -186,12 +185,12 @@ func (tree *RebuiltTree) items(ctx context.Context, cache containers.Map[btrfspr stats.Leafs.D = len(leafs) progressWriter := textui.NewProgress[itemStats](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) - index := new(containers.SortedMap[btrfsprim.Key, keyio.ItemPtr]) + index := new(containers.SortedMap[btrfsprim.Key, ItemPtr]) for i, leaf := range leafs { stats.Leafs.N = i progressWriter.Set(stats) for j, itemKey := range tree.forrest.graph.Nodes[leaf].Items { - newPtr := keyio.ItemPtr{ + newPtr := ItemPtr{ Node: leaf, Idx: j, } -- cgit v1.2.3-54-g00ecf From 84099499feb558a01253bd272563fa1271527a75 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Sun, 12 Mar 2023 17:09:51 -0600 Subject: Update identifiers and comments to reflect new file/package names --- cmd/btrfs-rec/inspect/mount/mount.go | 2 +- cmd/btrfs-rec/inspect/rebuildmappings/process.go | 4 +- .../rebuildmappings/process_matchsums_exact.go | 2 +- .../rebuildmappings/process_matchsums_fuzzy.go | 2 +- cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go | 2 +- .../inspect/rebuildtrees/rebuild_treecb.go | 8 +- .../inspect/rebuildtrees/rebuild_wantcb.go | 12 +- cmd/btrfs-rec/inspect_lsfiles.go | 2 +- lib/btrfscheck/graph.go | 12 +- lib/btrfsutil/graph.go | 36 ++--- lib/btrfsutil/graph_loops.go | 4 +- lib/btrfsutil/old_rebuilt_forrest.go | 153 ++++++++++----------- lib/btrfsutil/rebuilt_forrest.go | 22 +-- 13 files changed, 128 insertions(+), 133 deletions(-) (limited to 'lib/btrfsutil') diff --git a/cmd/btrfs-rec/inspect/mount/mount.go b/cmd/btrfs-rec/inspect/mount/mount.go index 7e68ae6..da0bbb6 100644 --- a/cmd/btrfs-rec/inspect/mount/mount.go +++ b/cmd/btrfs-rec/inspect/mount/mount.go @@ -44,7 +44,7 @@ func MountRO(ctx context.Context, fs *btrfs.FS, mountpoint string, noChecksums b rootSubvol := &subvolume{ Subvolume: btrfs.Subvolume{ - FS: btrfsutil.NewBrokenTrees(ctx, fs), + FS: btrfsutil.NewOldRebuiltForrest(ctx, fs), TreeID: btrfsprim.FS_TREE_OBJECTID, NoChecksums: noChecksums, }, diff --git a/cmd/btrfs-rec/inspect/rebuildmappings/process.go b/cmd/btrfs-rec/inspect/rebuildmappings/process.go index 5f41f15..4ef4dac 100644 --- a/cmd/btrfs-rec/inspect/rebuildmappings/process.go +++ b/cmd/btrfs-rec/inspect/rebuildmappings/process.go @@ -161,14 +161,14 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults ScanDevicesR dlog.Infof(_ctx, "5/6: Searching for %d block groups in checksum map (exact)...", len(bgs)) 
physicalSums := ExtractPhysicalSums(scanResults) logicalSums := ExtractLogicalSums(ctx, scanResults) - if err := matchBlockGroupSums(ctx, fs, bgs, physicalSums, logicalSums); err != nil { + if err := matchBlockGroupSumsExact(ctx, fs, bgs, physicalSums, logicalSums); err != nil { return err } dlog.Info(ctx, "... done searching for exact block groups") ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "6/6") dlog.Infof(_ctx, "6/6: Searching for %d block groups in checksum map (fuzzy)...", len(bgs)) - if err := fuzzyMatchBlockGroupSums(ctx, fs, bgs, physicalSums, logicalSums); err != nil { + if err := matchBlockGroupSumsFuzzy(ctx, fs, bgs, physicalSums, logicalSums); err != nil { return err } dlog.Info(_ctx, "... done searching for fuzzy block groups") diff --git a/cmd/btrfs-rec/inspect/rebuildmappings/process_matchsums_exact.go b/cmd/btrfs-rec/inspect/rebuildmappings/process_matchsums_exact.go index a3e724e..533ae67 100644 --- a/cmd/btrfs-rec/inspect/rebuildmappings/process_matchsums_exact.go +++ b/cmd/btrfs-rec/inspect/rebuildmappings/process_matchsums_exact.go @@ -17,7 +17,7 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/maps" ) -func matchBlockGroupSums(ctx context.Context, +func matchBlockGroupSumsExact(ctx context.Context, fs *btrfs.FS, blockgroups map[btrfsvol.LogicalAddr]BlockGroup, physicalSums map[btrfsvol.DeviceID]btrfssum.SumRun[btrfsvol.PhysicalAddr], diff --git a/cmd/btrfs-rec/inspect/rebuildmappings/process_matchsums_fuzzy.go b/cmd/btrfs-rec/inspect/rebuildmappings/process_matchsums_fuzzy.go index 4724c12..d6ea338 100644 --- a/cmd/btrfs-rec/inspect/rebuildmappings/process_matchsums_fuzzy.go +++ b/cmd/btrfs-rec/inspect/rebuildmappings/process_matchsums_fuzzy.go @@ -37,7 +37,7 @@ func (a fuzzyRecord) Compare(b fuzzyRecord) int { } } -func fuzzyMatchBlockGroupSums(ctx context.Context, +func matchBlockGroupSumsFuzzy(ctx context.Context, fs *btrfs.FS, blockgroups map[btrfsvol.LogicalAddr]BlockGroup, physicalSums map[btrfsvol.DeviceID]btrfssum.SumRun[btrfsvol.PhysicalAddr], diff --git a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go index 6492d9c..565cb06 100644 --- a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go +++ b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go @@ -213,7 +213,7 @@ func (o *rebuilder) processAddedItemQueue(ctx context.Context) error { progress.NumAugments = o.numAugments progress.NumAugmentTrees = len(o.augmentQueue) progressWriter.Set(progress) - } else if !btrfscheck.HandleWouldBeNoOp(key.ItemType) { + } else if !btrfscheck.HandleItemWouldBeNoOp(key.ItemType) { o.settledItemQueue.Insert(key) } } diff --git a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_treecb.go b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_treecb.go index d58689c..e6a0777 100644 --- a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_treecb.go +++ b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_treecb.go @@ -13,7 +13,7 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" ) -// AddedItem implements btrees.Callbacks. +// AddedItem implements btrfsutil.RebuiltForrestCallbacks. func (o *rebuilder) AddedItem(ctx context.Context, tree btrfsprim.ObjID, key btrfsprim.Key) { o.addedItemQueue.Insert(keyAndTree{ TreeID: tree, @@ -21,14 +21,14 @@ func (o *rebuilder) AddedItem(ctx context.Context, tree btrfsprim.ObjID, key btr }) } -// AddedRoot implements btrees.Callbacks. +// AddedRoot implements btrfsutil.RebuiltForrestCallbacks. 
func (o *rebuilder) AddedRoot(ctx context.Context, tree btrfsprim.ObjID, root btrfsvol.LogicalAddr) { if retries := o.retryItemQueue[tree]; retries != nil { o.addedItemQueue.InsertFrom(retries) } } -// LookupRoot implements btrees.Callbacks. +// LookupRoot implements btrfsutil.RebuiltForrestCallbacks. func (o *rebuilder) LookupRoot(ctx context.Context, tree btrfsprim.ObjID) (offset btrfsprim.Generation, item btrfsitem.Root, ok bool) { wantKey := WantWithTree{ TreeID: btrfsprim.ROOT_TREE_OBJECTID, @@ -59,7 +59,7 @@ func (o *rebuilder) LookupRoot(ctx context.Context, tree btrfsprim.ObjID) (offse } } -// LookupUUID implements btrees.Callbacks. +// LookupUUID implements btrfsutil.RebuiltForrestCallbacks. func (o *rebuilder) LookupUUID(ctx context.Context, uuid btrfsprim.UUID) (id btrfsprim.ObjID, ok bool) { wantKey := WantWithTree{ TreeID: btrfsprim.UUID_TREE_OBJECTID, diff --git a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wantcb.go b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wantcb.go index 06be630..4a5029e 100644 --- a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wantcb.go +++ b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wantcb.go @@ -19,12 +19,12 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/containers" ) -// fsErr implements rebuildCallbacks. +// FSErr implements btrfscheck.GraphCallbacks. func (o *rebuilder) FSErr(ctx context.Context, e error) { dlog.Errorf(ctx, "filesystem error: %v", e) } -// want implements rebuildCallbacks. +// Want implements btrfscheck.GraphCallbacks. func (o *rebuilder) Want(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) { wantKey := WantWithTree{ TreeID: treeID, @@ -73,7 +73,7 @@ func (o *rebuilder) _want(ctx context.Context, wantKey WantWithTree) (key btrfsp return btrfsprim.Key{}, false } -// wantOff implements rebuildCallbacks. +// WantOff implements btrfscheck.GraphCallbacks. func (o *rebuilder) WantOff(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) { wantKey := WantWithTree{ TreeID: treeID, @@ -117,7 +117,7 @@ func (o *rebuilder) _wantOff(ctx context.Context, wantKey WantWithTree) (ok bool return false } -// wantDirIndex implements rebuildCallbacks. +// WantDirIndex implements btrfscheck.GraphCallbacks. func (o *rebuilder) WantDirIndex(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, name []byte) { wantKey := WantWithTree{ TreeID: treeID, @@ -350,7 +350,7 @@ func (o *rebuilder) _wantRange( }) } -// func implements rebuildCallbacks. +// WantCSum implements btrfscheck.GraphCallbacks. // // interval is [beg, end) func (o *rebuilder) WantCSum(ctx context.Context, reason string, inodeTree, inode btrfsprim.ObjID, beg, end btrfsvol.LogicalAddr) { @@ -391,7 +391,7 @@ func (o *rebuilder) WantCSum(ctx context.Context, reason string, inodeTree, inod uint64(roundDown(beg, btrfssum.BlockSize)), uint64(roundUp(end, btrfssum.BlockSize))) } -// wantFileExt implements rebuildCallbacks. +// WantFileExt implements btrfscheck.GraphCallbacks. 
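[Editorial illustration, not part of the patch: the WantCSum hunk just above rounds the requested byte range out to checksum-block boundaries via roundDown/roundUp before asking for sums. The project's actual generic helpers are not shown in this diff, so the following is only a sketch of what helpers of that shape typically look like, assuming non-negative offsets.]

package roundsketch

// roundDown and roundUp align a non-negative offset to a block size,
// e.g. roundDown(5000, 4096) == 4096 and roundUp(5000, 4096) == 8192.
// Illustration only; not the project's actual helpers.
func roundDown[T ~int64 | ~uint64](x, multiple T) T {
	return (x / multiple) * multiple
}

func roundUp[T ~int64 | ~uint64](x, multiple T) T {
	return ((x + multiple - 1) / multiple) * multiple
}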
func (o *rebuilder) WantFileExt(ctx context.Context, reason string, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) { o._wantRange( ctx, reason, diff --git a/cmd/btrfs-rec/inspect_lsfiles.go b/cmd/btrfs-rec/inspect_lsfiles.go index d8da481..4f985ff 100644 --- a/cmd/btrfs-rec/inspect_lsfiles.go +++ b/cmd/btrfs-rec/inspect_lsfiles.go @@ -48,7 +48,7 @@ func init() { ctx := cmd.Context() printSubvol(out, "", true, "/", &btrfs.Subvolume{ - FS: btrfsutil.NewBrokenTrees(ctx, fs), + FS: btrfsutil.NewOldRebuiltForrest(ctx, fs), TreeID: btrfsprim.FS_TREE_OBJECTID, }) diff --git a/lib/btrfscheck/graph.go b/lib/btrfscheck/graph.go index c35baf8..ea51818 100644 --- a/lib/btrfscheck/graph.go +++ b/lib/btrfscheck/graph.go @@ -14,7 +14,7 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol" ) -type rebuildCallbacks interface { +type GraphCallbacks interface { FSErr(ctx context.Context, e error) Want(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType) WantOff(ctx context.Context, reason string, treeID btrfsprim.ObjID, objID btrfsprim.ObjID, typ btrfsprim.ItemType, off uint64) @@ -23,9 +23,9 @@ type rebuildCallbacks interface { WantFileExt(ctx context.Context, reason string, treeID btrfsprim.ObjID, ino btrfsprim.ObjID, size int64) } -// handleWouldBeNoOp returns whether or not a call to handleItem for a -// given item type would be a no-op. -func HandleWouldBeNoOp(typ btrfsprim.ItemType) bool { +// HandleItemWouldBeNoOp returns whether or not a call to HandleItem +// for a given item type would be a no-op. +func HandleItemWouldBeNoOp(typ btrfsprim.ItemType) bool { switch typ { case // btrfsitem.Dev btrfsprim.DEV_ITEM_KEY, @@ -45,7 +45,7 @@ func HandleWouldBeNoOp(typ btrfsprim.ItemType) bool { } } -func HandleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, item btrfstree.Item) { +func HandleItem(o GraphCallbacks, ctx context.Context, treeID btrfsprim.ObjID, item btrfstree.Item) { // Notionally, just express the relationships shown in // https://btrfs.wiki.kernel.org/index.php/File:References.png (from the page // https://btrfs.wiki.kernel.org/index.php/Data_Structures ) @@ -186,7 +186,7 @@ func HandleItem(o rebuildCallbacks, ctx context.Context, treeID btrfsprim.ObjID, case btrfsitem.FILE_EXTENT_INLINE: // nothing case btrfsitem.FILE_EXTENT_REG, btrfsitem.FILE_EXTENT_PREALLOC: - // NB: o.wantCSum checks inodeBody.Flags.Has(btrfsitem.INODE_NODATASUM) for us. + // NB: o.WantCSum checks inodeBody.Flags.Has(btrfsitem.INODE_NODATASUM) for us. o.WantCSum(ctx, "data sum", treeID, item.Key.ObjectID, body.BodyExtent.DiskByteNr, diff --git a/lib/btrfsutil/graph.go b/lib/btrfsutil/graph.go index f294a28..b4a8b72 100644 --- a/lib/btrfsutil/graph.go +++ b/lib/btrfsutil/graph.go @@ -23,7 +23,7 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/textui" ) -type Node struct { +type GraphNode struct { Level uint8 Generation btrfsprim.Generation Owner btrfsprim.ObjID @@ -33,7 +33,7 @@ type Node struct { Items []btrfsprim.Key } -func (n Node) String() string { +func (n GraphNode) String() string { if reflect.ValueOf(n).IsZero() { return "{}" } @@ -43,9 +43,9 @@ func (n Node) String() string { n.MaxItem.ObjectID, n.MaxItem.ItemType, n.MaxItem.Offset) } -type Edge struct { +type GraphEdge struct { // It is invalid for both 'FromRoot' and 'FromNode' to be - // non-zero. If both are zero, then the Edge is from the + // non-zero. If both are zero, then the GraphEdge is from the // superblock. 
FromRoot btrfsvol.LogicalAddr FromNode btrfsvol.LogicalAddr @@ -59,7 +59,7 @@ type Edge struct { ToGeneration btrfsprim.Generation } -func (kp Edge) String() string { +func (kp GraphEdge) String() string { var from string switch { case kp.FromRoot != 0: @@ -80,13 +80,13 @@ func (kp Edge) String() string { } type Graph struct { - Nodes map[btrfsvol.LogicalAddr]Node + Nodes map[btrfsvol.LogicalAddr]GraphNode BadNodes map[btrfsvol.LogicalAddr]error - EdgesFrom map[btrfsvol.LogicalAddr][]*Edge - EdgesTo map[btrfsvol.LogicalAddr][]*Edge + EdgesFrom map[btrfsvol.LogicalAddr][]*GraphEdge + EdgesTo map[btrfsvol.LogicalAddr][]*GraphEdge } -func (g Graph) insertEdge(ptr *Edge) { +func (g Graph) insertEdge(ptr *GraphEdge) { if ptr.ToNode == 0 { panic("kp.ToNode should not be zero") } @@ -112,7 +112,7 @@ func (g Graph) insertTreeRoot(sb btrfstree.Superblock, treeID btrfsprim.ObjID) { if treeInfo.RootNode == 0 { return } - g.insertEdge(&Edge{ + g.insertEdge(&GraphEdge{ FromTree: treeID, ToNode: treeInfo.RootNode, ToLevel: treeInfo.Level, @@ -122,10 +122,10 @@ func (g Graph) insertTreeRoot(sb btrfstree.Superblock, treeID btrfsprim.ObjID) { func NewGraph(sb btrfstree.Superblock) *Graph { g := &Graph{ - Nodes: make(map[btrfsvol.LogicalAddr]Node), + Nodes: make(map[btrfsvol.LogicalAddr]GraphNode), BadNodes: make(map[btrfsvol.LogicalAddr]error), - EdgesFrom: make(map[btrfsvol.LogicalAddr][]*Edge), - EdgesTo: make(map[btrfsvol.LogicalAddr][]*Edge), + EdgesFrom: make(map[btrfsvol.LogicalAddr][]*GraphEdge), + EdgesTo: make(map[btrfsvol.LogicalAddr][]*GraphEdge), } // These 4 trees are mentioned directly in the superblock, so @@ -139,7 +139,7 @@ func NewGraph(sb btrfstree.Superblock) *Graph { } func (g Graph) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node]) { - nodeData := Node{ + nodeData := GraphNode{ Level: nodeRef.Data.Head.Level, Generation: nodeRef.Data.Head.Generation, Owner: nodeRef.Data.Head.Owner, @@ -155,14 +155,14 @@ func (g Graph) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.No cnt++ } } - kps := make([]Edge, 0, cnt) + kps := make([]GraphEdge, 0, cnt) keys := make([]btrfsprim.Key, len(nodeRef.Data.BodyLeaf)) nodeData.Items = keys g.Nodes[nodeRef.Addr] = nodeData for i, item := range nodeRef.Data.BodyLeaf { keys[i] = item.Key if itemBody, ok := item.Body.(*btrfsitem.Root); ok { - kps = append(kps, Edge{ + kps = append(kps, GraphEdge{ FromRoot: nodeRef.Addr, FromItem: i, FromTree: item.Key.ObjectID, @@ -175,9 +175,9 @@ func (g Graph) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.No } } else { g.Nodes[nodeRef.Addr] = nodeData - kps := make([]Edge, len(nodeRef.Data.BodyInternal)) + kps := make([]GraphEdge, len(nodeRef.Data.BodyInternal)) for i, kp := range nodeRef.Data.BodyInternal { - kps[i] = Edge{ + kps[i] = GraphEdge{ FromNode: nodeRef.Addr, FromItem: i, FromTree: nodeRef.Data.Head.Owner, diff --git a/lib/btrfsutil/graph_loops.go b/lib/btrfsutil/graph_loops.go index 9563d19..3382705 100644 --- a/lib/btrfsutil/graph_loops.go +++ b/lib/btrfsutil/graph_loops.go @@ -42,7 +42,7 @@ func (g Graph) renderNode(node btrfsvol.LogicalAddr) []string { } } -func (g Graph) renderEdge(kp Edge) []string { +func (g Graph) renderEdge(kp GraphEdge) []string { a := fmt.Sprintf("[%d]={", kp.FromItem) b := strings.Repeat(" ", len(a)) ret := []string{ @@ -110,7 +110,7 @@ func (g Graph) renderLoop(stack []btrfsvol.LogicalAddr) []string { return lines } -func checkNodeExpectations(kp Edge, toNode Node) error { +func checkNodeExpectations(kp GraphEdge, toNode GraphNode) 
error { var errs derror.MultiError if toNode.Level != kp.ToLevel { errs = append(errs, fmt.Errorf("kp.level=%v != node.level=%v", diff --git a/lib/btrfsutil/old_rebuilt_forrest.go b/lib/btrfsutil/old_rebuilt_forrest.go index b7663fa..2386803 100644 --- a/lib/btrfsutil/old_rebuilt_forrest.go +++ b/lib/btrfsutil/old_rebuilt_forrest.go @@ -21,60 +21,60 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/diskio" ) -type treeIndex struct { - TreeRootErr error - Items *containers.RBTree[treeIndexValue] - Errors *containers.IntervalTree[btrfsprim.Key, treeIndexError] +type oldRebuiltTree struct { + RootErr error + Items *containers.RBTree[oldRebuiltTreeValue] + Errors *containers.IntervalTree[btrfsprim.Key, oldRebuiltTreeError] } -type treeIndexError struct { +type oldRebuiltTreeError struct { Path SkinnyPath Err error } -type treeIndexValue struct { +type oldRebuiltTreeValue struct { Path SkinnyPath Key btrfsprim.Key ItemSize uint32 } // Compare implements containers.Ordered. -func (a treeIndexValue) Compare(b treeIndexValue) int { +func (a oldRebuiltTreeValue) Compare(b oldRebuiltTreeValue) int { return a.Key.Compare(b.Key) } -func newTreeIndex(arena *SkinnyPathArena) treeIndex { - return treeIndex{ - Items: new(containers.RBTree[treeIndexValue]), - Errors: &containers.IntervalTree[btrfsprim.Key, treeIndexError]{ - MinFn: func(err treeIndexError) btrfsprim.Key { +func newOldRebuiltTree(arena *SkinnyPathArena) oldRebuiltTree { + return oldRebuiltTree{ + Items: new(containers.RBTree[oldRebuiltTreeValue]), + Errors: &containers.IntervalTree[btrfsprim.Key, oldRebuiltTreeError]{ + MinFn: func(err oldRebuiltTreeError) btrfsprim.Key { return arena.Inflate(err.Path).Node(-1).ToKey }, - MaxFn: func(err treeIndexError) btrfsprim.Key { + MaxFn: func(err oldRebuiltTreeError) btrfsprim.Key { return arena.Inflate(err.Path).Node(-1).ToMaxKey }, }, } } -type brokenTrees struct { +type OldRebuiltForrest struct { ctx context.Context //nolint:containedctx // don't have an option while keeping the same API inner *btrfs.FS arena *SkinnyPathArena // btrfsprim.ROOT_TREE_OBJECTID - rootTreeMu sync.Mutex - rootTreeIndex *treeIndex + rootTreeMu sync.Mutex + rootTree *oldRebuiltTree // for all other trees - treeMu sync.Mutex - treeIndexes map[btrfsprim.ObjID]treeIndex + treesMu sync.Mutex + trees map[btrfsprim.ObjID]oldRebuiltTree } -var _ btrfstree.TreeOperator = (*brokenTrees)(nil) +var _ btrfstree.TreeOperator = (*OldRebuiltForrest)(nil) -// NewBrokenTrees wraps a *btrfs.FS to support looking up information -// from broken trees. +// NewOldRebuiltForrest wraps a *btrfs.FS to support looking up +// information from broken trees. // // Of the btrfstree.TreeOperator methods: // @@ -87,43 +87,38 @@ var _ btrfstree.TreeOperator = (*brokenTrees)(nil) // broken tree might not be), and a bad node may cause it to not // return a truncated list of results. // -// NewBrokenTrees attempts to remedy these deficiencies by using +// NewOldRebuiltForrest attempts to remedy these deficiencies by using // .TreeWalk to build an out-of-FS index of all of the items in the // tree, and re-implements TreeLookup, TreeSearch, and TreeSearchAll // using that index. 
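[Editorial illustration, not part of the patch: the doc comment above is the heart of OldRebuiltForrest — walk the damaged tree once, remember where every item lives, and answer later lookups from that in-memory index instead of re-traversing the broken B+tree. The following self-contained sketch shows only that pattern, using placeholder types rather than the real btrfstree/btrfsutil API.]

package oldrebuiltsketch

import "sort"

// Key stands in for btrfsprim.Key; itemRef stands in for the path and
// size information the real index records per item.
type Key struct{ ObjectID, ItemType, Offset uint64 }

func keyLess(a, b Key) bool {
	switch {
	case a.ObjectID != b.ObjectID:
		return a.ObjectID < b.ObjectID
	case a.ItemType != b.ItemType:
		return a.ItemType < b.ItemType
	default:
		return a.Offset < b.Offset
	}
}

type itemRef struct {
	Key  Key
	Node int64 // placeholder for the leaf node's logical address
	Slot int   // item index within that leaf
}

type index struct {
	items []itemRef // sorted by Key once the walk is done
	errs  []error   // read errors noted during the walk, kept on the side
}

// build runs the caller-supplied walk exactly once, recording every
// item it reports, then sorts the result so lookups are O(log n).
func build(walk func(emitItem func(itemRef), emitErr func(error))) *index {
	ix := new(index)
	walk(
		func(ref itemRef) { ix.items = append(ix.items, ref) },
		func(err error) { ix.errs = append(ix.errs, err) },
	)
	sort.Slice(ix.items, func(i, j int) bool { return keyLess(ix.items[i].Key, ix.items[j].Key) })
	return ix
}

// lookup answers TreeLookup-style queries from the index alone; the
// broken on-disk tree structure is never consulted again.
func (ix *index) lookup(k Key) (itemRef, bool) {
	i := sort.Search(len(ix.items), func(i int) bool { return !keyLess(ix.items[i].Key, k) })
	if i < len(ix.items) && ix.items[i].Key == k {
		return ix.items[i], true
	}
	return itemRef{}, false
}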
-func NewBrokenTrees(ctx context.Context, inner *btrfs.FS) interface { - btrfstree.TreeOperator - Superblock() (*btrfstree.Superblock, error) - ReadAt(p []byte, off btrfsvol.LogicalAddr) (int, error) - Augment(treeID btrfsprim.ObjID, nodeAddr btrfsvol.LogicalAddr) ([]btrfsprim.Key, error) -} { - return &brokenTrees{ +func NewOldRebuiltForrest(ctx context.Context, inner *btrfs.FS) *OldRebuiltForrest { + return &OldRebuiltForrest{ ctx: ctx, inner: inner, } } -func (bt *brokenTrees) treeIndex(treeID btrfsprim.ObjID) treeIndex { +func (bt *OldRebuiltForrest) RebuiltTree(treeID btrfsprim.ObjID) oldRebuiltTree { var treeRoot *btrfstree.TreeRoot var sb *btrfstree.Superblock var err error if treeID == btrfsprim.ROOT_TREE_OBJECTID { bt.rootTreeMu.Lock() defer bt.rootTreeMu.Unlock() - if bt.rootTreeIndex != nil { - return *bt.rootTreeIndex + if bt.rootTree != nil { + return *bt.rootTree } sb, err = bt.inner.Superblock() if err == nil { treeRoot, err = btrfstree.LookupTreeRoot(bt.inner, *sb, treeID) } } else { - bt.treeMu.Lock() - defer bt.treeMu.Unlock() - if bt.treeIndexes == nil { - bt.treeIndexes = make(map[btrfsprim.ObjID]treeIndex) + bt.treesMu.Lock() + defer bt.treesMu.Unlock() + if bt.trees == nil { + bt.trees = make(map[btrfsprim.ObjID]oldRebuiltTree) } - if cacheEntry, exists := bt.treeIndexes[treeID]; exists { + if cacheEntry, exists := bt.trees[treeID]; exists { return cacheEntry } sb, err = bt.inner.Superblock() @@ -141,23 +136,23 @@ func (bt *brokenTrees) treeIndex(treeID btrfsprim.ObjID) treeIndex { SB: _sb, } } - cacheEntry := newTreeIndex(bt.arena) + cacheEntry := newOldRebuiltTree(bt.arena) if err != nil { - cacheEntry.TreeRootErr = err + cacheEntry.RootErr = err } else { dlog.Infof(bt.ctx, "indexing tree %v...", treeID) bt.rawTreeWalk(*treeRoot, cacheEntry, nil) dlog.Infof(bt.ctx, "... done indexing tree %v", treeID) } if treeID == btrfsprim.ROOT_TREE_OBJECTID { - bt.rootTreeIndex = &cacheEntry + bt.rootTree = &cacheEntry } else { - bt.treeIndexes[treeID] = cacheEntry + bt.trees[treeID] = cacheEntry } return cacheEntry } -func (bt *brokenTrees) rawTreeWalk(root btrfstree.TreeRoot, cacheEntry treeIndex, walked *[]btrfsprim.Key) { +func (bt *OldRebuiltForrest) rawTreeWalk(root btrfstree.TreeRoot, cacheEntry oldRebuiltTree, walked *[]btrfsprim.Key) { btrfstree.TreeOperatorImpl{NodeSource: bt.inner}.RawTreeWalk( bt.ctx, root, @@ -167,20 +162,20 @@ func (bt *brokenTrees) rawTreeWalk(root btrfstree.TreeRoot, cacheEntry treeIndex // indicates a bug in my item parser than a problem with the filesystem. panic(fmt.Errorf("TODO: error parsing item: %w", err)) } - cacheEntry.Errors.Insert(treeIndexError{ + cacheEntry.Errors.Insert(oldRebuiltTreeError{ Path: bt.arena.Deflate(err.Path), Err: err.Err, }) }, btrfstree.TreeWalkHandler{ Item: func(path btrfstree.TreePath, item btrfstree.Item) error { - if cacheEntry.Items.Search(func(v treeIndexValue) int { return item.Key.Compare(v.Key) }) != nil { + if cacheEntry.Items.Search(func(v oldRebuiltTreeValue) int { return item.Key.Compare(v.Key) }) != nil { // This is a panic because I'm not really sure what the best way to // handle this is, and so if this happens I want the program to crash // and force me to figure out how to handle it. 
panic(fmt.Errorf("dup key=%v in tree=%v", item.Key, root.TreeID)) } - cacheEntry.Items.Insert(treeIndexValue{ + cacheEntry.Items.Insert(oldRebuiltTreeValue{ Path: bt.arena.Deflate(path), Key: item.Key, ItemSize: item.BodySize, @@ -194,7 +189,7 @@ func (bt *brokenTrees) rawTreeWalk(root btrfstree.TreeRoot, cacheEntry treeIndex ) } -func (bt *brokenTrees) TreeLookup(treeID btrfsprim.ObjID, key btrfsprim.Key) (btrfstree.Item, error) { +func (bt *OldRebuiltForrest) TreeLookup(treeID btrfsprim.ObjID, key btrfsprim.Key) (btrfstree.Item, error) { item, err := bt.TreeSearch(treeID, btrfstree.KeySearch(key.Compare)) if err != nil { err = fmt.Errorf("item with key=%v: %w", key, err) @@ -202,11 +197,11 @@ func (bt *brokenTrees) TreeLookup(treeID btrfsprim.ObjID, key btrfsprim.Key) (bt return item, err } -func (bt *brokenTrees) addErrs(index treeIndex, fn func(btrfsprim.Key, uint32) int, err error) error { +func (bt *OldRebuiltForrest) addErrs(tree oldRebuiltTree, fn func(btrfsprim.Key, uint32) int, err error) error { var errs derror.MultiError - index.Errors.Subrange( + tree.Errors.Subrange( func(k btrfsprim.Key) int { return fn(k, 0) }, - func(v treeIndexError) bool { + func(v oldRebuiltTreeError) bool { errs = append(errs, &btrfstree.TreeError{ Path: bt.arena.Inflate(v.Path), Err: v.Err, @@ -222,24 +217,24 @@ func (bt *brokenTrees) addErrs(index treeIndex, fn func(btrfsprim.Key, uint32) i return errs } -func (bt *brokenTrees) TreeSearch(treeID btrfsprim.ObjID, fn func(btrfsprim.Key, uint32) int) (btrfstree.Item, error) { - index := bt.treeIndex(treeID) - if index.TreeRootErr != nil { - return btrfstree.Item{}, index.TreeRootErr +func (bt *OldRebuiltForrest) TreeSearch(treeID btrfsprim.ObjID, fn func(btrfsprim.Key, uint32) int) (btrfstree.Item, error) { + tree := bt.RebuiltTree(treeID) + if tree.RootErr != nil { + return btrfstree.Item{}, tree.RootErr } - indexItem := index.Items.Search(func(indexItem treeIndexValue) int { + indexItem := tree.Items.Search(func(indexItem oldRebuiltTreeValue) int { return fn(indexItem.Key, indexItem.ItemSize) }) if indexItem == nil { - return btrfstree.Item{}, bt.addErrs(index, fn, iofs.ErrNotExist) + return btrfstree.Item{}, bt.addErrs(tree, fn, iofs.ErrNotExist) } itemPath := bt.arena.Inflate(indexItem.Value.Path) node, err := bt.inner.ReadNode(itemPath.Parent()) defer btrfstree.FreeNodeRef(node) if err != nil { - return btrfstree.Item{}, bt.addErrs(index, fn, err) + return btrfstree.Item{}, bt.addErrs(tree, fn, err) } item := node.Data.BodyLeaf[itemPath.Node(-1).FromItemIdx] @@ -250,21 +245,21 @@ func (bt *brokenTrees) TreeSearch(treeID btrfsprim.ObjID, fn func(btrfsprim.Key, return item, nil } -func (bt *brokenTrees) TreeSearchAll(treeID btrfsprim.ObjID, fn func(btrfsprim.Key, uint32) int) ([]btrfstree.Item, error) { - index := bt.treeIndex(treeID) - if index.TreeRootErr != nil { - return nil, index.TreeRootErr +func (bt *OldRebuiltForrest) TreeSearchAll(treeID btrfsprim.ObjID, fn func(btrfsprim.Key, uint32) int) ([]btrfstree.Item, error) { + tree := bt.RebuiltTree(treeID) + if tree.RootErr != nil { + return nil, tree.RootErr } - var indexItems []treeIndexValue - index.Items.Subrange( - func(indexItem treeIndexValue) int { return fn(indexItem.Key, indexItem.ItemSize) }, - func(node *containers.RBNode[treeIndexValue]) bool { + var indexItems []oldRebuiltTreeValue + tree.Items.Subrange( + func(indexItem oldRebuiltTreeValue) int { return fn(indexItem.Key, indexItem.ItemSize) }, + func(node *containers.RBNode[oldRebuiltTreeValue]) bool { indexItems = 
append(indexItems, node.Value) return true }) if len(indexItems) == 0 { - return nil, bt.addErrs(index, fn, iofs.ErrNotExist) + return nil, bt.addErrs(tree, fn, iofs.ErrNotExist) } ret := make([]btrfstree.Item, len(indexItems)) @@ -277,7 +272,7 @@ func (bt *brokenTrees) TreeSearchAll(treeID btrfsprim.ObjID, fn func(btrfsprim.K node, err = bt.inner.ReadNode(itemPath.Parent()) if err != nil { btrfstree.FreeNodeRef(node) - return nil, bt.addErrs(index, fn, err) + return nil, bt.addErrs(tree, fn, err) } } ret[i] = node.Data.BodyLeaf[itemPath.Node(-1).FromItemIdx] @@ -285,23 +280,23 @@ func (bt *brokenTrees) TreeSearchAll(treeID btrfsprim.ObjID, fn func(btrfsprim.K } btrfstree.FreeNodeRef(node) - return ret, bt.addErrs(index, fn, nil) + return ret, bt.addErrs(tree, fn, nil) } -func (bt *brokenTrees) TreeWalk(ctx context.Context, treeID btrfsprim.ObjID, errHandle func(*btrfstree.TreeError), cbs btrfstree.TreeWalkHandler) { - index := bt.treeIndex(treeID) - if index.TreeRootErr != nil { +func (bt *OldRebuiltForrest) TreeWalk(ctx context.Context, treeID btrfsprim.ObjID, errHandle func(*btrfstree.TreeError), cbs btrfstree.TreeWalkHandler) { + tree := bt.RebuiltTree(treeID) + if tree.RootErr != nil { errHandle(&btrfstree.TreeError{ Path: btrfstree.TreePath{{ FromTree: treeID, ToMaxKey: btrfsprim.MaxKey, }}, - Err: index.TreeRootErr, + Err: tree.RootErr, }) return } var node *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.Node] - index.Items.Range(func(indexItem *containers.RBNode[treeIndexValue]) bool { + tree.Items.Range(func(indexItem *containers.RBNode[oldRebuiltTreeValue]) bool { if ctx.Err() != nil { return false } @@ -330,22 +325,22 @@ func (bt *brokenTrees) TreeWalk(ctx context.Context, treeID btrfsprim.ObjID, err btrfstree.FreeNodeRef(node) } -func (bt *brokenTrees) Superblock() (*btrfstree.Superblock, error) { +func (bt *OldRebuiltForrest) Superblock() (*btrfstree.Superblock, error) { return bt.inner.Superblock() } -func (bt *brokenTrees) ReadAt(p []byte, off btrfsvol.LogicalAddr) (int, error) { +func (bt *OldRebuiltForrest) ReadAt(p []byte, off btrfsvol.LogicalAddr) (int, error) { return bt.inner.ReadAt(p, off) } -func (bt *brokenTrees) Augment(treeID btrfsprim.ObjID, nodeAddr btrfsvol.LogicalAddr) ([]btrfsprim.Key, error) { +func (bt *OldRebuiltForrest) Augment(treeID btrfsprim.ObjID, nodeAddr btrfsvol.LogicalAddr) ([]btrfsprim.Key, error) { sb, err := bt.Superblock() if err != nil { return nil, err } - index := bt.treeIndex(treeID) - if index.TreeRootErr != nil { - return nil, index.TreeRootErr + tree := bt.RebuiltTree(treeID) + if tree.RootErr != nil { + return nil, tree.RootErr } nodeRef, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](bt.inner, *sb, nodeAddr, btrfstree.NodeExpectations{}) defer btrfstree.FreeNodeRef(nodeRef) @@ -358,6 +353,6 @@ func (bt *brokenTrees) Augment(treeID btrfsprim.ObjID, nodeAddr btrfsvol.Logical RootNode: nodeAddr, Level: nodeRef.Data.Head.Level, Generation: nodeRef.Data.Head.Generation, - }, index, &ret) + }, tree, &ret) return ret, nil } diff --git a/lib/btrfsutil/rebuilt_forrest.go b/lib/btrfsutil/rebuilt_forrest.go index 8d4b810..3dfb24c 100644 --- a/lib/btrfsutil/rebuilt_forrest.go +++ b/lib/btrfsutil/rebuilt_forrest.go @@ -18,7 +18,7 @@ import ( "git.lukeshu.com/btrfs-progs-ng/lib/textui" ) -type Callbacks interface { +type RebuiltForrestCallbacks interface { AddedItem(ctx context.Context, tree btrfsprim.ObjID, key btrfsprim.Key) AddedRoot(ctx context.Context, tree btrfsprim.ObjID, root btrfsvol.LogicalAddr) LookupRoot(ctx context.Context, tree 
btrfsprim.ObjID) (offset btrfsprim.Generation, item btrfsitem.Root, ok bool) @@ -29,23 +29,23 @@ type Callbacks interface { // potentially broken btrees. // // It is conceptually a btrfstree.TreeOperator, and adds similar -// broken-tree handling to btrfsutil.BrokenForrest. However, the API -// is different thant btrfstree.TreeOperator, and is much more -// efficient than btrfsutil.BrokenForrest. +// broken-tree handling to OldRebuiltForrest. However, the API is +// different than btrfstree.TreeOperator, and is much more efficient +// than OldRebuiltForrest. // // The efficiency improvements are possible because of the API // differences, which are necessary for how it is used in -// rebuildnodes: +// rebuildtrees: // -// - it consumes an already-read graph.Graph instead of reading the -// graph itself +// - it consumes an already-read Graph instead of reading the graph +// itself // // - it does not use `btrfstree.TreePath` // // - it does not keep track of errors encountered in a tree // -// Additionally, it provides some functionality that -// btrfsutil.BrokenForrest does not: +// Additionally, it provides some functionality that OldRebuiltForrest +// does not: // // - it provides a .LeafToRoots() method to advise on what // additional roots should be added @@ -60,7 +60,7 @@ type RebuiltForrest struct { sb btrfstree.Superblock graph Graph keyIO *KeyIO - cb Callbacks + cb RebuiltForrestCallbacks // mutable treesMu nestedMutex @@ -72,7 +72,7 @@ type RebuiltForrest struct { // NewRebuiltForrest returns a new RebuiltForrest instance. All of // the callbacks must be non-nil. -func NewRebuiltForrest(sb btrfstree.Superblock, graph Graph, keyIO *KeyIO, cb Callbacks) *RebuiltForrest { +func NewRebuiltForrest(sb btrfstree.Superblock, graph Graph, keyIO *KeyIO, cb RebuiltForrestCallbacks) *RebuiltForrest { return &RebuiltForrest{ sb: sb, graph: graph, -- cgit v1.2.3-54-g00ecf From fb3595976840203649ce898efd7b14af924b86f0 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Sun, 12 Mar 2023 17:11:40 -0600 Subject: Update error messages to reflect new file/package names --- lib/btrfsutil/rebuilt_readitem.go | 6 +++--- lib/btrfsutil/rebuilt_tree.go | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'lib/btrfsutil') diff --git a/lib/btrfsutil/rebuilt_readitem.go b/lib/btrfsutil/rebuilt_readitem.go index aa432bb..57440cf 100644 --- a/lib/btrfsutil/rebuilt_readitem.go +++ b/lib/btrfsutil/rebuilt_readitem.go @@ -157,14 +157,14 @@ func (o *KeyIO) ReadItem(ctx context.Context, ptr ItemPtr) btrfsitem.Item { o.mu.Lock() defer o.mu.Unlock() if o.graph.Nodes[ptr.Node].Level != 0 { - panic(fmt.Errorf("should not happen: keyio.Handle.ReadItem called for non-leaf node@%v", ptr.Node)) + panic(fmt.Errorf("should not happen: btrfsutil.KeyIO.ReadItem called for non-leaf node@%v", ptr.Node)) } if ptr.Idx < 0 { - panic(fmt.Errorf("should not happen: keyio.Handle.ReadItem called for negative item index: %v", ptr.Idx)) + panic(fmt.Errorf("should not happen: btrfsutil.KeyIO.ReadItem called for negative item index: %v", ptr.Idx)) } items := o.readNode(ctx, ptr.Node).Data.BodyLeaf if ptr.Idx >= len(items) { - panic(fmt.Errorf("should not happen: keyio.Handle.ReadItem called for out-of-bounds item index: index=%v len=%v", + panic(fmt.Errorf("should not happen: btrfsutil.KeyIO.ReadItem called for out-of-bounds item index: index=%v len=%v", ptr.Idx, len(items))) } return items[ptr.Idx].Body.CloneItem() diff --git a/lib/btrfsutil/rebuilt_tree.go b/lib/btrfsutil/rebuilt_tree.go index e61b45b..2f6afbe 100644 --- 
a/lib/btrfsutil/rebuilt_tree.go +++ b/lib/btrfsutil/rebuilt_tree.go @@ -327,7 +327,7 @@ func (tree *RebuiltTree) COWDistance(parentID btrfsprim.ObjID) (dist int, ok boo func (tree *RebuiltTree) ReadItem(ctx context.Context, key btrfsprim.Key) btrfsitem.Item { ptr, ok := tree.Items(ctx).Load(key) if !ok { - panic(fmt.Errorf("should not happen: btrees.RebuiltTree.ReadItem called for not-included key: %v", key)) + panic(fmt.Errorf("should not happen: btrfsutil.RebuiltTree.ReadItem called for not-included key: %v", key)) } return tree.forrest.keyIO.ReadItem(ctx, ptr) } -- cgit v1.2.3-54-g00ecf From e92796fed05143239733d3feec0231a69af2f617 Mon Sep 17 00:00:00 2001 From: Luke Shumaker Date: Sun, 12 Mar 2023 17:09:41 -0600 Subject: Update log field names to reflect new file/package names --- cmd/btrfs-rec/inspect/rebuildmappings/process.go | 14 ++-- .../rebuildmappings/process_matchsums_fuzzy.go | 4 +- cmd/btrfs-rec/inspect/rebuildmappings/scan.go | 2 +- cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go | 30 ++++---- .../inspect/rebuildtrees/rebuild_wanttyp.go | 4 +- cmd/btrfs-rec/inspect/rebuildtrees/scan.go | 3 +- lib/btrfsutil/graph.go | 9 +-- lib/btrfsutil/rebuilt_forrest.go | 2 +- lib/btrfsutil/rebuilt_tree.go | 8 +- lib/textui/log.go | 88 ++++++++++++---------- 10 files changed, 87 insertions(+), 77 deletions(-) (limited to 'lib/btrfsutil') diff --git a/cmd/btrfs-rec/inspect/rebuildmappings/process.go b/cmd/btrfs-rec/inspect/rebuildmappings/process.go index 4ef4dac..7ce3748 100644 --- a/cmd/btrfs-rec/inspect/rebuildmappings/process.go +++ b/cmd/btrfs-rec/inspect/rebuildmappings/process.go @@ -54,7 +54,7 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults ScanDevicesR dlog.Infof(ctx, "plan: 6/6 search for block groups in checksum map (fuzzy)") _ctx := ctx - ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "1/6") + ctx = dlog.WithField(_ctx, "btrfs.inspect.rebuild-mappings.process.step", "1/6") dlog.Infof(_ctx, "1/6: Processing %d chunks...", numChunks) for _, devID := range devIDs { devResults := scanResults[devID] @@ -68,7 +68,7 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults ScanDevicesR } dlog.Info(_ctx, "... done processing chunks") - ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "2/6") + ctx = dlog.WithField(_ctx, "btrfs.inspect.rebuild-mappings.process.step", "2/6") dlog.Infof(_ctx, "2/6: Processing %d device extents...", numDevExts) for _, devID := range devIDs { devResults := scanResults[devID] @@ -84,7 +84,7 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults ScanDevicesR // too much. (Because nodes are numerous and small, while the // others are few and large; so it is likely that many of the // nodes will be subsumed by other things.) - ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "3/6") + ctx = dlog.WithField(_ctx, "btrfs.inspect.rebuild-mappings.process.step", "3/6") dlog.Infof(_ctx, "3/6: Processing %d nodes...", numNodes) for _, devID := range devIDs { devResults := scanResults[devID] @@ -109,7 +109,7 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults ScanDevicesR // Use block groups to add missing flags (and as a hint to // combine node entries). 
- ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "4/6") + ctx = dlog.WithField(_ctx, "btrfs.inspect.rebuild-mappings.process.step", "4/6") dlog.Infof(_ctx, "4/6: Processing %d block groups...", numBlockGroups) // First dedup them, because they change for allocations and // CoW means that they'll bounce around a lot, so you likely @@ -157,7 +157,7 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults ScanDevicesR // The fuzzy-search is only fast because the exact-search is so good at getting `physicalBlocks` down. // Empirically: if I remove the exact-search step, then the fuzzy-match step is more than an order of magnitude // slower. - ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "5/6") + ctx = dlog.WithField(_ctx, "btrfs.inspect.rebuild-mappings.process.step", "5/6") dlog.Infof(_ctx, "5/6: Searching for %d block groups in checksum map (exact)...", len(bgs)) physicalSums := ExtractPhysicalSums(scanResults) logicalSums := ExtractLogicalSums(ctx, scanResults) @@ -166,14 +166,14 @@ func RebuildMappings(ctx context.Context, fs *btrfs.FS, scanResults ScanDevicesR } dlog.Info(ctx, "... done searching for exact block groups") - ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "6/6") + ctx = dlog.WithField(_ctx, "btrfs.inspect.rebuild-mappings.process.step", "6/6") dlog.Infof(_ctx, "6/6: Searching for %d block groups in checksum map (fuzzy)...", len(bgs)) if err := matchBlockGroupSumsFuzzy(ctx, fs, bgs, physicalSums, logicalSums); err != nil { return err } dlog.Info(_ctx, "... done searching for fuzzy block groups") - ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.step", "report") + ctx = dlog.WithField(_ctx, "btrfs.inspect.rebuild-mappings.process.step", "report") dlog.Info(_ctx, "report:") unmappedPhysicalRegions := ListUnmappedPhysicalRegions(fs) diff --git a/cmd/btrfs-rec/inspect/rebuildmappings/process_matchsums_fuzzy.go b/cmd/btrfs-rec/inspect/rebuildmappings/process_matchsums_fuzzy.go index d6ea338..00f367f 100644 --- a/cmd/btrfs-rec/inspect/rebuildmappings/process_matchsums_fuzzy.go +++ b/cmd/btrfs-rec/inspect/rebuildmappings/process_matchsums_fuzzy.go @@ -45,7 +45,7 @@ func matchBlockGroupSumsFuzzy(ctx context.Context, ) error { _ctx := ctx - ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.substep", "indexing") + ctx = dlog.WithField(_ctx, "btrfs.inspect.rebuild-mappings.process.substep", "indexing") dlog.Info(ctx, "Indexing physical regions...") // O(m) regions := ListUnmappedPhysicalRegions(fs) physicalIndex := make(map[btrfssum.ShortSum][]btrfsvol.QualifiedPhysicalAddr) @@ -62,7 +62,7 @@ func matchBlockGroupSumsFuzzy(ctx context.Context, } dlog.Info(ctx, "... done indexing") - ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-mappings.substep", "searching") + ctx = dlog.WithField(_ctx, "btrfs.inspect.rebuild-mappings.process.substep", "searching") dlog.Info(ctx, "Searching...") numBlockgroups := len(blockgroups) for i, bgLAddr := range maps.SortedKeys(blockgroups) { diff --git a/cmd/btrfs-rec/inspect/rebuildmappings/scan.go b/cmd/btrfs-rec/inspect/rebuildmappings/scan.go index b67beaf..2128a48 100644 --- a/cmd/btrfs-rec/inspect/rebuildmappings/scan.go +++ b/cmd/btrfs-rec/inspect/rebuildmappings/scan.go @@ -110,7 +110,7 @@ var sbSize = btrfsvol.PhysicalAddr(binstruct.StaticSize(btrfstree.Superblock{})) // ScanOneDevice mostly mimics btrfs-progs // cmds/rescue-chunk-recover.c:scan_one_device(). 
func ScanOneDevice(ctx context.Context, dev *btrfs.Device, sb btrfstree.Superblock) (ScanOneDeviceResult, error) { - ctx = dlog.WithField(ctx, "btrfsinspect.scandevices.dev", dev.Name()) + ctx = dlog.WithField(ctx, "btrfs.inspect.rebuild-mappings.scan.dev", dev.Name()) result := ScanOneDeviceResult{ FoundNodes: make(map[btrfsvol.LogicalAddr][]btrfsvol.PhysicalAddr), diff --git a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go index 565cb06..bbfcdde 100644 --- a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go +++ b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild.go @@ -75,7 +75,7 @@ type Rebuilder interface { } func NewRebuilder(ctx context.Context, fs *btrfs.FS, nodeScanResults rebuildmappings.ScanDevicesResult) (Rebuilder, error) { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.step", "read-fs-data") + ctx = dlog.WithField(ctx, "btrfs.inspect.rebuild-trees.step", "read-fs-data") sb, nodeGraph, keyIO, err := ScanDevices(ctx, fs, nodeScanResults) // ScanDevices does its own logging if err != nil { return nil, err @@ -95,7 +95,7 @@ func (o *rebuilder) ListRoots(ctx context.Context) map[btrfsprim.ObjID]container } func (o *rebuilder) Rebuild(ctx context.Context) error { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.step", "rebuild") + ctx = dlog.WithField(ctx, "btrfs.inspect.rebuild-trees.step", "rebuild") // Initialize o.retryItemQueue = make(map[btrfsprim.ObjID]containers.Set[keyAndTree]) @@ -113,7 +113,7 @@ func (o *rebuilder) Rebuild(ctx context.Context) error { // Run for passNum := 0; len(o.treeQueue) > 0 || len(o.addedItemQueue) > 0 || len(o.settledItemQueue) > 0 || len(o.augmentQueue) > 0; passNum++ { - ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.pass", passNum) + ctx := dlog.WithField(ctx, "btrfs.inspect.rebuild-trees.rebuild.pass", passNum) // Crawl trees (Drain o.treeQueue, fill o.addedItemQueue). if err := o.processTreeQueue(ctx); err != nil { @@ -146,7 +146,7 @@ func (o *rebuilder) Rebuild(ctx context.Context) error { // processTreeQueue drains o.treeQueue, filling o.addedItemQueue. func (o *rebuilder) processTreeQueue(ctx context.Context) error { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "collect-items") + ctx = dlog.WithField(ctx, "btrfs.inspect.rebuild-trees.rebuild.substep", "collect-items") queue := maps.SortedKeys(o.treeQueue) o.treeQueue = make(containers.Set[btrfsprim.ObjID]) @@ -180,7 +180,7 @@ func (s settleItemStats) String() string { // processAddedItemQueue drains o.addedItemQueue, filling o.augmentQueue and o.settledItemQueue. 
func (o *rebuilder) processAddedItemQueue(ctx context.Context) error { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "settle-items") + ctx = dlog.WithField(ctx, "btrfs.inspect.rebuild-trees.rebuild.substep", "settle-items") queue := maps.Keys(o.addedItemQueue) o.addedItemQueue = make(containers.Set[keyAndTree]) @@ -191,13 +191,13 @@ func (o *rebuilder) processAddedItemQueue(ctx context.Context) error { var progress settleItemStats progress.D = len(queue) progressWriter := textui.NewProgress[settleItemStats](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress) + ctx = dlog.WithField(ctx, "btrfs.inspect.rebuild-trees.rebuild.substep.progress", &progress) for i, key := range queue { progress.N = i progressWriter.Set(progress) - ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.settle.item", key) + ctx := dlog.WithField(ctx, "btrfs.inspect.rebuild-trees.rebuild.settle.item", key) tree := o.rebuilt.Tree(ctx, key.TreeID) incPtr, ok := tree.Items(ctx).Load(key.Key) if !ok { @@ -239,7 +239,7 @@ func (s processItemStats) String() string { // processSettledItemQueue drains o.settledItemQueue, filling o.augmentQueue and o.treeQueue. func (o *rebuilder) processSettledItemQueue(ctx context.Context) error { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "process-items") + ctx = dlog.WithField(ctx, "btrfs.inspect.rebuild-trees.rebuild.substep", "process-items") queue := maps.Keys(o.settledItemQueue) o.settledItemQueue = make(containers.Set[keyAndTree]) @@ -250,7 +250,7 @@ func (o *rebuilder) processSettledItemQueue(ctx context.Context) error { var progress processItemStats progress.D = len(queue) progressWriter := textui.NewProgress[processItemStats](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress) + ctx = dlog.WithField(ctx, "btrfs.inspect.rebuild-trees.rebuild.substep.progress", &progress) type keyAndBody struct { keyAndTree @@ -264,7 +264,7 @@ func (o *rebuilder) processSettledItemQueue(ctx context.Context) error { if err := ctx.Err(); err != nil { return err } - ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.process.item", key) + ctx := dlog.WithField(ctx, "btrfs.inspect.rebuild-trees.rebuild.process.item", key) item := keyAndBody{ keyAndTree: key, Body: o.rebuilt.Tree(ctx, key.TreeID).ReadItem(ctx, key.Key), @@ -280,7 +280,7 @@ func (o *rebuilder) processSettledItemQueue(ctx context.Context) error { defer progressWriter.Done() o.curKey.Key.OK = true for item := range itemChan { - ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.process.item", item.keyAndTree) + ctx := dlog.WithField(ctx, "btrfs.inspect.rebuild-trees.rebuild.process.item", item.keyAndTree) o.curKey.TreeID = item.TreeID o.curKey.Key.Val = item.Key btrfscheck.HandleItem(o, ctx, item.TreeID, btrfstree.Item{ @@ -304,7 +304,7 @@ func (o *rebuilder) processSettledItemQueue(ctx context.Context) error { // processAugmentQueue drains o.augmentQueue (and maybe o.retryItemQueue), filling o.addedItemQueue. 
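[Editorial illustration, not part of the patch: the processTreeQueue/processAddedItemQueue/processSettledItemQueue/processAugmentQueue helpers being relabeled here feed one another, so Rebuild keeps running passes until every queue drains. The sketch below shows only that fixpoint shape; the types and names are invented, the real code uses sets keyed by tree ID and item key, and the caller seeds q.trees before the loop starts.]

package rebuildsketch

// queues stands in for the rebuilder's four work queues.
type queues struct {
	trees, addedItems, settledItems, augments []string
}

func (q *queues) empty() bool {
	return len(q.trees) == 0 && len(q.addedItems) == 0 &&
		len(q.settledItems) == 0 && len(q.augments) == 0
}

// drain empties a queue and returns its old contents, so that work
// added while processing is deferred to the next pass.
func drain(q *[]string) []string {
	out := *q
	*q = nil
	return out
}

// rebuildLoop mirrors the shape of the pass loop: each pass drains
// every queue once, and each step may refill queues consumed by
// earlier or later steps, so the loop runs until all queues are empty.
func rebuildLoop(q *queues,
	crawlTree func(string) (addedItems []string),
	settleItem func(string) (settled, augments []string),
	processItem func(string) (augments, trees []string),
	applyAugment func(string) (addedItems []string),
) {
	for !q.empty() {
		for _, t := range drain(&q.trees) {
			q.addedItems = append(q.addedItems, crawlTree(t)...)
		}
		for _, it := range drain(&q.addedItems) {
			settled, augments := settleItem(it)
			q.settledItems = append(q.settledItems, settled...)
			q.augments = append(q.augments, augments...)
		}
		for _, it := range drain(&q.settledItems) {
			augments, trees := processItem(it)
			q.augments = append(q.augments, augments...)
			q.trees = append(q.trees, trees...)
		}
		for _, a := range drain(&q.augments) {
			q.addedItems = append(q.addedItems, applyAugment(a)...)
		}
	}
}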
func (o *rebuilder) processAugmentQueue(ctx context.Context) error { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep", "apply-augments") + ctx = dlog.WithField(ctx, "btrfs.inspect.rebuild-trees.rebuild.substep", "apply-augments") resolvedAugments := make(map[btrfsprim.ObjID]containers.Set[btrfsvol.LogicalAddr], len(o.augmentQueue)) var progress textui.Portion[int] @@ -312,7 +312,7 @@ func (o *rebuilder) processAugmentQueue(ctx context.Context) error { if err := ctx.Err(); err != nil { return err } - ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.augment.tree", treeID) + ctx := dlog.WithField(ctx, "btrfs.inspect.rebuild-trees.rebuild.augment.tree", treeID) resolvedAugments[treeID] = o.resolveTreeAugments(ctx, treeID) progress.D += len(resolvedAugments[treeID]) } @@ -322,9 +322,9 @@ func (o *rebuilder) processAugmentQueue(ctx context.Context) error { runtime.GC() progressWriter := textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.substep.progress", &progress) + ctx = dlog.WithField(ctx, "btrfs.inspect.rebuild-trees.rebuild.substep.progress", &progress) for _, treeID := range maps.SortedKeys(resolvedAugments) { - ctx := dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.augment.tree", treeID) + ctx := dlog.WithField(ctx, "btrfs.inspect.rebuild-trees.rebuild.augment.tree", treeID) for _, nodeAddr := range maps.SortedKeys(resolvedAugments[treeID]) { if err := ctx.Err(); err != nil { progressWriter.Set(progress) diff --git a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wanttyp.go b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wanttyp.go index fa1ba98..a517579 100644 --- a/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wanttyp.go +++ b/cmd/btrfs-rec/inspect/rebuildtrees/rebuild_wanttyp.go @@ -96,8 +96,8 @@ func (o WantWithTree) String() string { } const ( - logFieldItemWant = "btrfsinspect.rebuild-nodes.rebuild.want" - logFieldTreeWant = "btrfsinspect.rebuild-nodes.rebuild.add-tree.want" + logFieldItemWant = "btrfs.inspect.rebuild-trees.rebuild.want" + logFieldTreeWant = "btrfs.util.rebuilt-forrest.add-tree.want" ) func withWant(ctx context.Context, logField, reason string, wantKey WantWithTree) context.Context { diff --git a/cmd/btrfs-rec/inspect/rebuildtrees/scan.go b/cmd/btrfs-rec/inspect/rebuildtrees/scan.go index 03261e0..2995a2e 100644 --- a/cmd/btrfs-rec/inspect/rebuildtrees/scan.go +++ b/cmd/btrfs-rec/inspect/rebuildtrees/scan.go @@ -32,7 +32,7 @@ func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults rebuildmappings. var stats textui.Portion[int] stats.D = countNodes(scanResults) progressWriter := textui.NewProgress[textui.Portion[int]]( - dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.read.substep", "read-nodes"), + dlog.WithField(ctx, "btrfs.inspect.rebuild-trees.read.substep", "read-nodes"), dlog.LogLevelInfo, textui.Tunable(1*time.Second)) nodeGraph := btrfsutil.NewGraph(*sb) @@ -67,6 +67,7 @@ func ScanDevices(ctx context.Context, fs *btrfs.FS, scanResults rebuildmappings. progressWriter.Done() dlog.Info(ctx, "... 
done reading node data") + ctx = dlog.WithField(ctx, "btrfs.inspect.rebuild-trees.read.substep", "check") if err := nodeGraph.FinalCheck(ctx, fs, *sb); err != nil { return btrfstree.Superblock{}, btrfsutil.Graph{}, nil, err } diff --git a/lib/btrfsutil/graph.go b/lib/btrfsutil/graph.go index b4a8b72..8debe9d 100644 --- a/lib/btrfsutil/graph.go +++ b/lib/btrfsutil/graph.go @@ -193,10 +193,8 @@ func (g Graph) InsertNode(nodeRef *diskio.Ref[btrfsvol.LogicalAddr, btrfstree.No func (g Graph) FinalCheck(ctx context.Context, fs diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Superblock) error { var stats textui.Portion[int] - _ctx := ctx - ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-nodes.read.substep", "check-keypointers") - dlog.Info(_ctx, "Checking keypointers for dead-ends...") + dlog.Info(ctx, "Checking keypointers for dead-ends...") progressWriter := textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) stats.D = len(g.EdgesTo) progressWriter.Set(stats) @@ -217,8 +215,7 @@ func (g Graph) FinalCheck(ctx context.Context, fs diskio.File[btrfsvol.LogicalAd progressWriter.Done() dlog.Info(ctx, "... done checking keypointers") - ctx = dlog.WithField(_ctx, "btrfsinspect.rebuild-nodes.read.substep", "check-for-loops") - dlog.Info(_ctx, "Checking for btree loops...") + dlog.Info(ctx, "Checking for btree loops...") stats.D = len(g.Nodes) stats.N = 0 progressWriter = textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second)) @@ -255,7 +252,7 @@ func (g Graph) FinalCheck(ctx context.Context, fs diskio.File[btrfsvol.LogicalAd if numLoops > 0 { return fmt.Errorf("%d btree loops", numLoops) } - dlog.Info(_ctx, "... done checking for loops") + dlog.Info(ctx, "... done checking for loops") return nil } diff --git a/lib/btrfsutil/rebuilt_forrest.go b/lib/btrfsutil/rebuilt_forrest.go index 3dfb24c..70ece13 100644 --- a/lib/btrfsutil/rebuilt_forrest.go +++ b/lib/btrfsutil/rebuilt_forrest.go @@ -118,7 +118,7 @@ func (ts *RebuiltForrest) addTree(ctx context.Context, treeID btrfsprim.ObjID, s } }() stack = append(stack, treeID) - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-tree", stack) + ctx = dlog.WithField(ctx, "btrfs.util.rebuilt-forrest.add-tree", stack) dlog.Info(ctx, "adding tree...") if slices.Contains(treeID, stack[:len(stack)-1]) { dlog.Errorf(ctx, "failed to add tree: loop detected: %v", stack) diff --git a/lib/btrfsutil/rebuilt_tree.go b/lib/btrfsutil/rebuilt_tree.go index 2f6afbe..1009204 100644 --- a/lib/btrfsutil/rebuilt_tree.go +++ b/lib/btrfsutil/rebuilt_tree.go @@ -48,7 +48,7 @@ type RebuiltTree struct { // .isOwnerOK, whether or not they're in the tree. func (tree *RebuiltTree) leafToRoots(ctx context.Context) map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr] { return containers.LoadOrElse[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]](&tree.forrest.leafs, tree.ID, func(btrfsprim.ObjID) map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr] { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.index-nodes", fmt.Sprintf("tree=%v", tree.ID)) + ctx = dlog.WithField(ctx, "btrfs.util.rebuilt-tree.index-nodes", fmt.Sprintf("tree=%v", tree.ID)) nodeToRoots := make(map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]) @@ -136,7 +136,7 @@ func (tree *RebuiltTree) isOwnerOK(owner btrfsprim.ObjID, gen btrfsprim.Generati // Do not mutate the returned map; it is a pointer to the // RebuiltTree's internal map! 
func (tree *RebuiltTree) Items(ctx context.Context) *containers.SortedMap[btrfsprim.Key, ItemPtr] { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.index-inc-items", fmt.Sprintf("tree=%v", tree.ID)) + ctx = dlog.WithField(ctx, "btrfs.util.rebuilt-tree.index-inc-items", fmt.Sprintf("tree=%v", tree.ID)) return tree.items(ctx, &tree.forrest.incItems, tree.Roots.HasAny) } @@ -146,7 +146,7 @@ func (tree *RebuiltTree) Items(ctx context.Context) *containers.SortedMap[btrfsp // Do not mutate the returned map; it is a pointer to the // RebuiltTree's internal map! func (tree *RebuiltTree) PotentialItems(ctx context.Context) *containers.SortedMap[btrfsprim.Key, ItemPtr] { - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.index-exc-items", fmt.Sprintf("tree=%v", tree.ID)) + ctx = dlog.WithField(ctx, "btrfs.util.rebuilt-tree.index-exc-items", fmt.Sprintf("tree=%v", tree.ID)) return tree.items(ctx, &tree.forrest.excItems, func(roots containers.Set[btrfsvol.LogicalAddr]) bool { return !tree.Roots.HasAny(roots) @@ -267,7 +267,7 @@ func (s rootStats) String() string { func (tree *RebuiltTree) AddRoot(ctx context.Context, rootNode btrfsvol.LogicalAddr) { tree.mu.Lock() defer tree.mu.Unlock() - ctx = dlog.WithField(ctx, "btrfsinspect.rebuild-nodes.rebuild.add-root", fmt.Sprintf("tree=%v rootNode=%v", tree.ID, rootNode)) + ctx = dlog.WithField(ctx, "btrfs.util.rebuilt-tree.add-root", fmt.Sprintf("tree=%v rootNode=%v", tree.ID, rootNode)) dlog.Info(ctx, "adding root...") leafToRoots := tree.leafToRoots(ctx) diff --git a/lib/textui/log.go b/lib/textui/log.go index 2a6fdd4..0a10ef6 100644 --- a/lib/textui/log.go +++ b/lib/textui/log.go @@ -289,57 +289,59 @@ func fieldOrd(key string) int { case "dexec.err": return -95 - // btrfsinspect scandevices //////////////////////////////////////////// - case "btrfsinspect.scandevices.dev": + // btrfs inspect rebuild-mappings scan ///////////////////////////////// + case "btrfs.inspect.rebuild-mappings.scan.dev": return -1 - // btrfsinspect rebuild-mappings /////////////////////////////////////// - case "btrfsinspect.rebuild-mappings.step": + // btrfs inspect rebuild-mappings process ////////////////////////////// + case "btrfs.inspect.rebuild-mappings.process.step": return -2 - case "btrfsinspect.rebuild-mappings.substep": + case "btrfs.inspect.rebuild-mappings.process.substep": return -1 - // btrfsinspect rebuild-nodes ////////////////////////////////////////// - case "btrfsinspect.rebuild-nodes.step": + // btrfs inspect rebuild-trees ///////////////////////////////////////// + case "btrfs.inspect.rebuild-trees.step": return -50 // step=read-fs-data - case "btrfsinspect.rebuild-nodes.read.substep": + case "btrfs.inspect.rebuild-trees.read.substep": return -1 // step=rebuild - case "btrfsinspect.rebuild-nodes.rebuild.pass": + case "btrfs.inspect.rebuild-trees.rebuild.pass": return -49 - case "btrfsinspect.rebuild-nodes.rebuild.substep": + case "btrfs.inspect.rebuild-trees.rebuild.substep": return -48 - case "btrfsinspect.rebuild-nodes.rebuild.substep.progress": + case "btrfs.inspect.rebuild-trees.rebuild.substep.progress": return -47 // step=rebuild, substep=collect-items (1/3) // step=rebuild, substep=settle-items (2a/3) - case "btrfsinspect.rebuild-nodes.rebuild.settle.item": + case "btrfs.inspect.rebuild-trees.rebuild.settle.item": return -25 // step=rebuild, substep=process-items (2b/3) - case "btrfsinspect.rebuild-nodes.rebuild.process.item": + case "btrfs.inspect.rebuild-trees.rebuild.process.item": return -25 // step=rebuild, 
substep=apply-augments (3/3) - case "btrfsinspect.rebuild-nodes.rebuild.augment.tree": + case "btrfs.inspect.rebuild-trees.rebuild.augment.tree": return -25 // step=rebuild (any substep) - case "btrfsinspect.rebuild-nodes.rebuild.want.key": + case "btrfs.inspect.rebuild-trees.rebuild.want.key": return -9 - case "btrfsinspect.rebuild-nodes.rebuild.want.reason": + case "btrfs.inspect.rebuild-trees.rebuild.want.reason": return -8 - case "btrfsinspect.rebuild-nodes.rebuild.add-tree": + + // btrfsutil.RebuiltForrest //////////////////////////////////////////// + case "btrfs.util.rebuilt-forrest.add-tree": return -7 - case "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.key": + case "btrfs.util.rebuilt-forrest.add-tree.want.key": return -6 - case "btrfsinspect.rebuild-nodes.rebuild.add-tree.want.reason": + case "btrfs.util.rebuilt-forrest.add-tree.want.reason": return -5 - case "btrfsinspect.rebuild-nodes.rebuild.add-root": + case "btrfs.util.rebuilt-tree.add-root": return -4 - case "btrfsinspect.rebuild-nodes.rebuild.index-inc-items": + case "btrfs.util.rebuilt-tree.index-inc-items": return -3 - case "btrfsinspect.rebuild-nodes.rebuild.index-exc-items": + case "btrfs.util.rebuilt-tree.index-exc-items": return -2 - case "btrfsinspect.rebuild-nodes.rebuild.index-nodes": + case "btrfs.util.rebuilt-tree.index-nodes": return -1 // other /////////////////////////////////////////////////////////////// @@ -398,27 +400,37 @@ func writeField(w io.Writer, key string, val any) { case strings.HasSuffix(name, ".pass"): fmt.Fprintf(w, "/pass-%s", valStr) return - case strings.HasSuffix(name, ".substep") && name != "btrfsinspect.rebuild-nodes.rebuild.add-tree.substep": + case strings.HasSuffix(name, ".substep") && name != "btrfs.util.rebuilt-forrest.add-tree.substep": fmt.Fprintf(w, "/%s", valStr) return - case strings.HasPrefix(name, "btrfsinspect."): - name = strings.TrimPrefix(name, "btrfsinspect.") + case strings.HasPrefix(name, "btrfs."): + name = strings.TrimPrefix(name, "btrfs.") switch { - case strings.HasPrefix(name, "scandevices."): - name = strings.TrimPrefix(name, "scandevices.") - case strings.HasPrefix(name, "rebuild-mappings."): - name = strings.TrimPrefix(name, "rebuild-mappings.") - case strings.HasPrefix(name, "rebuild-nodes."): - name = strings.TrimPrefix(name, "rebuild-nodes.") + case strings.HasPrefix(name, "inspect."): + name = strings.TrimPrefix(name, "inspect.") switch { - case strings.HasPrefix(name, "read."): - name = strings.TrimPrefix(name, "read.") - case strings.HasPrefix(name, "rebuild."): - name = strings.TrimPrefix(name, "rebuild.") + case strings.HasPrefix(name, "rebuild-mappings."): + name = strings.TrimPrefix(name, "rebuild-mappings.") + switch { + case strings.HasPrefix(name, "scan."): + name = strings.TrimPrefix(name, "scan.") + case strings.HasPrefix(name, "process."): + name = strings.TrimPrefix(name, "process.") + } + case strings.HasPrefix(name, "rebuild-trees."): + name = strings.TrimPrefix(name, "rebuild-trees.") + switch { + case strings.HasPrefix(name, "read."): + name = strings.TrimPrefix(name, "read.") + case strings.HasPrefix(name, "rebuild."): + name = strings.TrimPrefix(name, "rebuild.") + } } + case strings.HasPrefix(name, "util.rebuilt-forrest."): + name = strings.TrimPrefix(name, "util.rebuilt-forrest.") + case strings.HasPrefix(name, "util.rebuilt-tree."): + name = strings.TrimPrefix(name, "util.rebuilt-tree.") } - case strings.HasPrefix(name, "btrfs."): - name = strings.TrimPrefix(name, "btrfs.") } fmt.Fprintf(w, " %s=%s", name, valStr) -- cgit 
v1.2.3-54-g00ecf
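[Editorial illustration, not part of the patches above: the fieldOrd/writeField changes in the last commit rely on log-field keys being hierarchical dotted names, with known prefixes trimmed before printing so that log lines stay short once the context is established. The real writeField nests its checks per path component; the toy version below flattens them into one list for brevity.]

package main

import (
	"fmt"
	"strings"
)

// trimKnownPrefixes strips recognized leading components from a
// dotted log-field name; the prefix list here is illustrative.
func trimKnownPrefixes(name string) string {
	for _, prefix := range []string{
		"btrfs.",
		"inspect.", "util.",
		"rebuild-mappings.", "rebuild-trees.",
		"rebuilt-forrest.", "rebuilt-tree.",
		"scan.", "process.", "read.", "rebuild.",
	} {
		name = strings.TrimPrefix(name, prefix)
	}
	return name
}

func main() {
	fmt.Println(trimKnownPrefixes("btrfs.inspect.rebuild-trees.rebuild.substep.progress")) // prints "substep.progress"
	fmt.Println(trimKnownPrefixes("btrfs.util.rebuilt-tree.add-root"))                     // prints "add-root"
}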