author    Luke Shumaker <lukeshu@lukeshu.com>  2023-03-26 10:47:43 -0600
committer Luke Shumaker <lukeshu@lukeshu.com>  2023-03-27 14:36:19 -0600
commit    5b57069df62182b23d5db38180df345fc3c86a85
tree      6f54d05420f35ee688d5d5ceea349f6f453aaeeb
parent    e2fb576c636f4c88dbb2741d1ad72469d6e6b43d
Move more things from containers.ARCache to the new lib/caching API

Replace the remaining containers.ARCache users with caching.Cache, and
split the Load* accessors into Acquire*/Release* pairs so that cached
values are pinned (reference-counted) for as long as a caller uses them.
-rw-r--r--  cmd/btrfs-rec/inspect/lsfiles/lsfiles.go   30
-rw-r--r--  cmd/btrfs-rec/inspect/mount/mount.go       33
-rw-r--r--  lib/btrfs/io4_fs.go                       239
-rw-r--r--  lib/btrfsutil/open.go                       1
-rw-r--r--  lib/btrfsutil/rebuilt_forrest.go           44
-rw-r--r--  lib/btrfsutil/rebuilt_readitem.go          20
-rw-r--r--  lib/btrfsutil/rebuilt_tree.go             134
-rw-r--r--  lib/caching/lrucache.go                    17
-rw-r--r--  lib/diskio/file_blockbuf.go                98
-rw-r--r--  lib/diskio/file_state_test.go               5
10 files changed, 353 insertions, 268 deletions
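
The running change in this commit is a migration from containers.ARCache (accessed via containers.LoadOrElse) to the reference-counted lib/caching API: every Load* accessor becomes an Acquire*/Release* pair, so a cached value is pinned against eviction for as long as a caller holds it. The declarations below are a minimal sketch reconstructed from the call sites in this diff; lib/caching itself is not touched by the patch, so treat this as a reading aid rather than the real source.

    package caching

    import "context"

    // A Source produces values on cache miss.  Load must fully overwrite
    // *v, because cache entries are recycled rather than reallocated.
    type Source[K comparable, V any] interface {
        Load(ctx context.Context, k K, v *V)
        Flush(ctx context.Context, v *V) // write back a dirty *v
    }

    // FuncSource adapts a plain function to a Source, presumably with a
    // no-op Flush (this diff never flushes through a FuncSource).
    type FuncSource[K comparable, V any] func(ctx context.Context, k K, v *V)

    func (fn FuncSource[K, V]) Load(ctx context.Context, k K, v *V) { fn(ctx, k, v) }
    func (FuncSource[K, V]) Flush(context.Context, *V)              {}

    // A Cache holds up to a fixed number of values.  A value is pinned
    // (immune to eviction) from Acquire until the matching Release.
    type Cache[K comparable, V any] interface {
        Acquire(ctx context.Context, k K) *V
        Release(k K)
        Flush(ctx context.Context) // flush every dirty value to the Source
    }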
diff --git a/cmd/btrfs-rec/inspect/lsfiles/lsfiles.go b/cmd/btrfs-rec/inspect/lsfiles/lsfiles.go
index e42050f..af8f690 100644
--- a/cmd/btrfs-rec/inspect/lsfiles/lsfiles.go
+++ b/cmd/btrfs-rec/inspect/lsfiles/lsfiles.go
@@ -84,12 +84,14 @@ func printSubvol(out io.Writer, prefix string, isLast bool, name string, subvol
subvol.TreeID, fmtErr(err)))
return
}
- dir, err := subvol.LoadDir(rootInode)
+
+ dir, err := subvol.AcquireDir(rootInode)
if err != nil {
printText(out, prefix, isLast, name+"/", textui.Sprintf("subvol_id=%v err=%v",
subvol.TreeID, fmtErr(err)))
return
}
+
if name == "/" {
printDir(out, prefix, isLast, name, dir)
return
@@ -127,19 +129,23 @@ func fmtInode(inode btrfs.BareInode) string {
func printDir(out io.Writer, prefix string, isLast bool, name string, dir *btrfs.Dir) {
printText(out, prefix, isLast, name+"/", fmtInode(dir.BareInode))
+ childrenByName := dir.ChildrenByName
+ subvol := dir.SV
+ subvol.ReleaseDir(dir.Inode)
+
if isLast {
prefix += tS
} else {
prefix += tl
}
- for i, childName := range maps.SortedKeys(dir.ChildrenByName) {
+ for i, childName := range maps.SortedKeys(childrenByName) {
printDirEntry(
out,
prefix,
- i == len(dir.ChildrenByName)-1,
- dir.SV,
+ i == len(childrenByName)-1,
+ subvol,
path.Join(name, childName),
- dir.ChildrenByName[childName])
+ childrenByName[childName])
}
}
@@ -151,7 +157,7 @@ func printDirEntry(out io.Writer, prefix string, isLast bool, subvol *btrfs.Subv
case btrfsitem.FT_DIR:
switch entry.Location.ItemType {
case btrfsitem.INODE_ITEM_KEY:
- dir, err := subvol.LoadDir(entry.Location.ObjectID)
+ dir, err := subvol.AcquireDir(entry.Location.ObjectID)
if err != nil {
printText(out, prefix, isLast, name, textui.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
return
@@ -168,44 +174,48 @@ func printDirEntry(out io.Writer, prefix string, isLast bool, subvol *btrfs.Subv
panic(fmt.Errorf("TODO: I don't know how to handle an FT_SYMLINK with location.ItemType=%v: %q",
entry.Location.ItemType, name))
}
- file, err := subvol.LoadFile(entry.Location.ObjectID)
+ file, err := subvol.AcquireFile(entry.Location.ObjectID)
if err != nil {
printText(out, prefix, isLast, name, textui.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
return
}
+ defer subvol.ReleaseFile(entry.Location.ObjectID)
printSymlink(out, prefix, isLast, name, file)
case btrfsitem.FT_REG_FILE:
if entry.Location.ItemType != btrfsitem.INODE_ITEM_KEY {
panic(fmt.Errorf("TODO: I don't know how to handle an FT_REG_FILE with location.ItemType=%v: %q",
entry.Location.ItemType, name))
}
- file, err := subvol.LoadFile(entry.Location.ObjectID)
+ file, err := subvol.AcquireFile(entry.Location.ObjectID)
if err != nil {
printText(out, prefix, isLast, name, textui.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
return
}
+ defer subvol.ReleaseFile(entry.Location.ObjectID)
printFile(out, prefix, isLast, name, file)
case btrfsitem.FT_SOCK:
if entry.Location.ItemType != btrfsitem.INODE_ITEM_KEY {
panic(fmt.Errorf("TODO: I don't know how to handle an FT_SOCK with location.ItemType=%v: %q",
entry.Location.ItemType, name))
}
- file, err := subvol.LoadFile(entry.Location.ObjectID)
+ file, err := subvol.AcquireFile(entry.Location.ObjectID)
if err != nil {
printText(out, prefix, isLast, name, textui.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
return
}
+ defer subvol.ReleaseFile(entry.Location.ObjectID)
printSocket(out, prefix, isLast, name, file)
case btrfsitem.FT_FIFO:
if entry.Location.ItemType != btrfsitem.INODE_ITEM_KEY {
panic(fmt.Errorf("TODO: I don't know how to handle an FT_FIFO with location.ItemType=%v: %q",
entry.Location.ItemType, name))
}
- file, err := subvol.LoadFile(entry.Location.ObjectID)
+ file, err := subvol.AcquireFile(entry.Location.ObjectID)
if err != nil {
printText(out, prefix, isLast, name, textui.Sprintf("%v err=%v", entry.Type, fmtErr(err)))
return
}
+ defer subvol.ReleaseFile(entry.Location.ObjectID)
printPipe(out, prefix, isLast, name, file)
default:
panic(fmt.Errorf("TODO: I don't know how to handle a fileType=%v: %q",
diff --git a/cmd/btrfs-rec/inspect/mount/mount.go b/cmd/btrfs-rec/inspect/mount/mount.go
index d4d2e0a..143131a 100644
--- a/cmd/btrfs-rec/inspect/mount/mount.go
+++ b/cmd/btrfs-rec/inspect/mount/mount.go
@@ -169,8 +169,8 @@ func inodeItemToFUSE(itemBody btrfsitem.Inode) fuseops.InodeAttributes {
}
}
-func (sv *subvolume) LoadDir(inode btrfsprim.ObjID) (val *btrfs.Dir, err error) {
- val, err = sv.Subvolume.LoadDir(inode)
+func (sv *subvolume) AcquireDir(inode btrfsprim.ObjID) (val *btrfs.Dir, err error) {
+ val, err = sv.Subvolume.AcquireDir(inode)
if val != nil {
haveSubvolumes := false
for _, index := range maps.SortedKeys(val.ChildrenByIndex) {
@@ -245,10 +245,11 @@ func (sv *subvolume) LookUpInode(_ context.Context, op *fuseops.LookUpInodeOp) e
op.Parent = fuseops.InodeID(parent)
}
- dir, err := sv.LoadDir(btrfsprim.ObjID(op.Parent))
+ dir, err := sv.AcquireDir(btrfsprim.ObjID(op.Parent))
if err != nil {
return err
}
+ defer sv.Subvolume.ReleaseDir(btrfsprim.ObjID(op.Parent))
entry, ok := dir.ChildrenByName[op.Name]
if !ok {
return syscall.ENOENT
@@ -275,10 +276,13 @@ func (sv *subvolume) LookUpInode(_ context.Context, op *fuseops.LookUpInodeOp) e
}
return nil
}
- bareInode, err := sv.LoadBareInode(entry.Location.ObjectID)
+
+ bareInode, err := sv.AcquireBareInode(entry.Location.ObjectID)
if err != nil {
return err
}
+ defer sv.ReleaseBareInode(entry.Location.ObjectID)
+
op.Entry = fuseops.ChildInodeEntry{
Child: fuseops.InodeID(entry.Location.ObjectID),
Generation: fuseops.GenerationNumber(bareInode.InodeItem.Sequence),
@@ -296,10 +300,11 @@ func (sv *subvolume) GetInodeAttributes(_ context.Context, op *fuseops.GetInodeA
op.Inode = fuseops.InodeID(inode)
}
- bareInode, err := sv.LoadBareInode(btrfsprim.ObjID(op.Inode))
+ bareInode, err := sv.AcquireBareInode(btrfsprim.ObjID(op.Inode))
if err != nil {
return err
}
+ defer sv.Subvolume.ReleaseBareInode(btrfsprim.ObjID(op.Inode))
op.Attributes = inodeItemToFUSE(*bareInode.InodeItem)
return nil
@@ -314,10 +319,12 @@ func (sv *subvolume) OpenDir(_ context.Context, op *fuseops.OpenDirOp) error {
op.Inode = fuseops.InodeID(inode)
}
- dir, err := sv.LoadDir(btrfsprim.ObjID(op.Inode))
+ dir, err := sv.AcquireDir(btrfsprim.ObjID(op.Inode))
if err != nil {
return err
}
+ defer sv.Subvolume.ReleaseDir(btrfsprim.ObjID(op.Inode))
+
handle := sv.newHandle()
sv.dirHandles.Store(handle, &dirState{
Dir: dir,
@@ -369,10 +376,12 @@ func (sv *subvolume) ReleaseDirHandle(_ context.Context, op *fuseops.ReleaseDirH
}
func (sv *subvolume) OpenFile(_ context.Context, op *fuseops.OpenFileOp) error {
- file, err := sv.LoadFile(btrfsprim.ObjID(op.Inode))
+ file, err := sv.AcquireFile(btrfsprim.ObjID(op.Inode))
if err != nil {
return err
}
+ defer sv.Subvolume.ReleaseFile(btrfsprim.ObjID(op.Inode))
+
handle := sv.newHandle()
sv.fileHandles.Store(handle, &fileState{
File: file,
@@ -415,10 +424,12 @@ func (sv *subvolume) ReleaseFileHandle(_ context.Context, op *fuseops.ReleaseFil
}
func (sv *subvolume) ReadSymlink(_ context.Context, op *fuseops.ReadSymlinkOp) error {
- file, err := sv.LoadFile(btrfsprim.ObjID(op.Inode))
+ file, err := sv.AcquireFile(btrfsprim.ObjID(op.Inode))
if err != nil {
return err
}
+ defer sv.Subvolume.ReleaseFile(btrfsprim.ObjID(op.Inode))
+
reader := io.NewSectionReader(file, 0, file.InodeItem.Size)
tgt, err := io.ReadAll(reader)
if err != nil {
@@ -437,10 +448,11 @@ func (sv *subvolume) ListXattr(_ context.Context, op *fuseops.ListXattrOp) error
op.Inode = fuseops.InodeID(inode)
}
- fullInode, err := sv.LoadFullInode(btrfsprim.ObjID(op.Inode))
+ fullInode, err := sv.AcquireFullInode(btrfsprim.ObjID(op.Inode))
if err != nil {
return err
}
+ defer sv.Subvolume.ReleaseFullInode(btrfsprim.ObjID(op.Inode))
size := 0
for name := range fullInode.XAttrs {
@@ -469,10 +481,11 @@ func (sv *subvolume) GetXattr(_ context.Context, op *fuseops.GetXattrOp) error {
op.Inode = fuseops.InodeID(inode)
}
- fullInode, err := sv.LoadFullInode(btrfsprim.ObjID(op.Inode))
+ fullInode, err := sv.AcquireFullInode(btrfsprim.ObjID(op.Inode))
if err != nil {
return err
}
+ defer sv.Subvolume.ReleaseFullInode(btrfsprim.ObjID(op.Inode))
val, ok := fullInode.XAttrs[op.Name]
if !ok {
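
Two details of the mount.go conversion deserve a note. The deferred Release is always installed after the error check, never before: the Acquire* wrappers in io4_fs.go release internally when a load fails, so a failed Acquire must not be paired with a Release. And in request handlers the pin only needs to live for the duration of the handler. A minimal sketch of the pattern as it would look in mount.go (statInode is a hypothetical helper, not part of the patch):

    func (sv *subvolume) statInode(inode btrfsprim.ObjID) (fuseops.InodeAttributes, error) {
        bareInode, err := sv.AcquireBareInode(inode)
        if err != nil {
            return fuseops.InodeAttributes{}, err // no Release: the Acquire failed
        }
        defer sv.Subvolume.ReleaseBareInode(inode) // pin lives as long as bareInode is used
        return inodeItemToFUSE(*bareInode.InodeItem), nil
    }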
diff --git a/lib/btrfs/io4_fs.go b/lib/btrfs/io4_fs.go
index 4a68695..c25614c 100644
--- a/lib/btrfs/io4_fs.go
+++ b/lib/btrfs/io4_fs.go
@@ -19,6 +19,7 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/caching"
"git.lukeshu.com/btrfs-progs-ng/lib/containers"
"git.lukeshu.com/btrfs-progs-ng/lib/diskio"
"git.lukeshu.com/btrfs-progs-ng/lib/maps"
@@ -75,10 +76,10 @@ type Subvolume struct {
rootInfo btrfstree.TreeRoot
rootErr error
- bareInodeCache containers.ARCache[btrfsprim.ObjID, *BareInode]
- fullInodeCache containers.ARCache[btrfsprim.ObjID, *FullInode]
- dirCache containers.ARCache[btrfsprim.ObjID, *Dir]
- fileCache containers.ARCache[btrfsprim.ObjID, *File]
+ bareInodeCache caching.Cache[btrfsprim.ObjID, BareInode]
+ fullInodeCache caching.Cache[btrfsprim.ObjID, FullInode]
+ dirCache caching.Cache[btrfsprim.ObjID, Dir]
+ fileCache caching.Cache[btrfsprim.ObjID, File]
}
func NewSubvolume(
@@ -109,10 +110,14 @@ func NewSubvolume(
}
sv.rootInfo = *rootInfo
- sv.bareInodeCache.MaxLen = textui.Tunable(128)
- sv.fullInodeCache.MaxLen = textui.Tunable(128)
- sv.dirCache.MaxLen = textui.Tunable(128)
- sv.fileCache.MaxLen = textui.Tunable(128)
+ sv.bareInodeCache = caching.NewLRUCache[btrfsprim.ObjID, BareInode](textui.Tunable(128),
+ caching.FuncSource[btrfsprim.ObjID, BareInode](sv.loadBareInode))
+ sv.fullInodeCache = caching.NewLRUCache[btrfsprim.ObjID, FullInode](textui.Tunable(128),
+ caching.FuncSource[btrfsprim.ObjID, FullInode](sv.loadFullInode))
+ sv.dirCache = caching.NewLRUCache[btrfsprim.ObjID, Dir](textui.Tunable(128),
+ caching.FuncSource[btrfsprim.ObjID, Dir](sv.loadDir))
+ sv.fileCache = caching.NewLRUCache[btrfsprim.ObjID, File](textui.Tunable(128),
+ caching.FuncSource[btrfsprim.ObjID, File](sv.loadFile))
return sv
}
@@ -125,113 +130,128 @@ func (sv *Subvolume) GetRootInode() (btrfsprim.ObjID, error) {
return sv.rootInfo.RootInode, sv.rootErr
}
-func (sv *Subvolume) LoadBareInode(inode btrfsprim.ObjID) (*BareInode, error) {
- val := containers.LoadOrElse[btrfsprim.ObjID, *BareInode](&sv.bareInodeCache, inode, func(inode btrfsprim.ObjID) (val *BareInode) {
- val = &BareInode{
- Inode: inode,
- }
- item, err := sv.fs.TreeLookup(sv.TreeID, btrfsprim.Key{
- ObjectID: inode,
- ItemType: btrfsitem.INODE_ITEM_KEY,
- Offset: 0,
- })
- if err != nil {
- val.Errs = append(val.Errs, err)
- return val
- }
+func (sv *Subvolume) AcquireBareInode(inode btrfsprim.ObjID) (*BareInode, error) {
+ val := sv.bareInodeCache.Acquire(sv.ctx, inode)
+ if val.InodeItem == nil {
+ sv.bareInodeCache.Release(inode)
+ return nil, val.Errs
+ }
+ return val, nil
+}
- switch itemBody := item.Body.(type) {
- case *btrfsitem.Inode:
- bodyCopy := itemBody.Clone()
- val.InodeItem = &bodyCopy
- case *btrfsitem.Error:
- val.Errs = append(val.Errs, fmt.Errorf("malformed inode: %w", itemBody.Err))
- default:
- panic(fmt.Errorf("should not happen: INODE_ITEM has unexpected item type: %T", itemBody))
- }
+func (sv *Subvolume) ReleaseBareInode(inode btrfsprim.ObjID) {
+ sv.bareInodeCache.Release(inode)
+}
- return val
+func (sv *Subvolume) loadBareInode(_ context.Context, inode btrfsprim.ObjID, val *BareInode) {
+ *val = BareInode{
+ Inode: inode,
+ }
+ item, err := sv.fs.TreeLookup(sv.TreeID, btrfsprim.Key{
+ ObjectID: inode,
+ ItemType: btrfsitem.INODE_ITEM_KEY,
+ Offset: 0,
})
- if val.InodeItem == nil {
+ if err != nil {
+ val.Errs = append(val.Errs, err)
+ return
+ }
+
+ switch itemBody := item.Body.(type) {
+ case *btrfsitem.Inode:
+ bodyCopy := itemBody.Clone()
+ val.InodeItem = &bodyCopy
+ case *btrfsitem.Error:
+ val.Errs = append(val.Errs, fmt.Errorf("malformed inode: %w", itemBody.Err))
+ default:
+ panic(fmt.Errorf("should not happen: INODE_ITEM has unexpected item type: %T", itemBody))
+ }
+}
+
+func (sv *Subvolume) AcquireFullInode(inode btrfsprim.ObjID) (*FullInode, error) {
+ val := sv.fullInodeCache.Acquire(sv.ctx, inode)
+ if val.InodeItem == nil && val.OtherItems == nil {
+ sv.fullInodeCache.Release(inode)
return nil, val.Errs
}
return val, nil
}
-func (sv *Subvolume) LoadFullInode(inode btrfsprim.ObjID) (*FullInode, error) {
- val := containers.LoadOrElse[btrfsprim.ObjID, *FullInode](&sv.fullInodeCache, inode, func(indoe btrfsprim.ObjID) (val *FullInode) {
- val = &FullInode{
- BareInode: BareInode{
- Inode: inode,
- },
- XAttrs: make(map[string]string),
- }
- items, err := sv.fs.TreeSearchAll(sv.TreeID, btrfstree.SearchObject(inode))
- if err != nil {
- val.Errs = append(val.Errs, err)
- if len(items) == 0 {
- return val
- }
+func (sv *Subvolume) ReleaseFullInode(inode btrfsprim.ObjID) {
+ sv.fullInodeCache.Release(inode)
+}
+
+func (sv *Subvolume) loadFullInode(_ context.Context, inode btrfsprim.ObjID, val *FullInode) {
+ *val = FullInode{
+ BareInode: BareInode{
+ Inode: inode,
+ },
+ XAttrs: make(map[string]string),
+ }
+ items, err := sv.fs.TreeSearchAll(sv.TreeID, btrfstree.SearchObject(inode))
+ if err != nil {
+ val.Errs = append(val.Errs, err)
+ if len(items) == 0 {
+ return
}
- for _, item := range items {
- switch item.Key.ItemType {
- case btrfsitem.INODE_ITEM_KEY:
- switch itemBody := item.Body.(type) {
- case *btrfsitem.Inode:
- if val.InodeItem != nil {
- if !reflect.DeepEqual(itemBody, *val.InodeItem) {
- val.Errs = append(val.Errs, fmt.Errorf("multiple inodes"))
- }
- continue
+ }
+ for _, item := range items {
+ switch item.Key.ItemType {
+ case btrfsitem.INODE_ITEM_KEY:
+ switch itemBody := item.Body.(type) {
+ case *btrfsitem.Inode:
+ if val.InodeItem != nil {
+ if !reflect.DeepEqual(itemBody, *val.InodeItem) {
+ val.Errs = append(val.Errs, fmt.Errorf("multiple inodes"))
}
- bodyCopy := itemBody.Clone()
- val.InodeItem = &bodyCopy
- case *btrfsitem.Error:
- val.Errs = append(val.Errs, fmt.Errorf("malformed INODE_ITEM: %w", itemBody.Err))
- default:
- panic(fmt.Errorf("should not happen: INODE_ITEM has unexpected item type: %T", itemBody))
- }
- case btrfsitem.XATTR_ITEM_KEY:
- switch itemBody := item.Body.(type) {
- case *btrfsitem.DirEntry:
- val.XAttrs[string(itemBody.Name)] = string(itemBody.Data)
- case *btrfsitem.Error:
- val.Errs = append(val.Errs, fmt.Errorf("malformed XATTR_ITEM: %w", itemBody.Err))
- default:
- panic(fmt.Errorf("should not happen: XATTR_ITEM has unexpected item type: %T", itemBody))
+ continue
}
+ bodyCopy := itemBody.Clone()
+ val.InodeItem = &bodyCopy
+ case *btrfsitem.Error:
+ val.Errs = append(val.Errs, fmt.Errorf("malformed INODE_ITEM: %w", itemBody.Err))
default:
- val.OtherItems = append(val.OtherItems, item)
+ panic(fmt.Errorf("should not happen: INODE_ITEM has unexpected item type: %T", itemBody))
}
+ case btrfsitem.XATTR_ITEM_KEY:
+ switch itemBody := item.Body.(type) {
+ case *btrfsitem.DirEntry:
+ val.XAttrs[string(itemBody.Name)] = string(itemBody.Data)
+ case *btrfsitem.Error:
+ val.Errs = append(val.Errs, fmt.Errorf("malformed XATTR_ITEM: %w", itemBody.Err))
+ default:
+ panic(fmt.Errorf("should not happen: XATTR_ITEM has unexpected item type: %T", itemBody))
+ }
+ default:
+ val.OtherItems = append(val.OtherItems, item)
}
- return val
- })
- if val.InodeItem == nil && val.OtherItems == nil {
- return nil, val.Errs
}
- return val, nil
}
-func (sv *Subvolume) LoadDir(inode btrfsprim.ObjID) (*Dir, error) {
- val := containers.LoadOrElse[btrfsprim.ObjID, *Dir](&sv.dirCache, inode, func(inode btrfsprim.ObjID) (val *Dir) {
- val = new(Dir)
- fullInode, err := sv.LoadFullInode(inode)
- if err != nil {
- val.Errs = append(val.Errs, err)
- return val
- }
- val.FullInode = *fullInode
- val.SV = sv
- val.populate()
- return val
- })
+func (sv *Subvolume) AcquireDir(inode btrfsprim.ObjID) (*Dir, error) {
+ val := sv.dirCache.Acquire(sv.ctx, inode)
if val.Inode == 0 {
+ sv.dirCache.Release(inode)
return nil, val.Errs
}
return val, nil
}
-func (dir *Dir) populate() {
+func (sv *Subvolume) ReleaseDir(inode btrfsprim.ObjID) {
+ sv.dirCache.Release(inode)
+}
+
+func (sv *Subvolume) loadDir(_ context.Context, inode btrfsprim.ObjID, dir *Dir) {
+ *dir = Dir{}
+ fullInode, err := sv.AcquireFullInode(inode)
+ if err != nil {
+ dir.Errs = append(dir.Errs, err)
+ return
+ }
+ dir.FullInode = *fullInode
+ sv.ReleaseFullInode(inode)
+ dir.SV = sv
+
dir.ChildrenByName = make(map[string]btrfsitem.DirEntry)
dir.ChildrenByIndex = make(map[uint64]btrfsitem.DirEntry)
for _, item := range dir.OtherItems {
@@ -337,37 +357,42 @@ func (dir *Dir) AbsPath() (string, error) {
if dir.DotDot == nil {
return "", fmt.Errorf("missing .. entry in dir inode %v", dir.Inode)
}
- parent, err := dir.SV.LoadDir(dir.DotDot.Inode)
+ parent, err := dir.SV.AcquireDir(dir.DotDot.Inode)
if err != nil {
return "", err
}
parentName, err := parent.AbsPath()
+ dir.SV.ReleaseDir(dir.DotDot.Inode)
if err != nil {
return "", err
}
return filepath.Join(parentName, string(dir.DotDot.Name)), nil
}
-func (sv *Subvolume) LoadFile(inode btrfsprim.ObjID) (*File, error) {
- val := containers.LoadOrElse[btrfsprim.ObjID, *File](&sv.fileCache, inode, func(inode btrfsprim.ObjID) (val *File) {
- val = new(File)
- fullInode, err := sv.LoadFullInode(inode)
- if err != nil {
- val.Errs = append(val.Errs, err)
- return val
- }
- val.FullInode = *fullInode
- val.SV = sv
- val.populate()
- return val
- })
+func (sv *Subvolume) AcquireFile(inode btrfsprim.ObjID) (*File, error) {
+ val := sv.fileCache.Acquire(sv.ctx, inode)
if val.Inode == 0 {
+ sv.fileCache.Release(inode)
return nil, val.Errs
}
return val, nil
}
-func (file *File) populate() {
+func (sv *Subvolume) ReleaseFile(inode btrfsprim.ObjID) {
+ sv.fileCache.Release(inode)
+}
+
+func (sv *Subvolume) loadFile(_ context.Context, inode btrfsprim.ObjID, file *File) {
+ *file = File{}
+ fullInode, err := sv.AcquireFullInode(inode)
+ if err != nil {
+ file.Errs = append(file.Errs, err)
+ return
+ }
+ file.FullInode = *fullInode
+ sv.ReleaseFullInode(inode)
+ file.SV = sv
+
for _, item := range file.OtherItems {
switch item.Key.ItemType {
case btrfsitem.INODE_REF_KEY:
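
The loader signature change is the crux of the io4_fs.go rewrite: instead of returning a freshly allocated value, each loader now receives a *V pointing at a recycled cache entry, which is why every loader's first statement is a wholesale overwrite (*val = BareInode{...} and so on). A toy example of the contract, assuming only the caching API sketched above:

    import (
        "context"

        "git.lukeshu.com/btrfs-progs-ng/lib/caching"
    )

    // blob caches the result of an expensive fetch, keyed by ID.
    type blob struct {
        ID   int
        Data []byte
        Errs []error
    }

    func newBlobCache(fetch func(int) ([]byte, error)) caching.Cache[int, blob] {
        return caching.NewLRUCache[int, blob](128, caching.FuncSource[int, blob](
            func(_ context.Context, id int, val *blob) {
                // The entry is recycled, not zeroed: overwrite *val entirely
                // so that nothing leaks from its previous occupant.
                *val = blob{ID: id}
                data, err := fetch(id)
                if err != nil {
                    val.Errs = append(val.Errs, err)
                    return
                }
                val.Data = data
            }))
    }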
diff --git a/lib/btrfsutil/open.go b/lib/btrfsutil/open.go
index c5ee314..abbd466 100644
--- a/lib/btrfsutil/open.go
+++ b/lib/btrfsutil/open.go
@@ -30,6 +30,7 @@ func Open(ctx context.Context, flag int, filenames ...string) (*btrfs.FS, error)
File: osFile,
}
bufFile := diskio.NewBufferedFile[btrfsvol.PhysicalAddr](
+ ctx,
typedFile,
//nolint:gomnd // False positive: gomnd.ignored-functions=[textui.Tunable] doesn't support type params.
textui.Tunable[btrfsvol.PhysicalAddr](16*1024), // block size: 16KiB
diff --git a/lib/btrfsutil/rebuilt_forrest.go b/lib/btrfsutil/rebuilt_forrest.go
index b5c646d..97d6851 100644
--- a/lib/btrfsutil/rebuilt_forrest.go
+++ b/lib/btrfsutil/rebuilt_forrest.go
@@ -7,7 +7,6 @@ package btrfsutil
import (
"context"
"fmt"
- "sync"
"github.com/datawire/dlib/dlog"
@@ -15,6 +14,7 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/caching"
"git.lukeshu.com/btrfs-progs-ng/lib/containers"
"git.lukeshu.com/btrfs-progs-ng/lib/diskio"
"git.lukeshu.com/btrfs-progs-ng/lib/slices"
@@ -140,12 +140,11 @@ type RebuiltForrest struct {
treesMu nestedMutex
trees map[btrfsprim.ObjID]*RebuiltTree // must hold .treesMu to access
- leafs containers.ARCache[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]]
- incItems containers.ARCache[btrfsprim.ObjID, *itemIndex]
- excItems containers.ARCache[btrfsprim.ObjID, *itemIndex]
+ leafs caching.Cache[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]]
+ incItems caching.Cache[btrfsprim.ObjID, itemIndex]
+ excItems caching.Cache[btrfsprim.ObjID, itemIndex]
- nodesMu sync.Mutex
- nodes containers.ARCache[btrfsvol.LogicalAddr, *btrfstree.Node]
+ nodes caching.Cache[btrfsvol.LogicalAddr, btrfstree.Node]
}
// NewRebuiltForrest returns a new RebuiltForrest instance. The
@@ -158,23 +157,24 @@ func NewRebuiltForrest(file diskio.File[btrfsvol.LogicalAddr], sb btrfstree.Supe
cb: cb,
trees: make(map[btrfsprim.ObjID]*RebuiltTree),
- leafs: containers.ARCache[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]]{
- MaxLen: textui.Tunable(8),
- },
- incItems: containers.ARCache[btrfsprim.ObjID, *itemIndex]{
- MaxLen: textui.Tunable(8),
- },
- excItems: containers.ARCache[btrfsprim.ObjID, *itemIndex]{
- MaxLen: textui.Tunable(8),
- },
-
- nodes: containers.ARCache[btrfsvol.LogicalAddr, *btrfstree.Node]{
- MaxLen: textui.Tunable(8),
- OnRemove: func(_ btrfsvol.LogicalAddr, node *btrfstree.Node) {
- node.Free()
- },
- },
}
+
+ ret.leafs = caching.NewLRUCache[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]](textui.Tunable(8),
+ caching.FuncSource[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]](
+ func(ctx context.Context, treeID btrfsprim.ObjID, leafs *map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]) {
+ *leafs = ret.trees[treeID].uncachedLeafToRoots(ctx)
+ }))
+ ret.incItems = caching.NewLRUCache[btrfsprim.ObjID, itemIndex](textui.Tunable(8),
+ caching.FuncSource[btrfsprim.ObjID, itemIndex](func(ctx context.Context, treeID btrfsprim.ObjID, incItems *itemIndex) {
+ *incItems = *ret.trees[treeID].uncachedIncItems(ctx)
+ }))
+ ret.excItems = caching.NewLRUCache[btrfsprim.ObjID, itemIndex](textui.Tunable(8),
+ caching.FuncSource[btrfsprim.ObjID, itemIndex](func(ctx context.Context, treeID btrfsprim.ObjID, excItems *itemIndex) {
+ *excItems = *ret.trees[treeID].uncachedExcItems(ctx)
+ }))
+ ret.nodes = caching.NewLRUCache[btrfsvol.LogicalAddr, btrfstree.Node](textui.Tunable(8),
+ caching.FuncSource[btrfsvol.LogicalAddr, btrfstree.Node](ret.readNode))
+
if ret.cb == nil {
ret.cb = noopRebuiltForrestCallbacks{
forrest: ret,
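
Wrapping the four indexes in caching.Cache also lets the dedicated nodesMu go away: the cache is internally locked, and a pinned entry cannot be evicted or recycled out from under a reader, so concurrent readItem calls no longer need to serialize on a single mutex. Roughly (a sketch under those assumptions, not code from the patch):

    import (
        "context"
        "sync"

        "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
        "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
        "git.lukeshu.com/btrfs-progs-ng/lib/caching"
    )

    func readBoth(ctx context.Context, nodes caching.Cache[btrfsvol.LogicalAddr, btrfstree.Node], a, b btrfsvol.LogicalAddr) {
        var wg sync.WaitGroup
        for _, laddr := range []btrfsvol.LogicalAddr{a, b} {
            laddr := laddr
            wg.Add(1)
            go func() {
                defer wg.Done()
                node := nodes.Acquire(ctx, laddr) // blocks only if all entries are pinned
                defer nodes.Release(laddr)
                _ = node.Head.Level // safe: the entry is pinned until Release
            }()
        }
        wg.Wait()
    }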
diff --git a/lib/btrfsutil/rebuilt_readitem.go b/lib/btrfsutil/rebuilt_readitem.go
index 03a7cdc..d3a2253 100644
--- a/lib/btrfsutil/rebuilt_readitem.go
+++ b/lib/btrfsutil/rebuilt_readitem.go
@@ -26,18 +26,14 @@ func (ptr ItemPtr) String() string {
return fmt.Sprintf("node@%v[%v]", ptr.Node, ptr.Slot)
}
-func (ts *RebuiltForrest) readNode(ctx context.Context, laddr btrfsvol.LogicalAddr) *btrfstree.Node {
- if cached, ok := ts.nodes.Load(laddr); ok {
- dlog.Tracef(ctx, "cache-hit node@%v", laddr)
- return cached
- }
+func (ts *RebuiltForrest) readNode(ctx context.Context, laddr btrfsvol.LogicalAddr, out *btrfstree.Node) {
+ dlog.Debugf(ctx, "cache-miss node@%v, reading...", laddr)
graphInfo, ok := ts.graph.Nodes[laddr]
if !ok {
panic(fmt.Errorf("should not happen: node@%v is not mentioned in the in-memory graph", laddr))
}
- dlog.Debugf(ctx, "cache-miss node@%v, reading...", laddr)
node, err := btrfstree.ReadNode[btrfsvol.LogicalAddr](ts.file, ts.sb, laddr, btrfstree.NodeExpectations{
LAddr: containers.OptionalValue(laddr),
Level: containers.OptionalValue(graphInfo.Level),
@@ -56,22 +52,20 @@ func (ts *RebuiltForrest) readNode(ctx context.Context, laddr btrfsvol.LogicalAd
if err != nil {
panic(fmt.Errorf("should not happen: i/o error: %w", err))
}
-
- ts.nodes.Store(laddr, node)
-
- return node
+ *out = *node
}
func (ts *RebuiltForrest) readItem(ctx context.Context, ptr ItemPtr) btrfsitem.Item {
- ts.nodesMu.Lock()
- defer ts.nodesMu.Unlock()
if ts.graph.Nodes[ptr.Node].Level != 0 {
panic(fmt.Errorf("should not happen: btrfsutil.RebuiltForrest.readItem called for non-leaf node@%v", ptr.Node))
}
if ptr.Slot < 0 {
panic(fmt.Errorf("should not happen: btrfsutil.RebuiltForrest.readItem called for negative item slot: %v", ptr.Slot))
}
- items := ts.readNode(ctx, ptr.Node).BodyLeaf
+
+ items := ts.nodes.Acquire(ctx, ptr.Node).BodyLeaf
+ defer ts.nodes.Release(ptr.Node)
+
if ptr.Slot >= len(items) {
panic(fmt.Errorf("should not happen: btrfsutil.RebuiltForrest.readItem called for out-of-bounds item slot: slot=%v len=%v",
ptr.Slot, len(items)))
diff --git a/lib/btrfsutil/rebuilt_tree.go b/lib/btrfsutil/rebuilt_tree.go
index 96d5a75..74ac84a 100644
--- a/lib/btrfsutil/rebuilt_tree.go
+++ b/lib/btrfsutil/rebuilt_tree.go
@@ -47,33 +47,37 @@ type RebuiltTree struct {
// leafToRoots returns all leafs (lvl=0) in the filesystem that pass
// .isOwnerOK, whether or not they're in the tree.
func (tree *RebuiltTree) leafToRoots(ctx context.Context) map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr] {
- return containers.LoadOrElse[btrfsprim.ObjID, map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr]](&tree.forrest.leafs, tree.ID, func(btrfsprim.ObjID) map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr] {
- ctx = dlog.WithField(ctx, "btrfs.util.rebuilt-tree.index-nodes", fmt.Sprintf("tree=%v", tree.ID))
+ ret := tree.forrest.leafs.Acquire(ctx, tree.ID)
+ tree.forrest.leafs.Release(tree.ID)
+ return *ret
+}
- nodeToRoots := make(map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr])
+func (tree *RebuiltTree) uncachedLeafToRoots(ctx context.Context) map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr] {
+ ctx = dlog.WithField(ctx, "btrfs.util.rebuilt-tree.index-nodes", fmt.Sprintf("tree=%v", tree.ID))
- var stats textui.Portion[int]
- stats.D = len(tree.forrest.graph.Nodes)
- progressWriter := textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second))
- progress := func() {
- stats.N = len(nodeToRoots)
- progressWriter.Set(stats)
- }
+ nodeToRoots := make(map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr])
- progress()
- for _, node := range maps.SortedKeys(tree.forrest.graph.Nodes) {
- tree.indexNode(ctx, node, nodeToRoots, progress, nil)
- }
- progressWriter.Done()
+ var stats textui.Portion[int]
+ stats.D = len(tree.forrest.graph.Nodes)
+ progressWriter := textui.NewProgress[textui.Portion[int]](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second))
+ progress := func() {
+ stats.N = len(nodeToRoots)
+ progressWriter.Set(stats)
+ }
- ret := make(map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr])
- for node, roots := range nodeToRoots {
- if tree.forrest.graph.Nodes[node].Level == 0 && len(roots) > 0 {
- ret[node] = roots
- }
+ progress()
+ for _, node := range maps.SortedKeys(tree.forrest.graph.Nodes) {
+ tree.indexNode(ctx, node, nodeToRoots, progress, nil)
+ }
+ progressWriter.Done()
+
+ ret := make(map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr])
+ for node, roots := range nodeToRoots {
+ if tree.forrest.graph.Nodes[node].Level == 0 && len(roots) > 0 {
+ ret[node] = roots
}
- return ret
- })
+ }
+ return ret
}
func (tree *RebuiltTree) indexNode(ctx context.Context, node btrfsvol.LogicalAddr, index map[btrfsvol.LogicalAddr]containers.Set[btrfsvol.LogicalAddr], progress func(), stack []btrfsvol.LogicalAddr) {
@@ -136,8 +140,9 @@ func (tree *RebuiltTree) isOwnerOK(owner btrfsprim.ObjID, gen btrfsprim.Generati
// Do not mutate the returned map; it is a pointer to the
// RebuiltTree's internal map!
func (tree *RebuiltTree) RebuiltItems(ctx context.Context) *containers.SortedMap[btrfsprim.Key, ItemPtr] {
- ctx = dlog.WithField(ctx, "btrfs.util.rebuilt-tree.index-inc-items", fmt.Sprintf("tree=%v", tree.ID))
- return tree.items(ctx, &tree.forrest.incItems, tree.Roots.HasAny)
+ ret := tree.forrest.incItems.Acquire(ctx, tree.ID)
+ tree.forrest.incItems.Release(tree.ID)
+ return ret
}
// RebuiltPotentialItems returns a map of items that could be added to
@@ -146,8 +151,19 @@ func (tree *RebuiltTree) RebuiltItems(ctx context.Context) *containers.SortedMap
// Do not mutate the returned map; it is a pointer to the
// RebuiltTree's internal map!
func (tree *RebuiltTree) RebuiltPotentialItems(ctx context.Context) *containers.SortedMap[btrfsprim.Key, ItemPtr] {
+ ret := tree.forrest.excItems.Acquire(ctx, tree.ID)
+ tree.forrest.excItems.Release(tree.ID)
+ return ret
+}
+
+func (tree *RebuiltTree) uncachedIncItems(ctx context.Context) *containers.SortedMap[btrfsprim.Key, ItemPtr] {
+ ctx = dlog.WithField(ctx, "btrfs.util.rebuilt-tree.index-inc-items", fmt.Sprintf("tree=%v", tree.ID))
+ return tree.items(ctx, tree.Roots.HasAny)
+}
+
+func (tree *RebuiltTree) uncachedExcItems(ctx context.Context) *containers.SortedMap[btrfsprim.Key, ItemPtr] {
ctx = dlog.WithField(ctx, "btrfs.util.rebuilt-tree.index-exc-items", fmt.Sprintf("tree=%v", tree.ID))
- return tree.items(ctx, &tree.forrest.excItems,
+ return tree.items(ctx,
func(roots containers.Set[btrfsvol.LogicalAddr]) bool {
return !tree.Roots.HasAny(roots)
})
@@ -166,54 +182,50 @@ func (s itemStats) String() string {
s.Leafs, s.NumItems, s.NumDups)
}
-func (tree *RebuiltTree) items(ctx context.Context, cache containers.Map[btrfsprim.ObjID, *itemIndex],
- leafFn func(roots containers.Set[btrfsvol.LogicalAddr]) bool,
-) *containers.SortedMap[btrfsprim.Key, ItemPtr] {
+func (tree *RebuiltTree) items(ctx context.Context, leafFn func(roots containers.Set[btrfsvol.LogicalAddr]) bool) *containers.SortedMap[btrfsprim.Key, ItemPtr] {
tree.mu.RLock()
defer tree.mu.RUnlock()
- return containers.LoadOrElse(cache, tree.ID, func(btrfsprim.ObjID) *itemIndex {
- var leafs []btrfsvol.LogicalAddr
- for leaf, roots := range tree.leafToRoots(ctx) {
- if leafFn(roots) {
- leafs = append(leafs, leaf)
- }
+ var leafs []btrfsvol.LogicalAddr
+ for leaf, roots := range tree.leafToRoots(ctx) {
+ if leafFn(roots) {
+ leafs = append(leafs, leaf)
}
- slices.Sort(leafs)
+ }
+ slices.Sort(leafs)
- var stats itemStats
- stats.Leafs.D = len(leafs)
- progressWriter := textui.NewProgress[itemStats](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second))
+ var stats itemStats
+ stats.Leafs.D = len(leafs)
+ progressWriter := textui.NewProgress[itemStats](ctx, dlog.LogLevelInfo, textui.Tunable(1*time.Second))
- index := new(containers.SortedMap[btrfsprim.Key, ItemPtr])
- for i, leaf := range leafs {
- stats.Leafs.N = i
- progressWriter.Set(stats)
- for j, itemKey := range tree.forrest.graph.Nodes[leaf].Items {
- newPtr := ItemPtr{
- Node: leaf,
- Slot: j,
- }
- if oldPtr, exists := index.Load(itemKey); !exists {
+ index := new(containers.SortedMap[btrfsprim.Key, ItemPtr])
+ for i, leaf := range leafs {
+ stats.Leafs.N = i
+ progressWriter.Set(stats)
+ for j, itemKey := range tree.forrest.graph.Nodes[leaf].Items {
+ newPtr := ItemPtr{
+ Node: leaf,
+ Slot: j,
+ }
+ if oldPtr, exists := index.Load(itemKey); !exists {
+ index.Store(itemKey, newPtr)
+ stats.NumItems++
+ } else {
+ if tree.RebuiltShouldReplace(oldPtr.Node, newPtr.Node) {
index.Store(itemKey, newPtr)
- stats.NumItems++
- } else {
- if tree.RebuiltShouldReplace(oldPtr.Node, newPtr.Node) {
- index.Store(itemKey, newPtr)
- }
- stats.NumDups++
}
- progressWriter.Set(stats)
+ stats.NumDups++
}
- }
- if stats.Leafs.N > 0 {
- stats.Leafs.N = len(leafs)
progressWriter.Set(stats)
- progressWriter.Done()
}
+ }
+ if stats.Leafs.N > 0 {
+ stats.Leafs.N = len(leafs)
+ progressWriter.Set(stats)
+ progressWriter.Done()
+ }
- return index
- })
+ return index
}
// main public API /////////////////////////////////////////////////////////////////////////////////////////////////////
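
Two release disciplines now coexist in btrfsutil. leafToRoots, RebuiltItems, and RebuiltPotentialItems above release the pin immediately and hand back the value (or a pointer into it), which is sound only because the loaders build fresh storage on every recycle, so anything copied out stays alive under Go's GC. readItem in rebuilt_readitem.go instead holds its pin with defer across every dereference of the node's BodyLeaf, which stays correct even if a future cache implementation reuses backing storage in place. Both idioms in miniature, assuming the caching API sketched earlier:

    // Idiom 1: copy out, release immediately.  Requires that the loader
    // allocate fresh backing storage each time it repopulates an entry.
    func snapshot(ctx context.Context, c caching.Cache[int, map[string]int]) map[string]int {
        m := *c.Acquire(ctx, 1) // copy the map header out of the entry
        c.Release(1)
        return m // the old map survives even if the entry is recycled
    }

    // Idiom 2: hold the pin across every use of cache-owned memory.
    func firstByte(ctx context.Context, c caching.Cache[int, []byte]) byte {
        val := c.Acquire(ctx, 1)
        defer c.Release(1) // the entry cannot be recycled until we return
        return (*val)[0]
    }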
diff --git a/lib/caching/lrucache.go b/lib/caching/lrucache.go
index c41da09..095a78c 100644
--- a/lib/caching/lrucache.go
+++ b/lib/caching/lrucache.go
@@ -14,7 +14,7 @@ type lruEntry[K comparable, V any] struct {
key K
val V
refs int
- del chan struct{}
+ del chan struct{} // non-nil if a delete is waiting on .refs to drop to zero
}
type lruCache[K comparable, V any] struct {
@@ -23,8 +23,6 @@ type lruCache[K comparable, V any] struct {
mu sync.Mutex
- len int
-
unused LinkedList[lruEntry[K, V]]
evictable LinkedList[lruEntry[K, V]] // only entries with .refs==0
byName map[K]*LinkedListEntry[lruEntry[K, V]]
@@ -32,8 +30,8 @@ type lruCache[K comparable, V any] struct {
waiters LinkedList[chan *LinkedListEntry[lruEntry[K, V]]]
}
-// NewLRUCache returns a new Cache with a simple Least-Recently-Used eviction
-// policy.
+// NewLRUCache returns a new thread-safe Cache with a simple Least-Recently-Used
+// eviction policy.
//
// It is invalid (runtime-panic) to call NewLRUCache with a non-positive
// capacity or a nil source.
@@ -44,10 +42,14 @@ func NewLRUCache[K comparable, V any](cap int, src Source[K, V]) Cache[K, V] {
if src == nil {
panic(fmt.Errorf("caching.NewLRUCache: nil source"))
}
- return &lruCache[K, V]{
+ ret := &lruCache[K, V]{
cap: cap,
src: src,
}
+ for i := 0; i < cap; i++ {
+ ret.unused.Store(new(LinkedListEntry[lruEntry[K, V]]))
+ }
+ return ret
}
// Acquire implements Cache.
@@ -69,9 +71,6 @@ func (c *lruCache[K, V]) Acquire(ctx context.Context, k K) *V {
case !c.unused.IsEmpty():
entry = c.unused.Oldest()
c.unused.Delete(entry)
- case c.len < c.cap:
- entry = new(LinkedListEntry[lruEntry[K, V]])
- c.len++
case !c.evictable.IsEmpty():
entry = c.evictable.Oldest()
c.evictable.Delete(entry)
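
Preallocating all cap entries onto the unused list at construction time is what lets the len counter and the c.len < c.cap branch disappear from Acquire: an entry now always comes from the unused list, from the evictable list, or by blocking on the waiters list until someone calls Release. That last case is the behavioral contract worth knowing: a cache whose every entry is pinned blocks further Acquires. A small demonstration, assuming the API sketched at the top:

    import (
        "context"
        "fmt"
        "time"

        "git.lukeshu.com/btrfs-progs-ng/lib/caching"
    )

    func demoBlocking(ctx context.Context) {
        c := caching.NewLRUCache[int, string](1, caching.FuncSource[int, string](
            func(_ context.Context, k int, v *string) { *v = fmt.Sprintf("value-%d", k) }))

        _ = c.Acquire(ctx, 1) // pins the cache's only entry

        done := make(chan struct{})
        go func() {
            _ = c.Acquire(ctx, 2) // blocks: nothing unpinned to recycle
            c.Release(2)
            close(done)
        }()

        time.Sleep(10 * time.Millisecond) // let the goroutine reach Acquire
        c.Release(1)                      // unpin; the waiter gets the entry
        <-done
    }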
diff --git a/lib/diskio/file_blockbuf.go b/lib/diskio/file_blockbuf.go
index 0bb3156..580e55a 100644
--- a/lib/diskio/file_blockbuf.go
+++ b/lib/diskio/file_blockbuf.go
@@ -5,63 +5,74 @@
package diskio
import (
+ "context"
"sync"
- "git.lukeshu.com/go/typedsync"
+ "github.com/datawire/dlib/dlog"
- "git.lukeshu.com/btrfs-progs-ng/lib/containers"
+ "git.lukeshu.com/btrfs-progs-ng/lib/caching"
)
-type bufferedBlock struct {
+type bufferedBlock[A ~int64] struct {
+ Mu sync.RWMutex
+ Addr A
+ Dirty bool
+
Dat []byte
Err error
}
type bufferedFile[A ~int64] struct {
+ ctx context.Context
inner File[A]
- mu sync.RWMutex
blockSize A
- blockCache containers.ARCache[A, bufferedBlock]
- blockPool typedsync.Pool[[]byte]
+ blockCache caching.Cache[A, bufferedBlock[A]]
}
var _ File[assertAddr] = (*bufferedFile[assertAddr])(nil)
-func NewBufferedFile[A ~int64](file File[A], blockSize A, cacheSize int) *bufferedFile[A] {
+func NewBufferedFile[A ~int64](ctx context.Context, file File[A], blockSize A, cacheSize int) *bufferedFile[A] {
ret := &bufferedFile[A]{
inner: file,
blockSize: blockSize,
- blockCache: containers.ARCache[A, bufferedBlock]{
- MaxLen: cacheSize,
- },
}
- ret.blockPool.New = ret.malloc
- ret.blockCache.OnRemove = ret.free
- ret.blockCache.New = ret.readBlock
+ ret.blockCache = caching.NewLRUCache[A, bufferedBlock[A]](cacheSize, bufferedBlockSource[A]{ret})
return ret
}
-func (bf *bufferedFile[A]) malloc() []byte {
- return make([]byte, bf.blockSize)
+type bufferedBlockSource[A ~int64] struct {
+ bf *bufferedFile[A]
}
-func (bf *bufferedFile[A]) free(_ A, buf bufferedBlock) {
- bf.blockPool.Put(buf.Dat)
+func (src bufferedBlockSource[A]) Flush(ctx context.Context, block *bufferedBlock[A]) {
+ if !block.Dirty {
+ return
+ }
+ if _, err := src.bf.inner.WriteAt(block.Dat, block.Addr); err != nil {
+ dlog.Errorf(src.bf.ctx, "i/o error: write: %v", err)
+ }
+ block.Dirty = false
}
-func (bf *bufferedFile[A]) readBlock(blockOffset A) bufferedBlock {
- dat, _ := bf.blockPool.Get()
- n, err := bf.inner.ReadAt(dat, blockOffset)
- return bufferedBlock{
- Dat: dat[:n],
- Err: err,
+func (src bufferedBlockSource[A]) Load(ctx context.Context, blockAddr A, block *bufferedBlock[A]) {
+ src.Flush(ctx, block)
+ if block.Dat == nil {
+ block.Dat = make([]byte, src.bf.blockSize)
}
+ n, err := src.bf.inner.ReadAt(block.Dat[:src.bf.blockSize], blockAddr)
+ block.Addr = blockAddr
+ block.Dat = block.Dat[:n]
+ block.Err = err
}
func (bf *bufferedFile[A]) Name() string { return bf.inner.Name() }
func (bf *bufferedFile[A]) Size() A { return bf.inner.Size() }
func (bf *bufferedFile[A]) Close() error { return bf.inner.Close() }
+func (bf *bufferedFile[A]) Flush() {
+ bf.blockCache.Flush(bf.ctx)
+}
+
func (bf *bufferedFile[A]) ReadAt(dat []byte, off A) (n int, err error) {
done := 0
for done < len(dat) {
@@ -75,11 +86,14 @@ func (bf *bufferedFile[A]) ReadAt(dat []byte, off A) (n int, err error) {
}
func (bf *bufferedFile[A]) maybeShortReadAt(dat []byte, off A) (n int, err error) {
- bf.mu.RLock()
- defer bf.mu.RUnlock()
offsetWithinBlock := off % bf.blockSize
blockOffset := off - offsetWithinBlock
- cachedBlock, _ := bf.blockCache.Load(blockOffset)
+
+ cachedBlock := bf.blockCache.Acquire(bf.ctx, blockOffset)
+ defer bf.blockCache.Release(blockOffset)
+ cachedBlock.Mu.RLock()
+ defer cachedBlock.Mu.RUnlock()
+
n = copy(dat, cachedBlock.Dat[offsetWithinBlock:])
if n < len(dat) {
return n, cachedBlock.Err
@@ -88,16 +102,30 @@ func (bf *bufferedFile[A]) maybeShortReadAt(dat []byte, off A) (n int, err error
}
func (bf *bufferedFile[A]) WriteAt(dat []byte, off A) (n int, err error) {
- bf.mu.Lock()
- defer bf.mu.Unlock()
+ done := 0
+ for done < len(dat) {
+ n, err := bf.maybeShortWriteAt(dat[done:], off+A(done))
+ done += n
+ if err != nil {
+ return done, err
+ }
+ }
+ return done, nil
+}
- // Do the work
- n, err = bf.inner.WriteAt(dat, off)
+func (bf *bufferedFile[A]) maybeShortWriteAt(dat []byte, off A) (n int, err error) {
+ offsetWithinBlock := off % bf.blockSize
+ blockOffset := off - offsetWithinBlock
- // Cache invalidation
- for blockOffset := off - (off % bf.blockSize); blockOffset < off+A(n); blockOffset += bf.blockSize {
- bf.blockCache.Delete(blockOffset)
- }
+ cachedBlock := bf.blockCache.Acquire(bf.ctx, blockOffset)
+ defer bf.blockCache.Release(blockOffset)
+ cachedBlock.Mu.Lock()
+ defer cachedBlock.Mu.Unlock()
- return n, err
+ cachedBlock.Dirty = true
+ n = copy(cachedBlock.Dat[offsetWithinBlock:], dat)
+ if n < len(dat) {
+ return n, cachedBlock.Err
+ }
+ return n, nil
}
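
The diskio change is semantic, not just an API port: WriteAt used to write through to the inner file and then invalidate the affected cache blocks, but it now only marks the cached block dirty, and the bytes reach the inner file when the block is evicted (bufferedBlockSource.Flush) or when the new bufferedFile.Flush drains the whole cache. Callers that need the data on disk therefore have to Flush explicitly. A minimal usage sketch (assumes inner already spans the written range):

    import (
        "context"

        "git.lukeshu.com/btrfs-progs-ng/lib/diskio"
    )

    func writeAndSync(ctx context.Context, inner diskio.File[int64]) error {
        bf := diskio.NewBufferedFile[int64](ctx, inner, 4096, 8)
        if _, err := bf.WriteAt([]byte("hello"), 0); err != nil {
            return err // so far only the in-memory block was updated
        }
        bf.Flush() // write every dirty block back via bufferedBlockSource.Flush
        return bf.Close()
    }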
diff --git a/lib/diskio/file_state_test.go b/lib/diskio/file_state_test.go
index 32ca705..b0cc6a7 100644
--- a/lib/diskio/file_state_test.go
+++ b/lib/diskio/file_state_test.go
@@ -9,6 +9,8 @@ import (
"testing"
"testing/iotest"
+ "github.com/datawire/dlib/dlog"
+
"git.lukeshu.com/btrfs-progs-ng/lib/diskio"
)
@@ -50,7 +52,8 @@ func FuzzStatefulBufferedReader(f *testing.F) {
Reader: bytes.NewReader(content),
name: t.Name(),
}
- file = diskio.NewBufferedFile[int64](file, 4, 2)
+ ctx := dlog.NewTestContext(t, false)
+ file = diskio.NewBufferedFile[int64](ctx, file, 4, 2)
reader := diskio.NewStatefulFile[int64](file)
if err := iotest.TestReader(reader, content); err != nil {
t.Error(err)