author     Luke Shumaker <lukeshu@lukeshu.com>  2023-02-12 16:17:02 -0700
committer  Luke Shumaker <lukeshu@lukeshu.com>  2023-02-12 16:17:02 -0700
commit     cfcc753dc8906817e15b1b7c36b4dc12462d12e4 (patch)
tree       f5d2aa0caaa4cb336017ba7595c3425f4aa00bfc /lib/btrfs
parent     29b6b9f997913f13a0bff8bb1278a61302413615 (diff)
parent     f76faa4b8debd9c94751a03dd65e46c80a340a82 (diff)
Merge branch 'lukeshu/fast'
Diffstat (limited to 'lib/btrfs')
-rw-r--r--  lib/btrfs/Makefile | 48
-rw-r--r--  lib/btrfs/btrfsitem/item_blockgroup.go | 4
-rw-r--r--  lib/btrfs/btrfsitem/item_chunk.go | 30
-rw-r--r--  lib/btrfs/btrfsitem/item_dev.go | 4
-rw-r--r--  lib/btrfs/btrfsitem/item_devextent.go | 4
-rw-r--r--  lib/btrfs/btrfsitem/item_dir.go | 21
-rw-r--r--  lib/btrfs/btrfsitem/item_empty.go | 4
-rw-r--r--  lib/btrfs/btrfsitem/item_extent.go | 42
-rw-r--r--  lib/btrfs/btrfsitem/item_extentcsum.go | 4
-rw-r--r--  lib/btrfs/btrfsitem/item_extentdataref.go | 4
-rw-r--r--  lib/btrfs/btrfsitem/item_fileextent.go | 17
-rw-r--r--  lib/btrfs/btrfsitem/item_freespacebitmap.go | 21
-rw-r--r--  lib/btrfs/btrfsitem/item_freespaceinfo.go | 4
-rw-r--r--  lib/btrfs/btrfsitem/item_inode.go | 4
-rw-r--r--  lib/btrfs/btrfsitem/item_inoderef.go | 40
-rw-r--r--  lib/btrfs/btrfsitem/item_metadata.go | 31
-rw-r--r--  lib/btrfs/btrfsitem/item_persistent.go | 4
-rw-r--r--  lib/btrfs/btrfsitem/item_qgroupinfo.go | 2
-rw-r--r--  lib/btrfs/btrfsitem/item_qgrouplimit.go | 2
-rw-r--r--  lib/btrfs/btrfsitem/item_qgroupstatus.go | 2
-rw-r--r--  lib/btrfs/btrfsitem/item_root.go | 4
-rw-r--r--  lib/btrfs/btrfsitem/item_rootref.go | 17
-rw-r--r--  lib/btrfs/btrfsitem/item_shareddataref.go | 4
-rw-r--r--  lib/btrfs/btrfsitem/item_untyped.go | 4
-rw-r--r--  lib/btrfs/btrfsitem/item_uuid.go | 2
-rw-r--r--  lib/btrfs/btrfsitem/items.go | 57
-rw-r--r--  lib/btrfs/btrfsitem/items_gen.go | 399
-rw-r--r--  lib/btrfs/btrfstree/ops.go | 51
-rw-r--r--  lib/btrfs/btrfstree/root.go | 4
-rw-r--r--  lib/btrfs/btrfstree/types_node.go | 110
-rw-r--r--  lib/btrfs/btrfstree/types_superblock.go | 4
-rw-r--r--  lib/btrfs/btrfsvol/lvm.go | 10
-rw-r--r--  lib/btrfs/csums.go | 24
-rw-r--r--  lib/btrfs/io1_pv.go | 6
-rw-r--r--  lib/btrfs/io2_lv.go | 4
-rw-r--r--  lib/btrfs/io3_btree.go | 4
-rw-r--r--  lib/btrfs/io4_fs.go | 76
37 files changed, 834 insertions, 238 deletions
diff --git a/lib/btrfs/Makefile b/lib/btrfs/Makefile
index cbcaf9e..a1fe747 100644
--- a/lib/btrfs/Makefile
+++ b/lib/btrfs/Makefile
@@ -8,9 +8,9 @@
btrfsitem/items.txt: btrfsitem $(wildcard btrfsitem/item_*.go) $(MAKEFILE_LIST)
{ \
- sed -En 's,^type (\S+) .* // (.*=.*),\1 \2,p' $(filter btrfsitem/item_%.go,$^) | while read -r typ keys; do \
+ sed -En 's,^type (\S+) .* // (trivial|complex) (.*=.*),\1 \2 \3,p' $(filter btrfsitem/item_%.go,$^) | while read -r typ class keys; do \
for key in $$keys; do \
- echo "$$key" "$$typ"; \
+ echo "$$key" "$$class" "$$typ"; \
done; \
done; \
} | LC_COLLATE=C sort >$@
@@ -24,19 +24,49 @@ btrfsitem/items_gen.go: btrfsitem/items.txt $(MAKEFILE_LIST)
echo 'import ('; \
echo '"reflect"'; \
echo; \
+ echo '"git.lukeshu.com/go/typedsync"'; \
+ echo; \
echo '"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"'; \
echo ')'; \
echo 'const ('; \
- sed -E 's,(.*)=(.*) (.*),\1_KEY=btrfsprim.\1_KEY,' $<; \
+ sed -E 's/(.*)=(.*) (trivial|complex) (.*)/\1_KEY=btrfsprim.\1_KEY/' $<; \
+ echo ')'; \
+ echo 'var ('; \
+ sed -E 's/(.*)=(.*) (trivial|complex) (.*)/\4/p' $< | LC_COLLATE=C sort -u | sed 's/.*/\l&Type = reflect.TypeOf(&{})/'; \
echo ')'; \
+ echo '// keytype2gotype is used by UnmarshalItem.'; \
echo 'var keytype2gotype = map[Type]reflect.Type{'; \
- sed -En 's|(.*)=([^:]*) (.*)|\1_KEY: reflect.TypeOf(\3{}),|p' $<; \
+ sed -En 's/(.*)=([^:]*) (trivial|complex) (.*)/\1_KEY: \l\4Type,/p' $<; \
echo '}'; \
+ echo '// untypedObjID2gotype is used by UnmarshalItem.'; \
echo 'var untypedObjID2gotype = map[btrfsprim.ObjID]reflect.Type{'; \
- sed -En 's|UNTYPED=0:(.*) (.*)|btrfsprim.\1: reflect.TypeOf(\2{}),|p' $<; \
+ sed -En 's/UNTYPED=0:(.*) (trivial|complex) (.*)/btrfsprim.\1: \l\3Type,/p' $<; \
echo '}'; \
- sed -En 's,(.*)=(.*) (.+),\3,p' $< | LC_COLLATE=C sort -u | sed 's,.*,func (&) isItem() {},'; \
- } | gofmt >$@
+ echo '// Pools.'; \
+ echo 'var ('; \
+ sed -E 's/(.*)=(.*) (trivial|complex) (.*)/\4/p' $< | LC_COLLATE=C sort -u | sed 's/.*/\l&Pool = typedsync.Pool[Item]{New: func() Item { return new(&) }}/'; \
+ echo ')'; \
+ echo '// gotype2pool is used by UnmarshalItem.'; \
+ echo 'var gotype2pool = map[reflect.Type]*typedsync.Pool[Item]{'; \
+ sed -E 's/(.*)=(.*) (trivial|complex) (.*)/\4/p' $< | LC_COLLATE=C sort -u | sed 's/.*/\l&Type: \&\l&Pool,/'; \
+ echo '}'; \
+ echo '// isItem implements Item.'; \
+ sed -En 's/(.*)=(.*) (trivial|complex) (.+)/\4/p' $< | LC_COLLATE=C sort -u | sed 's/.*/func (*&) isItem() {}/'; \
+ echo '// Free implements Item.'; \
+ sed -En 's/(.*)=(.*) (trivial) (.+)/\4/p' $< | LC_COLLATE=C sort -u | sed 's/.*/func (o *&) Free() {*o = &{}; \l&Pool.Put(o)}/'; \
+ echo '// Clone is a handy method.'; \
+ sed -En 's/(.*)=(.*) (trivial) (.+)/\4/p' $< | LC_COLLATE=C sort -u | sed 's/.*/func (o &) Clone() & { return o }/'; \
+ echo '// CloneItem implements Item.'; \
+ sed -En 's/(.*)=(.*) (trivial|complex) (.+)/\4/p' $< | LC_COLLATE=C sort -u | sed 's/.*/func (o *&) CloneItem() Item { ret, _ := \l&Pool.Get(); *(ret.(*&)) = o.Clone(); return ret }/'; \
+ echo '// Item type assertions.'; \
+ echo 'var ('; \
+ sed -En 's/(.*)=(.*) (trivial|complex) (.+)/\4/p' $< | LC_COLLATE=C sort -u | sed 's/.*/_ Item = (*&)(nil)/'; \
+ echo ')'; \
+ echo '// Clone type assertions.'; \
+ echo 'var ('; \
+ sed -En 's/(.*)=(.*) (trivial|complex) (.+)/\4/p' $< | LC_COLLATE=C sort -u | sed 's/.*/_ interface{ Clone() & } = &{}/'; \
+ echo ')'; \
+ } | sed 's/uUID/uuid/g' | gofmt >$@
files += btrfsitem/items_gen.go
btrfsprim/itemtype.go: btrfsitem/items.txt $(MAKEFILE_LIST)
@@ -47,11 +77,11 @@ btrfsprim/itemtype.go: btrfsitem/items.txt $(MAKEFILE_LIST)
echo 'import "fmt"'; \
echo 'type ItemType uint8'; \
echo 'const ('; \
- sed -E 's,(.*)=([^:]*)(:.*)? (.*),\1_KEY=ItemType(\2),' $< | uniq; \
+ sed -E 's,(.*)=([^:]*)(:.*)? (trivial|complex) (.*),\1_KEY=ItemType(\2),' $< | uniq; \
echo ')'; \
echo 'func (t ItemType) String() string {'; \
echo ' names := map[ItemType]string{'; \
- sed -E 's@(.*)=(.*) (.*)@\1_KEY: "\1",@' $< | sed 's/"UUID_/&KEY_/'; \
+ sed -E 's@(.*)=(.*) (trivial|complex) (.*)@\1_KEY: "\1",@' $< | sed 's/"UUID_/&KEY_/'; \
echo ' }'; \
echo ' if name, ok := names[t]; ok {'; \
echo ' return name'; \
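
A note on what this recipe generates: each entry in btrfsitem/items.txt now carries a "trivial" or "complex" class tag (taken from the marker comment on each Go type, as the item_*.go hunks below show), and the sed pipeline above emits, for every type, a cached reflect.Type variable, a typedsync.Pool, an isItem stub, a CloneItem method, and compile-time assertions; Free and Clone are generated only for trivial types, while complex types hand-write them. A rough, self-contained sketch of the shape generated for a trivial type, using a hypothetical Widget item and the standard library's sync.Pool in place of the repo's typedsync.Pool wrapper:

package main

import (
	"fmt"
	"sync"
)

// Item mirrors the interface that the generated code implements.
type Item interface {
	isItem()
	Free()
	CloneItem() Item
}

// Widget is a hypothetical "trivial" item: fixed-size fields, nothing heap-owned.
type Widget struct {
	Used  int64
	Flags uint64
}

var widgetPool = sync.Pool{New: func() any { return new(Widget) }}

func (*Widget) isItem() {}

// Free zeroes the value (so no stale data leaks out of the pool) and returns it.
func (o *Widget) Free() { *o = Widget{}; widgetPool.Put(o) }

// Clone is a plain value copy; a trivial item has nothing to deep-copy.
func (o Widget) Clone() Widget { return o }

// CloneItem fetches a pooled instance and copies this value into it.
func (o *Widget) CloneItem() Item {
	ret := widgetPool.Get().(*Widget)
	*ret = o.Clone()
	return ret
}

func main() {
	a := &Widget{Used: 42, Flags: 1}
	b := a.CloneItem()
	fmt.Println(b.(*Widget).Used) // 42
	b.Free()
	a.Free()
}

Complex types (Chunk, DirEntry, Extent, and so on below) reuse the same generated CloneItem wrapper but implement Free and Clone by hand, so that any slices they own go back to their own pools.
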
diff --git a/lib/btrfs/btrfsitem/item_blockgroup.go b/lib/btrfs/btrfsitem/item_blockgroup.go
index 6fc09ac..ae0ca12 100644
--- a/lib/btrfs/btrfsitem/item_blockgroup.go
+++ b/lib/btrfs/btrfsitem/item_blockgroup.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -12,7 +12,7 @@ import (
// key.objectid = logical_addr
// key.offset = size of chunk
-type BlockGroup struct { // BLOCK_GROUP_ITEM=192
+type BlockGroup struct { // trivial BLOCK_GROUP_ITEM=192
Used int64 `bin:"off=0, siz=8"`
ChunkObjectID btrfsprim.ObjID `bin:"off=8, siz=8"` // always FIRST_CHUNK_TREE_OBJECTID
Flags btrfsvol.BlockGroupFlags `bin:"off=16, siz=8"`
diff --git a/lib/btrfs/btrfsitem/item_chunk.go b/lib/btrfs/btrfsitem/item_chunk.go
index 1f1d577..2280a0b 100644
--- a/lib/btrfs/btrfsitem/item_chunk.go
+++ b/lib/btrfs/btrfsitem/item_chunk.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -15,7 +15,7 @@ import (
//
// key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID
// key.offset = logical_addr
-type Chunk struct { // CHUNK_ITEM=228
+type Chunk struct { // complex CHUNK_ITEM=228
Head ChunkHeader
Stripes []ChunkStripe
}
@@ -60,20 +60,36 @@ func (chunk Chunk) Mappings(key btrfsprim.Key) []btrfsvol.Mapping {
return ret
}
+var chunkStripePool containers.SlicePool[ChunkStripe]
+
+func (chunk *Chunk) Free() {
+ for i := range chunk.Stripes {
+ chunk.Stripes[i] = ChunkStripe{}
+ }
+ chunkStripePool.Put(chunk.Stripes)
+ *chunk = Chunk{}
+ chunkPool.Put(chunk)
+}
+
+func (chunk Chunk) Clone() Chunk {
+ ret := chunk
+ ret.Stripes = chunkStripePool.Get(len(chunk.Stripes))
+ copy(ret.Stripes, chunk.Stripes)
+ return ret
+}
+
func (chunk *Chunk) UnmarshalBinary(dat []byte) (int, error) {
n, err := binstruct.Unmarshal(dat, &chunk.Head)
if err != nil {
return n, err
}
- chunk.Stripes = nil
- for i := 0; i < int(chunk.Head.NumStripes); i++ {
- var stripe ChunkStripe
- _n, err := binstruct.Unmarshal(dat[n:], &stripe)
+ chunk.Stripes = chunkStripePool.Get(int(chunk.Head.NumStripes))
+ for i := range chunk.Stripes {
+ _n, err := binstruct.Unmarshal(dat[n:], &chunk.Stripes[i])
n += _n
if err != nil {
return n, err
}
- chunk.Stripes = append(chunk.Stripes, stripe)
}
return n, nil
}
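
containers.SlicePool, which backs chunkStripePool here (and the other slice pools introduced below), is not part of this diff. As an assumption about its general shape rather than the repo's actual implementation, a length-aware pool along these lines would support the Get(n)/Put usage seen above:

package main

import (
	"fmt"
	"sync"
)

// SlicePool hands out slices of a requested length and recycles their
// backing arrays. A minimal sketch; the real containers.SlicePool may differ.
type SlicePool[T any] struct {
	inner sync.Pool
}

// Get returns a slice of length n, reusing a pooled backing array when
// one with enough capacity is available.
func (p *SlicePool[T]) Get(n int) []T {
	if n == 0 {
		return nil
	}
	if s, ok := p.inner.Get().(*[]T); ok && cap(*s) >= n {
		return (*s)[:n]
	}
	return make([]T, n)
}

// Put hands a slice's backing array back for reuse. Callers are expected
// to have zeroed the elements first, as Chunk.Free does for its stripes.
func (p *SlicePool[T]) Put(s []T) {
	if cap(s) == 0 {
		return
	}
	s = s[:0]
	p.inner.Put(&s)
}

func main() {
	var pool SlicePool[int]
	a := pool.Get(4)
	copy(a, []int{1, 2, 3, 4})
	pool.Put(a)
	b := pool.Get(2) // may reuse a's backing array
	fmt.Println(len(b), cap(b) >= 2)
}

The discipline that matters is visible in Chunk.Free above: zero each element before handing the slice back, so pooled memory never carries stale stripe data into its next user.
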
diff --git a/lib/btrfs/btrfsitem/item_dev.go b/lib/btrfs/btrfsitem/item_dev.go
index fd7f458..188711e 100644
--- a/lib/btrfs/btrfsitem/item_dev.go
+++ b/lib/btrfs/btrfsitem/item_dev.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -12,7 +12,7 @@ import (
// key.objectid = BTRFS_DEV_ITEMS_OBJECTID
// key.offset = device_id (starting at 1)
-type Dev struct { // DEV_ITEM=216
+type Dev struct { // trivial DEV_ITEM=216
DevID btrfsvol.DeviceID `bin:"off=0x0, siz=0x8"`
NumBytes uint64 `bin:"off=0x8, siz=0x8"`
diff --git a/lib/btrfs/btrfsitem/item_devextent.go b/lib/btrfs/btrfsitem/item_devextent.go
index 47bdbcf..cade165 100644
--- a/lib/btrfs/btrfsitem/item_devextent.go
+++ b/lib/btrfs/btrfsitem/item_devextent.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -12,7 +12,7 @@ import (
// key.objectid = device_id
// key.offset = physical_addr
-type DevExtent struct { // DEV_EXTENT=204
+type DevExtent struct { // trivial DEV_EXTENT=204
ChunkTree btrfsprim.ObjID `bin:"off=0, siz=8"` // always CHUNK_TREE_OBJECTID
ChunkObjectID btrfsprim.ObjID `bin:"off=8, siz=8"` // which chunk within .ChunkTree owns this extent, always FIRST_CHUNK_TREE_OBJECTID
ChunkOffset btrfsvol.LogicalAddr `bin:"off=16, siz=8"` // offset of the CHUNK_ITEM that owns this extent, within the .ChunkObjectID
diff --git a/lib/btrfs/btrfsitem/item_dir.go b/lib/btrfs/btrfsitem/item_dir.go
index 584e44d..0049072 100644
--- a/lib/btrfs/btrfsitem/item_dir.go
+++ b/lib/btrfs/btrfsitem/item_dir.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -23,7 +23,7 @@ func NameHash(dat []byte) uint64 {
// key.offset =
// - for DIR_ITEM and XATTR_ITEM = NameHash(name)
// - for DIR_INDEX = index id in the directory (starting at 2, because "." and "..")
-type DirEntry struct { // DIR_ITEM=84 DIR_INDEX=96 XATTR_ITEM=24
+type DirEntry struct { // complex DIR_ITEM=84 DIR_INDEX=96 XATTR_ITEM=24
Location btrfsprim.Key `bin:"off=0x0, siz=0x11"`
TransID int64 `bin:"off=0x11, siz=8"`
DataLen uint16 `bin:"off=0x19, siz=2"` // [ignored-when-writing]
@@ -34,6 +34,19 @@ type DirEntry struct { // DIR_ITEM=84 DIR_INDEX=96 XATTR_ITEM=24
Name []byte `bin:"-"`
}
+func (o *DirEntry) Free() {
+ bytePool.Put(o.Data)
+ bytePool.Put(o.Name)
+ *o = DirEntry{}
+ dirEntryPool.Put(o)
+}
+
+func (o DirEntry) Clone() DirEntry {
+ o.Data = cloneBytes(o.Data)
+ o.Name = cloneBytes(o.Name)
+ return o
+}
+
func (o *DirEntry) UnmarshalBinary(dat []byte) (int, error) {
if err := binutil.NeedNBytes(dat, 0x1e); err != nil {
return 0, err
@@ -49,9 +62,9 @@ func (o *DirEntry) UnmarshalBinary(dat []byte) (int, error) {
if err := binutil.NeedNBytes(dat, 0x1e+int(o.DataLen)+int(o.NameLen)); err != nil {
return 0, err
}
- o.Name = dat[n : n+int(o.NameLen)]
+ o.Name = cloneBytes(dat[n : n+int(o.NameLen)])
n += int(o.NameLen)
- o.Data = dat[n : n+int(o.DataLen)]
+ o.Data = cloneBytes(dat[n : n+int(o.DataLen)])
n += int(o.DataLen)
return n, nil
}
diff --git a/lib/btrfs/btrfsitem/item_empty.go b/lib/btrfs/btrfsitem/item_empty.go
index 47b4a15..7343c8f 100644
--- a/lib/btrfs/btrfsitem/item_empty.go
+++ b/lib/btrfs/btrfsitem/item_empty.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -8,6 +8,6 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
)
-type Empty struct { // ORPHAN_ITEM=48 TREE_BLOCK_REF=176 SHARED_BLOCK_REF=182 FREE_SPACE_EXTENT=199 QGROUP_RELATION=246
+type Empty struct { // trivial ORPHAN_ITEM=48 TREE_BLOCK_REF=176 SHARED_BLOCK_REF=182 FREE_SPACE_EXTENT=199 QGROUP_RELATION=246
binstruct.End `bin:"off=0"`
}
diff --git a/lib/btrfs/btrfsitem/item_extent.go b/lib/btrfs/btrfsitem/item_extent.go
index 66aae1d..3789cfe 100644
--- a/lib/btrfs/btrfsitem/item_extent.go
+++ b/lib/btrfs/btrfsitem/item_extent.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -9,18 +9,44 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
+ "git.lukeshu.com/btrfs-progs-ng/lib/containers"
"git.lukeshu.com/btrfs-progs-ng/lib/fmtutil"
)
// key.objectid = laddr of the extent
// key.offset = length of the extent
-type Extent struct { // EXTENT_ITEM=168
+type Extent struct { // complex EXTENT_ITEM=168
Head ExtentHeader
Info TreeBlockInfo // only if .Head.Flags.Has(EXTENT_FLAG_TREE_BLOCK)
Refs []ExtentInlineRef
}
+var extentInlineRefPool containers.SlicePool[ExtentInlineRef]
+
+func (o *Extent) Free() {
+ for i := range o.Refs {
+ if o.Refs[i].Body != nil {
+ o.Refs[i].Body.Free()
+ }
+ o.Refs[i] = ExtentInlineRef{}
+ }
+ extentInlineRefPool.Put(o.Refs)
+ *o = Extent{}
+ extentPool.Put(o)
+}
+
+func (o Extent) Clone() Extent {
+ ret := o
+ ret.Refs = extentInlineRefPool.Get(len(o.Refs))
+ copy(ret.Refs, o.Refs)
+ for i := range ret.Refs {
+ ret.Refs[i].Body = o.Refs[i].Body.CloneItem()
+ }
+ return ret
+}
+
func (o *Extent) UnmarshalBinary(dat []byte) (int, error) {
+ *o = Extent{}
n, err := binstruct.Unmarshal(dat, &o.Head)
if err != nil {
return n, err
@@ -32,7 +58,9 @@ func (o *Extent) UnmarshalBinary(dat []byte) (int, error) {
return n, err
}
}
- o.Refs = nil
+ if n < len(dat) {
+ o.Refs = extentInlineRefPool.Get(1)[:0]
+ }
for n < len(dat) {
var ref ExtentInlineRef
_n, err := binstruct.Unmarshal(dat[n:], &ref)
@@ -114,8 +142,8 @@ func (o *ExtentInlineRef) UnmarshalBinary(dat []byte) (int, error) {
return n, err
}
case EXTENT_DATA_REF_KEY:
- var dref ExtentDataRef
- _n, err := binstruct.Unmarshal(dat[n:], &dref)
+ dref, _ := extentDataRefPool.Get()
+ _n, err := binstruct.Unmarshal(dat[n:], dref)
n += _n
o.Body = dref
if err != nil {
@@ -127,8 +155,8 @@ func (o *ExtentInlineRef) UnmarshalBinary(dat []byte) (int, error) {
if err != nil {
return n, err
}
- var sref SharedDataRef
- _n, err = binstruct.Unmarshal(dat[n:], &sref)
+ sref, _ := sharedDataRefPool.Get()
+ _n, err = binstruct.Unmarshal(dat[n:], sref)
n += _n
o.Body = sref
if err != nil {
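
Extent (and Metadata below) shows the deep-clone half of the "complex" pattern: the Refs elements carry an interface-typed Body drawn from a pool, so a plain copy of the slice would leave two owners of the same pooled Body. Clone therefore re-clones each Body via CloneItem, and Free releases each Body before returning the slice. A standalone sketch of that ownership rule, with hypothetical Ref/Body/Holder types standing in for ExtentInlineRef and Extent:

package main

import "fmt"

// Body is the nested, interface-typed payload (compare btrfsitem.Item).
type Body interface {
	CloneBody() Body
	Free()
}

type Leaf struct{ N int }

func (l *Leaf) CloneBody() Body { c := *l; return &c }
func (l *Leaf) Free()           { *l = Leaf{} } // a real Free would also re-pool it

// Ref pairs plain fields with an interface-typed Body, like ExtentInlineRef.
type Ref struct {
	Kind int
	Body Body
}

// Holder owns a slice of Refs, like Extent owns .Refs.
type Holder struct{ Refs []Ref }

// Clone copies the slice, then re-clones each Body so the two Holders
// never share a pooled Body.
func (h Holder) Clone() Holder {
	ret := h
	ret.Refs = make([]Ref, len(h.Refs)) // the real code draws this from a SlicePool
	copy(ret.Refs, h.Refs)
	for i := range ret.Refs {
		if ret.Refs[i].Body != nil {
			ret.Refs[i].Body = h.Refs[i].Body.CloneBody()
		}
	}
	return ret
}

// Free releases each owned Body, then the slice itself.
func (h *Holder) Free() {
	for i := range h.Refs {
		if h.Refs[i].Body != nil {
			h.Refs[i].Body.Free()
		}
		h.Refs[i] = Ref{}
	}
	h.Refs = nil // the real code returns the slice to its pool
}

func main() {
	a := Holder{Refs: []Ref{{Kind: 1, Body: &Leaf{N: 7}}}}
	b := a.Clone()
	a.Free()
	fmt.Println(b.Refs[0].Body.(*Leaf).N) // still 7: b owns its own Body
}
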
diff --git a/lib/btrfs/btrfsitem/item_extentcsum.go b/lib/btrfs/btrfsitem/item_extentcsum.go
index bcfe334..dfa166d 100644
--- a/lib/btrfs/btrfsitem/item_extentcsum.go
+++ b/lib/btrfs/btrfsitem/item_extentcsum.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -13,7 +13,7 @@ import (
// key.objectid = BTRFS_EXTENT_CSUM_OBJECTID
// key.offset = laddr of checksummed region
-type ExtentCSum struct { // EXTENT_CSUM=128
+type ExtentCSum struct { // trivial EXTENT_CSUM=128
// Checksum of each sector starting at key.offset
btrfssum.SumRun[btrfsvol.LogicalAddr]
}
diff --git a/lib/btrfs/btrfsitem/item_extentdataref.go b/lib/btrfs/btrfsitem/item_extentdataref.go
index 8c856e2..6f2257b 100644
--- a/lib/btrfs/btrfsitem/item_extentdataref.go
+++ b/lib/btrfs/btrfsitem/item_extentdataref.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -11,7 +11,7 @@ import (
// key.objectid = laddr of the extent being referenced
// key.offset = crc32c([root,objectid,offset])
-type ExtentDataRef struct { // EXTENT_DATA_REF=178
+type ExtentDataRef struct { // trivial EXTENT_DATA_REF=178
Root btrfsprim.ObjID `bin:"off=0, siz=8"` // subvolume tree ID that references this extent
ObjectID btrfsprim.ObjID `bin:"off=8, siz=8"` // inode number that references this extent within the .Root subvolume
Offset int64 `bin:"off=16, siz=8"` // byte offset for the extent within the file
diff --git a/lib/btrfs/btrfsitem/item_fileextent.go b/lib/btrfs/btrfsitem/item_fileextent.go
index 83e5d34..30a14ef 100644
--- a/lib/btrfs/btrfsitem/item_fileextent.go
+++ b/lib/btrfs/btrfsitem/item_fileextent.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -14,7 +14,7 @@ import (
// key.objectid = inode
// key.offset = offset within file
-type FileExtent struct { // EXTENT_DATA=108
+type FileExtent struct { // complex EXTENT_DATA=108
Generation btrfsprim.Generation `bin:"off=0x0, siz=0x8"` // transaction ID that created this extent
RAMBytes int64 `bin:"off=0x8, siz=0x8"` // upper bound of what compressed data will decompress to
@@ -46,6 +46,17 @@ type FileExtentExtent struct {
binstruct.End `bin:"off=0x20"`
}
+func (o *FileExtent) Free() {
+ bytePool.Put(o.BodyInline)
+ *o = FileExtent{}
+ fileExtentPool.Put(o)
+}
+
+func (o FileExtent) Clone() FileExtent {
+ o.BodyInline = cloneBytes(o.BodyInline)
+ return o
+}
+
func (o *FileExtent) UnmarshalBinary(dat []byte) (int, error) {
n, err := binstruct.UnmarshalWithoutInterface(dat, o)
if err != nil {
@@ -53,7 +64,7 @@ func (o *FileExtent) UnmarshalBinary(dat []byte) (int, error) {
}
switch o.Type {
case FILE_EXTENT_INLINE:
- o.BodyInline = dat[n:]
+ o.BodyInline = cloneBytes(dat[n:])
n += len(o.BodyInline)
case FILE_EXTENT_REG, FILE_EXTENT_PREALLOC:
_n, err := binstruct.Unmarshal(dat[n:], &o.BodyExtent)
diff --git a/lib/btrfs/btrfsitem/item_freespacebitmap.go b/lib/btrfs/btrfsitem/item_freespacebitmap.go
index ad46204..ebc00e4 100644
--- a/lib/btrfs/btrfsitem/item_freespacebitmap.go
+++ b/lib/btrfs/btrfsitem/item_freespacebitmap.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -6,13 +6,26 @@ package btrfsitem
// key.objectid = object ID of the FreeSpaceInfo (logical_addr)
// key.offset = offset of the FreeSpaceInfo (size)
-type FreeSpaceBitmap []byte // FREE_SPACE_BITMAP=200
+type FreeSpaceBitmap struct { // complex FREE_SPACE_BITMAP=200
+ Bitmap []byte
+}
+
+func (o *FreeSpaceBitmap) Free() {
+ bytePool.Put(o.Bitmap)
+ *o = FreeSpaceBitmap{}
+ freeSpaceBitmapPool.Put(o)
+}
+
+func (o FreeSpaceBitmap) Clone() FreeSpaceBitmap {
+ o.Bitmap = cloneBytes(o.Bitmap)
+ return o
+}
func (o *FreeSpaceBitmap) UnmarshalBinary(dat []byte) (int, error) {
- *o = dat
+ o.Bitmap = cloneBytes(dat)
return len(dat), nil
}
func (o FreeSpaceBitmap) MarshalBinary() ([]byte, error) {
- return []byte(o), nil
+ return append([]byte(nil), o.Bitmap...), nil
}
diff --git a/lib/btrfs/btrfsitem/item_freespaceinfo.go b/lib/btrfs/btrfsitem/item_freespaceinfo.go
index b38da20..0699367 100644
--- a/lib/btrfs/btrfsitem/item_freespaceinfo.go
+++ b/lib/btrfs/btrfsitem/item_freespaceinfo.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -11,7 +11,7 @@ import (
// key.objectid = object ID of the BlockGroup (logical_addr)
// key.offset = offset of the BlockGroup (size)
-type FreeSpaceInfo struct { // FREE_SPACE_INFO=198
+type FreeSpaceInfo struct { // trivial FREE_SPACE_INFO=198
ExtentCount int32 `bin:"off=0, siz=4"`
Flags FreeSpaceFlags `bin:"off=4, siz=4"`
binstruct.End `bin:"off=8"`
diff --git a/lib/btrfs/btrfsitem/item_inode.go b/lib/btrfs/btrfsitem/item_inode.go
index 704b56a..69f8445 100644
--- a/lib/btrfs/btrfsitem/item_inode.go
+++ b/lib/btrfs/btrfsitem/item_inode.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -12,7 +12,7 @@ import (
// key.objectid = inode number
// key.offset = 0
-type Inode struct { // INODE_ITEM=1
+type Inode struct { // trivial INODE_ITEM=1
Generation btrfsprim.Generation `bin:"off=0x00, siz=0x08"`
TransID int64 `bin:"off=0x08, siz=0x08"`
Size int64 `bin:"off=0x10, siz=0x08"` // stat
diff --git a/lib/btrfs/btrfsitem/item_inoderef.go b/lib/btrfs/btrfsitem/item_inoderef.go
index 083f19e..074b26d 100644
--- a/lib/btrfs/btrfsitem/item_inoderef.go
+++ b/lib/btrfs/btrfsitem/item_inoderef.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -9,6 +9,7 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
"git.lukeshu.com/btrfs-progs-ng/lib/binstruct/binutil"
+ "git.lukeshu.com/btrfs-progs-ng/lib/containers"
)
// key.objectid = inode number of the file
@@ -16,10 +17,37 @@ import (
//
// Might have multiple entries if the same file has multiple hardlinks
// in the same directory.
-type InodeRefs []InodeRef // INODE_REF=12
+type InodeRefs struct { // complex INODE_REF=12
+ Refs []InodeRef
+}
+
+var inodeRefPool containers.SlicePool[InodeRef]
+
+func (o *InodeRefs) Free() {
+ for i := range o.Refs {
+ bytePool.Put(o.Refs[i].Name)
+ o.Refs[i] = InodeRef{}
+ }
+ inodeRefPool.Put(o.Refs)
+ *o = InodeRefs{}
+ inodeRefsPool.Put(o)
+}
+
+func (o InodeRefs) Clone() InodeRefs {
+ var ret InodeRefs
+ ret.Refs = inodeRefPool.Get(len(o.Refs))
+ copy(ret.Refs, o.Refs)
+ for i := range ret.Refs {
+ ret.Refs[i].Name = cloneBytes(o.Refs[i].Name)
+ }
+ return ret
+}
func (o *InodeRefs) UnmarshalBinary(dat []byte) (int, error) {
- *o = nil
+ o.Refs = nil
+ if len(dat) > 0 {
+ o.Refs = inodeRefPool.Get(1)[:0]
+ }
n := 0
for n < len(dat) {
var ref InodeRef
@@ -28,14 +56,14 @@ func (o *InodeRefs) UnmarshalBinary(dat []byte) (int, error) {
if err != nil {
return n, err
}
- *o = append(*o, ref)
+ o.Refs = append(o.Refs, ref)
}
return n, nil
}
func (o InodeRefs) MarshalBinary() ([]byte, error) {
var dat []byte
- for _, ref := range o {
+ for _, ref := range o.Refs {
_dat, err := binstruct.Marshal(ref)
dat = append(dat, _dat...)
if err != nil {
@@ -68,7 +96,7 @@ func (o *InodeRef) UnmarshalBinary(dat []byte) (int, error) {
return 0, err
}
dat = dat[n:]
- o.Name = dat[:o.NameLen]
+ o.Name = cloneBytes(dat[:o.NameLen])
n += int(o.NameLen)
return n, nil
}
diff --git a/lib/btrfs/btrfsitem/item_metadata.go b/lib/btrfs/btrfsitem/item_metadata.go
index e90af8d..db2315e 100644
--- a/lib/btrfs/btrfsitem/item_metadata.go
+++ b/lib/btrfs/btrfsitem/item_metadata.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -9,17 +9,42 @@ import (
)
// Metadata is like Extent, but doesn't have .Info.
-type Metadata struct { // METADATA_ITEM=169
+type Metadata struct { // complex METADATA_ITEM=169
Head ExtentHeader
Refs []ExtentInlineRef
}
+func (o *Metadata) Free() {
+ for i := range o.Refs {
+ if o.Refs[i].Body != nil {
+ o.Refs[i].Body.Free()
+ }
+ o.Refs[i] = ExtentInlineRef{}
+ }
+ extentInlineRefPool.Put(o.Refs)
+ *o = Metadata{}
+ metadataPool.Put(o)
+}
+
+func (o Metadata) Clone() Metadata {
+ ret := o
+ ret.Refs = extentInlineRefPool.Get(len(o.Refs))
+ copy(ret.Refs, o.Refs)
+ for i := range ret.Refs {
+ ret.Refs[i].Body = o.Refs[i].Body.CloneItem()
+ }
+ return ret
+}
+
func (o *Metadata) UnmarshalBinary(dat []byte) (int, error) {
+ *o = Metadata{}
n, err := binstruct.Unmarshal(dat, &o.Head)
if err != nil {
return n, err
}
- o.Refs = nil
+ if n < len(dat) {
+ o.Refs = extentInlineRefPool.Get(1)[:0]
+ }
for n < len(dat) {
var ref ExtentInlineRef
_n, err := binstruct.Unmarshal(dat[n:], &ref)
diff --git a/lib/btrfs/btrfsitem/item_persistent.go b/lib/btrfs/btrfsitem/item_persistent.go
index a827074..4655aee 100644
--- a/lib/btrfs/btrfsitem/item_persistent.go
+++ b/lib/btrfs/btrfsitem/item_persistent.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -17,7 +17,7 @@ const (
DEV_STAT_VALUES_MAX
)
-type DevStats struct { // PERSISTENT_ITEM=249
+type DevStats struct { // trivial PERSISTENT_ITEM=249
Values [DEV_STAT_VALUES_MAX]int64 `bin:"off=0, siz=40"`
binstruct.End `bin:"off=40"`
}
diff --git a/lib/btrfs/btrfsitem/item_qgroupinfo.go b/lib/btrfs/btrfsitem/item_qgroupinfo.go
index 8cceb0b..6699030 100644
--- a/lib/btrfs/btrfsitem/item_qgroupinfo.go
+++ b/lib/btrfs/btrfsitem/item_qgroupinfo.go
@@ -11,7 +11,7 @@ import (
// key.objectid = 0
// key.offset = ID of the qgroup
-type QGroupInfo struct { // QGROUP_INFO=242
+type QGroupInfo struct { // trivial QGROUP_INFO=242
Generation btrfsprim.Generation `bin:"off=0, siz=8"`
ReferencedBytes uint64 `bin:"off=8, siz=8"`
ReferencedBytesCompressed uint64 `bin:"off=16, siz=8"`
diff --git a/lib/btrfs/btrfsitem/item_qgrouplimit.go b/lib/btrfs/btrfsitem/item_qgrouplimit.go
index 9d1e05f..47f7eca 100644
--- a/lib/btrfs/btrfsitem/item_qgrouplimit.go
+++ b/lib/btrfs/btrfsitem/item_qgrouplimit.go
@@ -36,7 +36,7 @@ func (f QGroupLimitFlags) String() string {
// key.objectid = 0
// key.offset = ID of the qgroup
-type QGroupLimit struct { // QGROUP_LIMIT=244
+type QGroupLimit struct { // trivial QGROUP_LIMIT=244
Flags QGroupLimitFlags `bin:"off=0, siz=8"`
MaxReferenced uint64 `bin:"off=8, siz=8"`
MaxExclusive uint64 `bin:"off=16, siz=8"`
diff --git a/lib/btrfs/btrfsitem/item_qgroupstatus.go b/lib/btrfs/btrfsitem/item_qgroupstatus.go
index e7bd62c..346c913 100644
--- a/lib/btrfs/btrfsitem/item_qgroupstatus.go
+++ b/lib/btrfs/btrfsitem/item_qgroupstatus.go
@@ -34,7 +34,7 @@ const QGroupStatusVersion uint64 = 1
// key.objectid = 0
// key.offset = 0
-type QGroupStatus struct { // QGROUP_STATUS=240
+type QGroupStatus struct { // trivial QGROUP_STATUS=240
Version uint64 `bin:"off=0, siz=8"`
Generation btrfsprim.Generation `bin:"off=8, siz=8"`
Flags QGroupStatusFlags `bin:"off=16, siz=8"`
diff --git a/lib/btrfs/btrfsitem/item_root.go b/lib/btrfs/btrfsitem/item_root.go
index ffbbf4d..c0db900 100644
--- a/lib/btrfs/btrfsitem/item_root.go
+++ b/lib/btrfs/btrfsitem/item_root.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -15,7 +15,7 @@ import (
// key.offset = either
// - 0 if objectid is one of the BTRFS_*_TREE_OBJECTID defines or a non-snapshot volume; or
// - transaction_id of when this snapshot was created
-type Root struct { // ROOT_ITEM=132
+type Root struct { // trivial ROOT_ITEM=132
Inode Inode `bin:"off=0x000, siz=0xa0"` // ???
Generation btrfsprim.Generation `bin:"off=0x0a0, siz=0x08"`
RootDirID btrfsprim.ObjID `bin:"off=0x0a8, siz=0x08"` // inode number of the root inode
diff --git a/lib/btrfs/btrfsitem/item_rootref.go b/lib/btrfs/btrfsitem/item_rootref.go
index b33883d..4179890 100644
--- a/lib/btrfs/btrfsitem/item_rootref.go
+++ b/lib/btrfs/btrfsitem/item_rootref.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -19,7 +19,7 @@ import (
// ROOT_REF | ROOT_BACKREF
// key.objectid = ID of the parent subvolume | ID of the child subvolume
// key.offset = ID of the child subvolume | ID of the parent subvolume
-type RootRef struct { // ROOT_REF=156 ROOT_BACKREF=144
+type RootRef struct { // complex ROOT_REF=156 ROOT_BACKREF=144
DirID btrfsprim.ObjID `bin:"off=0x00, siz=0x8"` // inode of the parent directory of the dir entry
Sequence int64 `bin:"off=0x08, siz=0x8"` // index of that dir entry within the parent
NameLen uint16 `bin:"off=0x10, siz=0x2"` // [ignored-when-writing]
@@ -27,6 +27,17 @@ type RootRef struct { // ROOT_REF=156 ROOT_BACKREF=144
Name []byte `bin:"-"`
}
+func (o *RootRef) Free() {
+ bytePool.Put(o.Name)
+ *o = RootRef{}
+ rootRefPool.Put(o)
+}
+
+func (o RootRef) Clone() RootRef {
+ o.Name = cloneBytes(o.Name)
+ return o
+}
+
func (o *RootRef) UnmarshalBinary(dat []byte) (int, error) {
if err := binutil.NeedNBytes(dat, 0x12); err != nil {
return 0, err
@@ -42,7 +53,7 @@ func (o *RootRef) UnmarshalBinary(dat []byte) (int, error) {
if err := binutil.NeedNBytes(dat, 0x12+int(o.NameLen)); err != nil {
return 0, err
}
- o.Name = dat[n : n+int(o.NameLen)]
+ o.Name = cloneBytes(dat[n : n+int(o.NameLen)])
n += int(o.NameLen)
return n, nil
}
diff --git a/lib/btrfs/btrfsitem/item_shareddataref.go b/lib/btrfs/btrfsitem/item_shareddataref.go
index d7765af..6143a5c 100644
--- a/lib/btrfs/btrfsitem/item_shareddataref.go
+++ b/lib/btrfs/btrfsitem/item_shareddataref.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -12,7 +12,7 @@ import (
//
// key.offset = laddr of the leaf node containing the FileExtent
// (EXTENT_DATA_KEY) for this reference.
-type SharedDataRef struct { // SHARED_DATA_REF=184
+type SharedDataRef struct { // trivial SHARED_DATA_REF=184
Count int32 `bin:"off=0, siz=4"` // reference count
binstruct.End `bin:"off=4"`
}
diff --git a/lib/btrfs/btrfsitem/item_untyped.go b/lib/btrfs/btrfsitem/item_untyped.go
index acf4ebe..9bda094 100644
--- a/lib/btrfs/btrfsitem/item_untyped.go
+++ b/lib/btrfs/btrfsitem/item_untyped.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -9,7 +9,7 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
)
-type FreeSpaceHeader struct { // UNTYPED=0:FREE_SPACE_OBJECTID
+type FreeSpaceHeader struct { // trivial UNTYPED=0:FREE_SPACE_OBJECTID
Location btrfsprim.Key `bin:"off=0x00, siz=0x11"`
Generation btrfsprim.Generation `bin:"off=0x11, siz=0x8"`
NumEntries int64 `bin:"off=0x19, siz=0x8"`
diff --git a/lib/btrfs/btrfsitem/item_uuid.go b/lib/btrfs/btrfsitem/item_uuid.go
index 5f5f357..fca409d 100644
--- a/lib/btrfs/btrfsitem/item_uuid.go
+++ b/lib/btrfs/btrfsitem/item_uuid.go
@@ -16,7 +16,7 @@ import (
//
// key.objectid = first half of UUID
// key.offset = second half of UUID
-type UUIDMap struct { // UUID_SUBVOL=251 UUID_RECEIVED_SUBVOL=252
+type UUIDMap struct { // trivial UUID_SUBVOL=251 UUID_RECEIVED_SUBVOL=252
ObjID btrfsprim.ObjID `bin:"off=0, siz=8"`
binstruct.End `bin:"off=8"`
}
diff --git a/lib/btrfs/btrfsitem/items.go b/lib/btrfs/btrfsitem/items.go
index 67f96fa..49d421f 100644
--- a/lib/btrfs/btrfsitem/items.go
+++ b/lib/btrfs/btrfsitem/items.go
@@ -8,16 +8,21 @@ import (
"fmt"
"reflect"
+ "git.lukeshu.com/go/typedsync"
+
"git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/containers"
)
type Type = btrfsprim.ItemType
type Item interface {
isItem()
+ Free()
+ CloneItem() Item
}
type Error struct {
@@ -25,7 +30,22 @@ type Error struct {
Err error
}
-func (Error) isItem() {}
+var errorPool = &typedsync.Pool[*Error]{New: func() *Error { return new(Error) }}
+
+func (*Error) isItem() {}
+
+func (o *Error) Free() {
+ *o = Error{}
+ errorPool.Put(o)
+}
+
+func (o Error) Clone() Error { return o }
+
+func (o *Error) CloneItem() Item {
+ ret, _ := errorPool.Get()
+ *ret = *o
+ return ret
+}
func (o Error) MarshalBinary() ([]byte, error) {
return o.Dat, nil
@@ -43,41 +63,58 @@ func UnmarshalItem(key btrfsprim.Key, csumType btrfssum.CSumType, dat []byte) It
var ok bool
gotyp, ok = untypedObjID2gotype[key.ObjectID]
if !ok {
- return Error{
+ ret, _ := errorPool.Get()
+ *ret = Error{
Dat: dat,
Err: fmt.Errorf("btrfsitem.UnmarshalItem({ItemType:%v, ObjectID:%v}, dat): unknown object ID for untyped item",
key.ItemType, key.ObjectID),
}
+ return ret
}
} else {
var ok bool
gotyp, ok = keytype2gotype[key.ItemType]
if !ok {
- return Error{
+ ret, _ := errorPool.Get()
+ *ret = Error{
Dat: dat,
Err: fmt.Errorf("btrfsitem.UnmarshalItem({ItemType:%v}, dat): unknown item type", key.ItemType),
}
+ return ret
}
}
- retPtr := reflect.New(gotyp)
- if csums, ok := retPtr.Interface().(*ExtentCSum); ok {
+ ptr, _ := gotype2pool[gotyp].Get()
+ if csums, ok := ptr.(*ExtentCSum); ok {
csums.ChecksumSize = csumType.Size()
csums.Addr = btrfsvol.LogicalAddr(key.Offset)
}
- n, err := binstruct.Unmarshal(dat, retPtr.Interface())
+ n, err := binstruct.Unmarshal(dat, ptr)
if err != nil {
- return Error{
+ ptr.Free()
+ ret, _ := errorPool.Get()
+ *ret = Error{
Dat: dat,
Err: fmt.Errorf("btrfsitem.UnmarshalItem({ItemType:%v}, dat): %w", key.ItemType, err),
}
+ return ret
}
if n < len(dat) {
- return Error{
+ ptr.Free()
+ ret, _ := errorPool.Get()
+ *ret = Error{
Dat: dat,
Err: fmt.Errorf("btrfsitem.UnmarshalItem({ItemType:%v}, dat): left over data: got %v bytes but only consumed %v",
key.ItemType, len(dat), n),
}
+ return ret
}
- //nolint:forcetypeassert // items_gen.go has all types in keytype2gotype implement the Item interface.
- return retPtr.Elem().Interface().(Item)
+ return ptr
+}
+
+var bytePool containers.SlicePool[byte]
+
+func cloneBytes(in []byte) []byte {
+ out := bytePool.Get(len(in))
+ copy(out, in)
+ return out
}
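
The cloneBytes helper is the other half of the ownership change running through this diff: fields such as DirEntry.Name and FileExtent.BodyInline used to alias the node's read buffer, which stops being safe once nodes and items are pooled and their memory gets reused. A tiny illustration of the hazard and the fix; plain make stands in for the byte SlicePool, and buffer reuse is simulated by hand:

package main

import "fmt"

// cloneBytes copies a sub-slice of a shared read buffer into storage the
// item owns (the real helper draws that storage from a byte SlicePool).
func cloneBytes(in []byte) []byte {
	out := make([]byte, len(in))
	copy(out, in)
	return out
}

func main() {
	nodeBuf := []byte("name=foo")

	aliased := nodeBuf[5:8]           // old behavior: Name aliases the node buffer
	owned := cloneBytes(nodeBuf[5:8]) // new behavior: Name owns its bytes

	copy(nodeBuf, "XXXXXXXX") // simulate the node buffer being recycled

	fmt.Printf("%s %s\n", aliased, owned) // "XXX foo"
}

The ops.go changes below show the caller-side half of the same contract: TreeSearch and TreeSearchAll clone an item's Body before freeing the node it was decoded from, so returned items never point into pooled node memory.
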
diff --git a/lib/btrfs/btrfsitem/items_gen.go b/lib/btrfs/btrfsitem/items_gen.go
index 9daef81..31d2a76 100644
--- a/lib/btrfs/btrfsitem/items_gen.go
+++ b/lib/btrfs/btrfsitem/items_gen.go
@@ -5,6 +5,8 @@ package btrfsitem
import (
"reflect"
+ "git.lukeshu.com/go/typedsync"
+
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsprim"
)
@@ -43,64 +45,345 @@ const (
XATTR_ITEM_KEY = btrfsprim.XATTR_ITEM_KEY
)
+var (
+ blockGroupType = reflect.TypeOf(BlockGroup{})
+ chunkType = reflect.TypeOf(Chunk{})
+ devType = reflect.TypeOf(Dev{})
+ devExtentType = reflect.TypeOf(DevExtent{})
+ devStatsType = reflect.TypeOf(DevStats{})
+ dirEntryType = reflect.TypeOf(DirEntry{})
+ emptyType = reflect.TypeOf(Empty{})
+ extentType = reflect.TypeOf(Extent{})
+ extentCSumType = reflect.TypeOf(ExtentCSum{})
+ extentDataRefType = reflect.TypeOf(ExtentDataRef{})
+ fileExtentType = reflect.TypeOf(FileExtent{})
+ freeSpaceBitmapType = reflect.TypeOf(FreeSpaceBitmap{})
+ freeSpaceHeaderType = reflect.TypeOf(FreeSpaceHeader{})
+ freeSpaceInfoType = reflect.TypeOf(FreeSpaceInfo{})
+ inodeType = reflect.TypeOf(Inode{})
+ inodeRefsType = reflect.TypeOf(InodeRefs{})
+ metadataType = reflect.TypeOf(Metadata{})
+ qGroupInfoType = reflect.TypeOf(QGroupInfo{})
+ qGroupLimitType = reflect.TypeOf(QGroupLimit{})
+ qGroupStatusType = reflect.TypeOf(QGroupStatus{})
+ rootType = reflect.TypeOf(Root{})
+ rootRefType = reflect.TypeOf(RootRef{})
+ sharedDataRefType = reflect.TypeOf(SharedDataRef{})
+ uuidMapType = reflect.TypeOf(UUIDMap{})
+)
+
+// keytype2gotype is used by UnmarshalItem.
var keytype2gotype = map[Type]reflect.Type{
- BLOCK_GROUP_ITEM_KEY: reflect.TypeOf(BlockGroup{}),
- CHUNK_ITEM_KEY: reflect.TypeOf(Chunk{}),
- DEV_EXTENT_KEY: reflect.TypeOf(DevExtent{}),
- DEV_ITEM_KEY: reflect.TypeOf(Dev{}),
- DIR_INDEX_KEY: reflect.TypeOf(DirEntry{}),
- DIR_ITEM_KEY: reflect.TypeOf(DirEntry{}),
- EXTENT_CSUM_KEY: reflect.TypeOf(ExtentCSum{}),
- EXTENT_DATA_KEY: reflect.TypeOf(FileExtent{}),
- EXTENT_DATA_REF_KEY: reflect.TypeOf(ExtentDataRef{}),
- EXTENT_ITEM_KEY: reflect.TypeOf(Extent{}),
- FREE_SPACE_BITMAP_KEY: reflect.TypeOf(FreeSpaceBitmap{}),
- FREE_SPACE_EXTENT_KEY: reflect.TypeOf(Empty{}),
- FREE_SPACE_INFO_KEY: reflect.TypeOf(FreeSpaceInfo{}),
- INODE_ITEM_KEY: reflect.TypeOf(Inode{}),
- INODE_REF_KEY: reflect.TypeOf(InodeRefs{}),
- METADATA_ITEM_KEY: reflect.TypeOf(Metadata{}),
- ORPHAN_ITEM_KEY: reflect.TypeOf(Empty{}),
- PERSISTENT_ITEM_KEY: reflect.TypeOf(DevStats{}),
- QGROUP_INFO_KEY: reflect.TypeOf(QGroupInfo{}),
- QGROUP_LIMIT_KEY: reflect.TypeOf(QGroupLimit{}),
- QGROUP_RELATION_KEY: reflect.TypeOf(Empty{}),
- QGROUP_STATUS_KEY: reflect.TypeOf(QGroupStatus{}),
- ROOT_BACKREF_KEY: reflect.TypeOf(RootRef{}),
- ROOT_ITEM_KEY: reflect.TypeOf(Root{}),
- ROOT_REF_KEY: reflect.TypeOf(RootRef{}),
- SHARED_BLOCK_REF_KEY: reflect.TypeOf(Empty{}),
- SHARED_DATA_REF_KEY: reflect.TypeOf(SharedDataRef{}),
- TREE_BLOCK_REF_KEY: reflect.TypeOf(Empty{}),
- UUID_RECEIVED_SUBVOL_KEY: reflect.TypeOf(UUIDMap{}),
- UUID_SUBVOL_KEY: reflect.TypeOf(UUIDMap{}),
- XATTR_ITEM_KEY: reflect.TypeOf(DirEntry{}),
+ BLOCK_GROUP_ITEM_KEY: blockGroupType,
+ CHUNK_ITEM_KEY: chunkType,
+ DEV_EXTENT_KEY: devExtentType,
+ DEV_ITEM_KEY: devType,
+ DIR_INDEX_KEY: dirEntryType,
+ DIR_ITEM_KEY: dirEntryType,
+ EXTENT_CSUM_KEY: extentCSumType,
+ EXTENT_DATA_KEY: fileExtentType,
+ EXTENT_DATA_REF_KEY: extentDataRefType,
+ EXTENT_ITEM_KEY: extentType,
+ FREE_SPACE_BITMAP_KEY: freeSpaceBitmapType,
+ FREE_SPACE_EXTENT_KEY: emptyType,
+ FREE_SPACE_INFO_KEY: freeSpaceInfoType,
+ INODE_ITEM_KEY: inodeType,
+ INODE_REF_KEY: inodeRefsType,
+ METADATA_ITEM_KEY: metadataType,
+ ORPHAN_ITEM_KEY: emptyType,
+ PERSISTENT_ITEM_KEY: devStatsType,
+ QGROUP_INFO_KEY: qGroupInfoType,
+ QGROUP_LIMIT_KEY: qGroupLimitType,
+ QGROUP_RELATION_KEY: emptyType,
+ QGROUP_STATUS_KEY: qGroupStatusType,
+ ROOT_BACKREF_KEY: rootRefType,
+ ROOT_ITEM_KEY: rootType,
+ ROOT_REF_KEY: rootRefType,
+ SHARED_BLOCK_REF_KEY: emptyType,
+ SHARED_DATA_REF_KEY: sharedDataRefType,
+ TREE_BLOCK_REF_KEY: emptyType,
+ UUID_RECEIVED_SUBVOL_KEY: uuidMapType,
+ UUID_SUBVOL_KEY: uuidMapType,
+ XATTR_ITEM_KEY: dirEntryType,
}
+
+// untypedObjID2gotype is used by UnmarshalItem.
var untypedObjID2gotype = map[btrfsprim.ObjID]reflect.Type{
- btrfsprim.FREE_SPACE_OBJECTID: reflect.TypeOf(FreeSpaceHeader{}),
-}
-
-func (BlockGroup) isItem() {}
-func (Chunk) isItem() {}
-func (Dev) isItem() {}
-func (DevExtent) isItem() {}
-func (DevStats) isItem() {}
-func (DirEntry) isItem() {}
-func (Empty) isItem() {}
-func (Extent) isItem() {}
-func (ExtentCSum) isItem() {}
-func (ExtentDataRef) isItem() {}
-func (FileExtent) isItem() {}
-func (FreeSpaceBitmap) isItem() {}
-func (FreeSpaceHeader) isItem() {}
-func (FreeSpaceInfo) isItem() {}
-func (Inode) isItem() {}
-func (InodeRefs) isItem() {}
-func (Metadata) isItem() {}
-func (QGroupInfo) isItem() {}
-func (QGroupLimit) isItem() {}
-func (QGroupStatus) isItem() {}
-func (Root) isItem() {}
-func (RootRef) isItem() {}
-func (SharedDataRef) isItem() {}
-func (UUIDMap) isItem() {}
+ btrfsprim.FREE_SPACE_OBJECTID: freeSpaceHeaderType,
+}
+
+// Pools.
+var (
+ blockGroupPool = typedsync.Pool[Item]{New: func() Item { return new(BlockGroup) }}
+ chunkPool = typedsync.Pool[Item]{New: func() Item { return new(Chunk) }}
+ devPool = typedsync.Pool[Item]{New: func() Item { return new(Dev) }}
+ devExtentPool = typedsync.Pool[Item]{New: func() Item { return new(DevExtent) }}
+ devStatsPool = typedsync.Pool[Item]{New: func() Item { return new(DevStats) }}
+ dirEntryPool = typedsync.Pool[Item]{New: func() Item { return new(DirEntry) }}
+ emptyPool = typedsync.Pool[Item]{New: func() Item { return new(Empty) }}
+ extentPool = typedsync.Pool[Item]{New: func() Item { return new(Extent) }}
+ extentCSumPool = typedsync.Pool[Item]{New: func() Item { return new(ExtentCSum) }}
+ extentDataRefPool = typedsync.Pool[Item]{New: func() Item { return new(ExtentDataRef) }}
+ fileExtentPool = typedsync.Pool[Item]{New: func() Item { return new(FileExtent) }}
+ freeSpaceBitmapPool = typedsync.Pool[Item]{New: func() Item { return new(FreeSpaceBitmap) }}
+ freeSpaceHeaderPool = typedsync.Pool[Item]{New: func() Item { return new(FreeSpaceHeader) }}
+ freeSpaceInfoPool = typedsync.Pool[Item]{New: func() Item { return new(FreeSpaceInfo) }}
+ inodePool = typedsync.Pool[Item]{New: func() Item { return new(Inode) }}
+ inodeRefsPool = typedsync.Pool[Item]{New: func() Item { return new(InodeRefs) }}
+ metadataPool = typedsync.Pool[Item]{New: func() Item { return new(Metadata) }}
+ qGroupInfoPool = typedsync.Pool[Item]{New: func() Item { return new(QGroupInfo) }}
+ qGroupLimitPool = typedsync.Pool[Item]{New: func() Item { return new(QGroupLimit) }}
+ qGroupStatusPool = typedsync.Pool[Item]{New: func() Item { return new(QGroupStatus) }}
+ rootPool = typedsync.Pool[Item]{New: func() Item { return new(Root) }}
+ rootRefPool = typedsync.Pool[Item]{New: func() Item { return new(RootRef) }}
+ sharedDataRefPool = typedsync.Pool[Item]{New: func() Item { return new(SharedDataRef) }}
+ uuidMapPool = typedsync.Pool[Item]{New: func() Item { return new(UUIDMap) }}
+)
+
+// gotype2pool is used by UnmarshalItem.
+var gotype2pool = map[reflect.Type]*typedsync.Pool[Item]{
+ blockGroupType: &blockGroupPool,
+ chunkType: &chunkPool,
+ devType: &devPool,
+ devExtentType: &devExtentPool,
+ devStatsType: &devStatsPool,
+ dirEntryType: &dirEntryPool,
+ emptyType: &emptyPool,
+ extentType: &extentPool,
+ extentCSumType: &extentCSumPool,
+ extentDataRefType: &extentDataRefPool,
+ fileExtentType: &fileExtentPool,
+ freeSpaceBitmapType: &freeSpaceBitmapPool,
+ freeSpaceHeaderType: &freeSpaceHeaderPool,
+ freeSpaceInfoType: &freeSpaceInfoPool,
+ inodeType: &inodePool,
+ inodeRefsType: &inodeRefsPool,
+ metadataType: &metadataPool,
+ qGroupInfoType: &qGroupInfoPool,
+ qGroupLimitType: &qGroupLimitPool,
+ qGroupStatusType: &qGroupStatusPool,
+ rootType: &rootPool,
+ rootRefType: &rootRefPool,
+ sharedDataRefType: &sharedDataRefPool,
+ uuidMapType: &uuidMapPool,
+}
+
+// isItem implements Item.
+func (*BlockGroup) isItem() {}
+func (*Chunk) isItem() {}
+func (*Dev) isItem() {}
+func (*DevExtent) isItem() {}
+func (*DevStats) isItem() {}
+func (*DirEntry) isItem() {}
+func (*Empty) isItem() {}
+func (*Extent) isItem() {}
+func (*ExtentCSum) isItem() {}
+func (*ExtentDataRef) isItem() {}
+func (*FileExtent) isItem() {}
+func (*FreeSpaceBitmap) isItem() {}
+func (*FreeSpaceHeader) isItem() {}
+func (*FreeSpaceInfo) isItem() {}
+func (*Inode) isItem() {}
+func (*InodeRefs) isItem() {}
+func (*Metadata) isItem() {}
+func (*QGroupInfo) isItem() {}
+func (*QGroupLimit) isItem() {}
+func (*QGroupStatus) isItem() {}
+func (*Root) isItem() {}
+func (*RootRef) isItem() {}
+func (*SharedDataRef) isItem() {}
+func (*UUIDMap) isItem() {}
+
+// Free implements Item.
+func (o *BlockGroup) Free() { *o = BlockGroup{}; blockGroupPool.Put(o) }
+func (o *Dev) Free() { *o = Dev{}; devPool.Put(o) }
+func (o *DevExtent) Free() { *o = DevExtent{}; devExtentPool.Put(o) }
+func (o *DevStats) Free() { *o = DevStats{}; devStatsPool.Put(o) }
+func (o *Empty) Free() { *o = Empty{}; emptyPool.Put(o) }
+func (o *ExtentCSum) Free() { *o = ExtentCSum{}; extentCSumPool.Put(o) }
+func (o *ExtentDataRef) Free() { *o = ExtentDataRef{}; extentDataRefPool.Put(o) }
+func (o *FreeSpaceHeader) Free() { *o = FreeSpaceHeader{}; freeSpaceHeaderPool.Put(o) }
+func (o *FreeSpaceInfo) Free() { *o = FreeSpaceInfo{}; freeSpaceInfoPool.Put(o) }
+func (o *Inode) Free() { *o = Inode{}; inodePool.Put(o) }
+func (o *QGroupInfo) Free() { *o = QGroupInfo{}; qGroupInfoPool.Put(o) }
+func (o *QGroupLimit) Free() { *o = QGroupLimit{}; qGroupLimitPool.Put(o) }
+func (o *QGroupStatus) Free() { *o = QGroupStatus{}; qGroupStatusPool.Put(o) }
+func (o *Root) Free() { *o = Root{}; rootPool.Put(o) }
+func (o *SharedDataRef) Free() { *o = SharedDataRef{}; sharedDataRefPool.Put(o) }
+func (o *UUIDMap) Free() { *o = UUIDMap{}; uuidMapPool.Put(o) }
+
+// Clone is a handy method.
+func (o BlockGroup) Clone() BlockGroup { return o }
+func (o Dev) Clone() Dev { return o }
+func (o DevExtent) Clone() DevExtent { return o }
+func (o DevStats) Clone() DevStats { return o }
+func (o Empty) Clone() Empty { return o }
+func (o ExtentCSum) Clone() ExtentCSum { return o }
+func (o ExtentDataRef) Clone() ExtentDataRef { return o }
+func (o FreeSpaceHeader) Clone() FreeSpaceHeader { return o }
+func (o FreeSpaceInfo) Clone() FreeSpaceInfo { return o }
+func (o Inode) Clone() Inode { return o }
+func (o QGroupInfo) Clone() QGroupInfo { return o }
+func (o QGroupLimit) Clone() QGroupLimit { return o }
+func (o QGroupStatus) Clone() QGroupStatus { return o }
+func (o Root) Clone() Root { return o }
+func (o SharedDataRef) Clone() SharedDataRef { return o }
+func (o UUIDMap) Clone() UUIDMap { return o }
+
+// CloneItem implements Item.
+func (o *BlockGroup) CloneItem() Item {
+ ret, _ := blockGroupPool.Get()
+ *(ret.(*BlockGroup)) = o.Clone()
+ return ret
+}
+func (o *Chunk) CloneItem() Item { ret, _ := chunkPool.Get(); *(ret.(*Chunk)) = o.Clone(); return ret }
+func (o *Dev) CloneItem() Item { ret, _ := devPool.Get(); *(ret.(*Dev)) = o.Clone(); return ret }
+func (o *DevExtent) CloneItem() Item {
+ ret, _ := devExtentPool.Get()
+ *(ret.(*DevExtent)) = o.Clone()
+ return ret
+}
+func (o *DevStats) CloneItem() Item {
+ ret, _ := devStatsPool.Get()
+ *(ret.(*DevStats)) = o.Clone()
+ return ret
+}
+func (o *DirEntry) CloneItem() Item {
+ ret, _ := dirEntryPool.Get()
+ *(ret.(*DirEntry)) = o.Clone()
+ return ret
+}
+func (o *Empty) CloneItem() Item { ret, _ := emptyPool.Get(); *(ret.(*Empty)) = o.Clone(); return ret }
+func (o *Extent) CloneItem() Item {
+ ret, _ := extentPool.Get()
+ *(ret.(*Extent)) = o.Clone()
+ return ret
+}
+func (o *ExtentCSum) CloneItem() Item {
+ ret, _ := extentCSumPool.Get()
+ *(ret.(*ExtentCSum)) = o.Clone()
+ return ret
+}
+func (o *ExtentDataRef) CloneItem() Item {
+ ret, _ := extentDataRefPool.Get()
+ *(ret.(*ExtentDataRef)) = o.Clone()
+ return ret
+}
+func (o *FileExtent) CloneItem() Item {
+ ret, _ := fileExtentPool.Get()
+ *(ret.(*FileExtent)) = o.Clone()
+ return ret
+}
+func (o *FreeSpaceBitmap) CloneItem() Item {
+ ret, _ := freeSpaceBitmapPool.Get()
+ *(ret.(*FreeSpaceBitmap)) = o.Clone()
+ return ret
+}
+func (o *FreeSpaceHeader) CloneItem() Item {
+ ret, _ := freeSpaceHeaderPool.Get()
+ *(ret.(*FreeSpaceHeader)) = o.Clone()
+ return ret
+}
+func (o *FreeSpaceInfo) CloneItem() Item {
+ ret, _ := freeSpaceInfoPool.Get()
+ *(ret.(*FreeSpaceInfo)) = o.Clone()
+ return ret
+}
+func (o *Inode) CloneItem() Item { ret, _ := inodePool.Get(); *(ret.(*Inode)) = o.Clone(); return ret }
+func (o *InodeRefs) CloneItem() Item {
+ ret, _ := inodeRefsPool.Get()
+ *(ret.(*InodeRefs)) = o.Clone()
+ return ret
+}
+func (o *Metadata) CloneItem() Item {
+ ret, _ := metadataPool.Get()
+ *(ret.(*Metadata)) = o.Clone()
+ return ret
+}
+func (o *QGroupInfo) CloneItem() Item {
+ ret, _ := qGroupInfoPool.Get()
+ *(ret.(*QGroupInfo)) = o.Clone()
+ return ret
+}
+func (o *QGroupLimit) CloneItem() Item {
+ ret, _ := qGroupLimitPool.Get()
+ *(ret.(*QGroupLimit)) = o.Clone()
+ return ret
+}
+func (o *QGroupStatus) CloneItem() Item {
+ ret, _ := qGroupStatusPool.Get()
+ *(ret.(*QGroupStatus)) = o.Clone()
+ return ret
+}
+func (o *Root) CloneItem() Item { ret, _ := rootPool.Get(); *(ret.(*Root)) = o.Clone(); return ret }
+func (o *RootRef) CloneItem() Item {
+ ret, _ := rootRefPool.Get()
+ *(ret.(*RootRef)) = o.Clone()
+ return ret
+}
+func (o *SharedDataRef) CloneItem() Item {
+ ret, _ := sharedDataRefPool.Get()
+ *(ret.(*SharedDataRef)) = o.Clone()
+ return ret
+}
+func (o *UUIDMap) CloneItem() Item {
+ ret, _ := uuidMapPool.Get()
+ *(ret.(*UUIDMap)) = o.Clone()
+ return ret
+}
+
+// Item type assertions.
+var (
+ _ Item = (*BlockGroup)(nil)
+ _ Item = (*Chunk)(nil)
+ _ Item = (*Dev)(nil)
+ _ Item = (*DevExtent)(nil)
+ _ Item = (*DevStats)(nil)
+ _ Item = (*DirEntry)(nil)
+ _ Item = (*Empty)(nil)
+ _ Item = (*Extent)(nil)
+ _ Item = (*ExtentCSum)(nil)
+ _ Item = (*ExtentDataRef)(nil)
+ _ Item = (*FileExtent)(nil)
+ _ Item = (*FreeSpaceBitmap)(nil)
+ _ Item = (*FreeSpaceHeader)(nil)
+ _ Item = (*FreeSpaceInfo)(nil)
+ _ Item = (*Inode)(nil)
+ _ Item = (*InodeRefs)(nil)
+ _ Item = (*Metadata)(nil)
+ _ Item = (*QGroupInfo)(nil)
+ _ Item = (*QGroupLimit)(nil)
+ _ Item = (*QGroupStatus)(nil)
+ _ Item = (*Root)(nil)
+ _ Item = (*RootRef)(nil)
+ _ Item = (*SharedDataRef)(nil)
+ _ Item = (*UUIDMap)(nil)
+)
+
+// Clone type assertions.
+var (
+ _ interface{ Clone() BlockGroup } = BlockGroup{}
+ _ interface{ Clone() Chunk } = Chunk{}
+ _ interface{ Clone() Dev } = Dev{}
+ _ interface{ Clone() DevExtent } = DevExtent{}
+ _ interface{ Clone() DevStats } = DevStats{}
+ _ interface{ Clone() DirEntry } = DirEntry{}
+ _ interface{ Clone() Empty } = Empty{}
+ _ interface{ Clone() Extent } = Extent{}
+ _ interface{ Clone() ExtentCSum } = ExtentCSum{}
+ _ interface{ Clone() ExtentDataRef } = ExtentDataRef{}
+ _ interface{ Clone() FileExtent } = FileExtent{}
+ _ interface{ Clone() FreeSpaceBitmap } = FreeSpaceBitmap{}
+ _ interface{ Clone() FreeSpaceHeader } = FreeSpaceHeader{}
+ _ interface{ Clone() FreeSpaceInfo } = FreeSpaceInfo{}
+ _ interface{ Clone() Inode } = Inode{}
+ _ interface{ Clone() InodeRefs } = InodeRefs{}
+ _ interface{ Clone() Metadata } = Metadata{}
+ _ interface{ Clone() QGroupInfo } = QGroupInfo{}
+ _ interface{ Clone() QGroupLimit } = QGroupLimit{}
+ _ interface{ Clone() QGroupStatus } = QGroupStatus{}
+ _ interface{ Clone() Root } = Root{}
+ _ interface{ Clone() RootRef } = RootRef{}
+ _ interface{ Clone() SharedDataRef } = SharedDataRef{}
+ _ interface{ Clone() UUIDMap } = UUIDMap{}
+)
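
The two var blocks at the end are compile-time assertions, a common Go idiom for making the generator's assumptions fail at build time: assigning a typed nil pointer to a blank Item-typed variable proves that every *T implements Item, and the anonymous-interface form proves that every T has a Clone() T method, something no single named interface can express because the return type differs per item. In isolation, with a hypothetical Gadget type, the idiom looks like this:

package main

import "fmt"

type Item interface {
	isItem()
	Free()
}

type Gadget struct{ V int }

func (*Gadget) isItem()        {}
func (g *Gadget) Free()        { *g = Gadget{} }
func (g Gadget) Clone() Gadget { return g }

// Compile-time assertions: if Gadget stops satisfying either contract,
// the package fails to build instead of failing at runtime.
var (
	_ Item                        = (*Gadget)(nil)
	_ interface{ Clone() Gadget } = Gadget{}
)

func main() {
	g := Gadget{V: 1}
	fmt.Println(g.Clone().V)
}
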
diff --git a/lib/btrfs/btrfstree/ops.go b/lib/btrfs/btrfstree/ops.go
index cdacef9..b01312f 100644
--- a/lib/btrfs/btrfstree/ops.go
+++ b/lib/btrfs/btrfstree/ops.go
@@ -144,6 +144,7 @@ func (fs TreeOperatorImpl) treeWalk(ctx context.Context, path TreePath, errHandl
}
}
node, err := fs.ReadNode(path)
+ defer FreeNodeRef(node)
if ctx.Err() != nil {
return
}
@@ -207,7 +208,7 @@ func (fs TreeOperatorImpl) treeWalk(ctx context.Context, path TreePath, errHandl
ToKey: item.Key,
ToMaxKey: item.Key,
})
- if errBody, isErr := item.Body.(btrfsitem.Error); isErr {
+ if errBody, isErr := item.Body.(*btrfsitem.Error); isErr {
if cbs.BadItem == nil {
errHandle(&TreeError{Path: itemPath, Err: errBody.Err})
} else {
@@ -255,6 +256,7 @@ func (fs TreeOperatorImpl) treeSearch(treeRoot TreeRoot, fn func(btrfsprim.Key,
}
node, err := fs.ReadNode(path)
if err != nil {
+ FreeNodeRef(node)
return nil, nil, err
}
@@ -273,6 +275,7 @@ func (fs TreeOperatorImpl) treeSearch(treeRoot TreeRoot, fn func(btrfsprim.Key,
return slices.Min(fn(kp.Key, math.MaxUint32), 0) // don't return >0; a key can't be "too low"
})
if !ok {
+ FreeNodeRef(node)
return nil, nil, iofs.ErrNotExist
}
toMaxKey := path.Node(-1).ToMaxKey
@@ -288,6 +291,7 @@ func (fs TreeOperatorImpl) treeSearch(treeRoot TreeRoot, fn func(btrfsprim.Key,
ToKey: node.Data.BodyInternal[lastGood].Key,
ToMaxKey: toMaxKey,
})
+ FreeNodeRef(node)
} else {
// leaf node
@@ -305,6 +309,7 @@ func (fs TreeOperatorImpl) treeSearch(treeRoot TreeRoot, fn func(btrfsprim.Key,
return fn(item.Key, item.BodySize)
})
if !ok {
+ FreeNodeRef(node)
return nil, nil, iofs.ErrNotExist
}
path = append(path, TreePathElem{
@@ -333,8 +338,10 @@ func (fs TreeOperatorImpl) prev(path TreePath, node *diskio.Ref[btrfsvol.Logical
path.Node(-1).FromItemIdx--
if path.Node(-1).ToNodeAddr != 0 {
if node.Addr != path.Node(-2).ToNodeAddr {
+ FreeNodeRef(node)
node, err = fs.ReadNode(path.Parent())
if err != nil {
+ FreeNodeRef(node)
return nil, nil, err
}
path.Node(-1).ToNodeAddr = node.Data.BodyInternal[path.Node(-1).FromItemIdx].BlockPtr
@@ -343,8 +350,10 @@ func (fs TreeOperatorImpl) prev(path TreePath, node *diskio.Ref[btrfsvol.Logical
// go down
for path.Node(-1).ToNodeAddr != 0 {
if node.Addr != path.Node(-1).ToNodeAddr {
+ FreeNodeRef(node)
node, err = fs.ReadNode(path)
if err != nil {
+ FreeNodeRef(node)
return nil, nil, err
}
}
@@ -369,8 +378,10 @@ func (fs TreeOperatorImpl) prev(path TreePath, node *diskio.Ref[btrfsvol.Logical
}
// return
if node.Addr != path.Node(-2).ToNodeAddr {
+ FreeNodeRef(node)
node, err = fs.ReadNode(path.Parent())
if err != nil {
+ FreeNodeRef(node)
return nil, nil, err
}
}
@@ -383,8 +394,10 @@ func (fs TreeOperatorImpl) next(path TreePath, node *diskio.Ref[btrfsvol.Logical
// go up
if node.Addr != path.Node(-2).ToNodeAddr {
+ FreeNodeRef(node)
node, err = fs.ReadNode(path.Parent())
if err != nil {
+ FreeNodeRef(node)
return nil, nil, err
}
path.Node(-2).ToNodeLevel = node.Data.Head.Level
@@ -395,8 +408,10 @@ func (fs TreeOperatorImpl) next(path TreePath, node *diskio.Ref[btrfsvol.Logical
return nil, nil, nil
}
if node.Addr != path.Node(-2).ToNodeAddr {
+ FreeNodeRef(node)
node, err = fs.ReadNode(path.Parent())
if err != nil {
+ FreeNodeRef(node)
return nil, nil, err
}
path.Node(-2).ToNodeLevel = node.Data.Head.Level
@@ -406,8 +421,10 @@ func (fs TreeOperatorImpl) next(path TreePath, node *diskio.Ref[btrfsvol.Logical
path.Node(-1).FromItemIdx++
if path.Node(-1).ToNodeAddr != 0 {
if node.Addr != path.Node(-2).ToNodeAddr {
+ FreeNodeRef(node)
node, err = fs.ReadNode(path.Parent())
if err != nil {
+ FreeNodeRef(node)
return nil, nil, err
}
path.Node(-1).ToNodeAddr = node.Data.BodyInternal[path.Node(-1).FromItemIdx].BlockPtr
@@ -416,8 +433,10 @@ func (fs TreeOperatorImpl) next(path TreePath, node *diskio.Ref[btrfsvol.Logical
// go down
for path.Node(-1).ToNodeAddr != 0 {
if node.Addr != path.Node(-1).ToNodeAddr {
+ FreeNodeRef(node)
node, err = fs.ReadNode(path)
if err != nil {
+ FreeNodeRef(node)
return nil, nil, err
}
path.Node(-1).ToNodeLevel = node.Data.Head.Level
@@ -447,8 +466,10 @@ func (fs TreeOperatorImpl) next(path TreePath, node *diskio.Ref[btrfsvol.Logical
}
// return
if node.Addr != path.Node(-2).ToNodeAddr {
+ FreeNodeRef(node)
node, err = fs.ReadNode(path.Parent())
if err != nil {
+ FreeNodeRef(node)
return nil, nil, err
}
}
@@ -469,7 +490,10 @@ func (fs TreeOperatorImpl) TreeSearch(treeID btrfsprim.ObjID, fn func(btrfsprim.
if err != nil {
return Item{}, err
}
- return node.Data.BodyLeaf[path.Node(-1).FromItemIdx], nil
+ item := node.Data.BodyLeaf[path.Node(-1).FromItemIdx]
+ item.Body = item.Body.CloneItem()
+ FreeNodeRef(node)
+ return item, nil
}
// KeySearch returns a comparator suitable to be passed to TreeSearch.
@@ -506,7 +530,8 @@ func (fs TreeOperatorImpl) TreeSearchAll(treeID btrfsprim.ObjID, fn func(btrfspr
ret := []Item{middleItem}
var errs derror.MultiError
- for prevPath, prevNode := middlePath, middleNode; true; {
+ prevPath, prevNode := middlePath, middleNode
+ for {
prevPath, prevNode, err = fs.prev(prevPath, prevNode)
if err != nil {
errs = append(errs, err)
@@ -519,10 +544,21 @@ func (fs TreeOperatorImpl) TreeSearchAll(treeID btrfsprim.ObjID, fn func(btrfspr
if fn(prevItem.Key, prevItem.BodySize) != 0 {
break
}
- ret = append(ret, prevItem)
+ item := prevItem
+ item.Body = item.Body.CloneItem()
+ ret = append(ret, item)
}
slices.Reverse(ret)
- for nextPath, nextNode := middlePath, middleNode; true; {
+ if prevNode.Addr != middlePath.Node(-1).ToNodeAddr {
+ FreeNodeRef(prevNode)
+ middleNode, err = fs.ReadNode(middlePath)
+ if err != nil {
+ FreeNodeRef(middleNode)
+ return nil, err
+ }
+ }
+ nextPath, nextNode := middlePath, middleNode
+ for {
nextPath, nextNode, err = fs.next(nextPath, nextNode)
if err != nil {
errs = append(errs, err)
@@ -535,8 +571,11 @@ func (fs TreeOperatorImpl) TreeSearchAll(treeID btrfsprim.ObjID, fn func(btrfspr
if fn(nextItem.Key, nextItem.BodySize) != 0 {
break
}
- ret = append(ret, nextItem)
+ item := nextItem
+ item.Body = item.Body.CloneItem()
+ ret = append(ret, item)
}
+ FreeNodeRef(nextNode)
if errs != nil {
err = errs
}
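
A note on the FreeNodeRef placement above: treeWalk can release its node with a single defer because the variable is never reassigned, but prev, next, treeSearch, and TreeSearchAll reassign node every time the walk moves, so a defer there would only release the last reference; instead the old reference is freed right before each ReadNode and on every early return. A generic sketch of that discipline, with a hypothetical Ref/readNode/freeRef trio rather than the repo's diskio.Ref API:

package main

import "fmt"

type Ref struct{ Addr int }

func readNode(addr int) (*Ref, error) { return &Ref{Addr: addr}, nil }

// freeRef stands in for FreeNodeRef; it tolerates nil so error paths can
// call it unconditionally.
func freeRef(r *Ref) {
	if r == nil {
		return
	}
	fmt.Println("freed node", r.Addr)
}

// walk visits a chain of node addresses. Because ref is reassigned on
// every step, a single "defer freeRef(ref)" would only release the last
// node; each old reference is freed right before it is replaced.
func walk(addrs []int) error {
	var ref *Ref
	for _, addr := range addrs {
		freeRef(ref) // release the previous node before loading the next
		var err error
		ref, err = readNode(addr)
		if err != nil {
			freeRef(ref)
			return err
		}
		fmt.Println("visiting node", ref.Addr)
	}
	freeRef(ref) // release the final node
	return nil
}

func main() {
	_ = walk([]int{10, 20, 30})
}

TreeSearch and TreeSearchAll pair this with item.Body.CloneItem() so that the items they hand back survive the node reference being freed.
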
diff --git a/lib/btrfs/btrfstree/root.go b/lib/btrfs/btrfstree/root.go
index 319904b..ace2b49 100644
--- a/lib/btrfs/btrfstree/root.go
+++ b/lib/btrfs/btrfstree/root.go
@@ -72,14 +72,14 @@ func LookupTreeRoot(fs TreeOperator, sb Superblock, treeID btrfsprim.ObjID) (*Tr
return nil, err
}
switch rootItemBody := rootItem.Body.(type) {
- case btrfsitem.Root:
+ case *btrfsitem.Root:
return &TreeRoot{
TreeID: treeID,
RootNode: rootItemBody.ByteNr,
Level: rootItemBody.Level,
Generation: rootItemBody.Generation,
}, nil
- case btrfsitem.Error:
+ case *btrfsitem.Error:
return nil, fmt.Errorf("malformed ROOT_ITEM for tree %v: %w", treeID, rootItemBody.Err)
default:
panic(fmt.Errorf("should not happen: ROOT_ITEM has unexpected item type: %T", rootItemBody))
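
These hunks are the knock-on effect of pooling: UnmarshalItem now stores pointers (e.g. *btrfsitem.Root) in the Item interface, so every type switch and assertion has to name the pointer type. Here the old value cases can no longer match, because only the pointer types implement Item now; in the more general case, where a type satisfies an interface through value receivers, a stale value case simply stops firing, as this small sketch with a hypothetical Root type shows:

package main

import "fmt"

type Item interface{ isItem() }

type Root struct{ ByteNr uint64 }

func (Root) isItem() {} // value receiver, so both Root and *Root satisfy Item

func describe(it Item) string {
	switch b := it.(type) {
	case *Root:
		return fmt.Sprintf("pointer root at %d", b.ByteNr)
	case Root:
		return fmt.Sprintf("value root at %d", b.ByteNr)
	default:
		return "unknown"
	}
}

func main() {
	// Once the decoder starts handing out pooled pointers, only the
	// pointer case matches; a leftover value case is dead code.
	fmt.Println(describe(&Root{ByteNr: 42})) // "pointer root at 42"
	fmt.Println(describe(Root{ByteNr: 42}))  // "value root at 42"
}
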
diff --git a/lib/btrfs/btrfstree/types_node.go b/lib/btrfs/btrfstree/types_node.go
index d9d7118..fd4c939 100644
--- a/lib/btrfs/btrfstree/types_node.go
+++ b/lib/btrfs/btrfstree/types_node.go
@@ -8,7 +8,9 @@ import (
"encoding/binary"
"errors"
"fmt"
+ "unsafe"
+ "git.lukeshu.com/go/typedsync"
"github.com/datawire/dlib/derror"
"git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
@@ -21,6 +23,13 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/fmtutil"
)
+var (
+ nodeHeaderSize = binstruct.StaticSize(NodeHeader{})
+ keyPointerSize = binstruct.StaticSize(KeyPointer{})
+ itemHeaderSize = binstruct.StaticSize(ItemHeader{})
+ csumSize = binstruct.StaticSize(btrfssum.CSum{})
+)
+
type NodeFlags uint64
const sizeofNodeFlags = 7
@@ -103,11 +112,11 @@ type NodeHeader struct {
// MaxItems returns the maximum possible valid value of
// .Head.NumItems.
func (node Node) MaxItems() uint32 {
- bodyBytes := node.Size - uint32(binstruct.StaticSize(NodeHeader{}))
+ bodyBytes := node.Size - uint32(nodeHeaderSize)
if node.Head.Level > 0 {
- return bodyBytes / uint32(binstruct.StaticSize(KeyPointer{}))
+ return bodyBytes / uint32(keyPointerSize)
} else {
- return bodyBytes / uint32(binstruct.StaticSize(ItemHeader{}))
+ return bodyBytes / uint32(itemHeaderSize)
}
}
@@ -144,7 +153,7 @@ func (node Node) CalculateChecksum() (btrfssum.CSum, error) {
if err != nil {
return btrfssum.CSum{}, err
}
- return node.ChecksumType.Sum(data[binstruct.StaticSize(btrfssum.CSum{}):])
+ return node.ChecksumType.Sum(data[csumSize:])
}
func (node Node) ValidateChecksum() error {
@@ -165,17 +174,16 @@ func (node *Node) UnmarshalBinary(nodeBuf []byte) (int, error) {
Size: uint32(len(nodeBuf)),
ChecksumType: node.ChecksumType,
}
- if len(nodeBuf) <= binstruct.StaticSize(NodeHeader{}) {
+ if len(nodeBuf) <= nodeHeaderSize {
return 0, fmt.Errorf("size must be greater than %v, but is %v",
- binstruct.StaticSize(NodeHeader{}),
- len(nodeBuf))
+ nodeHeaderSize, len(nodeBuf))
}
n, err := binstruct.Unmarshal(nodeBuf, &node.Head)
if err != nil {
return n, err
- } else if n != binstruct.StaticSize(NodeHeader{}) {
+ } else if n != nodeHeaderSize {
return n, fmt.Errorf("header consumed %v bytes but expected %v",
- n, binstruct.StaticSize(NodeHeader{}))
+ n, nodeHeaderSize)
}
if node.Head.Level > 0 {
_n, err := node.unmarshalInternal(nodeBuf[n:])
@@ -201,10 +209,9 @@ func (node Node) MarshalBinary() ([]byte, error) {
if node.Size == 0 {
return nil, fmt.Errorf(".Size must be set")
}
- if node.Size <= uint32(binstruct.StaticSize(NodeHeader{})) {
+ if node.Size <= uint32(nodeHeaderSize) {
return nil, fmt.Errorf(".Size must be greater than %v, but is %v",
- binstruct.StaticSize(NodeHeader{}),
- node.Size)
+ nodeHeaderSize, node.Size)
}
if node.Head.Level > 0 {
node.Head.NumItems = uint32(len(node.BodyInternal))
@@ -217,19 +224,19 @@ func (node Node) MarshalBinary() ([]byte, error) {
if bs, err := binstruct.Marshal(node.Head); err != nil {
return buf, err
} else {
- if len(bs) != binstruct.StaticSize(NodeHeader{}) {
+ if len(bs) != nodeHeaderSize {
return nil, fmt.Errorf("header is %v bytes but expected %v",
- len(bs), binstruct.StaticSize(NodeHeader{}))
+ len(bs), nodeHeaderSize)
}
copy(buf, bs)
}
if node.Head.Level > 0 {
- if err := node.marshalInternalTo(buf[binstruct.StaticSize(NodeHeader{}):]); err != nil {
+ if err := node.marshalInternalTo(buf[nodeHeaderSize:]); err != nil {
return buf, err
}
} else {
- if err := node.marshalLeafTo(buf[binstruct.StaticSize(NodeHeader{}):]); err != nil {
+ if err := node.marshalLeafTo(buf[nodeHeaderSize:]); err != nil {
return buf, err
}
}
@@ -248,14 +255,13 @@ type KeyPointer struct {
func (node *Node) unmarshalInternal(bodyBuf []byte) (int, error) {
n := 0
- for i := uint32(0); i < node.Head.NumItems; i++ {
- var item KeyPointer
- _n, err := binstruct.Unmarshal(bodyBuf[n:], &item)
+ node.BodyInternal = make([]KeyPointer, node.Head.NumItems)
+ for i := range node.BodyInternal {
+ _n, err := binstruct.Unmarshal(bodyBuf[n:], &node.BodyInternal[i])
n += _n
if err != nil {
return n, fmt.Errorf("item %v: %w", i, err)
}
- node.BodyInternal = append(node.BodyInternal, item)
}
node.Padding = bodyBuf[n:]
return len(bodyBuf), nil
@@ -296,11 +302,24 @@ type ItemHeader struct {
binstruct.End `bin:"off=0x19"`
}
+var itemPool containers.SlicePool[Item]
+
+func (node *Node) Free() {
+ for i := range node.BodyLeaf {
+ node.BodyLeaf[i].Body.Free()
+ node.BodyLeaf[i] = Item{}
+ }
+ itemPool.Put(node.BodyLeaf)
+ *node = Node{}
+}
+
func (node *Node) unmarshalLeaf(bodyBuf []byte) (int, error) {
head := 0
tail := len(bodyBuf)
- for i := uint32(0); i < node.Head.NumItems; i++ {
- var itemHead ItemHeader
+ node.BodyLeaf = itemPool.Get(int(node.Head.NumItems))
+ var itemHead ItemHeader
+ for i := range node.BodyLeaf {
+ itemHead = ItemHeader{} // zero it out
n, err := binstruct.Unmarshal(bodyBuf[head:], &itemHead)
head += n
if err != nil {
@@ -324,11 +343,11 @@ func (node *Node) unmarshalLeaf(bodyBuf []byte) (int, error) {
tail = dataOff
dataBuf := bodyBuf[dataOff : dataOff+dataSize]
- node.BodyLeaf = append(node.BodyLeaf, Item{
+ node.BodyLeaf[i] = Item{
Key: itemHead.Key,
BodySize: itemHead.DataSize,
Body: btrfsitem.UnmarshalItem(itemHead.Key, node.ChecksumType, dataBuf),
- })
+ }
}
node.Padding = bodyBuf[head:tail]
@@ -374,9 +393,9 @@ func (node *Node) LeafFreeSpace() uint32 {
panic(fmt.Errorf("Node.LeafFreeSpace: not a leaf node"))
}
freeSpace := node.Size
- freeSpace -= uint32(binstruct.StaticSize(NodeHeader{}))
+ freeSpace -= uint32(nodeHeaderSize)
for _, item := range node.BodyLeaf {
- freeSpace -= uint32(binstruct.StaticSize(ItemHeader{}))
+ freeSpace -= uint32(itemHeaderSize)
bs, _ := binstruct.Marshal(item.Body)
freeSpace -= uint32(len(bs))
}
@@ -416,26 +435,49 @@ type IOError struct {
func (e *IOError) Error() string { return "i/o error: " + e.Err.Error() }
func (e *IOError) Unwrap() error { return e.Err }
+var bytePool containers.SlicePool[byte]
+
+var nodePool = typedsync.Pool[*diskio.Ref[int64, Node]]{
+ New: func() *diskio.Ref[int64, Node] {
+ return new(diskio.Ref[int64, Node])
+ },
+}
+
+func FreeNodeRef[Addr ~int64](ref *diskio.Ref[Addr, Node]) {
+ if ref == nil {
+ return
+ }
+ ref.Data.Free()
+ nodePool.Put((*diskio.Ref[int64, Node])(unsafe.Pointer(ref))) //nolint:gosec // I know it's unsafe.
+}
+
+func newNodeRef[Addr ~int64]() *diskio.Ref[Addr, Node] {
+ ret, _ := nodePool.Get()
+ return (*diskio.Ref[Addr, Node])(unsafe.Pointer(ret)) //nolint:gosec // I know it's unsafe.
+}
+
// It is possible that both a non-nil diskio.Ref and an error are
// returned. The error returned (if non-nil) is always of type
// *NodeError[Addr]. Notable errors that may be inside of the
// NodeError are ErrNotANode and *IOError.
func ReadNode[Addr ~int64](fs diskio.File[Addr], sb Superblock, addr Addr, exp NodeExpectations) (*diskio.Ref[Addr, Node], error) {
- if int(sb.NodeSize) < binstruct.StaticSize(NodeHeader{}) {
+ if int(sb.NodeSize) < nodeHeaderSize {
return nil, &NodeError[Addr]{
Op: "btrfstree.ReadNode", NodeAddr: addr,
Err: fmt.Errorf("superblock.NodeSize=%v is too small to contain even a node header (%v bytes)",
- sb.NodeSize, binstruct.StaticSize(NodeHeader{})),
+ sb.NodeSize, nodeHeaderSize),
}
}
- nodeBuf := make([]byte, sb.NodeSize)
+ nodeBuf := bytePool.Get(int(sb.NodeSize))
if _, err := fs.ReadAt(nodeBuf, addr); err != nil {
+ bytePool.Put(nodeBuf)
return nil, &NodeError[Addr]{Op: "btrfstree.ReadNode", NodeAddr: addr, Err: &IOError{Err: err}}
}
// parse (early)
- nodeRef := &diskio.Ref[Addr, Node]{
+ nodeRef := newNodeRef[Addr]()
+ *nodeRef = diskio.Ref[Addr, Node]{
File: fs,
Addr: addr,
Data: Node{
@@ -453,15 +495,18 @@ func ReadNode[Addr ~int64](fs diskio.File[Addr], sb Superblock, addr Addr, exp N
// sanity checking (that prevents the main parse)
if nodeRef.Data.Head.MetadataUUID != sb.EffectiveMetadataUUID() {
+ bytePool.Put(nodeBuf)
return nodeRef, &NodeError[Addr]{Op: "btrfstree.ReadNode", NodeAddr: addr, Err: ErrNotANode}
}
stored := nodeRef.Data.Head.Checksum
- calced, err := nodeRef.Data.ChecksumType.Sum(nodeBuf[binstruct.StaticSize(btrfssum.CSum{}):])
+ calced, err := nodeRef.Data.ChecksumType.Sum(nodeBuf[csumSize:])
if err != nil {
+ bytePool.Put(nodeBuf)
return nodeRef, &NodeError[Addr]{Op: "btrfstree.ReadNode", NodeAddr: addr, Err: err}
}
if stored != calced {
+ bytePool.Put(nodeBuf)
return nodeRef, &NodeError[Addr]{
Op: "btrfstree.ReadNode", NodeAddr: addr,
Err: fmt.Errorf("looks like a node but is corrupt: checksum mismatch: stored=%v calculated=%v",
@@ -481,9 +526,12 @@ func ReadNode[Addr ~int64](fs diskio.File[Addr], sb Superblock, addr Addr, exp N
// isn't useful.
if _, err := binstruct.Unmarshal(nodeBuf, &nodeRef.Data); err != nil {
+ bytePool.Put(nodeBuf)
return nodeRef, &NodeError[Addr]{Op: "btrfstree.ReadNode", NodeAddr: addr, Err: err}
}
+ bytePool.Put(nodeBuf)
+
// sanity checking (that doesn't prevent parsing)
var errs derror.MultiError
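
ReadNode and FreeNodeRef above recycle both the raw node buffer (bytePool) and the *diskio.Ref wrapper (nodePool), taking care to return the buffer on every error path. A simplified, stdlib-only sketch of that shape using sync.Pool; the size, names, and read callback are illustrative, not the repository's API:

    package main

    import (
        "fmt"
        "sync"
    )

    const nodeSize = 16384

    var nodeBufPool = sync.Pool{
        New: func() any { b := make([]byte, nodeSize); return &b },
    }

    // readNode fills a pooled buffer; the buffer goes back to the pool on error.
    func readNode(read func([]byte) error) (*[]byte, error) {
        buf := nodeBufPool.Get().(*[]byte)
        if err := read(*buf); err != nil {
            nodeBufPool.Put(buf) // return the buffer on every error path
            return nil, err
        }
        return buf, nil
    }

    func freeNode(buf *[]byte) {
        if buf != nil {
            nodeBufPool.Put(buf)
        }
    }

    func main() {
        buf, err := readNode(func(b []byte) error { copy(b, "node header..."); return nil })
        if err != nil {
            fmt.Println("read failed:", err)
            return
        }
        fmt.Printf("read %d bytes\n", len(*buf))
        freeNode(buf)
    }
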
diff --git a/lib/btrfs/btrfstree/types_superblock.go b/lib/btrfs/btrfstree/types_superblock.go
index 9258f9a..140d4a1 100644
--- a/lib/btrfs/btrfstree/types_superblock.go
+++ b/lib/btrfs/btrfstree/types_superblock.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -81,7 +81,7 @@ func (sb Superblock) CalculateChecksum() (btrfssum.CSum, error) {
if err != nil {
return btrfssum.CSum{}, err
}
- return sb.ChecksumType.Sum(data[binstruct.StaticSize(btrfssum.CSum{}):])
+ return sb.ChecksumType.Sum(data[csumSize:])
}
func (sb Superblock) ValidateChecksum() error {
diff --git a/lib/btrfs/btrfsvol/lvm.go b/lib/btrfs/btrfsvol/lvm.go
index 93ec438..51e2263 100644
--- a/lib/btrfs/btrfsvol/lvm.go
+++ b/lib/btrfs/btrfsvol/lvm.go
@@ -345,21 +345,23 @@ func (lv *LogicalVolume[PhysicalVolume]) maybeShortReadAt(dat []byte, laddr Logi
dat = dat[:maxlen]
}
- buf := make([]byte, len(dat))
+ buf := dat
first := true
for paddr := range paddrs {
dev, ok := lv.id2pv[paddr.Dev]
if !ok {
return 0, fmt.Errorf("device=%v does not exist", paddr.Dev)
}
+ if !first {
+ buf = make([]byte, len(buf))
+ }
if _, err := dev.ReadAt(buf, paddr.Addr); err != nil {
return 0, fmt.Errorf("read device=%v paddr=%v: %w", paddr.Dev, paddr.Addr, err)
}
- if first {
- copy(dat, buf)
- } else if !bytes.Equal(dat, buf) {
+ if !first && !bytes.Equal(dat, buf) {
return 0, fmt.Errorf("inconsistent stripes at laddr=%v len=%v", laddr, len(dat))
}
+ first = false
}
return len(dat), nil
}
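
The lvm.go change avoids an extra copy on the common single-stripe path: the first mirror is read directly into the caller's buffer, and only additional mirrors get a scratch buffer to compare against. A self-contained sketch of that consistency check, with in-memory mirrors standing in for dev.ReadAt:

    package main

    import (
        "bytes"
        "fmt"
    )

    func readMirrored(dat []byte, mirrors [][]byte) error {
        buf := dat
        first := true
        for _, m := range mirrors {
            if !first {
                buf = make([]byte, len(dat)) // scratch buffer for the extra copies
            }
            copy(buf, m) // stands in for dev.ReadAt(buf, paddr)
            if !first && !bytes.Equal(dat, buf) {
                return fmt.Errorf("inconsistent stripes")
            }
            first = false
        }
        return nil
    }

    func main() {
        dat := make([]byte, 4)
        err := readMirrored(dat, [][]byte{[]byte("abcd"), []byte("abcd")})
        fmt.Println(string(dat), err)
    }
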
diff --git a/lib/btrfs/csums.go b/lib/btrfs/csums.go
index a32f090..9e0b755 100644
--- a/lib/btrfs/csums.go
+++ b/lib/btrfs/csums.go
@@ -12,23 +12,31 @@ import (
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfstree"
"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/containers"
"git.lukeshu.com/btrfs-progs-ng/lib/diskio"
)
+// dat doesn't escape to the heap in .ReadAt(dat, …), but the compiler
+// can't prove that, so it heap-allocates the buffer on every call;
+// recycle these byte slices through a Pool instead.
+var blockPool containers.SlicePool[byte]
+
func ChecksumLogical(fs diskio.File[btrfsvol.LogicalAddr], alg btrfssum.CSumType, laddr btrfsvol.LogicalAddr) (btrfssum.CSum, error) {
- var dat [btrfssum.BlockSize]byte
- if _, err := fs.ReadAt(dat[:], laddr); err != nil {
+ dat := blockPool.Get(btrfssum.BlockSize)
+ defer blockPool.Put(dat)
+ if _, err := fs.ReadAt(dat, laddr); err != nil {
return btrfssum.CSum{}, err
}
- return alg.Sum(dat[:])
+ return alg.Sum(dat)
}
func ChecksumPhysical(dev *Device, alg btrfssum.CSumType, paddr btrfsvol.PhysicalAddr) (btrfssum.CSum, error) {
- var dat [btrfssum.BlockSize]byte
- if _, err := dev.ReadAt(dat[:], paddr); err != nil {
+ dat := blockPool.Get(btrfssum.BlockSize)
+ defer blockPool.Put(dat)
+ if _, err := dev.ReadAt(dat, paddr); err != nil {
return btrfssum.CSum{}, err
}
- return alg.Sum(dat[:])
+ return alg.Sum(dat)
}
func ChecksumQualifiedPhysical(fs *FS, alg btrfssum.CSumType, paddr btrfsvol.QualifiedPhysicalAddr) (btrfssum.CSum, error) {
@@ -60,9 +68,9 @@ func LookupCSum(fs btrfstree.TreeOperator, alg btrfssum.CSumType, laddr btrfsvol
return btrfssum.SumRun[btrfsvol.LogicalAddr]{}, fmt.Errorf("item type is %v, not EXTENT_CSUM", item.Key.ItemType)
}
switch body := item.Body.(type) {
- case btrfsitem.ExtentCSum:
+ case *btrfsitem.ExtentCSum:
return body.SumRun, nil
- case btrfsitem.Error:
+ case *btrfsitem.Error:
return btrfssum.SumRun[btrfsvol.LogicalAddr]{}, body.Err
default:
panic(fmt.Errorf("should not happen: EXTENT_CSUM has unexpected item type: %T", body))
diff --git a/lib/btrfs/io1_pv.go b/lib/btrfs/io1_pv.go
index 0e7cd9c..72d33f5 100644
--- a/lib/btrfs/io1_pv.go
+++ b/lib/btrfs/io1_pv.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -28,11 +28,13 @@ var SuperblockAddrs = []btrfsvol.PhysicalAddr{
0x40_0000_0000, // 256GiB
}
+var superblockSize = binstruct.StaticSize(btrfstree.Superblock{})
+
func (dev *Device) Superblocks() ([]*diskio.Ref[btrfsvol.PhysicalAddr, btrfstree.Superblock], error) {
if dev.cacheSuperblocks != nil {
return dev.cacheSuperblocks, nil
}
- superblockSize := btrfsvol.PhysicalAddr(binstruct.StaticSize(btrfstree.Superblock{}))
+ superblockSize := btrfsvol.PhysicalAddr(superblockSize)
sz := dev.Size()
diff --git a/lib/btrfs/io2_lv.go b/lib/btrfs/io2_lv.go
index ac7ea70..856ac20 100644
--- a/lib/btrfs/io2_lv.go
+++ b/lib/btrfs/io2_lv.go
@@ -173,13 +173,13 @@ func (fs *FS) initDev(ctx context.Context, sb btrfstree.Superblock) error {
return nil
}
switch itemBody := item.Body.(type) {
- case btrfsitem.Chunk:
+ case *btrfsitem.Chunk:
for _, mapping := range itemBody.Mappings(item.Key) {
if err := fs.LV.AddMapping(mapping); err != nil {
return err
}
}
- case btrfsitem.Error:
+ case *btrfsitem.Error:
// do nothing
default:
// This is a panic because the item decoder should not emit CHUNK_ITEM items as
diff --git a/lib/btrfs/io3_btree.go b/lib/btrfs/io3_btree.go
index 8ec4b41..18df98e 100644
--- a/lib/btrfs/io3_btree.go
+++ b/lib/btrfs/io3_btree.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker <lukeshu@lukeshu.com>
+// Copyright (C) 2022-2023 Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later
@@ -43,7 +43,7 @@ func (fs *FS) populateTreeUUIDs(ctx context.Context) {
},
btrfstree.TreeWalkHandler{
Item: func(_ btrfstree.TreePath, item btrfstree.Item) error {
- itemBody, ok := item.Body.(btrfsitem.Root)
+ itemBody, ok := item.Body.(*btrfsitem.Root)
if !ok {
return nil
}
diff --git a/lib/btrfs/io4_fs.go b/lib/btrfs/io4_fs.go
index fce9c76..56cf266 100644
--- a/lib/btrfs/io4_fs.go
+++ b/lib/btrfs/io4_fs.go
@@ -75,10 +75,10 @@ type Subvolume struct {
rootVal btrfsitem.Root
rootErr error
- bareInodeCache *containers.LRUCache[btrfsprim.ObjID, *BareInode]
- fullInodeCache *containers.LRUCache[btrfsprim.ObjID, *FullInode]
- dirCache *containers.LRUCache[btrfsprim.ObjID, *Dir]
- fileCache *containers.LRUCache[btrfsprim.ObjID, *File]
+ bareInodeCache containers.ARCache[btrfsprim.ObjID, *BareInode]
+ fullInodeCache containers.ARCache[btrfsprim.ObjID, *FullInode]
+ dirCache containers.ARCache[btrfsprim.ObjID, *Dir]
+ fileCache containers.ARCache[btrfsprim.ObjID, *File]
}
func (sv *Subvolume) init() {
@@ -88,19 +88,19 @@ func (sv *Subvolume) init() {
sv.rootErr = err
} else {
switch rootBody := root.Body.(type) {
- case btrfsitem.Root:
- sv.rootVal = rootBody
- case btrfsitem.Error:
+ case *btrfsitem.Root:
+ sv.rootVal = rootBody.Clone()
+ case *btrfsitem.Error:
sv.rootErr = fmt.Errorf("FS_TREE ROOT_ITEM has malformed body: %w", rootBody.Err)
default:
panic(fmt.Errorf("should not happen: ROOT_ITEM has unexpected item type: %T", rootBody))
}
}
- sv.bareInodeCache = containers.NewLRUCache[btrfsprim.ObjID, *BareInode](textui.Tunable(128))
- sv.fullInodeCache = containers.NewLRUCache[btrfsprim.ObjID, *FullInode](textui.Tunable(128))
- sv.dirCache = containers.NewLRUCache[btrfsprim.ObjID, *Dir](textui.Tunable(128))
- sv.fileCache = containers.NewLRUCache[btrfsprim.ObjID, *File](textui.Tunable(128))
+ sv.bareInodeCache.MaxLen = textui.Tunable(128)
+ sv.fullInodeCache.MaxLen = textui.Tunable(128)
+ sv.dirCache.MaxLen = textui.Tunable(128)
+ sv.fileCache.MaxLen = textui.Tunable(128)
})
}
@@ -111,7 +111,7 @@ func (sv *Subvolume) GetRootInode() (btrfsprim.ObjID, error) {
func (sv *Subvolume) LoadBareInode(inode btrfsprim.ObjID) (*BareInode, error) {
sv.init()
- val := sv.bareInodeCache.GetOrElse(inode, func() (val *BareInode) {
+ val := containers.LoadOrElse[btrfsprim.ObjID, *BareInode](&sv.bareInodeCache, inode, func(inode btrfsprim.ObjID) (val *BareInode) {
val = &BareInode{
Inode: inode,
}
@@ -126,9 +126,10 @@ func (sv *Subvolume) LoadBareInode(inode btrfsprim.ObjID) (*BareInode, error) {
}
switch itemBody := item.Body.(type) {
- case btrfsitem.Inode:
- val.InodeItem = &itemBody
- case btrfsitem.Error:
+ case *btrfsitem.Inode:
+ bodyCopy := itemBody.Clone()
+ val.InodeItem = &bodyCopy
+ case *btrfsitem.Error:
val.Errs = append(val.Errs, fmt.Errorf("malformed inode: %w", itemBody.Err))
default:
panic(fmt.Errorf("should not happen: INODE_ITEM has unexpected item type: %T", itemBody))
@@ -144,7 +145,7 @@ func (sv *Subvolume) LoadBareInode(inode btrfsprim.ObjID) (*BareInode, error) {
func (sv *Subvolume) LoadFullInode(inode btrfsprim.ObjID) (*FullInode, error) {
sv.init()
- val := sv.fullInodeCache.GetOrElse(inode, func() (val *FullInode) {
+ val := containers.LoadOrElse[btrfsprim.ObjID, *FullInode](&sv.fullInodeCache, inode, func(inode btrfsprim.ObjID) (val *FullInode) {
val = &FullInode{
BareInode: BareInode{
Inode: inode,
@@ -164,24 +165,25 @@ func (sv *Subvolume) LoadFullInode(inode btrfsprim.ObjID) (*FullInode, error) {
switch item.Key.ItemType {
case btrfsitem.INODE_ITEM_KEY:
switch itemBody := item.Body.(type) {
- case btrfsitem.Inode:
+ case *btrfsitem.Inode:
if val.InodeItem != nil {
if !reflect.DeepEqual(itemBody, *val.InodeItem) {
val.Errs = append(val.Errs, fmt.Errorf("multiple inodes"))
}
continue
}
- val.InodeItem = &itemBody
- case btrfsitem.Error:
+ bodyCopy := itemBody.Clone()
+ val.InodeItem = &bodyCopy
+ case *btrfsitem.Error:
val.Errs = append(val.Errs, fmt.Errorf("malformed INODE_ITEM: %w", itemBody.Err))
default:
panic(fmt.Errorf("should not happen: INODE_ITEM has unexpected item type: %T", itemBody))
}
case btrfsitem.XATTR_ITEM_KEY:
switch itemBody := item.Body.(type) {
- case btrfsitem.DirEntry:
+ case *btrfsitem.DirEntry:
val.XAttrs[string(itemBody.Name)] = string(itemBody.Data)
- case btrfsitem.Error:
+ case *btrfsitem.Error:
val.Errs = append(val.Errs, fmt.Errorf("malformed XATTR_ITEM: %w", itemBody.Err))
default:
panic(fmt.Errorf("should not happen: XATTR_ITEM has unexpected item type: %T", itemBody))
@@ -200,7 +202,7 @@ func (sv *Subvolume) LoadFullInode(inode btrfsprim.ObjID) (*FullInode, error) {
func (sv *Subvolume) LoadDir(inode btrfsprim.ObjID) (*Dir, error) {
sv.init()
- val := sv.dirCache.GetOrElse(inode, func() (val *Dir) {
+ val := containers.LoadOrElse[btrfsprim.ObjID, *Dir](&sv.dirCache, inode, func(inode btrfsprim.ObjID) (val *Dir) {
val = new(Dir)
fullInode, err := sv.LoadFullInode(inode)
if err != nil {
@@ -225,15 +227,15 @@ func (dir *Dir) populate() {
switch item.Key.ItemType {
case btrfsitem.INODE_REF_KEY:
switch body := item.Body.(type) {
- case btrfsitem.InodeRefs:
- if len(body) != 1 {
+ case *btrfsitem.InodeRefs:
+ if len(body.Refs) != 1 {
dir.Errs = append(dir.Errs, fmt.Errorf("INODE_REF item with %d entries on a directory",
- len(body)))
+ len(body.Refs)))
continue
}
ref := InodeRef{
Inode: btrfsprim.ObjID(item.Key.Offset),
- InodeRef: body[0],
+ InodeRef: body.Refs[0],
}
if dir.DotDot != nil {
if !reflect.DeepEqual(ref, *dir.DotDot) {
@@ -242,14 +244,14 @@ func (dir *Dir) populate() {
continue
}
dir.DotDot = &ref
- case btrfsitem.Error:
+ case *btrfsitem.Error:
dir.Errs = append(dir.Errs, fmt.Errorf("malformed INODE_REF: %w", body.Err))
default:
panic(fmt.Errorf("should not happen: INODE_REF has unexpected item type: %T", body))
}
case btrfsitem.DIR_ITEM_KEY:
switch entry := item.Body.(type) {
- case btrfsitem.DirEntry:
+ case *btrfsitem.DirEntry:
namehash := btrfsitem.NameHash(entry.Name)
if namehash != item.Key.Offset {
dir.Errs = append(dir.Errs, fmt.Errorf("direntry crc32c mismatch: key=%#x crc32c(%q)=%#x",
@@ -262,8 +264,8 @@ func (dir *Dir) populate() {
}
continue
}
- dir.ChildrenByName[string(entry.Name)] = entry
- case btrfsitem.Error:
+ dir.ChildrenByName[string(entry.Name)] = entry.Clone()
+ case *btrfsitem.Error:
dir.Errs = append(dir.Errs, fmt.Errorf("malformed DIR_ITEM: %w", entry.Err))
default:
panic(fmt.Errorf("should not happen: DIR_ITEM has unexpected item type: %T", entry))
@@ -271,15 +273,15 @@ func (dir *Dir) populate() {
case btrfsitem.DIR_INDEX_KEY:
index := item.Key.Offset
switch entry := item.Body.(type) {
- case btrfsitem.DirEntry:
+ case *btrfsitem.DirEntry:
if other, exists := dir.ChildrenByIndex[index]; exists {
if !reflect.DeepEqual(entry, other) {
dir.Errs = append(dir.Errs, fmt.Errorf("multiple instances of direntry index %v", index))
}
continue
}
- dir.ChildrenByIndex[index] = entry
- case btrfsitem.Error:
+ dir.ChildrenByIndex[index] = entry.Clone()
+ case *btrfsitem.Error:
dir.Errs = append(dir.Errs, fmt.Errorf("malformed DIR_INDEX: %w", entry.Err))
default:
panic(fmt.Errorf("should not happen: DIR_INDEX has unexpected item type: %T", entry))
@@ -336,7 +338,7 @@ func (dir *Dir) AbsPath() (string, error) {
func (sv *Subvolume) LoadFile(inode btrfsprim.ObjID) (*File, error) {
sv.init()
- val := sv.fileCache.GetOrElse(inode, func() (val *File) {
+ val := containers.LoadOrElse[btrfsprim.ObjID, *File](&sv.fileCache, inode, func(inode btrfsprim.ObjID) (val *File) {
val = new(File)
fullInode, err := sv.LoadFullInode(inode)
if err != nil {
@@ -361,12 +363,12 @@ func (file *File) populate() {
// TODO
case btrfsitem.EXTENT_DATA_KEY:
switch itemBody := item.Body.(type) {
- case btrfsitem.FileExtent:
+ case *btrfsitem.FileExtent:
file.Extents = append(file.Extents, FileExtent{
OffsetWithinFile: int64(item.Key.Offset),
- FileExtent: itemBody,
+ FileExtent: *itemBody,
})
- case btrfsitem.Error:
+ case *btrfsitem.Error:
file.Errs = append(file.Errs, fmt.Errorf("malformed EXTENT_DATA: %w", itemBody.Err))
default:
panic(fmt.Errorf("should not happen: EXTENT_DATA has unexpected item type: %T", itemBody))