summaryrefslogtreecommitdiff
path: root/lib/btrfs
diff options
context:
space:
mode:
Diffstat (limited to 'lib/btrfs')
-rw-r--r--lib/btrfs/Makefile79
-rw-r--r--lib/btrfs/aliases.go19
-rw-r--r--lib/btrfs/aliases_objid.go37
-rw-r--r--lib/btrfs/btrfsitem/item_blockgroup.go16
-rw-r--r--lib/btrfs/btrfsitem/item_chunk.go90
-rw-r--r--lib/btrfs/btrfsitem/item_dev.go33
-rw-r--r--lib/btrfs/btrfsitem/item_devextent.go32
-rw-r--r--lib/btrfs/btrfsitem/item_dir.go115
-rw-r--r--lib/btrfs/btrfsitem/item_empty.go9
-rw-r--r--lib/btrfs/btrfsitem/item_extent.go165
-rw-r--r--lib/btrfs/btrfsitem/item_extentcsum.go39
-rw-r--r--lib/btrfs/btrfsitem/item_extentdataref.go14
-rw-r--r--lib/btrfs/btrfsitem/item_fileextent.go137
-rw-r--r--lib/btrfs/btrfsitem/item_freespacebitmap.go12
-rw-r--r--lib/btrfs/btrfsitem/item_freespaceinfo.go11
-rw-r--r--lib/btrfs/btrfsitem/item_inode.go64
-rw-r--r--lib/btrfs/btrfsitem/item_inoderef.go35
-rw-r--r--lib/btrfs/btrfsitem/item_metadata.go44
-rw-r--r--lib/btrfs/btrfsitem/item_persistent.go19
-rw-r--r--lib/btrfs/btrfsitem/item_root.go51
-rw-r--r--lib/btrfs/btrfsitem/item_rootref.go34
-rw-r--r--lib/btrfs/btrfsitem/item_shareddataref.go10
-rw-r--r--lib/btrfs/btrfsitem/item_untyped.go14
-rw-r--r--lib/btrfs/btrfsitem/item_uuid.go16
-rw-r--r--lib/btrfs/btrfsitem/items.go77
-rw-r--r--lib/btrfs/btrfsitem/items.txt29
-rw-r--r--lib/btrfs/btrfsitem/items_gen.go97
-rw-r--r--lib/btrfs/btrfssum/csum.go78
-rw-r--r--lib/btrfs/btrfssum/csum_test.go35
-rw-r--r--lib/btrfs/btrfsvol/addr.go47
-rw-r--r--lib/btrfs/btrfsvol/addr_test.go36
-rw-r--r--lib/btrfs/btrfsvol/blockgroupflags.go51
-rw-r--r--lib/btrfs/btrfsvol/chunk.go96
-rw-r--r--lib/btrfs/btrfsvol/devext.go89
-rw-r--r--lib/btrfs/btrfsvol/lvm.go355
-rw-r--r--lib/btrfs/internal/itemtype.go77
-rw-r--r--lib/btrfs/internal/misc.go37
-rw-r--r--lib/btrfs/internal/objid.go144
-rw-r--r--lib/btrfs/io1_pv.go96
-rw-r--r--lib/btrfs/io2_lv.go187
-rw-r--r--lib/btrfs/io3_btree.go562
-rw-r--r--lib/btrfs/io4_fs.go404
-rw-r--r--lib/btrfs/types_node.go411
-rw-r--r--lib/btrfs/types_superblock.go233
44 files changed, 4236 insertions, 0 deletions
diff --git a/lib/btrfs/Makefile b/lib/btrfs/Makefile
new file mode 100644
index 0000000..43d665f
--- /dev/null
+++ b/lib/btrfs/Makefile
@@ -0,0 +1,79 @@
.DEFAULT_GOAL = all
.SECONDARY:
.DELETE_ON_ERROR:

# Scrape the "KEY=VAL" codegen comments off of the `type` declarations
# in btrfsitem/item_*.go into a sorted "KEY=VAL TypeName" listing that
# the other codegen rules consume.
btrfsitem/items.txt: btrfsitem $(wildcard btrfsitem/item_*.go) $(MAKEFILE_LIST)
	{ \
	sed -En 's,^type (\S+) .* // (.*=.*),\1 \2,p' $(filter btrfsitem/item_%.go,$^) | while read -r typ keys; do \
		for key in $$keys; do \
			echo "$$key" "$$typ"; \
		done; \
	done; \
	} | LC_COLLATE=C sort >$@
files += btrfsitem/items.txt

# Generate the *_KEY constants and the key-type→Go-type lookup tables
# for package btrfsitem.
btrfsitem/items_gen.go: btrfsitem/items.txt $(MAKEFILE_LIST)
	{ \
	echo '// Code generated by Make. DO NOT EDIT.'; \
	echo; \
	echo 'package $(@D)'; \
	echo 'import ('; \
	echo '"reflect"'; \
	echo; \
	echo '"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"'; \
	echo ')'; \
	echo 'const ('; \
	sed -E 's,(.*)=(.*) (.*),\1_KEY=internal.\1_KEY,' $<; \
	echo ')'; \
	echo 'var keytype2gotype = map[Type]reflect.Type{'; \
	sed -En 's|(.*)=([^:]*) (.*)|\1_KEY: reflect.TypeOf(\3{}),|p' $<; \
	echo '}'; \
	echo 'var untypedObjID2gotype = map[internal.ObjID]reflect.Type{'; \
	sed -En 's|UNTYPED=0:(.*) (.*)|internal.\1: reflect.TypeOf(\2{}),|p' $<; \
	echo '}'; \
	sed -En 's,(.*)=(.*) (.+),\3,p' $< | LC_COLLATE=C sort -u | sed 's,.*,func (&) isItem() {},'; \
	} | gofmt >$@
files += btrfsitem/items_gen.go

# Generate the ItemType constants for package internal.
internal/itemtype.go: btrfsitem/items.txt $(MAKEFILE_LIST)
	{ \
	echo '// Code generated by Make. DO NOT EDIT.'; \
	echo; \
	echo 'package $(@D)'; \
	echo 'import "fmt"'; \
	echo 'type ItemType uint8'; \
	echo 'const ('; \
	sed -E 's,(.*)=([^:]*)(:.*)? (.*),\1_KEY=ItemType(\2),' $< | uniq; \
	echo ')'; \
	echo 'func (t ItemType) String() string {'; \
	echo ' names := map[ItemType]string{'; \
	sed -E 's@(.*)=(.*) (.*)@\1_KEY: "\1",@' $< | sed 's/"UUID_/&KEY_/'; \
	echo ' }'; \
	echo ' if name, ok := names[t]; ok {'; \
	echo ' return name'; \
	echo ' }'; \
	echo ' return fmt.Sprintf("%d", t)'; \
	echo '}'; \
	} | gofmt >$@
files += internal/itemtype.go

# Re-export the *_OBJECTID(S) constants from internal/objid.go as
# package-btrfs constants.
aliases_objid.go: internal/objid.go $(MAKEFILE_LIST)
	{ \
	echo '// Code generated by Make. DO NOT EDIT.'; \
	echo; \
	echo 'package btrfs'; \
	echo 'import ('; \
	echo '"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"'; \
	echo ')'; \
	echo 'const('; \
	sed -En 's/^\s*(\S*_OBJECTIDS?)\s*=.*/\1 = internal.\1/p' <$<; \
	echo ')'; \
	} | gofmt >$@
files += aliases_objid.go

all: $(files)
.PHONY: all

clean:
	rm -f -- $(files)
# BUG FIX: this previously re-declared `all` as phony, leaving `clean`
# undeclared — a file named "clean" would have masked the rule.
.PHONY: clean
diff --git a/lib/btrfs/aliases.go b/lib/btrfs/aliases.go
new file mode 100644
index 0000000..75cbf8b
--- /dev/null
+++ b/lib/btrfs/aliases.go
@@ -0,0 +1,19 @@
package btrfs

import (
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
	"git.lukeshu.com/btrfs-progs-ng/lib/util"
)

// Type aliases re-exporting the internal/util types, so that callers
// of package btrfs do not need to import lib/btrfs/internal directly.
type (
	// (u)int64 types

	Generation = internal.Generation
	ObjID      = internal.ObjID

	// complex types

	Key  = internal.Key
	Time = internal.Time
	UUID = util.UUID
)
diff --git a/lib/btrfs/aliases_objid.go b/lib/btrfs/aliases_objid.go
new file mode 100644
index 0000000..6ccbbad
--- /dev/null
+++ b/lib/btrfs/aliases_objid.go
@@ -0,0 +1,37 @@
// Code generated by Make. DO NOT EDIT.
//
// NOTE(review): produced by the `aliases_objid.go` rule in ./Makefile
// from internal/objid.go; change those sources, not this file.

package btrfs

import (
	"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
)

const (
	ROOT_TREE_OBJECTID        = internal.ROOT_TREE_OBJECTID
	EXTENT_TREE_OBJECTID      = internal.EXTENT_TREE_OBJECTID
	CHUNK_TREE_OBJECTID       = internal.CHUNK_TREE_OBJECTID
	DEV_TREE_OBJECTID         = internal.DEV_TREE_OBJECTID
	FS_TREE_OBJECTID          = internal.FS_TREE_OBJECTID
	ROOT_TREE_DIR_OBJECTID    = internal.ROOT_TREE_DIR_OBJECTID
	CSUM_TREE_OBJECTID        = internal.CSUM_TREE_OBJECTID
	QUOTA_TREE_OBJECTID       = internal.QUOTA_TREE_OBJECTID
	UUID_TREE_OBJECTID        = internal.UUID_TREE_OBJECTID
	FREE_SPACE_TREE_OBJECTID  = internal.FREE_SPACE_TREE_OBJECTID
	BLOCK_GROUP_TREE_OBJECTID = internal.BLOCK_GROUP_TREE_OBJECTID
	DEV_STATS_OBJECTID        = internal.DEV_STATS_OBJECTID
	BALANCE_OBJECTID          = internal.BALANCE_OBJECTID
	ORPHAN_OBJECTID           = internal.ORPHAN_OBJECTID
	TREE_LOG_OBJECTID         = internal.TREE_LOG_OBJECTID
	TREE_LOG_FIXUP_OBJECTID   = internal.TREE_LOG_FIXUP_OBJECTID
	TREE_RELOC_OBJECTID       = internal.TREE_RELOC_OBJECTID
	DATA_RELOC_TREE_OBJECTID  = internal.DATA_RELOC_TREE_OBJECTID
	EXTENT_CSUM_OBJECTID      = internal.EXTENT_CSUM_OBJECTID
	FREE_SPACE_OBJECTID       = internal.FREE_SPACE_OBJECTID
	FREE_INO_OBJECTID         = internal.FREE_INO_OBJECTID
	MULTIPLE_OBJECTIDS        = internal.MULTIPLE_OBJECTIDS
	FIRST_FREE_OBJECTID       = internal.FIRST_FREE_OBJECTID
	LAST_FREE_OBJECTID        = internal.LAST_FREE_OBJECTID
	FIRST_CHUNK_TREE_OBJECTID = internal.FIRST_CHUNK_TREE_OBJECTID
	DEV_ITEMS_OBJECTID        = internal.DEV_ITEMS_OBJECTID
	EMPTY_SUBVOL_DIR_OBJECTID = internal.EMPTY_SUBVOL_DIR_OBJECTID
)
diff --git a/lib/btrfs/btrfsitem/item_blockgroup.go b/lib/btrfs/btrfsitem/item_blockgroup.go
new file mode 100644
index 0000000..71d960d
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_blockgroup.go
@@ -0,0 +1,16 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+)
+
// BlockGroup is the payload of a BLOCK_GROUP_ITEM.
//
// key.objectid = logical_addr
// key.offset = size of chunk
type BlockGroup struct { // BLOCK_GROUP_ITEM=192
	Used          int64                    `bin:"off=0, siz=8"` // bytes used within the group (per field name — confirm against kernel)
	ChunkObjectID internal.ObjID           `bin:"off=8, siz=8"` // always BTRFS_FIRST_CHUNK_TREE_OBJECTID
	Flags         btrfsvol.BlockGroupFlags `bin:"off=16, siz=8"`
	binstruct.End `bin:"off=24"`
}
diff --git a/lib/btrfs/btrfsitem/item_chunk.go b/lib/btrfs/btrfsitem/item_chunk.go
new file mode 100644
index 0000000..9256651
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_chunk.go
@@ -0,0 +1,90 @@
+package btrfsitem
+
+import (
+ "fmt"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
// Maps logical address to physical.
//
// key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID
// key.offset = logical_addr
type Chunk struct { // CHUNK_ITEM=228
	Head    ChunkHeader
	Stripes []ChunkStripe // one entry per stripe; the on-disk count is .Head.NumStripes
}

// ChunkHeader is the fixed-size leading portion of a Chunk item; the
// variable-length stripe array follows it.
type ChunkHeader struct {
	Size           btrfsvol.AddrDelta       `bin:"off=0x0, siz=0x8"` // size of the mapped logical region
	Owner          internal.ObjID           `bin:"off=0x8, siz=0x8"` // root referencing this chunk (always EXTENT_TREE_OBJECTID=2)
	StripeLen      uint64                   `bin:"off=0x10, siz=0x8"` // ???
	Type           btrfsvol.BlockGroupFlags `bin:"off=0x18, siz=0x8"`
	IOOptimalAlign uint32                   `bin:"off=0x20, siz=0x4"`
	IOOptimalWidth uint32                   `bin:"off=0x24, siz=0x4"`
	IOMinSize      uint32                   `bin:"off=0x28, siz=0x4"` // sector size
	NumStripes     uint16                   `bin:"off=0x2c, siz=0x2"` // [ignored-when-writing]
	SubStripes     uint16                   `bin:"off=0x2e, siz=0x2"` // ???
	binstruct.End  `bin:"off=0x30"`
}

// ChunkStripe locates one copy/stripe of the chunk on a device.
type ChunkStripe struct {
	DeviceID      btrfsvol.DeviceID     `bin:"off=0x0, siz=0x8"`
	Offset        btrfsvol.PhysicalAddr `bin:"off=0x8, siz=0x8"`
	DeviceUUID    util.UUID             `bin:"off=0x10, siz=0x10"`
	binstruct.End `bin:"off=0x20"`
}

// Mappings returns one logical→physical btrfsvol.Mapping per stripe.
// The chunk's logical address is not stored in the item body; it comes
// from the item key's .Offset, which the caller must pass in.
func (chunk Chunk) Mappings(key internal.Key) []btrfsvol.Mapping {
	ret := make([]btrfsvol.Mapping, 0, len(chunk.Stripes))
	for _, stripe := range chunk.Stripes {
		ret = append(ret, btrfsvol.Mapping{
			LAddr: btrfsvol.LogicalAddr(key.Offset),
			PAddr: btrfsvol.QualifiedPhysicalAddr{
				Dev:  stripe.DeviceID,
				Addr: stripe.Offset,
			},
			Size:       chunk.Head.Size,
			SizeLocked: true,
			Flags:      &chunk.Head.Type,
		})
	}
	return ret
}

// UnmarshalBinary parses the fixed header, then the .Head.NumStripes
// stripes that follow it.
func (chunk *Chunk) UnmarshalBinary(dat []byte) (int, error) {
	n, err := binstruct.Unmarshal(dat, &chunk.Head)
	if err != nil {
		return n, err
	}
	// Discard previously-parsed stripes so the receiver may be reused.
	chunk.Stripes = nil
	for i := 0; i < int(chunk.Head.NumStripes); i++ {
		var stripe ChunkStripe
		_n, err := binstruct.Unmarshal(dat[n:], &stripe)
		n += _n
		if err != nil {
			return n, fmt.Errorf("%T.UnmarshalBinary: %w", *chunk, err)
		}
		chunk.Stripes = append(chunk.Stripes, stripe)
	}
	return n, nil
}

// MarshalBinary re-derives .Head.NumStripes from len(.Stripes) (the
// stored value is [ignored-when-writing]), then writes the header
// followed by each stripe.
func (chunk Chunk) MarshalBinary() ([]byte, error) {
	chunk.Head.NumStripes = uint16(len(chunk.Stripes))
	ret, err := binstruct.Marshal(chunk.Head)
	if err != nil {
		return ret, err
	}
	for _, stripe := range chunk.Stripes {
		_ret, err := binstruct.Marshal(stripe)
		ret = append(ret, _ret...)
		if err != nil {
			return ret, fmt.Errorf("%T.MarshalBinary: %w", chunk, err)
		}
	}
	return ret, nil
}
diff --git a/lib/btrfs/btrfsitem/item_dev.go b/lib/btrfs/btrfsitem/item_dev.go
new file mode 100644
index 0000000..d3fe582
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_dev.go
@@ -0,0 +1,33 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
// Dev describes one member device of the filesystem (per field names,
// this mirrors the kernel's struct btrfs_dev_item — confirm).
//
// key.objectid = BTRFS_DEV_ITEMS_OBJECTID
// key.offset = device_id (starting at 1)
type Dev struct { // DEV_ITEM=216
	DevID btrfsvol.DeviceID `bin:"off=0x0, siz=0x8"`

	NumBytes     uint64 `bin:"off=0x8, siz=0x8"`
	NumBytesUsed uint64 `bin:"off=0x10, siz=0x8"`

	IOOptimalAlign uint32 `bin:"off=0x18, siz=0x4"`
	IOOptimalWidth uint32 `bin:"off=0x1c, siz=0x4"`
	IOMinSize      uint32 `bin:"off=0x20, siz=0x4"` // sector size

	Type        uint64              `bin:"off=0x24, siz=0x8"`
	Generation  internal.Generation `bin:"off=0x2c, siz=0x8"`
	StartOffset uint64              `bin:"off=0x34, siz=0x8"`
	DevGroup    uint32              `bin:"off=0x3c, siz=0x4"`
	SeekSpeed   uint8               `bin:"off=0x40, siz=0x1"`
	Bandwidth   uint8               `bin:"off=0x41, siz=0x1"`

	DevUUID util.UUID `bin:"off=0x42, siz=0x10"` // UUID of this device
	FSUUID  util.UUID `bin:"off=0x52, siz=0x10"` // UUID of the owning filesystem

	binstruct.End `bin:"off=0x62"`
}
diff --git a/lib/btrfs/btrfsitem/item_devextent.go b/lib/btrfs/btrfsitem/item_devextent.go
new file mode 100644
index 0000000..c346d85
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_devextent.go
@@ -0,0 +1,32 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
// DevExtent records a physical region of a device that backs (part of)
// a chunk; the device-centric inverse of a Chunk's stripe list.
//
// key.objectid = device_id
// key.offset = physical_addr
type DevExtent struct { // DEV_EXTENT=204
	ChunkTree     int64                `bin:"off=0, siz=8"`
	ChunkObjectID internal.ObjID       `bin:"off=8, siz=8"`
	ChunkOffset   btrfsvol.LogicalAddr `bin:"off=16, siz=8"` // logical address of the chunk this extent backs
	Length        btrfsvol.AddrDelta   `bin:"off=24, siz=8"`
	ChunkTreeUUID util.UUID            `bin:"off=32, siz=16"`
	binstruct.End `bin:"off=48"`
}

// Mapping returns the logical→physical btrfsvol.Mapping described by
// this extent.  The device ID and physical address are not stored in
// the body; they come from the item key, which the caller must pass
// in.  Unlike Chunk.Mappings, no block-group flags are available here,
// so .Flags is nil.
func (devext DevExtent) Mapping(key internal.Key) btrfsvol.Mapping {
	return btrfsvol.Mapping{
		LAddr: devext.ChunkOffset,
		PAddr: btrfsvol.QualifiedPhysicalAddr{
			Dev:  btrfsvol.DeviceID(key.ObjectID),
			Addr: btrfsvol.PhysicalAddr(key.Offset),
		},
		Size:       devext.Length,
		SizeLocked: true,
		Flags:      nil,
	}
}
diff --git a/lib/btrfs/btrfsitem/item_dir.go b/lib/btrfs/btrfsitem/item_dir.go
new file mode 100644
index 0000000..57f6d6d
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_dir.go
@@ -0,0 +1,115 @@
+package btrfsitem
+
+import (
+ "fmt"
+ "hash/crc32"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+)
+
// DirEntries is a sequence of DirEntry structures packed back-to-back
// in a single item body (see UnmarshalBinary).
//
// key.objectid = inode of directory containing this entry
// key.offset =
//	for DIR_ITEM and XATTR_ITEM = NameHash(name)
//	for DIR_INDEX = index id in the directory (starting at 2, because "." and "..")
type DirEntries []DirEntry // DIR_ITEM=84 DIR_INDEX=96 XATTR_ITEM=24
+
// NameHash computes the name hash used as the key .Offset of DIR_ITEM
// and XATTR_ITEM items: a Castagnoli (CRC32c) checksum of the name,
// computed from initial crc 1 and bit-inverted at the end.
func NameHash(dat []byte) uint64 {
	tbl := crc32.MakeTable(crc32.Castagnoli)
	sum := crc32.Update(1, tbl, dat)
	return uint64(^sum)
}
+
+func (o *DirEntries) UnmarshalBinary(dat []byte) (int, error) {
+ *o = nil
+ n := 0
+ for n < len(dat) {
+ var ref DirEntry
+ _n, err := binstruct.Unmarshal(dat, &ref)
+ n += _n
+ if err != nil {
+ return n, err
+ }
+ *o = append(*o, ref)
+ }
+ return n, nil
+}
+
+func (o DirEntries) MarshalBinary() ([]byte, error) {
+ var ret []byte
+ for _, ref := range o {
+ bs, err := binstruct.Marshal(ref)
+ ret = append(ret, bs...)
+ if err != nil {
+ return ret, err
+ }
+ }
+ return ret, nil
+}
+
// DirEntry is a single directory entry; an item body packs one or more
// of these back-to-back (see DirEntries).  The fixed header is
// followed by .DataLen bytes of data and then .NameLen bytes of name.
type DirEntry struct {
	Location      internal.Key `bin:"off=0x0, siz=0x11"` // key of the entry's target
	TransID       int64        `bin:"off=0x11, siz=8"`
	DataLen       uint16       `bin:"off=0x19, siz=2"` // [ignored-when-writing]
	NameLen       uint16       `bin:"off=0x1b, siz=2"` // [ignored-when-writing]
	Type          FileType     `bin:"off=0x1d, siz=1"`
	binstruct.End `bin:"off=0x1e"`
	Data          []byte       `bin:"-"` // .DataLen bytes following the fixed header
	Name          []byte       `bin:"-"` // .NameLen bytes following .Data
}
+
+func (o *DirEntry) UnmarshalBinary(dat []byte) (int, error) {
+ n, err := binstruct.UnmarshalWithoutInterface(dat, o)
+ if err != nil {
+ return n, err
+ }
+ o.Data = dat[n : n+int(o.DataLen)]
+ n += int(o.DataLen)
+ o.Name = dat[n : n+int(o.NameLen)]
+ n += int(o.NameLen)
+ return n, nil
+}
+
// MarshalBinary re-derives .DataLen/.NameLen from the slices (the
// stored values are [ignored-when-writing]), then writes the fixed
// header followed by .Data and .Name.
func (o DirEntry) MarshalBinary() ([]byte, error) {
	o.DataLen = uint16(len(o.Data))
	o.NameLen = uint16(len(o.Name))
	dat, err := binstruct.MarshalWithoutInterface(o)
	if err != nil {
		return dat, err
	}
	dat = append(dat, o.Data...)
	dat = append(dat, o.Name...)
	return dat, nil
}
+
// FileType is the type tag stored in a DirEntry's .Type field.
type FileType uint8

const (
	FT_UNKNOWN  = FileType(0)
	FT_REG_FILE = FileType(1)
	FT_DIR      = FileType(2)
	FT_CHRDEV   = FileType(3)
	FT_BLKDEV   = FileType(4)
	FT_FIFO     = FileType(5)
	FT_SOCK     = FileType(6)
	FT_SYMLINK  = FileType(7)
	FT_XATTR    = FileType(8)

	FT_MAX = FileType(9)
)

// String returns a short human-readable name for the file type, or
// "DIR_ITEM.<n>" for values outside the known set.
func (ft FileType) String() string {
	switch ft {
	case FT_UNKNOWN:
		return "UNKNOWN"
	case FT_REG_FILE:
		return "FILE" // XXX
	case FT_DIR:
		return "DIR"
	case FT_CHRDEV:
		return "CHRDEV"
	case FT_BLKDEV:
		return "BLKDEV"
	case FT_FIFO:
		return "FIFO"
	case FT_SOCK:
		return "SOCK"
	case FT_SYMLINK:
		return "SYMLINK"
	case FT_XATTR:
		return "XATTR"
	default:
		return fmt.Sprintf("DIR_ITEM.%d", uint8(ft))
	}
}
diff --git a/lib/btrfs/btrfsitem/item_empty.go b/lib/btrfs/btrfsitem/item_empty.go
new file mode 100644
index 0000000..209b2fc
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_empty.go
@@ -0,0 +1,9 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+)
+
// Empty is the zero-length item body shared by all item types whose
// payload lives entirely in the key.
type Empty struct { // ORPHAN_ITEM=48 TREE_BLOCK_REF=176 SHARED_BLOCK_REF=182 FREE_SPACE_EXTENT=199 QGROUP_RELATION=246
	binstruct.End `bin:"off=0"`
}
diff --git a/lib/btrfs/btrfsitem/item_extent.go b/lib/btrfs/btrfsitem/item_extent.go
new file mode 100644
index 0000000..9a9ea55
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_extent.go
@@ -0,0 +1,165 @@
+package btrfsitem
+
+import (
+ "fmt"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
// Extent is an EXTENT_ITEM body: a fixed header, an optional
// tree-block info, and a variable number of inline back-references.
type Extent struct { // EXTENT_ITEM=168
	Head ExtentHeader
	Info TreeBlockInfo // only if .Head.Flags.Has(EXTENT_FLAG_TREE_BLOCK)
	Refs []ExtentInlineRef
}

// UnmarshalBinary parses the header, the optional .Info, and then
// inline refs until the input is exhausted.  Note that a ref is
// appended even when its parse returned an error, so the last element
// may be partial on failure.
func (o *Extent) UnmarshalBinary(dat []byte) (int, error) {
	n, err := binstruct.Unmarshal(dat, &o.Head)
	if err != nil {
		return n, err
	}
	if o.Head.Flags.Has(EXTENT_FLAG_TREE_BLOCK) {
		_n, err := binstruct.Unmarshal(dat[n:], &o.Info)
		n += _n
		if err != nil {
			return n, err
		}
	}
	// Discard previously-parsed refs so the receiver may be reused.
	o.Refs = nil
	for n < len(dat) {
		var ref ExtentInlineRef
		_n, err := binstruct.Unmarshal(dat[n:], &ref)
		n += _n
		o.Refs = append(o.Refs, ref)
		if err != nil {
			return n, err
		}
	}
	return n, nil
}

// MarshalBinary writes the header, the .Info (only when the
// TREE_BLOCK flag is set), and then each inline ref.
func (o Extent) MarshalBinary() ([]byte, error) {
	dat, err := binstruct.Marshal(o.Head)
	if err != nil {
		return dat, err
	}
	if o.Head.Flags.Has(EXTENT_FLAG_TREE_BLOCK) {
		bs, err := binstruct.Marshal(o.Info)
		dat = append(dat, bs...)
		if err != nil {
			return dat, err
		}
	}
	for _, ref := range o.Refs {
		bs, err := binstruct.Marshal(ref)
		dat = append(dat, bs...)
		if err != nil {
			return dat, err
		}
	}
	return dat, nil
}
+
+type ExtentHeader struct {
+ Refs int64 `bin:"off=0, siz=8"`
+ Generation internal.Generation `bin:"off=8, siz=8"`
+ Flags ExtentFlags `bin:"off=16, siz=8"`
+ binstruct.End `bin:"off=24"`
+}
+
+type TreeBlockInfo struct {
+ Key internal.Key `bin:"off=0, siz=0x11"`
+ Level uint8 `bin:"off=0x11, siz=0x8"`
+ binstruct.End `bin:"off=0x19"`
+}
+
// ExtentFlags is the bitfield stored in ExtentHeader.Flags.
type ExtentFlags uint64

const (
	EXTENT_FLAG_DATA = ExtentFlags(1 << iota)
	EXTENT_FLAG_TREE_BLOCK // when set, an Extent body carries a TreeBlockInfo
)

var extentFlagNames = []string{
	"DATA",
	"TREE_BLOCK",
}

// Has reports whether every bit in req is set in f.
func (f ExtentFlags) Has(req ExtentFlags) bool { return f&req == req }
func (f ExtentFlags) String() string { return util.BitfieldString(f, extentFlagNames, util.HexNone) }

// ExtentInlineRef is one inline back-reference within an Extent or
// Metadata item: a 1-byte type, then a type-dependent payload.
type ExtentInlineRef struct {
	Type   Type   // only 4 valid values: {TREE,SHARED}_BLOCK_REF_KEY, {EXTENT,SHARED}_DATA_REF_KEY
	Offset uint64 // only when Type != EXTENT_DATA_REF_KEY
	Body   Item   // only when Type == *_DATA_REF_KEY
}
+
+func (o *ExtentInlineRef) UnmarshalBinary(dat []byte) (int, error) {
+ o.Type = Type(dat[0])
+ n := 1
+ switch o.Type {
+ case TREE_BLOCK_REF_KEY, SHARED_BLOCK_REF_KEY:
+ _n, err := binstruct.Unmarshal(dat[n:], &o.Offset)
+ n += _n
+ if err != nil {
+ return n, err
+ }
+ case EXTENT_DATA_REF_KEY:
+ var dref ExtentDataRef
+ _n, err := binstruct.Unmarshal(dat[n:], &dref)
+ n += _n
+ o.Body = dref
+ if err != nil {
+ return n, err
+ }
+ case SHARED_DATA_REF_KEY:
+ _n, err := binstruct.Unmarshal(dat[n:], &o.Offset)
+ n += _n
+ if err != nil {
+ return n, err
+ }
+ var sref SharedDataRef
+ _n, err = binstruct.Unmarshal(dat[n:], &sref)
+ n += _n
+ o.Body = sref
+ if err != nil {
+ return n, err
+ }
+ default:
+ return n, fmt.Errorf("btrfsitem.ExtentInlineRef.UnmarshalBinary: unexpected item type %v", o.Type)
+ }
+ return n, nil
+}
+
// MarshalBinary is the inverse of UnmarshalBinary: the 1-byte type
// followed by the type-dependent payload (.Offset and/or .Body).
func (o ExtentInlineRef) MarshalBinary() ([]byte, error) {
	dat := []byte{byte(o.Type)}
	switch o.Type {
	case TREE_BLOCK_REF_KEY, SHARED_BLOCK_REF_KEY:
		_dat, err := binstruct.Marshal(o.Offset)
		dat = append(dat, _dat...)
		if err != nil {
			return dat, err
		}
	case EXTENT_DATA_REF_KEY:
		_dat, err := binstruct.Marshal(o.Body)
		dat = append(dat, _dat...)
		if err != nil {
			return dat, err
		}
	case SHARED_DATA_REF_KEY:
		_dat, err := binstruct.Marshal(o.Offset)
		dat = append(dat, _dat...)
		if err != nil {
			return dat, err
		}
		_dat, err = binstruct.Marshal(o.Body)
		dat = append(dat, _dat...)
		if err != nil {
			return dat, err
		}
	default:
		return dat, fmt.Errorf("btrfsitem.ExtentInlineRef.MarshalBinary: unexpected item type %v", o.Type)
	}
	return dat, nil
}
diff --git a/lib/btrfs/btrfsitem/item_extentcsum.go b/lib/btrfs/btrfsitem/item_extentcsum.go
new file mode 100644
index 0000000..b27dbde
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_extentcsum.go
@@ -0,0 +1,39 @@
+package btrfsitem
+
+import (
+ "fmt"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
+)
+
// ExtentCSum is a run of data checksums.  The checksum algorithm (and
// hence .ChecksumSize) is not stored in the item; the caller must set
// .ChecksumSize before unmarshaling.
//
// key.objectid = BTRFS_EXTENT_CSUM_OBJECTID
// key.offset = laddr of checksummed region
type ExtentCSum struct { // EXTENT_CSUM=128
	ChecksumSize int
	// Checksum of each sector starting at key.offset
	Sums []btrfssum.CSum
}
+
+func (o *ExtentCSum) UnmarshalBinary(dat []byte) (int, error) {
+ if o.ChecksumSize == 0 {
+ return 0, fmt.Errorf("btrfs.ExtentCSum.UnmarshalBinary: .ChecksumSize must be set")
+ }
+ for len(dat) >= o.ChecksumSize {
+ var csum btrfssum.CSum
+ copy(csum[:], dat[:o.ChecksumSize])
+ dat = dat[o.ChecksumSize:]
+ o.Sums = append(o.Sums, csum)
+ }
+ return len(o.Sums) * o.ChecksumSize, nil
+}
+
// MarshalBinary concatenates the first .ChecksumSize bytes of each
// checksum (the remainder of each CSum is padding).  .ChecksumSize
// must be set by the caller first.
func (o ExtentCSum) MarshalBinary() ([]byte, error) {
	if o.ChecksumSize == 0 {
		return nil, fmt.Errorf("btrfs.ExtentCSum.MarshalBinary: .ChecksumSize must be set")
	}
	var dat []byte
	for _, csum := range o.Sums {
		dat = append(dat, csum[:o.ChecksumSize]...)
	}
	return dat, nil
}
diff --git a/lib/btrfs/btrfsitem/item_extentdataref.go b/lib/btrfs/btrfsitem/item_extentdataref.go
new file mode 100644
index 0000000..aab5426
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_extentdataref.go
@@ -0,0 +1,14 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+)
+
// ExtentDataRef is a back-reference from a data extent to the file
// extent item(s) that use it; appears both as a standalone item and as
// an ExtentInlineRef body.
type ExtentDataRef struct { // EXTENT_DATA_REF=178
	Root          internal.ObjID `bin:"off=0, siz=8"`  // tree holding the referencing file extent
	ObjectID      internal.ObjID `bin:"off=8, siz=8"`  // inode of the referencing file
	Offset        int64          `bin:"off=16, siz=8"`
	Count         int32          `bin:"off=24, siz=4"` // number of references
	binstruct.End `bin:"off=28"`
}
diff --git a/lib/btrfs/btrfsitem/item_fileextent.go b/lib/btrfs/btrfsitem/item_fileextent.go
new file mode 100644
index 0000000..2f3ac2b
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_fileextent.go
@@ -0,0 +1,137 @@
+package btrfsitem
+
+import (
+ "fmt"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+)
+
// FileExtent maps a byte range of a file to its data, either inline in
// the item body or in a separate data extent.
//
// key.objectid = inode
// key.offset = offset within file
type FileExtent struct { // EXTENT_DATA=108
	Generation internal.Generation `bin:"off=0x0, siz=0x8"` // transaction ID that created this extent
	RAMBytes   int64               `bin:"off=0x8, siz=0x8"` // upper bound of what compressed data will decompress to

	// 32 bits describing the data encoding
	Compression   CompressionType `bin:"off=0x10, siz=0x1"`
	Encryption    uint8           `bin:"off=0x11, siz=0x1"`
	OtherEncoding uint16          `bin:"off=0x12, siz=0x2"` // reserved for later use

	Type FileExtentType `bin:"off=0x14, siz=0x1"` // inline data or real extent

	binstruct.End `bin:"off=0x15"`

	// only one of these, depending on .Type
	BodyInline []byte `bin:"-"` // .Type == FILE_EXTENT_INLINE
	BodyExtent struct { // .Type == FILE_EXTENT_REG or FILE_EXTENT_PREALLOC
		// Position and size of extent within the device
		DiskByteNr   btrfsvol.LogicalAddr `bin:"off=0x0, siz=0x8"`
		DiskNumBytes btrfsvol.AddrDelta   `bin:"off=0x8, siz=0x8"`

		// Position of data within the extent
		Offset btrfsvol.AddrDelta `bin:"off=0x10, siz=0x8"`

		// Decompressed/unencrypted size
		NumBytes int64 `bin:"off=0x18, siz=0x8"`

		binstruct.End `bin:"off=0x20"`
	} `bin:"-"`
}

// UnmarshalBinary parses the fixed header, then either adopts the rest
// of dat as .BodyInline (aliasing dat) or parses .BodyExtent,
// depending on .Type.
func (o *FileExtent) UnmarshalBinary(dat []byte) (int, error) {
	n, err := binstruct.UnmarshalWithoutInterface(dat, o)
	if err != nil {
		return n, err
	}
	switch o.Type {
	case FILE_EXTENT_INLINE:
		o.BodyInline = dat[n:]
		n += len(o.BodyInline)
	case FILE_EXTENT_REG, FILE_EXTENT_PREALLOC:
		_n, err := binstruct.Unmarshal(dat[n:], &o.BodyExtent)
		n += _n
		if err != nil {
			return n, err
		}
	default:
		return n, fmt.Errorf("unknown file extent type %v", o.Type)
	}
	return n, nil
}

// MarshalBinary writes the fixed header followed by whichever body
// matches .Type.
func (o FileExtent) MarshalBinary() ([]byte, error) {
	dat, err := binstruct.MarshalWithoutInterface(o)
	if err != nil {
		return dat, err
	}
	switch o.Type {
	case FILE_EXTENT_INLINE:
		dat = append(dat, o.BodyInline...)
	case FILE_EXTENT_REG, FILE_EXTENT_PREALLOC:
		bs, err := binstruct.Marshal(o.BodyExtent)
		dat = append(dat, bs...)
		if err != nil {
			return dat, err
		}
	default:
		return dat, fmt.Errorf("unknown file extent type %v", o.Type)
	}
	return dat, nil
}
+
// FileExtentType selects which body a FileExtent carries.
type FileExtentType uint8

const (
	FILE_EXTENT_INLINE = FileExtentType(iota)
	FILE_EXTENT_REG
	FILE_EXTENT_PREALLOC
)

// Size returns the number of bytes of (decompressed) file data this
// extent provides: the inline body length, or .BodyExtent.NumBytes.
func (o FileExtent) Size() (int64, error) {
	switch o.Type {
	case FILE_EXTENT_INLINE:
		return int64(len(o.BodyInline)), nil
	case FILE_EXTENT_REG, FILE_EXTENT_PREALLOC:
		return o.BodyExtent.NumBytes, nil
	default:
		return 0, fmt.Errorf("unknown file extent type %v", o.Type)
	}
}

// String renders the numeric value with its name, e.g. "1 (regular)";
// unknown values render as "N (unknown)".
func (fet FileExtentType) String() string {
	names := map[FileExtentType]string{
		FILE_EXTENT_INLINE:   "inline",
		FILE_EXTENT_REG:      "regular",
		FILE_EXTENT_PREALLOC: "prealloc",
	}
	name, ok := names[fet]
	if !ok {
		name = "unknown"
	}
	return fmt.Sprintf("%d (%s)", fet, name)
}
+
// CompressionType is the compression algorithm recorded in a
// FileExtent's .Compression field.
type CompressionType uint8

const (
	COMPRESS_NONE = CompressionType(iota)
	COMPRESS_ZLIB
	COMPRESS_LZO
	COMPRESS_ZSTD
)

// String renders the numeric value together with the algorithm name,
// e.g. "1 (zlib)"; out-of-range values render as "N (unknown)".
func (ct CompressionType) String() string {
	name := "unknown"
	switch ct {
	case COMPRESS_NONE:
		name = "none"
	case COMPRESS_ZLIB:
		name = "zlib"
	case COMPRESS_LZO:
		name = "lzo"
	case COMPRESS_ZSTD:
		name = "zstd"
	}
	return fmt.Sprintf("%d (%s)", ct, name)
}
diff --git a/lib/btrfs/btrfsitem/item_freespacebitmap.go b/lib/btrfs/btrfsitem/item_freespacebitmap.go
new file mode 100644
index 0000000..6158eb0
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_freespacebitmap.go
@@ -0,0 +1,12 @@
+package btrfsitem
+
// FreeSpaceBitmap is a raw free-space bitmap; the item body is the
// bitmap bytes, used verbatim.
type FreeSpaceBitmap []byte // FREE_SPACE_BITMAP=200

// UnmarshalBinary adopts the entire item body as the bitmap.  Note
// that it aliases dat rather than copying it.
func (o *FreeSpaceBitmap) UnmarshalBinary(dat []byte) (int, error) {
	*o = FreeSpaceBitmap(dat)
	return len(*o), nil
}

// MarshalBinary returns the bitmap bytes unchanged.
func (o FreeSpaceBitmap) MarshalBinary() ([]byte, error) {
	return []byte(o), nil
}
diff --git a/lib/btrfs/btrfsitem/item_freespaceinfo.go b/lib/btrfs/btrfsitem/item_freespaceinfo.go
new file mode 100644
index 0000000..89f555e
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_freespaceinfo.go
@@ -0,0 +1,11 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+)
+
// FreeSpaceInfo is the fixed-size FREE_SPACE_INFO payload from the
// free-space tree.
type FreeSpaceInfo struct { // FREE_SPACE_INFO=198
	ExtentCount   int32  `bin:"off=0, siz=4"`
	Flags         uint32 `bin:"off=4, siz=4"`
	binstruct.End `bin:"off=8"`
}
diff --git a/lib/btrfs/btrfsitem/item_inode.go b/lib/btrfs/btrfsitem/item_inode.go
new file mode 100644
index 0000000..9b1b91b
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_inode.go
@@ -0,0 +1,64 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+ "git.lukeshu.com/btrfs-progs-ng/lib/linux"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
// Inode is the INODE_ITEM payload: the stat-like metadata of a file,
// directory, or other inode.  Fields marked "stat" correspond to the
// POSIX stat(2) fields of the same name.
type Inode struct { // INODE_ITEM=1
	Generation internal.Generation `bin:"off=0x00, siz=0x08"`
	TransID    int64               `bin:"off=0x08, siz=0x08"`
	Size       int64               `bin:"off=0x10, siz=0x08"` // stat
	NumBytes   int64               `bin:"off=0x18, siz=0x08"`
	BlockGroup int64               `bin:"off=0x20, siz=0x08"`
	NLink      int32               `bin:"off=0x28, siz=0x04"` // stat
	UID        int32               `bin:"off=0x2C, siz=0x04"` // stat
	GID        int32               `bin:"off=0x30, siz=0x04"` // stat
	Mode       linux.StatMode      `bin:"off=0x34, siz=0x04"` // stat
	RDev       int64               `bin:"off=0x38, siz=0x08"` // stat
	Flags      InodeFlags          `bin:"off=0x40, siz=0x08"` // statx.stx_attributes, sorta
	Sequence   int64               `bin:"off=0x48, siz=0x08"` // NFS
	Reserved   [4]int64            `bin:"off=0x50, siz=0x20"`
	ATime      internal.Time       `bin:"off=0x70, siz=0x0c"` // stat
	CTime      internal.Time       `bin:"off=0x7c, siz=0x0c"` // stat
	MTime      internal.Time       `bin:"off=0x88, siz=0x0c"` // stat
	OTime      internal.Time       `bin:"off=0x94, siz=0x0c"` // statx.stx_btime (why is this called "otime" instead of "btime"?)
	binstruct.End `bin:"off=0xa0"`
}

// InodeFlags is the bitfield stored in Inode.Flags.
type InodeFlags uint64

const (
	INODE_NODATASUM = InodeFlags(1 << iota)
	INODE_NODATACOW
	INODE_READONLY
	INODE_NOCOMPRESS
	INODE_PREALLOC
	INODE_SYNC
	INODE_IMMUTABLE
	INODE_APPEND
	INODE_NODUMP
	INODE_NOATIME
	INODE_DIRSYNC
	INODE_COMPRESS
)

// inodeFlagNames is indexed by bit position, matching the const block
// above.
var inodeFlagNames = []string{
	"NODATASUM",
	"NODATACOW",
	"READONLY",
	"NOCOMPRESS",
	"PREALLOC",
	"SYNC",
	"IMMUTABLE",
	"APPEND",
	"NODUMP",
	"NOATIME",
	"DIRSYNC",
	"COMPRESS",
}

// Has reports whether every bit in req is set in f.
func (f InodeFlags) Has(req InodeFlags) bool { return f&req == req }
func (f InodeFlags) String() string { return util.BitfieldString(f, inodeFlagNames, util.HexLower) }
diff --git a/lib/btrfs/btrfsitem/item_inoderef.go b/lib/btrfs/btrfsitem/item_inoderef.go
new file mode 100644
index 0000000..80d70e1
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_inoderef.go
@@ -0,0 +1,35 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+)
+
// InodeRef links an inode back to a name in its parent directory; the
// fixed header is followed by .NameLen bytes of name.
//
// key.objectid = inode number of the file
// key.offset = inode number of the parent file
type InodeRef struct { // INODE_REF=12
	Index         int64  `bin:"off=0x0, siz=0x8"` // position of the entry within the parent directory
	NameLen       uint16 `bin:"off=0x8, siz=0x2"` // [ignored-when-writing]
	binstruct.End `bin:"off=0xa"`
	Name          []byte `bin:"-"` // .NameLen bytes following the fixed header
}

// UnmarshalBinary parses the fixed header and then slices the trailing
// name out of dat (aliasing dat; trusts .NameLen from the input).
func (o *InodeRef) UnmarshalBinary(dat []byte) (int, error) {
	n, err := binstruct.UnmarshalWithoutInterface(dat, o)
	if err != nil {
		return n, err
	}
	dat = dat[n:]
	o.Name = dat[:o.NameLen]
	n += int(o.NameLen)
	return n, nil
}

// MarshalBinary re-derives .NameLen from len(.Name) (the stored value
// is [ignored-when-writing]), then writes the header followed by the
// name.
func (o InodeRef) MarshalBinary() ([]byte, error) {
	o.NameLen = uint16(len(o.Name))
	dat, err := binstruct.MarshalWithoutInterface(o)
	if err != nil {
		return dat, err
	}
	dat = append(dat, o.Name...)
	return dat, nil
}
diff --git a/lib/btrfs/btrfsitem/item_metadata.go b/lib/btrfs/btrfsitem/item_metadata.go
new file mode 100644
index 0000000..d51a340
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_metadata.go
@@ -0,0 +1,44 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+)
+
// Metadata is like Extent, but doesn't have .Info.
type Metadata struct { // METADATA_ITEM=169
	Head ExtentHeader
	Refs []ExtentInlineRef
}

// UnmarshalBinary parses the header and then inline refs until the
// input is exhausted.  As in Extent.UnmarshalBinary, a ref is appended
// even when its parse returned an error, so the last element may be
// partial on failure.
func (o *Metadata) UnmarshalBinary(dat []byte) (int, error) {
	n, err := binstruct.Unmarshal(dat, &o.Head)
	if err != nil {
		return n, err
	}
	// Discard previously-parsed refs so the receiver may be reused.
	o.Refs = nil
	for n < len(dat) {
		var ref ExtentInlineRef
		_n, err := binstruct.Unmarshal(dat[n:], &ref)
		n += _n
		o.Refs = append(o.Refs, ref)
		if err != nil {
			return n, err
		}
	}
	return n, nil
}

// MarshalBinary writes the header followed by each inline ref.
func (o Metadata) MarshalBinary() ([]byte, error) {
	dat, err := binstruct.Marshal(o.Head)
	if err != nil {
		return dat, err
	}
	for _, ref := range o.Refs {
		bs, err := binstruct.Marshal(ref)
		dat = append(dat, bs...)
		if err != nil {
			return dat, err
		}
	}
	return dat, nil
}
diff --git a/lib/btrfs/btrfsitem/item_persistent.go b/lib/btrfs/btrfsitem/item_persistent.go
new file mode 100644
index 0000000..cbbae76
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_persistent.go
@@ -0,0 +1,19 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+)
+
// Indices into DevStats.Values.
const (
	DEV_STAT_WRITE_ERRS = iota
	DEV_STAT_READ_ERRS
	DEV_STAT_FLUSH_ERRS
	DEV_STAT_CORRUPTION_ERRS
	DEV_STAT_GENERATION_ERRS
	DEV_STAT_VALUES_MAX
)

// DevStats is the per-device error-counter array, indexed by the
// DEV_STAT_* constants above.
type DevStats struct { // PERSISTENT_ITEM=249
	Values        [DEV_STAT_VALUES_MAX]int64 `bin:"off=0, siz=40"`
	binstruct.End `bin:"off=40"`
}
diff --git a/lib/btrfs/btrfsitem/item_root.go b/lib/btrfs/btrfsitem/item_root.go
new file mode 100644
index 0000000..ff9311f
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_root.go
@@ -0,0 +1,51 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+type Root struct { // ROOT_ITEM=132
+ Inode Inode `bin:"off=0x000, siz=0xa0"`
+ Generation internal.Generation `bin:"off=0x0a0, siz=0x08"`
+ RootDirID internal.ObjID `bin:"off=0x0a8, siz=0x08"`
+ ByteNr btrfsvol.LogicalAddr `bin:"off=0x0b0, siz=0x08"`
+ ByteLimit int64 `bin:"off=0x0b8, siz=0x08"`
+ BytesUsed int64 `bin:"off=0x0c0, siz=0x08"`
+ LastSnapshot int64 `bin:"off=0x0c8, siz=0x08"`
+ Flags RootFlags `bin:"off=0x0d0, siz=0x08"`
+ Refs int32 `bin:"off=0x0d8, siz=0x04"`
+ DropProgress internal.Key `bin:"off=0x0dc, siz=0x11"`
+ DropLevel uint8 `bin:"off=0x0ed, siz=0x01"`
+ Level uint8 `bin:"off=0x0ee, siz=0x01"`
+ GenerationV2 internal.Generation `bin:"off=0x0ef, siz=0x08"`
+ UUID util.UUID `bin:"off=0x0f7, siz=0x10"`
+ ParentUUID util.UUID `bin:"off=0x107, siz=0x10"`
+ ReceivedUUID util.UUID `bin:"off=0x117, siz=0x10"`
+ CTransID int64 `bin:"off=0x127, siz=0x08"`
+ OTransID int64 `bin:"off=0x12f, siz=0x08"`
+ STransID int64 `bin:"off=0x137, siz=0x08"`
+ RTransID int64 `bin:"off=0x13f, siz=0x08"`
+ CTime internal.Time `bin:"off=0x147, siz=0x0c"`
+ OTime internal.Time `bin:"off=0x153, siz=0x0c"`
+ STime internal.Time `bin:"off=0x15f, siz=0x0c"`
+ RTime internal.Time `bin:"off=0x16b, siz=0x0c"`
+ GlobalTreeID internal.ObjID `bin:"off=0x177, siz=0x08"`
+ Reserved [7]int64 `bin:"off=0x17f, siz=0x38"`
+ binstruct.End `bin:"off=0x1b7"`
+}
+
+type RootFlags uint64
+
+const (
+ ROOT_SUBVOL_RDONLY = RootFlags(1 << iota)
+)
+
+var rootFlagNames = []string{
+ "SUBVOL_RDONLY",
+}
+
+func (f RootFlags) Has(req RootFlags) bool { return f&req == req }
+func (f RootFlags) String() string { return util.BitfieldString(f, rootFlagNames, util.HexLower) }
diff --git a/lib/btrfs/btrfsitem/item_rootref.go b/lib/btrfs/btrfsitem/item_rootref.go
new file mode 100644
index 0000000..c851474
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_rootref.go
@@ -0,0 +1,34 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+)
+
+type RootRef struct { // ROOT_REF=156 ROOT_BACKREF=144
+ DirID internal.ObjID `bin:"off=0x00, siz=0x8"`
+ Sequence int64 `bin:"off=0x08, siz=0x8"`
+ NameLen uint16 `bin:"off=0x10, siz=0x2"` // [ignored-when-writing]
+ binstruct.End `bin:"off=0x12"`
+ Name []byte `bin:"-"`
+}
+
+func (o *RootRef) UnmarshalBinary(dat []byte) (int, error) {
+ n, err := binstruct.UnmarshalWithoutInterface(dat, o)
+ if err != nil {
+ return n, err
+ }
+ o.Name = dat[n : n+int(o.NameLen)]
+ n += int(o.NameLen)
+ return n, nil
+}
+
+func (o RootRef) MarshalBinary() ([]byte, error) {
+ o.NameLen = uint16(len(o.Name))
+ dat, err := binstruct.MarshalWithoutInterface(o)
+ if err != nil {
+ return dat, err
+ }
+ dat = append(dat, o.Name...)
+ return dat, nil
+}
diff --git a/lib/btrfs/btrfsitem/item_shareddataref.go b/lib/btrfs/btrfsitem/item_shareddataref.go
new file mode 100644
index 0000000..63897aa
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_shareddataref.go
@@ -0,0 +1,10 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+)
+
+type SharedDataRef struct { // SHARED_DATA_REF=184
+ Count int32 `bin:"off=0, siz=4"`
+ binstruct.End `bin:"off=4"`
+}
diff --git a/lib/btrfs/btrfsitem/item_untyped.go b/lib/btrfs/btrfsitem/item_untyped.go
new file mode 100644
index 0000000..71a9af4
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_untyped.go
@@ -0,0 +1,14 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+)
+
+type FreeSpaceHeader struct { // UNTYPED=0:FREE_SPACE_OBJECTID
+ Location internal.Key `bin:"off=0x00, siz=0x11"`
+ Generation internal.Generation `bin:"off=0x11, siz=0x8"`
+ NumEntries int64 `bin:"off=0x19, siz=0x8"`
+ NumBitmaps int64 `bin:"off=0x21, siz=0x8"`
+ binstruct.End `bin:"off=0x29"`
+}
diff --git a/lib/btrfs/btrfsitem/item_uuid.go b/lib/btrfs/btrfsitem/item_uuid.go
new file mode 100644
index 0000000..6c7d4f0
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_uuid.go
@@ -0,0 +1,16 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+)
+
+// The Key for this item is a UUID, and the item is the subvolume ID
+// that that UUID maps to.
+//
+// key.objectid = first half of UUID
+// key.offset = second half of UUID
+type UUIDMap struct { // UUID_SUBVOL=251 UUID_RECEIVED_SUBVOL=252
+ ObjID internal.ObjID `bin:"off=0, siz=8"`
+ binstruct.End `bin:"off=8"`
+}
diff --git a/lib/btrfs/btrfsitem/items.go b/lib/btrfs/btrfsitem/items.go
new file mode 100644
index 0000000..33ff390
--- /dev/null
+++ b/lib/btrfs/btrfsitem/items.go
@@ -0,0 +1,77 @@
+package btrfsitem
+
+import (
+ "fmt"
+ "reflect"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+)
+
+type Type = internal.ItemType
+
+type Item interface {
+ isItem()
+}
+
+type Error struct {
+ Dat []byte
+ Err error
+}
+
+func (Error) isItem() {}
+
+func (o Error) MarshalBinary() ([]byte, error) {
+ return o.Dat, nil
+}
+
+func (o *Error) UnmarshalBinary(dat []byte) (int, error) {
+ o.Dat = dat
+ return len(dat), nil
+}
+
+// Rather than returning a separate error value, return an Error item.
+func UnmarshalItem(key internal.Key, csumType btrfssum.CSumType, dat []byte) Item {
+ var gotyp reflect.Type
+ if key.ItemType == UNTYPED_KEY {
+ var ok bool
+ gotyp, ok = untypedObjID2gotype[key.ObjectID]
+ if !ok {
+ return Error{
+ Dat: dat,
+ Err: fmt.Errorf("btrfsitem.UnmarshalItem({ItemType:%v, ObjectID:%v}, dat): unknown object ID for untyped item",
+ key.ItemType, key.ObjectID),
+ }
+ }
+ } else {
+ var ok bool
+ gotyp, ok = keytype2gotype[key.ItemType]
+ if !ok {
+ return Error{
+ Dat: dat,
+ Err: fmt.Errorf("btrfsitem.UnmarshalItem({ItemType:%v}, dat): unknown item type", key.ItemType),
+ }
+ }
+ }
+ retPtr := reflect.New(gotyp)
+ if csums, ok := retPtr.Interface().(*ExtentCSum); ok {
+ csums.ChecksumSize = csumType.Size()
+ }
+ n, err := binstruct.Unmarshal(dat, retPtr.Interface())
+ if err != nil {
+ return Error{
+ Dat: dat,
+ Err: fmt.Errorf("btrfsitem.UnmarshalItem({ItemType:%v}, dat): %w", key.ItemType, err),
+ }
+
+ }
+ if n < len(dat) {
+ return Error{
+ Dat: dat,
+ Err: fmt.Errorf("btrfsitem.UnmarshalItem({ItemType:%v}, dat): left over data: got %v bytes but only consumed %v",
+ key.ItemType, len(dat), n),
+ }
+ }
+ return retPtr.Elem().Interface().(Item)
+}
diff --git a/lib/btrfs/btrfsitem/items.txt b/lib/btrfs/btrfsitem/items.txt
new file mode 100644
index 0000000..7898775
--- /dev/null
+++ b/lib/btrfs/btrfsitem/items.txt
@@ -0,0 +1,29 @@
+BLOCK_GROUP_ITEM=192 BlockGroup
+CHUNK_ITEM=228 Chunk
+DEV_EXTENT=204 DevExtent
+DEV_ITEM=216 Dev
+DIR_INDEX=96 DirEntries
+DIR_ITEM=84 DirEntries
+EXTENT_CSUM=128 ExtentCSum
+EXTENT_DATA=108 FileExtent
+EXTENT_DATA_REF=178 ExtentDataRef
+EXTENT_ITEM=168 Extent
+FREE_SPACE_BITMAP=200 FreeSpaceBitmap
+FREE_SPACE_EXTENT=199 Empty
+FREE_SPACE_INFO=198 FreeSpaceInfo
+INODE_ITEM=1 Inode
+INODE_REF=12 InodeRef
+METADATA_ITEM=169 Metadata
+ORPHAN_ITEM=48 Empty
+PERSISTENT_ITEM=249 DevStats
+QGROUP_RELATION=246 Empty
+ROOT_BACKREF=144 RootRef
+ROOT_ITEM=132 Root
+ROOT_REF=156 RootRef
+SHARED_BLOCK_REF=182 Empty
+SHARED_DATA_REF=184 SharedDataRef
+TREE_BLOCK_REF=176 Empty
+UNTYPED=0:FREE_SPACE_OBJECTID FreeSpaceHeader
+UUID_RECEIVED_SUBVOL=252 UUIDMap
+UUID_SUBVOL=251 UUIDMap
+XATTR_ITEM=24 DirEntries
diff --git a/lib/btrfs/btrfsitem/items_gen.go b/lib/btrfs/btrfsitem/items_gen.go
new file mode 100644
index 0000000..3b84d60
--- /dev/null
+++ b/lib/btrfs/btrfsitem/items_gen.go
@@ -0,0 +1,97 @@
+// Code generated by Make. DO NOT EDIT.
+
+package btrfsitem
+
+import (
+ "reflect"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+)
+
+const (
+ BLOCK_GROUP_ITEM_KEY = internal.BLOCK_GROUP_ITEM_KEY
+ CHUNK_ITEM_KEY = internal.CHUNK_ITEM_KEY
+ DEV_EXTENT_KEY = internal.DEV_EXTENT_KEY
+ DEV_ITEM_KEY = internal.DEV_ITEM_KEY
+ DIR_INDEX_KEY = internal.DIR_INDEX_KEY
+ DIR_ITEM_KEY = internal.DIR_ITEM_KEY
+ EXTENT_CSUM_KEY = internal.EXTENT_CSUM_KEY
+ EXTENT_DATA_KEY = internal.EXTENT_DATA_KEY
+ EXTENT_DATA_REF_KEY = internal.EXTENT_DATA_REF_KEY
+ EXTENT_ITEM_KEY = internal.EXTENT_ITEM_KEY
+ FREE_SPACE_BITMAP_KEY = internal.FREE_SPACE_BITMAP_KEY
+ FREE_SPACE_EXTENT_KEY = internal.FREE_SPACE_EXTENT_KEY
+ FREE_SPACE_INFO_KEY = internal.FREE_SPACE_INFO_KEY
+ INODE_ITEM_KEY = internal.INODE_ITEM_KEY
+ INODE_REF_KEY = internal.INODE_REF_KEY
+ METADATA_ITEM_KEY = internal.METADATA_ITEM_KEY
+ ORPHAN_ITEM_KEY = internal.ORPHAN_ITEM_KEY
+ PERSISTENT_ITEM_KEY = internal.PERSISTENT_ITEM_KEY
+ QGROUP_RELATION_KEY = internal.QGROUP_RELATION_KEY
+ ROOT_BACKREF_KEY = internal.ROOT_BACKREF_KEY
+ ROOT_ITEM_KEY = internal.ROOT_ITEM_KEY
+ ROOT_REF_KEY = internal.ROOT_REF_KEY
+ SHARED_BLOCK_REF_KEY = internal.SHARED_BLOCK_REF_KEY
+ SHARED_DATA_REF_KEY = internal.SHARED_DATA_REF_KEY
+ TREE_BLOCK_REF_KEY = internal.TREE_BLOCK_REF_KEY
+ UNTYPED_KEY = internal.UNTYPED_KEY
+ UUID_RECEIVED_SUBVOL_KEY = internal.UUID_RECEIVED_SUBVOL_KEY
+ UUID_SUBVOL_KEY = internal.UUID_SUBVOL_KEY
+ XATTR_ITEM_KEY = internal.XATTR_ITEM_KEY
+)
+
+var keytype2gotype = map[Type]reflect.Type{
+ BLOCK_GROUP_ITEM_KEY: reflect.TypeOf(BlockGroup{}),
+ CHUNK_ITEM_KEY: reflect.TypeOf(Chunk{}),
+ DEV_EXTENT_KEY: reflect.TypeOf(DevExtent{}),
+ DEV_ITEM_KEY: reflect.TypeOf(Dev{}),
+ DIR_INDEX_KEY: reflect.TypeOf(DirEntries{}),
+ DIR_ITEM_KEY: reflect.TypeOf(DirEntries{}),
+ EXTENT_CSUM_KEY: reflect.TypeOf(ExtentCSum{}),
+ EXTENT_DATA_KEY: reflect.TypeOf(FileExtent{}),
+ EXTENT_DATA_REF_KEY: reflect.TypeOf(ExtentDataRef{}),
+ EXTENT_ITEM_KEY: reflect.TypeOf(Extent{}),
+ FREE_SPACE_BITMAP_KEY: reflect.TypeOf(FreeSpaceBitmap{}),
+ FREE_SPACE_EXTENT_KEY: reflect.TypeOf(Empty{}),
+ FREE_SPACE_INFO_KEY: reflect.TypeOf(FreeSpaceInfo{}),
+ INODE_ITEM_KEY: reflect.TypeOf(Inode{}),
+ INODE_REF_KEY: reflect.TypeOf(InodeRef{}),
+ METADATA_ITEM_KEY: reflect.TypeOf(Metadata{}),
+ ORPHAN_ITEM_KEY: reflect.TypeOf(Empty{}),
+ PERSISTENT_ITEM_KEY: reflect.TypeOf(DevStats{}),
+ QGROUP_RELATION_KEY: reflect.TypeOf(Empty{}),
+ ROOT_BACKREF_KEY: reflect.TypeOf(RootRef{}),
+ ROOT_ITEM_KEY: reflect.TypeOf(Root{}),
+ ROOT_REF_KEY: reflect.TypeOf(RootRef{}),
+ SHARED_BLOCK_REF_KEY: reflect.TypeOf(Empty{}),
+ SHARED_DATA_REF_KEY: reflect.TypeOf(SharedDataRef{}),
+ TREE_BLOCK_REF_KEY: reflect.TypeOf(Empty{}),
+ UUID_RECEIVED_SUBVOL_KEY: reflect.TypeOf(UUIDMap{}),
+ UUID_SUBVOL_KEY: reflect.TypeOf(UUIDMap{}),
+ XATTR_ITEM_KEY: reflect.TypeOf(DirEntries{}),
+}
+var untypedObjID2gotype = map[internal.ObjID]reflect.Type{
+ internal.FREE_SPACE_OBJECTID: reflect.TypeOf(FreeSpaceHeader{}),
+}
+
+func (BlockGroup) isItem() {}
+func (Chunk) isItem() {}
+func (Dev) isItem() {}
+func (DevExtent) isItem() {}
+func (DevStats) isItem() {}
+func (DirEntries) isItem() {}
+func (Empty) isItem() {}
+func (Extent) isItem() {}
+func (ExtentCSum) isItem() {}
+func (ExtentDataRef) isItem() {}
+func (FileExtent) isItem() {}
+func (FreeSpaceBitmap) isItem() {}
+func (FreeSpaceHeader) isItem() {}
+func (FreeSpaceInfo) isItem() {}
+func (Inode) isItem() {}
+func (InodeRef) isItem() {}
+func (Metadata) isItem() {}
+func (Root) isItem() {}
+func (RootRef) isItem() {}
+func (SharedDataRef) isItem() {}
+func (UUIDMap) isItem() {}
diff --git a/lib/btrfs/btrfssum/csum.go b/lib/btrfs/btrfssum/csum.go
new file mode 100644
index 0000000..11a8385
--- /dev/null
+++ b/lib/btrfs/btrfssum/csum.go
@@ -0,0 +1,78 @@
+package btrfssum
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "fmt"
+ "hash/crc32"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+type CSum [0x20]byte
+
+func (csum CSum) String() string {
+ return hex.EncodeToString(csum[:])
+}
+
+func (csum CSum) Fmt(typ CSumType) string {
+ return hex.EncodeToString(csum[:typ.Size()])
+}
+
+func (csum CSum) Format(f fmt.State, verb rune) {
+ util.FormatByteArrayStringer(csum, csum[:], f, verb)
+}
+
+type CSumType uint16
+
+const (
+ TYPE_CRC32 = CSumType(iota)
+ TYPE_XXHASH
+ TYPE_SHA256
+ TYPE_BLAKE2
+)
+
+func (typ CSumType) String() string {
+ names := map[CSumType]string{
+ TYPE_CRC32: "crc32c",
+ TYPE_XXHASH: "xxhash64",
+ TYPE_SHA256: "sha256",
+ TYPE_BLAKE2: "blake2",
+ }
+ if name, ok := names[typ]; ok {
+ return name
+ }
+ return fmt.Sprintf("%d", typ)
+}
+
+func (typ CSumType) Size() int {
+ sizes := map[CSumType]int{
+ TYPE_CRC32: 4,
+ TYPE_XXHASH: 8,
+ TYPE_SHA256: 32,
+ TYPE_BLAKE2: 32,
+ }
+ if size, ok := sizes[typ]; ok {
+ return size
+ }
+ return len(CSum{})
+}
+
+func (typ CSumType) Sum(data []byte) (CSum, error) {
+ switch typ {
+ case TYPE_CRC32:
+ crc := crc32.Update(0, crc32.MakeTable(crc32.Castagnoli), data)
+
+ var ret CSum
+ binary.LittleEndian.PutUint32(ret[:], crc)
+ return ret, nil
+ case TYPE_XXHASH:
+ panic("not implemented")
+ case TYPE_SHA256:
+ panic("not implemented")
+ case TYPE_BLAKE2:
+ panic("not implemented")
+ default:
+ return CSum{}, fmt.Errorf("unknown checksum type: %v", typ)
+ }
+}
diff --git a/lib/btrfs/btrfssum/csum_test.go b/lib/btrfs/btrfssum/csum_test.go
new file mode 100644
index 0000000..755ecc1
--- /dev/null
+++ b/lib/btrfs/btrfssum/csum_test.go
@@ -0,0 +1,35 @@
+package btrfssum_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
+)
+
+func TestCSumFormat(t *testing.T) {
+ t.Parallel()
+ type TestCase struct {
+ InputSum btrfssum.CSum
+ InputFmt string
+ Output string
+ }
+ csum := btrfssum.CSum{0xbd, 0x7b, 0x41, 0xf4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
+ testcases := map[string]TestCase{
+ "s": {InputSum: csum, InputFmt: "%s", Output: "bd7b41f400000000000000000000000000000000000000000000000000000000"},
+ "x": {InputSum: csum, InputFmt: "%x", Output: "bd7b41f400000000000000000000000000000000000000000000000000000000"},
+ "v": {InputSum: csum, InputFmt: "%v", Output: "bd7b41f400000000000000000000000000000000000000000000000000000000"},
+ "70s": {InputSum: csum, InputFmt: "|% 70s", Output: "| bd7b41f400000000000000000000000000000000000000000000000000000000"},
+ "#180v": {InputSum: csum, InputFmt: "%#180v", Output: " btrfssum.CSum{0xbd, 0x7b, 0x41, 0xf4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}"},
+ }
+ for tcName, tc := range testcases {
+ tc := tc
+ t.Run(tcName, func(t *testing.T) {
+ t.Parallel()
+ actual := fmt.Sprintf(tc.InputFmt, tc.InputSum)
+ assert.Equal(t, tc.Output, actual)
+ })
+ }
+}
diff --git a/lib/btrfs/btrfsvol/addr.go b/lib/btrfs/btrfsvol/addr.go
new file mode 100644
index 0000000..6c67826
--- /dev/null
+++ b/lib/btrfs/btrfsvol/addr.go
@@ -0,0 +1,47 @@
+package btrfsvol
+
+import (
+ "fmt"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+type (
+ PhysicalAddr int64
+ LogicalAddr int64
+ AddrDelta int64
+)
+
+func formatAddr(addr int64, f fmt.State, verb rune) {
+ switch verb {
+ case 'v', 's', 'q':
+ str := fmt.Sprintf("%#016x", addr)
+ fmt.Fprintf(f, util.FmtStateString(f, verb), str)
+ default:
+ fmt.Fprintf(f, util.FmtStateString(f, verb), addr)
+ }
+}
+
+func (a PhysicalAddr) Format(f fmt.State, verb rune) { formatAddr(int64(a), f, verb) }
+func (a LogicalAddr) Format(f fmt.State, verb rune) { formatAddr(int64(a), f, verb) }
+func (d AddrDelta) Format(f fmt.State, verb rune) { formatAddr(int64(d), f, verb) }
+
+func (a PhysicalAddr) Sub(b PhysicalAddr) AddrDelta { return AddrDelta(a - b) }
+func (a LogicalAddr) Sub(b LogicalAddr) AddrDelta { return AddrDelta(a - b) }
+
+func (a PhysicalAddr) Add(b AddrDelta) PhysicalAddr { return a + PhysicalAddr(b) }
+func (a LogicalAddr) Add(b AddrDelta) LogicalAddr { return a + LogicalAddr(b) }
+
+type DeviceID uint64
+
+type QualifiedPhysicalAddr struct {
+ Dev DeviceID
+ Addr PhysicalAddr
+}
+
+func (a QualifiedPhysicalAddr) Cmp(b QualifiedPhysicalAddr) int {
+ if d := int(a.Dev - b.Dev); d != 0 {
+ return d
+ }
+ return int(a.Addr - b.Addr)
+}
diff --git a/lib/btrfs/btrfsvol/addr_test.go b/lib/btrfs/btrfsvol/addr_test.go
new file mode 100644
index 0000000..aae6378
--- /dev/null
+++ b/lib/btrfs/btrfsvol/addr_test.go
@@ -0,0 +1,36 @@
+package btrfsvol_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+)
+
+func TestAddrFormat(t *testing.T) {
+ t.Parallel()
+ type TestCase struct {
+ InputAddr btrfsvol.LogicalAddr
+ InputFmt string
+ Output string
+ }
+ addr := btrfsvol.LogicalAddr(0x3a41678000)
+ testcases := map[string]TestCase{
+ "v": {InputAddr: addr, InputFmt: "%v", Output: "0x0000003a41678000"},
+ "s": {InputAddr: addr, InputFmt: "%s", Output: "0x0000003a41678000"},
+ "q": {InputAddr: addr, InputFmt: "%q", Output: `"0x0000003a41678000"`},
+ "x": {InputAddr: addr, InputFmt: "%x", Output: "3a41678000"},
+ "d": {InputAddr: addr, InputFmt: "%d", Output: "250205405184"},
+ "neg": {InputAddr: -1, InputFmt: "%v", Output: "-0x000000000000001"},
+ }
+ for tcName, tc := range testcases {
+ tc := tc
+ t.Run(tcName, func(t *testing.T) {
+ t.Parallel()
+ actual := fmt.Sprintf(tc.InputFmt, tc.InputAddr)
+ assert.Equal(t, tc.Output, actual)
+ })
+ }
+}
diff --git a/lib/btrfs/btrfsvol/blockgroupflags.go b/lib/btrfs/btrfsvol/blockgroupflags.go
new file mode 100644
index 0000000..1a194d7
--- /dev/null
+++ b/lib/btrfs/btrfsvol/blockgroupflags.go
@@ -0,0 +1,51 @@
+package btrfsvol
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+type BlockGroupFlags uint64
+
+const (
+ BLOCK_GROUP_DATA = BlockGroupFlags(1 << iota)
+ BLOCK_GROUP_SYSTEM
+ BLOCK_GROUP_METADATA
+ BLOCK_GROUP_RAID0
+ BLOCK_GROUP_RAID1
+ BLOCK_GROUP_DUP
+ BLOCK_GROUP_RAID10
+ BLOCK_GROUP_RAID5
+ BLOCK_GROUP_RAID6
+ BLOCK_GROUP_RAID1C3
+ BLOCK_GROUP_RAID1C4
+
+ BLOCK_GROUP_RAID_MASK = (BLOCK_GROUP_RAID1 | BLOCK_GROUP_DUP | BLOCK_GROUP_RAID10 | BLOCK_GROUP_RAID5 | BLOCK_GROUP_RAID6 | BLOCK_GROUP_RAID1C3 | BLOCK_GROUP_RAID1C4)
+)
+
+var blockGroupFlagNames = []string{
+ "DATA",
+ "SYSTEM",
+ "METADATA",
+
+ "RAID0",
+ "RAID1",
+ "DUP",
+ "RAID10",
+ "RAID5",
+ "RAID6",
+ "RAID1C3",
+ "RAID1C4",
+}
+
+func (f BlockGroupFlags) Has(req BlockGroupFlags) bool { return f&req == req }
+func (f BlockGroupFlags) String() string {
+ ret := util.BitfieldString(f, blockGroupFlagNames, util.HexNone)
+ if f&BLOCK_GROUP_RAID_MASK == 0 {
+ if ret == "" {
+ ret = "single"
+ } else {
+ ret += "|single"
+ }
+ }
+ return ret
+}
diff --git a/lib/btrfs/btrfsvol/chunk.go b/lib/btrfs/btrfsvol/chunk.go
new file mode 100644
index 0000000..6aea483
--- /dev/null
+++ b/lib/btrfs/btrfsvol/chunk.go
@@ -0,0 +1,96 @@
+package btrfsvol
+
+import (
+ "fmt"
+ "sort"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+// logical => []physical
+type chunkMapping struct {
+ LAddr LogicalAddr
+ PAddrs []QualifiedPhysicalAddr
+ Size AddrDelta
+ SizeLocked bool
+ Flags *BlockGroupFlags
+}
+
+type ChunkMapping = chunkMapping
+
+// return -1 if 'a' is wholly to the left of 'b'
+// return 0 if there is some overlap between 'a' and 'b'
+// return 1 if 'a' is wholly to the right of 'b'
+func (a chunkMapping) cmpRange(b chunkMapping) int {
+ switch {
+ case a.LAddr.Add(a.Size) <= b.LAddr:
+ // 'a' is wholly to the left of 'b'.
+ return -1
+ case b.LAddr.Add(b.Size) <= a.LAddr:
+ // 'a' is wholly to the right of 'b'.
+ return 1
+ default:
+ // There is some overlap.
+ return 0
+ }
+}
+
+func (a chunkMapping) union(rest ...chunkMapping) (chunkMapping, error) {
+ // sanity check
+ for _, chunk := range rest {
+ if a.cmpRange(chunk) != 0 {
+ return chunkMapping{}, fmt.Errorf("chunks don't overlap")
+ }
+ }
+ chunks := append([]chunkMapping{a}, rest...)
+ // figure out the logical range (.LAddr and .Size)
+ beg := chunks[0].LAddr
+ end := chunks[0].LAddr.Add(chunks[0].Size)
+ for _, chunk := range chunks {
+ beg = util.Min(beg, chunk.LAddr)
+ end = util.Max(end, chunk.LAddr.Add(chunk.Size))
+ }
+ ret := chunkMapping{
+ LAddr: beg,
+ Size: end.Sub(beg),
+ }
+ for _, chunk := range chunks {
+ if chunk.SizeLocked {
+ ret.SizeLocked = true
+ if ret.Size != chunk.Size {
+ return chunkMapping{}, fmt.Errorf("member chunk has locked size=%v, but union would have size=%v",
+ chunk.Size, ret.Size)
+ }
+ }
+ }
+ // figure out the physical stripes (.PAddrs)
+ paddrs := make(map[QualifiedPhysicalAddr]struct{})
+ for _, chunk := range chunks {
+ offsetWithinRet := chunk.LAddr.Sub(ret.LAddr)
+ for _, stripe := range chunk.PAddrs {
+ paddrs[QualifiedPhysicalAddr{
+ Dev: stripe.Dev,
+ Addr: stripe.Addr.Add(-offsetWithinRet),
+ }] = struct{}{}
+ }
+ }
+ ret.PAddrs = util.MapKeys(paddrs)
+ sort.Slice(ret.PAddrs, func(i, j int) bool {
+ return ret.PAddrs[i].Cmp(ret.PAddrs[j]) < 0
+ })
+ // figure out the flags (.Flags)
+ for _, chunk := range chunks {
+ if chunk.Flags == nil {
+ continue
+ }
+ if ret.Flags == nil {
+ val := *chunk.Flags
+ ret.Flags = &val
+ }
+ if *ret.Flags != *chunk.Flags {
+ return ret, fmt.Errorf("mismatch flags: %v != %v", *ret.Flags, *chunk.Flags)
+ }
+ }
+ // done
+ return ret, nil
+}
diff --git a/lib/btrfs/btrfsvol/devext.go b/lib/btrfs/btrfsvol/devext.go
new file mode 100644
index 0000000..43c6255
--- /dev/null
+++ b/lib/btrfs/btrfsvol/devext.go
@@ -0,0 +1,89 @@
+package btrfsvol
+
+import (
+ "fmt"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+// physical => logical
+type devextMapping struct {
+ PAddr PhysicalAddr
+ LAddr LogicalAddr
+ Size AddrDelta
+ SizeLocked bool
+ Flags *BlockGroupFlags
+}
+
+// return -1 if 'a' is wholly to the left of 'b'
+// return 0 if there is some overlap between 'a' and 'b'
+// return 1 if 'a' is wholly to the right of 'b'
+func (a devextMapping) cmpRange(b devextMapping) int {
+ switch {
+ case a.PAddr.Add(a.Size) <= b.PAddr:
+ // 'a' is wholly to the left of 'b'.
+ return -1
+ case b.PAddr.Add(b.Size) <= a.PAddr:
+ // 'a' is wholly to the right of 'b'.
+ return 1
+ default:
+ // There is some overlap.
+ return 0
+ }
+}
+
+func (a devextMapping) union(rest ...devextMapping) (devextMapping, error) {
+ // sanity check
+ for _, ext := range rest {
+ if a.cmpRange(ext) != 0 {
+ return devextMapping{}, fmt.Errorf("devexts don't overlap")
+ }
+ }
+ exts := append([]devextMapping{a}, rest...)
+ // figure out the physical range (.PAddr and .Size)
+ beg := exts[0].PAddr
+ end := beg.Add(exts[0].Size)
+ for _, ext := range exts {
+ beg = util.Min(beg, ext.PAddr)
+ end = util.Max(end, ext.PAddr.Add(ext.Size))
+ }
+ ret := devextMapping{
+ PAddr: beg,
+ Size: end.Sub(beg),
+ }
+ for _, ext := range exts {
+ if ext.SizeLocked {
+ ret.SizeLocked = true
+ if ret.Size != ext.Size {
+ return devextMapping{}, fmt.Errorf("member devext has locked size=%v, but union would have size=%v",
+ ext.Size, ret.Size)
+ }
+ }
+ }
+ // figure out the logical range (.LAddr)
+ first := true
+ for _, ext := range exts {
+ offsetWithinRet := ext.PAddr.Sub(ret.PAddr)
+ laddr := ext.LAddr.Add(-offsetWithinRet)
+ if first {
+ ret.LAddr = laddr
+ } else if laddr != ret.LAddr {
+ return ret, fmt.Errorf("devexts don't agree on laddr: %v != %v", ret.LAddr, laddr)
+ }
+ }
+ // figure out the flags (.Flags)
+ for _, ext := range exts {
+ if ext.Flags == nil {
+ continue
+ }
+ if ret.Flags == nil {
+ val := *ext.Flags
+ ret.Flags = &val
+ }
+ if *ret.Flags != *ext.Flags {
+ return ret, fmt.Errorf("mismatch flags: %v != %v", *ret.Flags, *ext.Flags)
+ }
+ }
+ // done
+ return ret, nil
+}
diff --git a/lib/btrfs/btrfsvol/lvm.go b/lib/btrfs/btrfsvol/lvm.go
new file mode 100644
index 0000000..3b9ccf6
--- /dev/null
+++ b/lib/btrfs/btrfsvol/lvm.go
@@ -0,0 +1,355 @@
+package btrfsvol
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "reflect"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/rbtree"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+type LogicalVolume[PhysicalVolume util.File[PhysicalAddr]] struct {
+ name string
+
+ id2pv map[DeviceID]PhysicalVolume
+
+ logical2physical *rbtree.Tree[LogicalAddr, chunkMapping]
+ physical2logical map[DeviceID]*rbtree.Tree[PhysicalAddr, devextMapping]
+}
+
+var _ util.File[LogicalAddr] = (*LogicalVolume[util.File[PhysicalAddr]])(nil)
+
+func (lv *LogicalVolume[PhysicalVolume]) init() {
+ if lv.id2pv == nil {
+ lv.id2pv = make(map[DeviceID]PhysicalVolume)
+ }
+ if lv.logical2physical == nil {
+ lv.logical2physical = &rbtree.Tree[LogicalAddr, chunkMapping]{
+ KeyFn: func(chunk chunkMapping) LogicalAddr {
+ return chunk.LAddr
+ },
+ }
+ }
+ if lv.physical2logical == nil {
+ lv.physical2logical = make(map[DeviceID]*rbtree.Tree[PhysicalAddr, devextMapping], len(lv.id2pv))
+ }
+ for devid := range lv.id2pv {
+ if _, ok := lv.physical2logical[devid]; !ok {
+ lv.physical2logical[devid] = &rbtree.Tree[PhysicalAddr, devextMapping]{
+ KeyFn: func(ext devextMapping) PhysicalAddr {
+ return ext.PAddr
+ },
+ }
+ }
+ }
+}
+
+func (lv *LogicalVolume[PhysicalVolume]) SetName(name string) {
+ lv.name = name
+}
+
+func (lv *LogicalVolume[PhysicalVolume]) Name() string {
+ return lv.name
+}
+
+func (lv *LogicalVolume[PhysicalVolume]) Size() (LogicalAddr, error) {
+ lv.init()
+ lastChunk := lv.logical2physical.Max()
+ if lastChunk == nil {
+ return 0, nil
+ }
+ return lastChunk.Value.LAddr.Add(lastChunk.Value.Size), nil
+}
+
+func (lv *LogicalVolume[PhysicalVolume]) AddPhysicalVolume(id DeviceID, dev PhysicalVolume) error {
+ lv.init()
+ if other, exists := lv.id2pv[id]; exists {
+ return fmt.Errorf("(%p).AddPhysicalVolume: cannot add physical volume %q: already have physical volume %q with id=%v",
+ lv, dev.Name(), other.Name(), id)
+ }
+ lv.id2pv[id] = dev
+ lv.physical2logical[id] = &rbtree.Tree[PhysicalAddr, devextMapping]{
+ KeyFn: func(ext devextMapping) PhysicalAddr {
+ return ext.PAddr
+ },
+ }
+ return nil
+}
+
+func (lv *LogicalVolume[PhysicalVolume]) PhysicalVolumes() map[DeviceID]PhysicalVolume {
+ dup := make(map[DeviceID]PhysicalVolume, len(lv.id2pv))
+ for k, v := range lv.id2pv {
+ dup[k] = v
+ }
+ return dup
+}
+
+func (lv *LogicalVolume[PhysicalVolume]) ClearMappings() {
+ lv.logical2physical = nil
+ lv.physical2logical = nil
+}
+
+type Mapping struct {
+ LAddr LogicalAddr
+ PAddr QualifiedPhysicalAddr
+ Size AddrDelta
+ SizeLocked bool
+ Flags *BlockGroupFlags
+}
+
+func (lv *LogicalVolume[PhysicalVolume]) AddMapping(m Mapping) error {
+ lv.init()
+ // sanity check
+ if _, haveDev := lv.id2pv[m.PAddr.Dev]; !haveDev {
+ return fmt.Errorf("(%p).AddMapping: do not have a physical volume with id=%v",
+ lv, m.PAddr.Dev)
+ }
+
+ // logical2physical
+ newChunk := chunkMapping{
+ LAddr: m.LAddr,
+ PAddrs: []QualifiedPhysicalAddr{m.PAddr},
+ Size: m.Size,
+ SizeLocked: m.SizeLocked,
+ Flags: m.Flags,
+ }
+ logicalOverlaps := lv.logical2physical.SearchRange(newChunk.cmpRange)
+ var err error
+ newChunk, err = newChunk.union(logicalOverlaps...)
+ if err != nil {
+ return fmt.Errorf("(%p).AddMapping: %w", lv, err)
+ }
+
+ // physical2logical
+ newExt := devextMapping{
+ PAddr: m.PAddr.Addr,
+ LAddr: m.LAddr,
+ Size: m.Size,
+ SizeLocked: m.SizeLocked,
+ Flags: m.Flags,
+ }
+ physicalOverlaps := lv.physical2logical[m.PAddr.Dev].SearchRange(newExt.cmpRange)
+ newExt, err = newExt.union(physicalOverlaps...)
+ if err != nil {
+ return fmt.Errorf("(%p).AddMapping: %w", lv, err)
+ }
+
+ // optimize
+ if len(logicalOverlaps) == 1 && reflect.DeepEqual(newChunk, logicalOverlaps[0]) &&
+ len(physicalOverlaps) == 1 && reflect.DeepEqual(newExt, physicalOverlaps[0]) {
+ return nil
+ }
+
+ // logical2physical
+ for _, chunk := range logicalOverlaps {
+ lv.logical2physical.Delete(chunk.LAddr)
+ }
+ lv.logical2physical.Insert(newChunk)
+
+ // physical2logical
+ for _, ext := range physicalOverlaps {
+ lv.physical2logical[m.PAddr.Dev].Delete(ext.PAddr)
+ }
+ lv.physical2logical[m.PAddr.Dev].Insert(newExt)
+
+ // sanity check
+ //
+ // This is in theory unnecessary, but that assumes that I
+ // made no mistakes in my algorithm above.
+ if os.Getenv("PARANOID") != "" {
+ if err := lv.fsck(); err != nil {
+ return err
+ }
+ }
+
+ // done
+ return nil
+}
+
+func (lv *LogicalVolume[PhysicalVolume]) fsck() error {
+ physical2logical := make(map[DeviceID]*rbtree.Tree[PhysicalAddr, devextMapping])
+ if err := lv.logical2physical.Walk(func(node *rbtree.Node[chunkMapping]) error {
+ chunk := node.Value
+ for _, stripe := range chunk.PAddrs {
+ if _, devOK := lv.id2pv[stripe.Dev]; !devOK {
+ return fmt.Errorf("(%p).fsck: chunk references physical volume %v which does not exist",
+ lv, stripe.Dev)
+ }
+ if _, exists := physical2logical[stripe.Dev]; !exists {
+ physical2logical[stripe.Dev] = &rbtree.Tree[PhysicalAddr, devextMapping]{
+ KeyFn: func(ext devextMapping) PhysicalAddr {
+ return ext.PAddr
+ },
+ }
+ }
+ physical2logical[stripe.Dev].Insert(devextMapping{
+ PAddr: stripe.Addr,
+ LAddr: chunk.LAddr,
+ Size: chunk.Size,
+ Flags: chunk.Flags,
+ })
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+
+ if len(lv.physical2logical) != len(physical2logical) {
+ return fmt.Errorf("(%p).fsck: skew between chunk tree and devext tree",
+ lv)
+ }
+ for devid := range lv.physical2logical {
+ if !lv.physical2logical[devid].Equal(physical2logical[devid]) {
+ return fmt.Errorf("(%p).fsck: skew between chunk tree and devext tree",
+ lv)
+ }
+ }
+
+ return nil
+}
+
+func (lv *LogicalVolume[PhysicalVolume]) Mappings() []Mapping {
+ var ret []Mapping
+ _ = lv.logical2physical.Walk(func(node *rbtree.Node[chunkMapping]) error {
+ chunk := node.Value
+ var flags *BlockGroupFlags
+ if chunk.Flags != nil {
+ val := *chunk.Flags
+ flags = &val
+ }
+ for _, slice := range chunk.PAddrs {
+ ret = append(ret, Mapping{
+ LAddr: chunk.LAddr,
+ PAddr: slice,
+ Size: chunk.Size,
+ Flags: flags,
+ })
+ }
+ return nil
+ })
+ return ret
+}
+
+func (lv *LogicalVolume[PhysicalVolume]) Resolve(laddr LogicalAddr) (paddrs map[QualifiedPhysicalAddr]struct{}, maxlen AddrDelta) {
+ node := lv.logical2physical.Search(func(chunk chunkMapping) int {
+ return chunkMapping{LAddr: laddr, Size: 1}.cmpRange(chunk)
+ })
+ if node == nil {
+ return nil, 0
+ }
+
+ chunk := node.Value
+
+ offsetWithinChunk := laddr.Sub(chunk.LAddr)
+ paddrs = make(map[QualifiedPhysicalAddr]struct{})
+ maxlen = chunk.Size - offsetWithinChunk
+ for _, stripe := range chunk.PAddrs {
+ paddrs[QualifiedPhysicalAddr{
+ Dev: stripe.Dev,
+ Addr: stripe.Addr.Add(offsetWithinChunk),
+ }] = struct{}{}
+ }
+
+ return paddrs, maxlen
+}
+
+func (lv *LogicalVolume[PhysicalVolume]) ResolveAny(laddr LogicalAddr, size AddrDelta) (LogicalAddr, QualifiedPhysicalAddr) {
+ node := lv.logical2physical.Search(func(chunk chunkMapping) int {
+ return chunkMapping{LAddr: laddr, Size: size}.cmpRange(chunk)
+ })
+ if node == nil {
+ return -1, QualifiedPhysicalAddr{0, -1}
+ }
+ return node.Value.LAddr, node.Value.PAddrs[0]
+}
+
+func (lv *LogicalVolume[PhysicalVolume]) UnResolve(paddr QualifiedPhysicalAddr) LogicalAddr {
+ node := lv.physical2logical[paddr.Dev].Search(func(ext devextMapping) int {
+ return devextMapping{PAddr: paddr.Addr, Size: 1}.cmpRange(ext)
+ })
+ if node == nil {
+ return -1
+ }
+
+ ext := node.Value
+
+ offsetWithinExt := paddr.Addr.Sub(ext.PAddr)
+ return ext.LAddr.Add(offsetWithinExt)
+}
+
+func (lv *LogicalVolume[PhysicalVolume]) ReadAt(dat []byte, laddr LogicalAddr) (int, error) {
+ done := 0
+ for done < len(dat) {
+ n, err := lv.maybeShortReadAt(dat[done:], laddr+LogicalAddr(done))
+ done += n
+ if err != nil {
+ return done, err
+ }
+ }
+ return done, nil
+}
+
+func (lv *LogicalVolume[PhysicalVolume]) maybeShortReadAt(dat []byte, laddr LogicalAddr) (int, error) {
+ paddrs, maxlen := lv.Resolve(laddr)
+ if len(paddrs) == 0 {
+ return 0, fmt.Errorf("read: could not map logical address %v", laddr)
+ }
+ if AddrDelta(len(dat)) > maxlen {
+ dat = dat[:maxlen]
+ }
+
+ buf := make([]byte, len(dat))
+ first := true
+ for paddr := range paddrs {
+ dev, ok := lv.id2pv[paddr.Dev]
+ if !ok {
+ return 0, fmt.Errorf("device=%v does not exist", paddr.Dev)
+ }
+ if _, err := dev.ReadAt(buf, paddr.Addr); err != nil {
+ return 0, fmt.Errorf("read device=%v paddr=%v: %w", paddr.Dev, paddr.Addr, err)
+ }
+ if first {
+ copy(dat, buf)
+ } else {
+ if !bytes.Equal(dat, buf) {
+ return 0, fmt.Errorf("inconsistent stripes at laddr=%v len=%v", laddr, len(dat))
+ }
+ }
+ }
+ return len(dat), nil
+}
+
+func (lv *LogicalVolume[PhysicalVolume]) WriteAt(dat []byte, laddr LogicalAddr) (int, error) {
+ done := 0
+ for done < len(dat) {
+ n, err := lv.maybeShortWriteAt(dat[done:], laddr+LogicalAddr(done))
+ done += n
+ if err != nil {
+ return done, err
+ }
+ }
+ return done, nil
+}
+
+func (lv *LogicalVolume[PhysicalVolume]) maybeShortWriteAt(dat []byte, laddr LogicalAddr) (int, error) {
+ paddrs, maxlen := lv.Resolve(laddr)
+ if len(paddrs) == 0 {
+ return 0, fmt.Errorf("write: could not map logical address %v", laddr)
+ }
+ if AddrDelta(len(dat)) > maxlen {
+ dat = dat[:maxlen]
+ }
+
+ for paddr := range paddrs {
+ dev, ok := lv.id2pv[paddr.Dev]
+ if !ok {
+ return 0, fmt.Errorf("device=%v does not exist", paddr.Dev)
+ }
+ if _, err := dev.WriteAt(dat, paddr.Addr); err != nil {
+ return 0, fmt.Errorf("write device=%v paddr=%v: %w", paddr.Dev, paddr.Addr, err)
+ }
+ }
+ return len(dat), nil
+}
diff --git a/lib/btrfs/internal/itemtype.go b/lib/btrfs/internal/itemtype.go
new file mode 100644
index 0000000..60731aa
--- /dev/null
+++ b/lib/btrfs/internal/itemtype.go
@@ -0,0 +1,77 @@
+// Code generated by Make. DO NOT EDIT.
+
+package internal
+
+import "fmt"
+
// ItemType is the type field of a btrfs key; together with the object ID
// and offset it identifies an item within a tree.
type ItemType uint8

// On-disk item-type codes (the *_KEY values of the btrfs format).
const (
	BLOCK_GROUP_ITEM_KEY = ItemType(192)
	CHUNK_ITEM_KEY = ItemType(228)
	DEV_EXTENT_KEY = ItemType(204)
	DEV_ITEM_KEY = ItemType(216)
	DIR_INDEX_KEY = ItemType(96)
	DIR_ITEM_KEY = ItemType(84)
	EXTENT_CSUM_KEY = ItemType(128)
	EXTENT_DATA_KEY = ItemType(108)
	EXTENT_DATA_REF_KEY = ItemType(178)
	EXTENT_ITEM_KEY = ItemType(168)
	FREE_SPACE_BITMAP_KEY = ItemType(200)
	FREE_SPACE_EXTENT_KEY = ItemType(199)
	FREE_SPACE_INFO_KEY = ItemType(198)
	INODE_ITEM_KEY = ItemType(1)
	INODE_REF_KEY = ItemType(12)
	METADATA_ITEM_KEY = ItemType(169)
	ORPHAN_ITEM_KEY = ItemType(48)
	PERSISTENT_ITEM_KEY = ItemType(249)
	QGROUP_RELATION_KEY = ItemType(246)
	ROOT_BACKREF_KEY = ItemType(144)
	ROOT_ITEM_KEY = ItemType(132)
	ROOT_REF_KEY = ItemType(156)
	SHARED_BLOCK_REF_KEY = ItemType(182)
	SHARED_DATA_REF_KEY = ItemType(184)
	TREE_BLOCK_REF_KEY = ItemType(176)
	UNTYPED_KEY = ItemType(0)
	UUID_RECEIVED_SUBVOL_KEY = ItemType(252)
	UUID_SUBVOL_KEY = ItemType(251)
	XATTR_ITEM_KEY = ItemType(24)
)
+
+func (t ItemType) String() string {
+ names := map[ItemType]string{
+ BLOCK_GROUP_ITEM_KEY: "BLOCK_GROUP_ITEM",
+ CHUNK_ITEM_KEY: "CHUNK_ITEM",
+ DEV_EXTENT_KEY: "DEV_EXTENT",
+ DEV_ITEM_KEY: "DEV_ITEM",
+ DIR_INDEX_KEY: "DIR_INDEX",
+ DIR_ITEM_KEY: "DIR_ITEM",
+ EXTENT_CSUM_KEY: "EXTENT_CSUM",
+ EXTENT_DATA_KEY: "EXTENT_DATA",
+ EXTENT_DATA_REF_KEY: "EXTENT_DATA_REF",
+ EXTENT_ITEM_KEY: "EXTENT_ITEM",
+ FREE_SPACE_BITMAP_KEY: "FREE_SPACE_BITMAP",
+ FREE_SPACE_EXTENT_KEY: "FREE_SPACE_EXTENT",
+ FREE_SPACE_INFO_KEY: "FREE_SPACE_INFO",
+ INODE_ITEM_KEY: "INODE_ITEM",
+ INODE_REF_KEY: "INODE_REF",
+ METADATA_ITEM_KEY: "METADATA_ITEM",
+ ORPHAN_ITEM_KEY: "ORPHAN_ITEM",
+ PERSISTENT_ITEM_KEY: "PERSISTENT_ITEM",
+ QGROUP_RELATION_KEY: "QGROUP_RELATION",
+ ROOT_BACKREF_KEY: "ROOT_BACKREF",
+ ROOT_ITEM_KEY: "ROOT_ITEM",
+ ROOT_REF_KEY: "ROOT_REF",
+ SHARED_BLOCK_REF_KEY: "SHARED_BLOCK_REF",
+ SHARED_DATA_REF_KEY: "SHARED_DATA_REF",
+ TREE_BLOCK_REF_KEY: "TREE_BLOCK_REF",
+ UNTYPED_KEY: "UNTYPED",
+ UUID_RECEIVED_SUBVOL_KEY: "UUID_KEY_RECEIVED_SUBVOL",
+ UUID_SUBVOL_KEY: "UUID_KEY_SUBVOL",
+ XATTR_ITEM_KEY: "XATTR_ITEM",
+ }
+ if name, ok := names[t]; ok {
+ return name
+ }
+ return fmt.Sprintf("%d", t)
+}
diff --git a/lib/btrfs/internal/misc.go b/lib/btrfs/internal/misc.go
new file mode 100644
index 0000000..fba1d38
--- /dev/null
+++ b/lib/btrfs/internal/misc.go
@@ -0,0 +1,37 @@
+package internal
+
+import (
+ "time"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
// Generation is a btrfs transaction generation number.
type Generation uint64

// Key uniquely identifies an item within a tree; items are sorted by
// (ObjectID, ItemType, Offset).  The bin tags give the on-disk layout.
type Key struct {
	ObjectID ObjID `bin:"off=0x0, siz=0x8"` // Each tree has its own set of Object IDs.
	ItemType ItemType `bin:"off=0x8, siz=0x1"`
	Offset uint64 `bin:"off=0x9, siz=0x8"` // The meaning depends on the item type.
	binstruct.End `bin:"off=0x11"`
}
+
+func (a Key) Cmp(b Key) int {
+ if d := util.CmpUint(a.ObjectID, b.ObjectID); d != 0 {
+ return d
+ }
+ if d := util.CmpUint(a.ItemType, b.ItemType); d != 0 {
+ return d
+ }
+ return util.CmpUint(a.Offset, b.Offset)
+}
+
// Time is the on-disk btrfs timestamp representation.
type Time struct {
	Sec int64 `bin:"off=0x0, siz=0x8"` // Number of seconds since 1970-01-01T00:00:00Z.
	NSec uint32 `bin:"off=0x8, siz=0x4"` // Number of nanoseconds since the beginning of the second.
	binstruct.End `bin:"off=0xc"`
}

// ToStd converts t to the standard library's time.Time.
func (t Time) ToStd() time.Time {
	return time.Unix(t.Sec, int64(t.NSec))
}
diff --git a/lib/btrfs/internal/objid.go b/lib/btrfs/internal/objid.go
new file mode 100644
index 0000000..8c9c002
--- /dev/null
+++ b/lib/btrfs/internal/objid.go
@@ -0,0 +1,144 @@
+package internal
+
+import (
+ "fmt"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
// ObjID is the object-ID field of a btrfs key.  Which IDs are meaningful
// depends on the tree; the "virtual" special IDs near the top of the
// 64-bit range are expressed relative to util.MaxUint64pp (2^64).
type ObjID uint64

const (
	// The IDs of the various trees
	ROOT_TREE_OBJECTID = ObjID(1) // holds pointers to all of the tree roots
	EXTENT_TREE_OBJECTID = ObjID(2) // stores information about which extents are in use, and reference counts
	CHUNK_TREE_OBJECTID = ObjID(3) // chunk tree stores translations from logical -> physical block numbering
	DEV_TREE_OBJECTID = ObjID(4) // stores info about which areas of a given device are in use; one per device
	FS_TREE_OBJECTID = ObjID(5) // one per subvolume, storing files and directories
	ROOT_TREE_DIR_OBJECTID = ObjID(6) // directory objectid inside the root tree
	CSUM_TREE_OBJECTID = ObjID(7) // holds checksums of all the data extents
	QUOTA_TREE_OBJECTID = ObjID(8)
	UUID_TREE_OBJECTID = ObjID(9) // for storing items that use the UUID_*_KEY
	FREE_SPACE_TREE_OBJECTID = ObjID(10) // tracks free space in block groups.
	BLOCK_GROUP_TREE_OBJECTID = ObjID(11) // hold the block group items.

	// Objects in the DEV_TREE
	DEV_STATS_OBJECTID = ObjID(0) // device stats in the device tree

	// ???
	BALANCE_OBJECTID = ObjID(util.MaxUint64pp - 4) // for storing balance parameters in the root tree
	ORPHAN_OBJECTID = ObjID(util.MaxUint64pp - 5) // orphan objectid for tracking unlinked/truncated files
	TREE_LOG_OBJECTID = ObjID(util.MaxUint64pp - 6) // does write ahead logging to speed up fsyncs
	TREE_LOG_FIXUP_OBJECTID = ObjID(util.MaxUint64pp - 7)
	TREE_RELOC_OBJECTID = ObjID(util.MaxUint64pp - 8) // space balancing
	DATA_RELOC_TREE_OBJECTID = ObjID(util.MaxUint64pp - 9)
	EXTENT_CSUM_OBJECTID = ObjID(util.MaxUint64pp - 10) // extent checksums all have this objectid
	FREE_SPACE_OBJECTID = ObjID(util.MaxUint64pp - 11) // For storing free space cache
	FREE_INO_OBJECTID = ObjID(util.MaxUint64pp - 12) // stores the inode number for the free-ino cache

	MULTIPLE_OBJECTIDS = ObjID(util.MaxUint64pp - 255) // dummy objectid represents multiple objectids

	// All files have objectids in this range.
	FIRST_FREE_OBJECTID = ObjID(256)
	LAST_FREE_OBJECTID = ObjID(util.MaxUint64pp - 256)

	FIRST_CHUNK_TREE_OBJECTID = ObjID(256)

	// Objects in the CHUNK_TREE
	DEV_ITEMS_OBJECTID = ObjID(1)

	// ???
	EMPTY_SUBVOL_DIR_OBJECTID = ObjID(2)
)
+
+func (id ObjID) Format(typ ItemType) string {
+ switch typ {
+ case PERSISTENT_ITEM_KEY:
+ names := map[ObjID]string{
+ DEV_STATS_OBJECTID: "DEV_STATS",
+ }
+ if name, ok := names[id]; ok {
+ return name
+ }
+ return fmt.Sprintf("%d", int64(id))
+ case DEV_EXTENT_KEY:
+ return fmt.Sprintf("%d", int64(id))
+ case QGROUP_RELATION_KEY:
+ return fmt.Sprintf("%d/%d",
+ uint64(id)>>48,
+ uint64(id)&((1<<48)-1))
+ case UUID_SUBVOL_KEY, UUID_RECEIVED_SUBVOL_KEY:
+ return fmt.Sprintf("%#016x", uint64(id))
+ case DEV_ITEM_KEY:
+ names := map[ObjID]string{
+ BALANCE_OBJECTID: "BALANCE",
+ ORPHAN_OBJECTID: "ORPHAN",
+ TREE_LOG_OBJECTID: "TREE_LOG",
+ TREE_LOG_FIXUP_OBJECTID: "TREE_LOG_FIXUP",
+ TREE_RELOC_OBJECTID: "TREE_RELOC",
+ DATA_RELOC_TREE_OBJECTID: "DATA_RELOC_TREE",
+ EXTENT_CSUM_OBJECTID: "EXTENT_CSUM",
+ FREE_SPACE_OBJECTID: "FREE_SPACE",
+ FREE_INO_OBJECTID: "FREE_INO",
+ MULTIPLE_OBJECTIDS: "MULTIPLE",
+
+ DEV_ITEMS_OBJECTID: "DEV_ITEMS",
+ }
+ if name, ok := names[id]; ok {
+ return name
+ }
+ return fmt.Sprintf("%d", int64(id))
+ case CHUNK_ITEM_KEY:
+ names := map[ObjID]string{
+ BALANCE_OBJECTID: "BALANCE",
+ ORPHAN_OBJECTID: "ORPHAN",
+ TREE_LOG_OBJECTID: "TREE_LOG",
+ TREE_LOG_FIXUP_OBJECTID: "TREE_LOG_FIXUP",
+ TREE_RELOC_OBJECTID: "TREE_RELOC",
+ DATA_RELOC_TREE_OBJECTID: "DATA_RELOC_TREE",
+ EXTENT_CSUM_OBJECTID: "EXTENT_CSUM",
+ FREE_SPACE_OBJECTID: "FREE_SPACE",
+ FREE_INO_OBJECTID: "FREE_INO",
+ MULTIPLE_OBJECTIDS: "MULTIPLE",
+
+ FIRST_CHUNK_TREE_OBJECTID: "FIRST_CHUNK_TREE",
+ }
+ if name, ok := names[id]; ok {
+ return name
+ }
+ return fmt.Sprintf("%d", int64(id))
+ default:
+ names := map[ObjID]string{
+ BALANCE_OBJECTID: "BALANCE",
+ ORPHAN_OBJECTID: "ORPHAN",
+ TREE_LOG_OBJECTID: "TREE_LOG",
+ TREE_LOG_FIXUP_OBJECTID: "TREE_LOG_FIXUP",
+ TREE_RELOC_OBJECTID: "TREE_RELOC",
+ DATA_RELOC_TREE_OBJECTID: "DATA_RELOC_TREE",
+ EXTENT_CSUM_OBJECTID: "EXTENT_CSUM",
+ FREE_SPACE_OBJECTID: "FREE_SPACE",
+ FREE_INO_OBJECTID: "FREE_INO",
+ MULTIPLE_OBJECTIDS: "MULTIPLE",
+
+ ROOT_TREE_OBJECTID: "ROOT_TREE",
+ EXTENT_TREE_OBJECTID: "EXTENT_TREE",
+ CHUNK_TREE_OBJECTID: "CHUNK_TREE",
+ DEV_TREE_OBJECTID: "DEV_TREE",
+ FS_TREE_OBJECTID: "FS_TREE",
+ ROOT_TREE_DIR_OBJECTID: "ROOT_TREE_DIR",
+ CSUM_TREE_OBJECTID: "CSUM_TREE",
+ QUOTA_TREE_OBJECTID: "QUOTA_TREE",
+ UUID_TREE_OBJECTID: "UUID_TREE",
+ FREE_SPACE_TREE_OBJECTID: "FREE_SPACE_TREE",
+ BLOCK_GROUP_TREE_OBJECTID: "BLOCK_GROUP_TREE",
+ }
+ if name, ok := names[id]; ok {
+ return name
+ }
+ return fmt.Sprintf("%d", int64(id))
+ }
+}
+
// String implements fmt.Stringer; it formats the ObjID without any
// item-type-specific interpretation.
func (id ObjID) String() string {
	return id.Format(UNTYPED_KEY)
}
diff --git a/lib/btrfs/io1_pv.go b/lib/btrfs/io1_pv.go
new file mode 100644
index 0000000..cababaf
--- /dev/null
+++ b/lib/btrfs/io1_pv.go
@@ -0,0 +1,96 @@
+package btrfs
+
+import (
+ "fmt"
+ "os"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
// Device is a single btrfs physical device: an *os.File plus cached
// superblock reads (invalidated never; a Device is assumed immutable
// once its superblocks have been read).
type Device struct {
	*os.File

	cacheSuperblocks []*util.Ref[btrfsvol.PhysicalAddr, Superblock]
	cacheSuperblock *util.Ref[btrfsvol.PhysicalAddr, Superblock]
}

// Device implements the physical-address file interface.
var _ util.File[btrfsvol.PhysicalAddr] = (*Device)(nil)
+
+func (dev Device) Size() (btrfsvol.PhysicalAddr, error) {
+ fi, err := dev.Stat()
+ if err != nil {
+ return 0, err
+ }
+ return btrfsvol.PhysicalAddr(fi.Size()), nil
+}
+
// ReadAt implements util.File[btrfsvol.PhysicalAddr] by delegating to
// the underlying os.File.
func (dev *Device) ReadAt(dat []byte, paddr btrfsvol.PhysicalAddr) (int, error) {
	return dev.File.ReadAt(dat, int64(paddr))
}

// WriteAt implements util.File[btrfsvol.PhysicalAddr] by delegating to
// the underlying os.File.
func (dev *Device) WriteAt(dat []byte, paddr btrfsvol.PhysicalAddr) (int, error) {
	return dev.File.WriteAt(dat, int64(paddr))
}
+
// SuperblockAddrs are the fixed physical addresses at which btrfs stores
// its (up to three) superblock copies; copies beyond the device's size
// simply don't exist.
var SuperblockAddrs = []btrfsvol.PhysicalAddr{
	0x00_0001_0000, // 64KiB
	0x00_0400_0000, // 64MiB
	0x40_0000_0000, // 256GiB
}
+
+func (dev *Device) Superblocks() ([]*util.Ref[btrfsvol.PhysicalAddr, Superblock], error) {
+ if dev.cacheSuperblocks != nil {
+ return dev.cacheSuperblocks, nil
+ }
+ superblockSize := btrfsvol.PhysicalAddr(binstruct.StaticSize(Superblock{}))
+
+ sz, err := dev.Size()
+ if err != nil {
+ return nil, err
+ }
+
+ var ret []*util.Ref[btrfsvol.PhysicalAddr, Superblock]
+ for i, addr := range SuperblockAddrs {
+ if addr+superblockSize <= sz {
+ superblock := &util.Ref[btrfsvol.PhysicalAddr, Superblock]{
+ File: dev,
+ Addr: addr,
+ }
+ if err := superblock.Read(); err != nil {
+ return nil, fmt.Errorf("superblock %v: %w", i, err)
+ }
+ ret = append(ret, superblock)
+ }
+ }
+ if len(ret) == 0 {
+ return nil, fmt.Errorf("no superblocks")
+ }
+ dev.cacheSuperblocks = ret
+ return ret, nil
+}
+
+func (dev *Device) Superblock() (*util.Ref[btrfsvol.PhysicalAddr, Superblock], error) {
+ if dev.cacheSuperblock != nil {
+ return dev.cacheSuperblock, nil
+ }
+ sbs, err := dev.Superblocks()
+ if err != nil {
+ return nil, err
+ }
+
+ for i, sb := range sbs {
+ if err := sb.Data.ValidateChecksum(); err != nil {
+ return nil, fmt.Errorf("superblock %v: %w", i, err)
+ }
+ if i > 0 {
+ if !sb.Data.Equal(sbs[0].Data) {
+ return nil, fmt.Errorf("superblock %v and superblock %v disagree", 0, i)
+ }
+ }
+ }
+
+ dev.cacheSuperblock = sbs[0]
+ return sbs[0], nil
+}
diff --git a/lib/btrfs/io2_lv.go b/lib/btrfs/io2_lv.go
new file mode 100644
index 0000000..c6ef9e0
--- /dev/null
+++ b/lib/btrfs/io2_lv.go
@@ -0,0 +1,187 @@
+package btrfs
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/datawire/dlib/derror"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
// FS is a multi-device btrfs filesystem: a logical volume over one or
// more Devices, plus cached superblocks.
type FS struct {
	// You should probably not access .LV directly, except when
	// implementing special things like fsck.
	LV btrfsvol.LogicalVolume[*Device]

	// Caches populated by Superblocks()/Superblock(); invalidated by
	// AddDevice.
	cacheSuperblocks []*util.Ref[btrfsvol.PhysicalAddr, Superblock]
	cacheSuperblock *util.Ref[btrfsvol.PhysicalAddr, Superblock]
}

// FS implements the logical-address file interface.
var _ util.File[btrfsvol.LogicalAddr] = (*FS)(nil)
+
+func (fs *FS) AddDevice(dev *Device) error {
+ sb, err := dev.Superblock()
+ if err != nil {
+ return err
+ }
+ if err := fs.LV.AddPhysicalVolume(sb.Data.DevItem.DevID, dev); err != nil {
+ return err
+ }
+ fs.cacheSuperblocks = nil
+ fs.cacheSuperblock = nil
+ if err := fs.initDev(sb); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (fs *FS) Name() string {
+ if name := fs.LV.Name(); name != "" {
+ return name
+ }
+ sb, err := fs.Superblock()
+ if err != nil {
+ return fmt.Sprintf("fs_uuid=%v", "(unreadable)")
+ }
+ name := fmt.Sprintf("fs_uuid=%v", sb.Data.FSUUID)
+ fs.LV.SetName(name)
+ return name
+}
+
// Size returns the size of the logical address space.
func (fs *FS) Size() (btrfsvol.LogicalAddr, error) {
	return fs.LV.Size()
}

// ReadAt implements util.File[btrfsvol.LogicalAddr] by delegating to the
// logical volume.
func (fs *FS) ReadAt(p []byte, off btrfsvol.LogicalAddr) (int, error) {
	return fs.LV.ReadAt(p, off)
}

// WriteAt implements util.File[btrfsvol.LogicalAddr] by delegating to
// the logical volume.
func (fs *FS) WriteAt(p []byte, off btrfsvol.LogicalAddr) (int, error) {
	return fs.LV.WriteAt(p, off)
}

// Resolve maps a logical address to the set of physical addresses that
// back it, along with the contiguously-mapped length.
func (fs *FS) Resolve(laddr btrfsvol.LogicalAddr) (paddrs map[btrfsvol.QualifiedPhysicalAddr]struct{}, maxlen btrfsvol.AddrDelta) {
	return fs.LV.Resolve(laddr)
}
+
+func (fs *FS) Superblocks() ([]*util.Ref[btrfsvol.PhysicalAddr, Superblock], error) {
+ if fs.cacheSuperblocks != nil {
+ return fs.cacheSuperblocks, nil
+ }
+ var ret []*util.Ref[btrfsvol.PhysicalAddr, Superblock]
+ devs := fs.LV.PhysicalVolumes()
+ if len(devs) == 0 {
+ return nil, fmt.Errorf("no devices")
+ }
+ for _, dev := range devs {
+ sbs, err := dev.Superblocks()
+ if err != nil {
+ return nil, fmt.Errorf("file %q: %w", dev.Name(), err)
+ }
+ ret = append(ret, sbs...)
+ }
+ fs.cacheSuperblocks = ret
+ return ret, nil
+}
+
// Superblock returns the filesystem's primary superblock, after
// checksum-validating every superblock copy on every device and checking
// that all copies agree.
func (fs *FS) Superblock() (*util.Ref[btrfsvol.PhysicalAddr, Superblock], error) {
	if fs.cacheSuperblock != nil {
		return fs.cacheSuperblock, nil
	}
	sbs, err := fs.Superblocks()
	if err != nil {
		return nil, err
	}
	if len(sbs) == 0 {
		return nil, fmt.Errorf("no superblocks")
	}

	// sbs is each device's superblocks concatenated in device order;
	// sbi counts the superblock index *within* the current device so
	// that error messages can say "file X superblock N".
	fname := ""
	sbi := 0
	for i, sb := range sbs {
		if sb.File.Name() != fname {
			// Crossed into the next device's superblocks.
			fname = sb.File.Name()
			sbi = 0
		} else {
			sbi++
		}

		if err := sb.Data.ValidateChecksum(); err != nil {
			return nil, fmt.Errorf("file %q superblock %v: %w", sb.File.Name(), sbi, err)
		}
		if i > 0 {
			// FIXME(lukeshu): This is probably wrong, but
			// lots of my multi-device code is probably
			// wrong.
			if !sb.Data.Equal(sbs[0].Data) {
				return nil, fmt.Errorf("file %q superblock %v and file %q superblock %v disagree",
					sbs[0].File.Name(), 0,
					sb.File.Name(), sbi)
			}
		}
	}

	fs.cacheSuperblock = sbs[0]
	return sbs[0], nil
}
+
+func (fs *FS) ReInit() error {
+ fs.LV.ClearMappings()
+ for _, dev := range fs.LV.PhysicalVolumes() {
+ sb, err := dev.Superblock()
+ if err != nil {
+ return fmt.Errorf("file %q: %w", dev.Name(), err)
+ }
+ if err := fs.initDev(sb); err != nil {
+ return fmt.Errorf("file %q: %w", dev.Name(), err)
+ }
+ }
+ return nil
+}
+
// initDev loads the logical→physical chunk mappings described by a
// device's superblock: first the bootstrap mappings from the embedded
// sys-chunk array (needed to be able to read the chunk tree at all),
// then the full set of CHUNK_ITEMs from the chunk tree itself.
func (fs *FS) initDev(sb *util.Ref[btrfsvol.PhysicalAddr, Superblock]) error {
	syschunks, err := sb.Data.ParseSysChunkArray()
	if err != nil {
		return err
	}
	for _, chunk := range syschunks {
		for _, mapping := range chunk.Chunk.Mappings(chunk.Key) {
			if err := fs.LV.AddMapping(mapping); err != nil {
				return err
			}
		}
	}
	if err := fs.TreeWalk(CHUNK_TREE_OBJECTID, TreeWalkHandler{
		Item: func(_ TreePath, item Item) error {
			// Only CHUNK_ITEMs carry mappings; skip DEV_ITEMs etc.
			if item.Head.Key.ItemType != btrfsitem.CHUNK_ITEM_KEY {
				return nil
			}
			for _, mapping := range item.Body.(btrfsitem.Chunk).Mappings(item.Head.Key) {
				if err := fs.LV.AddMapping(mapping); err != nil {
					return err
				}
			}
			return nil
		},
	}); err != nil {
		return err
	}
	return nil
}
+
+func (fs *FS) Close() error {
+ var errs derror.MultiError
+ for _, dev := range fs.LV.PhysicalVolumes() {
+ if err := dev.Close(); err != nil && err == nil {
+ errs = append(errs, err)
+ }
+ }
+ if errs != nil {
+ return errs
+ }
+ return nil
+}
+
+var _ io.Closer = (*FS)(nil)
diff --git a/lib/btrfs/io3_btree.go b/lib/btrfs/io3_btree.go
new file mode 100644
index 0000000..dbc2ac1
--- /dev/null
+++ b/lib/btrfs/io3_btree.go
@@ -0,0 +1,562 @@
+package btrfs
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ iofs "io/fs"
+ "strings"
+
+ "github.com/datawire/dlib/derror"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+// - The first element will always have an ItemIdx of -1.
+//
+// - For .Item() callbacks, the last element will always have a
+// NodeAddr of 0.
+//
+// For example, given the tree structure
+//
+// [superblock]
+// |
+// | <------------------------------------------ pathElem={idx:-1, addr:0x01, lvl:3}
+// |
+// +[0x01]-----------+
+// | lvl=3 |
+// +-+-+-+-+-+-+-+-+-+
+// |1|2|3|4|5|6|7|8|9|
+// +---+---+---+---+-+
+// |
+// | <------------------------------ pathElem={idx:8, addr:0x02, lvl:2}
+// |
+// +[0x02]-----------+
+// | lvl=2 |
+// +-+-+-+-+-+-+-+-+-+
+// |1|2|3|4|5|6|7|8|9|
+// +---+---+---+---+-+
+// |
+// | <-------------------- pathElem={idx:7, addr:0x03, lvl:1}
+// |
+// +[0x03]-----------+
+// | lvl=1 |
+// +-+-+-+-+-+-+-+-+-+
+// |1|2|3|4|5|6|7|8|9|
+// +---+---+---+---+-+
+// |
+// | <---------------- pathElem={idx:4, addr:0x04, lvl:0}
+// |
+// +[0x04]-----------+
+// | lvl=0 |
+// +-+-+-+-+-+-+-+-+-+
+// |1|2|3|4|5|6|7|8|9|
+// +---+---+---+---+-+
+// |
+// | <--------------- pathElem={idx:5, addr:0, lvl:0}
+// |
+// [item]
+//
+// the path would be
+//
+// {-1, 0x01, 3}→{8, 0x02, 2}→{7, 0x03, 1}→{4, 0x04, 0}→{2, 0, 0}
// TreePath is the sequence of KeyPointer hops from a tree's root node
// down to a specific item, as illustrated in the diagram above.
type TreePath []TreePathElem
+
// A TreePathElem essentially represents a KeyPointer: one hop in a
// TreePath.
type TreePathElem struct {
	// ItemIdx is the index of this KeyPointer in the parent Node;
	// or -1 if this is the root and there is no KeyPointer.
	ItemIdx int
	// NodeAddr is the address of the node that the KeyPointer
	// points at, or 0 if this is a leaf item and nothing is
	// being pointed at.
	NodeAddr btrfsvol.LogicalAddr
	// NodeLevel is the expected or actual level of the node at
	// NodeAddr.
	NodeLevel uint8
}
+
// writeNodeTo writes a human-readable "node:LEVEL@ADDR" description of
// the node this element points at.
func (elem TreePathElem) writeNodeTo(w io.Writer) {
	fmt.Fprintf(w, "node:%d@%v", elem.NodeLevel, elem.NodeAddr)
}
+
+func (path TreePath) String() string {
+ if len(path) == 0 {
+ return "(empty-path)"
+ }
+ var ret strings.Builder
+ path[0].writeNodeTo(&ret)
+ for _, elem := range path[1:] {
+ fmt.Fprintf(&ret, "[%v]", elem.ItemIdx)
+ if elem.NodeAddr != 0 {
+ ret.WriteString("->")
+ elem.writeNodeTo(&ret)
+ }
+ }
+ return ret.String()
+}
+
// A treeRoot is more-or-less a btrfsitem.Root, but simpler and
// generalized to also cover the trees whose roots are recorded directly
// in the superblock.
type treeRoot struct {
	TreeID ObjID
	RootNode btrfsvol.LogicalAddr
	Level uint8
	Generation Generation
}
+
// lookupTree resolves a tree ID to the location of its root node.  The
// handful of trees whose roots are recorded directly in the superblock
// are special-cased; every other tree is found via its ROOT_ITEM in the
// ROOT_TREE.
func (fs *FS) lookupTree(treeID ObjID) (*treeRoot, error) {
	sb, err := fs.Superblock()
	if err != nil {
		return nil, err
	}
	switch treeID {
	case ROOT_TREE_OBJECTID:
		return &treeRoot{
			TreeID: treeID,
			RootNode: sb.Data.RootTree,
			Level: sb.Data.RootLevel,
			Generation: sb.Data.Generation, // XXX: same generation as LOG_TREE?
		}, nil
	case CHUNK_TREE_OBJECTID:
		return &treeRoot{
			TreeID: treeID,
			RootNode: sb.Data.ChunkTree,
			Level: sb.Data.ChunkLevel,
			Generation: sb.Data.ChunkRootGeneration,
		}, nil
	case TREE_LOG_OBJECTID:
		return &treeRoot{
			TreeID: treeID,
			RootNode: sb.Data.LogTree,
			Level: sb.Data.LogLevel,
			Generation: sb.Data.Generation, // XXX: same generation as ROOT_TREE?
		}, nil
	case BLOCK_GROUP_TREE_OBJECTID:
		return &treeRoot{
			TreeID: treeID,
			RootNode: sb.Data.BlockGroupRoot,
			Level: sb.Data.BlockGroupRootLevel,
			Generation: sb.Data.BlockGroupRootGeneration,
		}, nil
	default:
		// Any other tree has a ROOT_ITEM in the root tree; the
		// search callback matches any offset for (treeID, ROOT_ITEM).
		rootItem, err := fs.TreeSearch(ROOT_TREE_OBJECTID, func(key Key) int {
			if key.ObjectID == treeID && key.ItemType == btrfsitem.ROOT_ITEM_KEY {
				return 0
			}
			return Key{
				ObjectID: treeID,
				ItemType: btrfsitem.ROOT_ITEM_KEY,
				Offset: 0,
			}.Cmp(key)
		})
		if err != nil {
			return nil, err
		}
		rootItemBody, ok := rootItem.Body.(btrfsitem.Root)
		if !ok {
			return nil, fmt.Errorf("malformed ROOT_ITEM for tree %v", treeID)
		}
		return &treeRoot{
			TreeID: treeID,
			RootNode: rootItemBody.ByteNr,
			Level: rootItemBody.Level,
			Generation: rootItemBody.Generation,
		}, nil
	}
}
+
// TreeWalkHandler is the set of callbacks invoked by TreeWalk; any of
// them may be nil.  Returning an error wrapping io/fs.SkipDir from a
// callback prunes the walk at that point instead of aborting it.
type TreeWalkHandler struct {
	// Callbacks for entire nodes
	PreNode func(TreePath) error
	Node func(TreePath, *util.Ref[btrfsvol.LogicalAddr, Node], error) error
	PostNode func(TreePath, *util.Ref[btrfsvol.LogicalAddr, Node]) error
	// Callbacks for items on internal nodes
	PreKeyPointer func(TreePath, KeyPointer) error
	PostKeyPointer func(TreePath, KeyPointer) error
	// Callbacks for items on leaf nodes
	Item func(TreePath, Item) error
}
+
+// The lifecycle of callbacks is:
+//
+// 001 .PreNode()
+// 002 (read node)
+// 003 .Node()
+// for item in node.items:
+// if internal:
+// 004 .PreKeyPointer()
+// 005 (recurse)
+// 006 .PostKeyPointer()
+// else:
+// 004 .Item()
+// 007 .PostNode()
+func (fs *FS) TreeWalk(treeID ObjID, cbs TreeWalkHandler) error {
+ rootInfo, err := fs.lookupTree(treeID)
+ if err != nil {
+ return err
+ }
+ path := TreePath{
+ TreePathElem{
+ ItemIdx: -1,
+ NodeAddr: rootInfo.RootNode,
+ NodeLevel: rootInfo.Level,
+ },
+ }
+ return fs.treeWalk(path, cbs)
+}
+
// treeWalk recursively walks the node that the last element of path
// points at, invoking the callbacks in the documented lifecycle order.
// A callback error wrapping io/fs.SkipDir prunes the walk at that point
// rather than aborting it.
func (fs *FS) treeWalk(path TreePath, cbs TreeWalkHandler) error {
	// NodeAddr==0 means "nothing is pointed at" (a leaf item).
	if path[len(path)-1].NodeAddr == 0 {
		return nil
	}

	if cbs.PreNode != nil {
		if err := cbs.PreNode(path); err != nil {
			if errors.Is(err, iofs.SkipDir) {
				return nil
			}
			return err
		}
	}
	node, err := fs.readNodeAtLevel(path[len(path)-1].NodeAddr, path[len(path)-1].NodeLevel)
	if node != nil && err == nil {
		// Record the node's actual level (it may differ from the
		// expected level we descended with).
		path[len(path)-1].NodeLevel = node.Data.Head.Level
	}
	if cbs.Node != nil {
		// The Node callback gets the read error (if any) and may
		// swallow or replace it.
		err = cbs.Node(path, node, err)
	}
	if err != nil {
		if errors.Is(err, iofs.SkipDir) {
			return nil
		}
		return fmt.Errorf("btrfs.FS.TreeWalk: %w", err)
	}
	if node != nil {
		// NOTE(review): itemPath := append(path, …) shares path's
		// backing array across iterations; this is safe only as
		// long as callbacks don't retain itemPath after returning —
		// confirm callers.
		for i, item := range node.Data.BodyInternal {
			itemPath := append(path, TreePathElem{
				ItemIdx: i,
				NodeAddr: item.BlockPtr,
				NodeLevel: node.Data.Head.Level - 1,
			})
			if cbs.PreKeyPointer != nil {
				if err := cbs.PreKeyPointer(itemPath, item); err != nil {
					if errors.Is(err, iofs.SkipDir) {
						continue
					}
					return err
				}
			}
			if err := fs.treeWalk(itemPath, cbs); err != nil {
				return err
			}
			if cbs.PostKeyPointer != nil {
				if err := cbs.PostKeyPointer(itemPath, item); err != nil {
					if errors.Is(err, iofs.SkipDir) {
						continue
					}
					return err
				}
			}
		}
		for i, item := range node.Data.BodyLeaf {
			if cbs.Item != nil {
				itemPath := append(path, TreePathElem{
					ItemIdx: i,
				})
				if err := cbs.Item(itemPath, item); err != nil {
					if errors.Is(err, iofs.SkipDir) {
						continue
					}
					return fmt.Errorf("btrfs.FS.TreeWalk: callback: %w", err)
				}
			}
		}
	}
	if cbs.PostNode != nil {
		if err := cbs.PostNode(path, node); err != nil {
			if errors.Is(err, iofs.SkipDir) {
				return nil
			}
			return err
		}
	}
	return nil
}
+
// treeSearch descends from the given tree root to a leaf item for which
// fn returns 0.  fn must be monotone over the tree's key order: positive
// for keys before the target, zero at the target, negative after
// (+ + + 0 - - -).  Returns io/fs.ErrNotExist if no such item exists;
// if several items match, which one is returned is unspecified.
func (fs *FS) treeSearch(treeRoot treeRoot, fn func(Key) int) (TreePath, *util.Ref[btrfsvol.LogicalAddr, Node], error) {
	path := TreePath{
		TreePathElem{
			ItemIdx: -1,
			NodeAddr: treeRoot.RootNode,
			NodeLevel: treeRoot.Level,
		},
	}
	for {
		if path[len(path)-1].NodeAddr == 0 {
			return nil, nil, iofs.ErrNotExist
		}
		node, err := fs.readNodeAtLevel(path[len(path)-1].NodeAddr, path[len(path)-1].NodeLevel)
		if err != nil {
			return nil, nil, err
		}
		// Record the node's actual level.
		path[len(path)-1].NodeLevel = node.Data.Head.Level

		if node.Data.Head.Level > 0 {
			// internal node

			// Search for the right-most node.Data.BodyInternal item for which
			// `fn(item.Key) >= 0`.
			//
			//    + + + + 0 - - - -
			//
			// There may or may not be a value that returns '0'.
			//
			// Implement this search as a binary search.
			lastGood := -1
			firstBad := len(node.Data.BodyInternal)
			for firstBad > lastGood+1 {
				midpoint := (lastGood + firstBad) / 2
				direction := fn(node.Data.BodyInternal[midpoint].Key)
				if direction < 0 {
					firstBad = midpoint
				} else {
					lastGood = midpoint
				}
			}
			if lastGood < 0 {
				return nil, nil, iofs.ErrNotExist
			}
			// Descend into that child.
			path = append(path, TreePathElem{
				ItemIdx: lastGood,
				NodeAddr: node.Data.BodyInternal[lastGood].BlockPtr,
				NodeLevel: node.Data.Head.Level - 1,
			})
		} else {
			// leaf node

			// Search for a member of node.Data.BodyLeaf for which
			// `fn(item.Head.Key) == 0`.
			//
			//    + + + + 0 - - - -
			//
			// Such an item might not exist; in this case, return nil/ErrNotExist.
			// Multiple such items might exist; in this case, it does not matter which
			// is returned.
			//
			// Implement this search as a binary search.
			beg := 0
			end := len(node.Data.BodyLeaf)
			for beg < end {
				midpoint := (beg + end) / 2
				direction := fn(node.Data.BodyLeaf[midpoint].Head.Key)
				switch {
				case direction < 0:
					end = midpoint
				case direction > 0:
					beg = midpoint + 1
				case direction == 0:
					path = append(path, TreePathElem{
						ItemIdx: midpoint,
					})
					return path, node, nil
				}
			}
			return nil, nil, iofs.ErrNotExist
		}
	}
}
+
// prev returns the path/node of the item immediately before the given
// path in tree order, or (nil, nil, nil) if the given item is the first
// in the tree.  node must be the node most recently read by the caller;
// it is re-read only when the path moves into a different node.
func (fs *FS) prev(path TreePath, node *util.Ref[btrfsvol.LogicalAddr, Node]) (TreePath, *util.Ref[btrfsvol.LogicalAddr, Node], error) {
	var err error
	// Work on a copy so the caller's path is left intact.
	path = append(TreePath(nil), path...)

	// go up: pop elements until one has a left sibling (ItemIdx >= 1).
	// The root's ItemIdx is -1, so running off the root means the input
	// was the first item in the tree.
	for path[len(path)-1].ItemIdx < 1 {
		path = path[:len(path)-1]
		if len(path) == 0 {
			return nil, nil, nil
		}
	}
	// go left
	path[len(path)-1].ItemIdx--
	if path[len(path)-1].NodeAddr != 0 {
		if node.Addr != path[len(path)-2].NodeAddr {
			node, err = fs.readNodeAtLevel(path[len(path)-2].NodeAddr, path[len(path)-2].NodeLevel)
			if err != nil {
				return nil, nil, err
			}
			path[len(path)-1].NodeAddr = node.Data.BodyInternal[path[len(path)-1].ItemIdx].BlockPtr
		}
	}
	// go down: descend to the *last* item at every level.
	for path[len(path)-1].NodeAddr != 0 {
		if node.Addr != path[len(path)-1].NodeAddr {
			node, err = fs.readNodeAtLevel(path[len(path)-1].NodeAddr, path[len(path)-1].NodeLevel)
			if err != nil {
				return nil, nil, err
			}
		}
		if node.Data.Head.Level > 0 {
			path = append(path, TreePathElem{
				ItemIdx: len(node.Data.BodyInternal) - 1,
				NodeAddr: node.Data.BodyInternal[len(node.Data.BodyInternal)-1].BlockPtr,
				NodeLevel: node.Data.Head.Level - 1,
			})
		} else {
			path = append(path, TreePathElem{
				ItemIdx: len(node.Data.BodyLeaf) - 1,
			})
		}
	}
	// return: make sure node is the leaf containing the result.
	if node.Addr != path[len(path)-2].NodeAddr {
		node, err = fs.readNodeAtLevel(path[len(path)-2].NodeAddr, path[len(path)-2].NodeLevel)
		if err != nil {
			return nil, nil, err
		}
	}
	return path, node, nil
}
+
+func (fs *FS) next(path TreePath, node *util.Ref[btrfsvol.LogicalAddr, Node]) (TreePath, *util.Ref[btrfsvol.LogicalAddr, Node], error) {
+ var err error
+ path = append(TreePath(nil), path...)
+
+ // go up
+ if node.Addr != path[len(path)-2].NodeAddr {
+ node, err = fs.readNodeAtLevel(path[len(path)-2].NodeAddr, path[len(path)-2].NodeLevel)
+ if err != nil {
+ return nil, nil, err
+ }
+ path[len(path)-2].NodeLevel = node.Data.Head.Level
+ }
+ for path[len(path)-1].ItemIdx+1 >= int(node.Data.Head.NumItems) {
+ path = path[:len(path)-1]
+ if len(path) == 1 {
+ return nil, nil, nil
+ }
+ if node.Addr != path[len(path)-2].NodeAddr {
+ node, err = fs.readNodeAtLevel(path[len(path)-2].NodeAddr, path[len(path)-2].NodeLevel)
+ if err != nil {
+ return nil, nil, err
+ }
+ path[len(path)-2].NodeLevel = node.Data.Head.Level
+ }
+ }
+ // go left
+ path[len(path)-1].ItemIdx++
+ if path[len(path)-1].NodeAddr != 0 {
+ if node.Addr != path[len(path)-2].NodeAddr {
+ node, err = fs.readNodeAtLevel(path[len(path)-2].NodeAddr, path[len(path)-2].NodeLevel)
+ if err != nil {
+ return nil, nil, err
+ }
+ path[len(path)-1].NodeAddr = node.Data.BodyInternal[path[len(path)-1].ItemIdx].BlockPtr
+ }
+ }
+ // go down
+ for path[len(path)-1].NodeAddr != 0 {
+ if node.Addr != path[len(path)-1].NodeAddr {
+ node, err = fs.readNodeAtLevel(path[len(path)-1].NodeAddr, path[len(path)-1].NodeLevel)
+ if err != nil {
+ return nil, nil, err
+ }
+ path[len(path)-1].NodeLevel = node.Data.Head.Level
+ }
+ if node.Data.Head.Level > 0 {
+ path = append(path, TreePathElem{
+ ItemIdx: 0,
+ NodeAddr: node.Data.BodyInternal[len(node.Data.BodyInternal)-1].BlockPtr,
+ NodeLevel: node.Data.Head.Level - 1,
+ })
+ } else {
+ path = append(path, TreePathElem{
+ ItemIdx: 0,
+ })
+ }
+ }
+ // return
+ if node.Addr != path[len(path)-2].NodeAddr {
+ node, err = fs.readNodeAtLevel(path[len(path)-2].NodeAddr, path[len(path)-2].NodeLevel)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ return path, node, nil
+}
+
+func (fs *FS) TreeSearch(treeID ObjID, fn func(Key) int) (Item, error) {
+ rootInfo, err := fs.lookupTree(treeID)
+ if err != nil {
+ return Item{}, err
+ }
+ path, node, err := fs.treeSearch(*rootInfo, fn)
+ if err != nil {
+ return Item{}, err
+ }
+ return node.Data.BodyLeaf[path[len(path)-1].ItemIdx], nil
+}
+
+func (fs *FS) TreeLookup(treeID ObjID, key Key) (Item, error) {
+ item, err := fs.TreeSearch(treeID, key.Cmp)
+ if err != nil {
+ err = fmt.Errorf("item with key=%v: %w", key, err)
+ }
+ return item, err
+}
+
// TreeSearchAll returns every item in the tree for which fn returns 0;
// fn must be monotone over the tree's key order (+ + + 0 - - -).  It
// finds one matching item with treeSearch and then scans outward in both
// directions until fn stops returning 0.
//
// If some items are able to be read, but there is an error reading the full set, then it might
// return *both* a list of items and an error.
//
// If no such item is found, an error that is io/fs.ErrNotExist is returned.
func (fs *FS) TreeSearchAll(treeID ObjID, fn func(Key) int) ([]Item, error) {
	rootInfo, err := fs.lookupTree(treeID)
	if err != nil {
		return nil, err
	}
	middlePath, middleNode, err := fs.treeSearch(*rootInfo, fn)
	if err != nil {
		return nil, err
	}
	middleItem := middleNode.Data.BodyLeaf[middlePath[len(middlePath)-1].ItemIdx]

	var ret = []Item{middleItem}
	var errs derror.MultiError
	// Scan backward from the middle item; items are collected in
	// reverse and un-reversed below.
	for prevPath, prevNode := middlePath, middleNode; true; {
		prevPath, prevNode, err = fs.prev(prevPath, prevNode)
		if err != nil {
			errs = append(errs, err)
			break
		}
		if prevPath == nil {
			break
		}
		prevItem := prevNode.Data.BodyLeaf[prevPath[len(prevPath)-1].ItemIdx]
		if fn(prevItem.Head.Key) != 0 {
			break
		}
		ret = append(ret, prevItem)
	}
	util.ReverseSlice(ret)
	// Scan forward from the middle item.
	for nextPath, nextNode := middlePath, middleNode; true; {
		nextPath, nextNode, err = fs.next(nextPath, nextNode)
		if err != nil {
			errs = append(errs, err)
			break
		}
		if nextPath == nil {
			break
		}
		nextItem := nextNode.Data.BodyLeaf[nextPath[len(nextPath)-1].ItemIdx]
		if fn(nextItem.Head.Key) != 0 {
			break
		}
		ret = append(ret, nextItem)
	}
	if errs != nil {
		err = errs
	}
	return ret, err
}
diff --git a/lib/btrfs/io4_fs.go b/lib/btrfs/io4_fs.go
new file mode 100644
index 0000000..7ae30cb
--- /dev/null
+++ b/lib/btrfs/io4_fs.go
@@ -0,0 +1,404 @@
+package btrfs
+
+import (
+ "fmt"
+ "io"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "sync"
+
+ "github.com/datawire/dlib/derror"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+// BareInode is an inode's INODE_ITEM alone, without the other items
+// that share its object ID.  InodeItem is nil if the INODE_ITEM could
+// not be read; accumulated read errors go in Errs.
+type BareInode struct {
+ Inode ObjID
+ InodeItem *btrfsitem.Inode
+ Errs derror.MultiError
+}
+
+// FullInode is a BareInode plus every other item in the tree that has
+// the same object ID (refs, dir entries, extents, ...).
+type FullInode struct {
+ BareInode
+ OtherItems []Item
+}
+
+// InodeRef pairs an INODE_REF item body with the inode it points at
+// (taken from the item key's offset field).
+type InodeRef struct {
+ Inode ObjID
+ btrfsitem.InodeRef
+}
+
+// Dir is a directory inode with its children indexed both by name
+// (DIR_ITEM) and by index (DIR_INDEX), and its ".." INODE_REF.
+type Dir struct {
+ FullInode
+ DotDot *InodeRef
+ ChildrenByName map[string]btrfsitem.DirEntry
+ ChildrenByIndex map[uint64]btrfsitem.DirEntry
+ SV *Subvolume
+}
+
+// FileExtent is an EXTENT_DATA item body together with the byte offset
+// within the file at which it begins (the item key's offset field).
+type FileExtent struct {
+ OffsetWithinFile int64
+ btrfsitem.FileExtent
+}
+
+// File is a regular-file inode with its extents sorted by offset.
+type File struct {
+ FullInode
+ Extents []FileExtent
+ SV *Subvolume
+}
+
+// Subvolume provides filesystem-level (inode/dir/file) access to one
+// subvolume tree of an FS, with caching at each layer.
+type Subvolume struct {
+ FS *FS
+ TreeID ObjID
+
+ // lazily-loaded ROOT_ITEM for TreeID (see init)
+ rootOnce sync.Once
+ rootVal btrfsitem.Root
+ rootErr error
+
+ bareInodeCache util.LRUCache[ObjID, *BareInode]
+ fullInodeCache util.LRUCache[ObjID, *FullInode]
+ dirCache util.LRUCache[ObjID, *Dir]
+ fileCache util.LRUCache[ObjID, *File]
+}
+
+// init lazily loads the subvolume's ROOT_ITEM from the ROOT_TREE,
+// exactly once; a failure is sticky in sv.rootErr (sync.Once will not
+// retry on later calls).
+func (sv *Subvolume) init() {
+ sv.rootOnce.Do(func() {
+ root, err := sv.FS.TreeLookup(ROOT_TREE_OBJECTID, Key{
+ ObjectID: sv.TreeID,
+ ItemType: btrfsitem.ROOT_ITEM_KEY,
+ Offset: 0,
+ })
+ if err != nil {
+ sv.rootErr = err
+ return
+ }
+
+ rootBody, ok := root.Body.(btrfsitem.Root)
+ if !ok {
+ sv.rootErr = fmt.Errorf("FS_TREE ROOT_ITEM has malformed body")
+ return
+ }
+
+ sv.rootVal = rootBody
+ })
+}
+
+// GetRootInode returns the object ID of the subvolume's root directory
+// inode (ROOT_ITEM.RootDirID).  On error the returned ObjID is the
+// zero value.
+func (sv *Subvolume) GetRootInode() (ObjID, error) {
+ sv.init()
+ return sv.rootVal.RootDirID, sv.rootErr
+}
+
+// LoadBareInode fetches (through the LRU cache) just the INODE_ITEM
+// for the given inode.  Success is indicated by val.InodeItem being
+// non-nil; otherwise the accumulated val.Errs is returned as the error.
+func (sv *Subvolume) LoadBareInode(inode ObjID) (*BareInode, error) {
+ val := sv.bareInodeCache.GetOrElse(inode, func() (val *BareInode) {
+ val = &BareInode{
+ Inode: inode,
+ }
+ item, err := sv.FS.TreeLookup(sv.TreeID, Key{
+ ObjectID: inode,
+ ItemType: btrfsitem.INODE_ITEM_KEY,
+ Offset: 0,
+ })
+ if err != nil {
+ val.Errs = append(val.Errs, err)
+ return
+ }
+
+ itemBody, ok := item.Body.(btrfsitem.Inode)
+ if !ok {
+ val.Errs = append(val.Errs, fmt.Errorf("malformed inode"))
+ return
+ }
+ val.InodeItem = &itemBody
+
+ return
+ })
+ if val.InodeItem == nil {
+ return nil, val.Errs
+ }
+ return val, nil
+}
+
+// LoadFullInode fetches (through the LRU cache) all items sharing the
+// given inode's object ID, splitting out the INODE_ITEM from the rest.
+// Because this is a recovery tool, a partial read is kept: errors are
+// accumulated in val.Errs, and the call only fails outright when no
+// items at all could be read.
+func (sv *Subvolume) LoadFullInode(inode ObjID) (*FullInode, error) {
+ val := sv.fullInodeCache.GetOrElse(inode, func() (val *FullInode) {
+ val = &FullInode{
+ BareInode: BareInode{
+ Inode: inode,
+ },
+ }
+ items, err := sv.FS.TreeSearchAll(sv.TreeID, func(key Key) int {
+ return util.CmpUint(inode, key.ObjectID)
+ })
+ if err != nil {
+ val.Errs = append(val.Errs, err)
+ if len(items) == 0 {
+ return
+ }
+ }
+ for _, item := range items {
+ switch item.Head.Key.ItemType {
+ case btrfsitem.INODE_ITEM_KEY:
+ itemBody := item.Body.(btrfsitem.Inode)
+ // Tolerate duplicate INODE_ITEMs only if identical.
+ if val.InodeItem != nil {
+ if !reflect.DeepEqual(itemBody, *val.InodeItem) {
+ val.Errs = append(val.Errs, fmt.Errorf("multiple inodes"))
+ }
+ continue
+ }
+ val.InodeItem = &itemBody
+ default:
+ val.OtherItems = append(val.OtherItems, item)
+ }
+ }
+ return
+ })
+ if val.InodeItem == nil && val.OtherItems == nil {
+ return nil, val.Errs
+ }
+ return val, nil
+}
+
+// LoadDir fetches (through the LRU cache) the given inode as a
+// directory, indexing its children by name and by index via populate.
+// A zero val.Inode marks a failed load (LoadFullInode errored).
+func (sv *Subvolume) LoadDir(inode ObjID) (*Dir, error) {
+ val := sv.dirCache.GetOrElse(inode, func() (val *Dir) {
+ val = new(Dir)
+ fullInode, err := sv.LoadFullInode(inode)
+ if err != nil {
+ val.Errs = append(val.Errs, err)
+ return
+ }
+ val.FullInode = *fullInode
+ val.SV = sv
+ val.populate()
+ return
+ })
+ if val.Inode == 0 {
+ return nil, val.Errs
+ }
+ return val, nil
+}
+
+// populate builds ChildrenByName and ChildrenByIndex from
+// ret.OtherItems, and records the ".." INODE_REF in ret.DotDot.  It
+// cross-checks the two child indexes against each other, repairing
+// (and logging to ret.Errs) entries missing from either side.
+func (ret *Dir) populate() {
+ ret.ChildrenByName = make(map[string]btrfsitem.DirEntry)
+ ret.ChildrenByIndex = make(map[uint64]btrfsitem.DirEntry)
+ for _, item := range ret.OtherItems {
+ switch item.Head.Key.ItemType {
+ case btrfsitem.INODE_REF_KEY:
+ // The key offset is the parent inode this ref points to.
+ ref := InodeRef{
+ Inode: ObjID(item.Head.Key.Offset),
+ InodeRef: item.Body.(btrfsitem.InodeRef),
+ }
+ if ret.DotDot != nil {
+ if !reflect.DeepEqual(ref, *ret.DotDot) {
+ ret.Errs = append(ret.Errs, fmt.Errorf("multiple INODE_REF items on a directory"))
+ }
+ continue
+ }
+ ret.DotDot = &ref
+ case btrfsitem.DIR_ITEM_KEY:
+ body := item.Body.(btrfsitem.DirEntries)
+ if len(body) != 1 {
+ ret.Errs = append(ret.Errs, fmt.Errorf("multiple direntries in single DIR_ITEM?"))
+ continue
+ }
+ for _, entry := range body {
+ // DIR_ITEM keys are offset by the crc32c of the name;
+ // verify it before trusting the entry.
+ namehash := btrfsitem.NameHash(entry.Name)
+ if namehash != item.Head.Key.Offset {
+ ret.Errs = append(ret.Errs, fmt.Errorf("direntry crc32c mismatch: key=%#x crc32c(%q)=%#x",
+ item.Head.Key.Offset, entry.Name, namehash))
+ continue
+ }
+ if other, exists := ret.ChildrenByName[string(entry.Name)]; exists {
+ if !reflect.DeepEqual(entry, other) {
+ ret.Errs = append(ret.Errs, fmt.Errorf("multiple instances of direntry name %q", entry.Name))
+ }
+ continue
+ }
+ ret.ChildrenByName[string(entry.Name)] = entry
+ }
+ case btrfsitem.DIR_INDEX_KEY:
+ index := item.Head.Key.Offset
+ body := item.Body.(btrfsitem.DirEntries)
+ if len(body) != 1 {
+ ret.Errs = append(ret.Errs, fmt.Errorf("multiple direntries in single DIR_INDEX?"))
+ continue
+ }
+ for _, entry := range body {
+ if other, exists := ret.ChildrenByIndex[index]; exists {
+ if !reflect.DeepEqual(entry, other) {
+ ret.Errs = append(ret.Errs, fmt.Errorf("multiple instances of direntry index %v", index))
+ }
+ continue
+ }
+ ret.ChildrenByIndex[index] = entry
+ }
+ //case btrfsitem.XATTR_ITEM_KEY:
+ default:
+ panic(fmt.Errorf("TODO: handle item type %v", item.Head.Key.ItemType))
+ }
+ }
+ // Reconcile the two indexes: every by-index entry should have a
+ // by-name twin and vice versa; synthesize whichever is missing.
+ entriesWithIndexes := make(map[string]struct{})
+ nextIndex := uint64(2)
+ for index, entry := range ret.ChildrenByIndex {
+ if index+1 > nextIndex {
+ nextIndex = index + 1
+ }
+ entriesWithIndexes[string(entry.Name)] = struct{}{}
+ if other, exists := ret.ChildrenByName[string(entry.Name)]; !exists {
+ ret.Errs = append(ret.Errs, fmt.Errorf("missing by-name direntry for %q", entry.Name))
+ ret.ChildrenByName[string(entry.Name)] = entry
+ } else if !reflect.DeepEqual(entry, other) {
+ ret.Errs = append(ret.Errs, fmt.Errorf("direntry index %v and direntry name %q disagree", index, entry.Name))
+ ret.ChildrenByName[string(entry.Name)] = entry
+ }
+ }
+ // Sorted iteration keeps the synthesized indexes deterministic.
+ for _, name := range util.SortedMapKeys(ret.ChildrenByName) {
+ if _, exists := entriesWithIndexes[name]; !exists {
+ ret.Errs = append(ret.Errs, fmt.Errorf("missing by-index direntry for %q", name))
+ ret.ChildrenByIndex[nextIndex] = ret.ChildrenByName[name]
+ nextIndex++
+ }
+ }
+}
+
+// AbsPath returns the directory's absolute path within the subvolume
+// by recursing up the ".." chain until it reaches the subvolume's
+// root inode (which maps to "/").
+func (dir *Dir) AbsPath() (string, error) {
+ rootInode, err := dir.SV.GetRootInode()
+ if err != nil {
+ return "", err
+ }
+ if rootInode == dir.Inode {
+ return "/", nil
+ }
+ if dir.DotDot == nil {
+ return "", fmt.Errorf("missing .. entry in dir inode %v", dir.Inode)
+ }
+ parent, err := dir.SV.LoadDir(dir.DotDot.Inode)
+ if err != nil {
+ return "", err
+ }
+ parentName, err := parent.AbsPath()
+ if err != nil {
+ return "", err
+ }
+ return filepath.Join(parentName, string(dir.DotDot.Name)), nil
+}
+
+// LoadFile fetches (through the LRU cache) the given inode as a
+// regular file, gathering and sanity-checking its extents via
+// populate.  A zero val.Inode marks a failed load.
+func (sv *Subvolume) LoadFile(inode ObjID) (*File, error) {
+ val := sv.fileCache.GetOrElse(inode, func() (val *File) {
+ val = new(File)
+ fullInode, err := sv.LoadFullInode(inode)
+ if err != nil {
+ val.Errs = append(val.Errs, err)
+ return
+ }
+ val.FullInode = *fullInode
+ val.SV = sv
+ val.populate()
+ return
+ })
+ if val.Inode == 0 {
+ return nil, val.Errs
+ }
+ return val, nil
+}
+
+// populate collects the file's EXTENT_DATA items into ret.Extents,
+// sorts them by file offset, and records (in ret.Errs) any gaps,
+// overlaps, or disagreement between the extents' total span and the
+// inode's recorded size.
+func (ret *File) populate() {
+ for _, item := range ret.OtherItems {
+ switch item.Head.Key.ItemType {
+ case btrfsitem.INODE_REF_KEY:
+ // TODO
+ case btrfsitem.EXTENT_DATA_KEY:
+ // The key offset is the extent's byte position within the file.
+ ret.Extents = append(ret.Extents, FileExtent{
+ OffsetWithinFile: int64(item.Head.Key.Offset),
+ FileExtent: item.Body.(btrfsitem.FileExtent),
+ })
+ default:
+ panic(fmt.Errorf("TODO: handle item type %v", item.Head.Key.ItemType))
+ }
+ }
+
+ // These should already be sorted, because of the nature of
+ // the btree; but this is a recovery tool for corrupt
+ // filesystems, so go ahead and ensure that it's sorted.
+ sort.Slice(ret.Extents, func(i, j int) bool {
+ return ret.Extents[i].OffsetWithinFile < ret.Extents[j].OffsetWithinFile
+ })
+
+ // Walk the sorted extents checking that each begins exactly where
+ // the previous one ended.
+ pos := int64(0)
+ for _, extent := range ret.Extents {
+ if extent.OffsetWithinFile != pos {
+ if extent.OffsetWithinFile > pos {
+ ret.Errs = append(ret.Errs, fmt.Errorf("extent gap from %v to %v",
+ pos, extent.OffsetWithinFile))
+ } else {
+ ret.Errs = append(ret.Errs, fmt.Errorf("extent overlap from %v to %v",
+ extent.OffsetWithinFile, pos))
+ }
+ }
+ size, err := extent.Size()
+ if err != nil {
+ ret.Errs = append(ret.Errs, fmt.Errorf("extent %v: %w", extent.OffsetWithinFile, err))
+ }
+ pos += size
+ }
+ // Finally, the extents should cover exactly [0, inode.Size).
+ if ret.InodeItem != nil && pos != ret.InodeItem.Size {
+ if ret.InodeItem.Size > pos {
+ ret.Errs = append(ret.Errs, fmt.Errorf("extent gap from %v to %v",
+ pos, ret.InodeItem.Size))
+ } else {
+ ret.Errs = append(ret.Errs, fmt.Errorf("extent mapped past end of file from %v to %v",
+ ret.InodeItem.Size, pos))
+ }
+ }
+}
+
+// ReadAt implements io.ReaderAt, filling dat from file offset off by
+// looping over maybeShortReadAt until dat is full or an error occurs.
+//
+// These stateless maybe-short-reads each do an O(n) extent
+// lookup, so reading a file is O(n^2), but we expect n to be
+// small, so whatev.  Turn file.Extents into an rbtree if
+// it becomes a problem.
+func (file *File) ReadAt(dat []byte, off int64) (int, error) {
+ done := 0
+ for done < len(dat) {
+ n, err := file.maybeShortReadAt(dat[done:], off+int64(done))
+ done += n
+ if err != nil {
+ return done, err
+ }
+ }
+ return done, nil
+}
+
+// maybeShortReadAt reads as many bytes as the single extent containing
+// offset off provides (possibly fewer than len(dat)).  Inline extents
+// are copied straight from the item body; regular/prealloc extents are
+// read from the volume at DiskByteNr+Offset.  Reads past EOF return
+// io.EOF; offsets covered by no extent (a hole) return an error —
+// NOTE(review): sparse-file holes do not appear to be zero-filled
+// here, confirm that is intended for a recovery tool.
+func (file *File) maybeShortReadAt(dat []byte, off int64) (int, error) {
+ for _, extent := range file.Extents {
+ extBeg := extent.OffsetWithinFile
+ if extBeg > off {
+ break
+ }
+ extLen, err := extent.Size()
+ if err != nil {
+ continue
+ }
+ extEnd := extBeg + extLen
+ if extEnd <= off {
+ continue
+ }
+ offsetWithinExt := off - extent.OffsetWithinFile
+ readSize := util.Min(int64(len(dat)), extLen-offsetWithinExt)
+ switch extent.Type {
+ case btrfsitem.FILE_EXTENT_INLINE:
+ return copy(dat, extent.BodyInline[offsetWithinExt:offsetWithinExt+readSize]), nil
+ case btrfsitem.FILE_EXTENT_REG, btrfsitem.FILE_EXTENT_PREALLOC:
+ return file.SV.FS.ReadAt(dat[:readSize],
+ extent.BodyExtent.DiskByteNr.
+ Add(extent.BodyExtent.Offset).
+ Add(btrfsvol.AddrDelta(offsetWithinExt)))
+ }
+ }
+ if file.InodeItem != nil && off >= file.InodeItem.Size {
+ return 0, io.EOF
+ }
+ return 0, fmt.Errorf("read: could not map position %v", off)
+}
+
+// Compile-time check that *File satisfies io.ReaderAt.
+var _ io.ReaderAt = (*File)(nil)
diff --git a/lib/btrfs/types_node.go b/lib/btrfs/types_node.go
new file mode 100644
index 0000000..5934f40
--- /dev/null
+++ b/lib/btrfs/types_node.go
@@ -0,0 +1,411 @@
+package btrfs
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+// NodeFlags is the node header's flags field; on disk it occupies only
+// 7 bytes (the 8th byte of the packed field is the backref revision).
+type NodeFlags uint64
+
+func (NodeFlags) BinaryStaticSize() int {
+ return 7
+}
+
+// MarshalBinary encodes the flags as 7 little-endian bytes.
+func (f NodeFlags) MarshalBinary() ([]byte, error) {
+ var bs [8]byte
+ binary.LittleEndian.PutUint64(bs[:], uint64(f))
+ return bs[:7], nil
+}
+
+// UnmarshalBinary decodes 7 little-endian bytes into the flags.
+// NOTE(review): assumes len(dat) >= 7; shorter input panics — confirm
+// callers always hand in a full field.
+func (f *NodeFlags) UnmarshalBinary(dat []byte) (int, error) {
+ var bs [8]byte
+ copy(bs[:7], dat[:7])
+ *f = NodeFlags(binary.LittleEndian.Uint64(bs[:]))
+ return 7, nil
+}
+
+var (
+ _ binstruct.StaticSizer = NodeFlags(0)
+ _ binstruct.Marshaler = NodeFlags(0)
+ _ binstruct.Unmarshaler = (*NodeFlags)(nil)
+)
+
+const (
+ NodeWritten = NodeFlags(1 << iota)
+ NodeReloc
+)
+
+// nodeFlagNames parallels the flag constants above, for String().
+var nodeFlagNames = []string{
+ "WRITTEN",
+ "RELOC",
+}
+
+// Has reports whether all bits of req are set in f.
+func (f NodeFlags) Has(req NodeFlags) bool { return f&req == req }
+func (f NodeFlags) String() string { return util.BitfieldString(f, nodeFlagNames, util.HexLower) }
+
+// BackrefRev is the node header's backref-revision byte.
+type BackrefRev uint8
+
+const (
+ OldBackrefRev = BackrefRev(iota)
+ MixedBackrefRev = BackrefRev(iota)
+)
+
+// Node: main //////////////////////////////////////////////////////////////////////////////////////
+
+// Node is an in-memory btrfs B-tree node: the fixed header plus either
+// internal key-pointers or leaf items, depending on Head.Level.
+type Node struct {
+ // Some context from the parent filesystem
+ Size uint32 // superblock.NodeSize
+ ChecksumType btrfssum.CSumType // superblock.ChecksumType
+
+ // The node's header (always present)
+ Head NodeHeader
+
+ // The node's body (which one of these is present depends on
+ // the node's type, as specified in the header)
+ BodyInternal []KeyPointer // for internal nodes
+ BodyLeaf []Item // for leaf nodes
+
+ // Unparsed bytes between the used body and the end of the node.
+ Padding []byte
+}
+
+// NodeHeader is the on-disk node header (0x65 bytes).
+type NodeHeader struct {
+ Checksum btrfssum.CSum `bin:"off=0x0, siz=0x20"`
+ MetadataUUID UUID `bin:"off=0x20, siz=0x10"`
+ Addr btrfsvol.LogicalAddr `bin:"off=0x30, siz=0x8"` // Logical address of this node
+ Flags NodeFlags `bin:"off=0x38, siz=0x7"`
+ BackrefRev BackrefRev `bin:"off=0x3f, siz=0x1"`
+ ChunkTreeUUID UUID `bin:"off=0x40, siz=0x10"`
+ Generation Generation `bin:"off=0x50, siz=0x8"`
+ Owner ObjID `bin:"off=0x58, siz=0x8"` // The ID of the tree that contains this node
+ NumItems uint32 `bin:"off=0x60, siz=0x4"` // [ignored-when-writing]
+ Level uint8 `bin:"off=0x64, siz=0x1"` // 0 for leaf nodes, >=1 for internal nodes
+ binstruct.End `bin:"off=0x65"`
+}
+
+// MaxItems returns the maximum possible valid value of
+// .Head.NumItems: the body space divided by the per-item fixed cost
+// (KeyPointer for internal nodes, ItemHeader for leaf nodes; leaf
+// item bodies consume additional space beyond this minimum).
+func (node Node) MaxItems() uint32 {
+ bodyBytes := node.Size - uint32(binstruct.StaticSize(NodeHeader{}))
+ if node.Head.Level > 0 {
+ return bodyBytes / uint32(binstruct.StaticSize(KeyPointer{}))
+ } else {
+ return bodyBytes / uint32(binstruct.StaticSize(ItemHeader{}))
+ }
+}
+
+// CalculateChecksum re-marshals the node and sums everything after the
+// checksum field itself.
+func (node Node) CalculateChecksum() (btrfssum.CSum, error) {
+ data, err := binstruct.Marshal(node)
+ if err != nil {
+ return btrfssum.CSum{}, err
+ }
+ return node.ChecksumType.Sum(data[binstruct.StaticSize(btrfssum.CSum{}):])
+}
+
+// ValidateChecksum compares the stored header checksum against a
+// freshly calculated one.
+func (node Node) ValidateChecksum() error {
+ stored := node.Head.Checksum
+ calced, err := node.CalculateChecksum()
+ if err != nil {
+ return err
+ }
+ if calced != stored {
+ return fmt.Errorf("node checksum mismatch: stored=%v calculated=%v",
+ stored, calced)
+ }
+ return nil
+}
+
+// UnmarshalBinary parses a full node from nodeBuf: header first, then
+// internal or leaf body according to Head.Level.  .Size is set from
+// len(nodeBuf) and the pre-set .ChecksumType is preserved; everything
+// else is reset.  The whole buffer must be consumed.
+func (node *Node) UnmarshalBinary(nodeBuf []byte) (int, error) {
+ *node = Node{
+ Size: uint32(len(nodeBuf)),
+ ChecksumType: node.ChecksumType,
+ }
+ n, err := binstruct.Unmarshal(nodeBuf, &node.Head)
+ if err != nil {
+ return n, fmt.Errorf("btrfs.Node.UnmarshalBinary: %w", err)
+ }
+ if node.Head.Level > 0 {
+ _n, err := node.unmarshalInternal(nodeBuf[n:])
+ n += _n
+ if err != nil {
+ return n, fmt.Errorf("btrfs.Node.UnmarshalBinary: internal: %w", err)
+ }
+ } else {
+ _n, err := node.unmarshalLeaf(nodeBuf[n:])
+ n += _n
+ if err != nil {
+ return n, fmt.Errorf("btrfs.Node.UnmarshalBinary: leaf: %w", err)
+ }
+ }
+ if n != len(nodeBuf) {
+ return n, fmt.Errorf("btrfs.Node.UnmarshalBinary: left over data: got %v bytes but only consumed %v",
+ len(nodeBuf), n)
+ }
+ return n, nil
+}
+
+// MarshalBinary serializes the node into a buffer of exactly .Size
+// bytes (which must be set by the caller).  Head.NumItems is derived
+// from the body slice lengths rather than trusted.
+func (node Node) MarshalBinary() ([]byte, error) {
+ if node.Size == 0 {
+ return nil, fmt.Errorf("btrfs.Node.MarshalBinary: .Size must be set")
+ }
+ if node.Size <= uint32(binstruct.StaticSize(NodeHeader{})) {
+ return nil, fmt.Errorf("btrfs.Node.MarshalBinary: .Size must be greater than %v",
+ binstruct.StaticSize(NodeHeader{}))
+ }
+ if node.Head.Level > 0 {
+ node.Head.NumItems = uint32(len(node.BodyInternal))
+ } else {
+ node.Head.NumItems = uint32(len(node.BodyLeaf))
+ }
+
+ buf := make([]byte, node.Size)
+
+ if bs, err := binstruct.Marshal(node.Head); err != nil {
+ return buf, err
+ } else if len(bs) != binstruct.StaticSize(NodeHeader{}) {
+ return nil, fmt.Errorf("btrfs.Node.MarshalBinary: header is %v bytes but expected %v",
+ len(bs), binstruct.StaticSize(NodeHeader{}))
+ } else {
+ copy(buf, bs)
+ }
+
+ if node.Head.Level > 0 {
+ if err := node.marshalInternalTo(buf[binstruct.StaticSize(NodeHeader{}):]); err != nil {
+ return buf, err
+ }
+ } else {
+ if err := node.marshalLeafTo(buf[binstruct.StaticSize(NodeHeader{}):]); err != nil {
+ return buf, err
+ }
+ }
+
+ return buf, nil
+}
+
+// Node: "internal" ////////////////////////////////////////////////////////////////////////////////
+
+// KeyPointer is one entry in an internal node's body: the smallest key
+// reachable through the child node at BlockPtr.
+type KeyPointer struct {
+ Key Key `bin:"off=0x0, siz=0x11"`
+ BlockPtr btrfsvol.LogicalAddr `bin:"off=0x11, siz=0x8"`
+ Generation Generation `bin:"off=0x19, siz=0x8"`
+ binstruct.End `bin:"off=0x21"`
+}
+
+// unmarshalInternal parses Head.NumItems KeyPointers from the front of
+// bodyBuf; whatever follows is kept verbatim as Padding.  It always
+// consumes the whole buffer on success.
+func (node *Node) unmarshalInternal(bodyBuf []byte) (int, error) {
+ n := 0
+ for i := uint32(0); i < node.Head.NumItems; i++ {
+ var item KeyPointer
+ _n, err := binstruct.Unmarshal(bodyBuf[n:], &item)
+ n += _n
+ if err != nil {
+ return n, fmt.Errorf("item %v: %w", i, err)
+ }
+ node.BodyInternal = append(node.BodyInternal, item)
+ }
+ node.Padding = bodyBuf[n:]
+ return len(bodyBuf), nil
+}
+
+// marshalInternalTo writes the KeyPointers (then the saved Padding)
+// into bodyBuf, erroring if they do not fit.
+func (node *Node) marshalInternalTo(bodyBuf []byte) error {
+ n := 0
+ for i, item := range node.BodyInternal {
+ bs, err := binstruct.Marshal(item)
+ if err != nil {
+ return fmt.Errorf("item %v: %w", i, err)
+ }
+ if copy(bodyBuf[n:], bs) < len(bs) {
+ return fmt.Errorf("item %v: not enough space: need at least %v+%v=%v bytes, but only have %v",
+ i, n, len(bs), n+len(bs), len(bodyBuf))
+ }
+ n += len(bs)
+ }
+ if copy(bodyBuf[n:], node.Padding) < len(node.Padding) {
+ return fmt.Errorf("padding: not enough space: need at least %v+%v=%v bytes, but only have %v",
+ n, len(node.Padding), n+len(node.Padding), len(bodyBuf))
+ }
+ return nil
+}
+
+// Node: "leaf" ////////////////////////////////////////////////////////////////////////////////////
+
+// Item is one entry in a leaf node's body: the fixed-size header plus
+// the decoded item body.
+type Item struct {
+ Head ItemHeader
+ Body btrfsitem.Item
+}
+
+// ItemHeader is the on-disk per-item header stored at the front of a
+// leaf node's body; the item data it points at is packed at the back.
+type ItemHeader struct {
+ Key Key `bin:"off=0x0, siz=0x11"`
+ DataOffset uint32 `bin:"off=0x11, siz=0x4"` // [ignored-when-writing] relative to the end of the header (0x65)
+ DataSize uint32 `bin:"off=0x15, siz=0x4"` // [ignored-when-writing]
+ binstruct.End `bin:"off=0x19"`
+}
+
+// unmarshalLeaf parses Head.NumItems items: headers grow forward from
+// the front of bodyBuf ("head") while item bodies are packed backward
+// from the end ("tail"); the two cursors must meet exactly, and the
+// unused gap between them is kept as Padding.
+func (node *Node) unmarshalLeaf(bodyBuf []byte) (int, error) {
+ head := 0
+ tail := len(bodyBuf)
+ for i := uint32(0); i < node.Head.NumItems; i++ {
+ var item Item
+
+ n, err := binstruct.Unmarshal(bodyBuf[head:], &item.Head)
+ head += n
+ if err != nil {
+ return 0, fmt.Errorf("item %v: head: %w", i, err)
+ }
+ if head > tail {
+ return 0, fmt.Errorf("item %v: head: end_offset=%#x is in the body section (offset>%#x)",
+ i, head, tail)
+ }
+
+ dataOff := int(item.Head.DataOffset)
+ if dataOff < head {
+ return 0, fmt.Errorf("item %v: body: beg_offset=%#x is in the head section (offset<%#x)",
+ i, dataOff, head)
+ }
+ dataSize := int(item.Head.DataSize)
+ // Bodies must be packed contiguously from the back; each item's
+ // body must end exactly where the previous one began.
+ if dataOff+dataSize != tail {
+ return 0, fmt.Errorf("item %v: body: end_offset=%#x is not cur_tail=%#x)",
+ i, dataOff+dataSize, tail)
+ }
+ tail = dataOff
+ dataBuf := bodyBuf[dataOff : dataOff+dataSize]
+ item.Body = btrfsitem.UnmarshalItem(item.Head.Key, node.ChecksumType, dataBuf)
+
+ node.BodyLeaf = append(node.BodyLeaf, item)
+ }
+
+ node.Padding = bodyBuf[head:tail]
+ return len(bodyBuf), nil
+}
+
+// marshalLeafTo writes item headers forward from the front of bodyBuf
+// and item bodies backward from the end (mirroring unmarshalLeaf),
+// recomputing each header's DataOffset/DataSize from the re-marshaled
+// body rather than trusting stored values.
+func (node *Node) marshalLeafTo(bodyBuf []byte) error {
+ head := 0
+ tail := len(bodyBuf)
+ for i, item := range node.BodyLeaf {
+ itemBodyBuf, err := binstruct.Marshal(item.Body)
+ if err != nil {
+ return fmt.Errorf("item %v: body: %w", i, err)
+ }
+ item.Head.DataSize = uint32(len(itemBodyBuf))
+ item.Head.DataOffset = uint32(tail - len(itemBodyBuf))
+ itemHeadBuf, err := binstruct.Marshal(item.Head)
+ if err != nil {
+ return fmt.Errorf("item %v: head: %w", i, err)
+ }
+
+ if tail-head < len(itemHeadBuf)+len(itemBodyBuf) {
+ return fmt.Errorf("item %v: not enough space: need at least (head_len:%v)+(body_len:%v)=%v free bytes, but only have %v",
+ i, len(itemHeadBuf), len(itemBodyBuf), len(itemHeadBuf)+len(itemBodyBuf), tail-head)
+ }
+
+ copy(bodyBuf[head:], itemHeadBuf)
+ head += len(itemHeadBuf)
+ tail -= len(itemBodyBuf)
+ copy(bodyBuf[tail:], itemBodyBuf)
+ }
+ if copy(bodyBuf[head:tail], node.Padding) < len(node.Padding) {
+ return fmt.Errorf("padding: not enough space: need at least %v free bytes, but only have %v",
+ len(node.Padding), tail-head)
+ }
+ return nil
+}
+
+// LeafFreeSpace returns how many body bytes remain unused in a leaf
+// node (node size minus the header and each item's header+data).
+// Panics if called on an internal node.
+func (node *Node) LeafFreeSpace() uint32 {
+ if node.Head.Level > 0 {
+ panic(fmt.Errorf("Node.LeafFreeSpace: not a leaf node"))
+ }
+ freeSpace := node.Size
+ freeSpace -= uint32(binstruct.StaticSize(NodeHeader{}))
+ for _, item := range node.BodyLeaf {
+ freeSpace -= uint32(binstruct.StaticSize(ItemHeader{}))
+ freeSpace -= item.Head.DataSize
+ }
+ return freeSpace
+}
+
+// Tie Nodes in to the FS //////////////////////////////////////////////////////////////////////////
+
+// ErrNotANode is returned (wrapped) when a block read from disk does
+// not carry the filesystem's metadata UUID; check with errors.Is.
+var ErrNotANode = errors.New("does not look like a node")
+
+// ReadNode reads a node-sized block at addr from fs, validates its
+// metadata UUID and checksum, and parses it.  laddrCB (if non-nil) is
+// called with the node's claimed logical address between the sanity
+// checks and the full parse, letting the caller veto mismatched nodes.
+// On error, the partially-parsed nodeRef is returned alongside the
+// error (not nil), so callers may still inspect the header.
+func ReadNode[Addr ~int64](fs util.File[Addr], sb Superblock, addr Addr, laddrCB func(btrfsvol.LogicalAddr) error) (*util.Ref[Addr, Node], error) {
+ nodeBuf := make([]byte, sb.NodeSize)
+ if _, err := fs.ReadAt(nodeBuf, addr); err != nil {
+ return nil, err
+ }
+
+ // parse (early)
+
+ nodeRef := &util.Ref[Addr, Node]{
+ File: fs,
+ Addr: addr,
+ Data: Node{
+ Size: sb.NodeSize,
+ ChecksumType: sb.ChecksumType,
+ },
+ }
+ if _, err := binstruct.Unmarshal(nodeBuf, &nodeRef.Data.Head); err != nil {
+ return nodeRef, fmt.Errorf("btrfs.ReadNode: node@%v: %w", addr, err)
+ }
+
+ // sanity checking
+
+ if nodeRef.Data.Head.MetadataUUID != sb.EffectiveMetadataUUID() {
+ return nodeRef, fmt.Errorf("btrfs.ReadNode: node@%v: %w", addr, ErrNotANode)
+ }
+
+ // Checksum covers everything after the checksum field itself.
+ stored := nodeRef.Data.Head.Checksum
+ calced, err := nodeRef.Data.ChecksumType.Sum(nodeBuf[binstruct.StaticSize(btrfssum.CSum{}):])
+ if err != nil {
+ return nodeRef, fmt.Errorf("btrfs.ReadNode: node@%v: %w", addr, err)
+ }
+ if stored != calced {
+ return nodeRef, fmt.Errorf("btrfs.ReadNode: node@%v: looks like a node but is corrupt: checksum mismatch: stored=%v calculated=%v",
+ addr, stored, calced)
+ }
+
+ if laddrCB != nil {
+ if err := laddrCB(nodeRef.Data.Head.Addr); err != nil {
+ return nodeRef, fmt.Errorf("btrfs.ReadNode: node@%v: %w", addr, err)
+ }
+ }
+
+ // parse (main)
+
+ if _, err := nodeRef.Data.UnmarshalBinary(nodeBuf); err != nil {
+ return nodeRef, fmt.Errorf("btrfs.ReadNode: node@%v: %w", addr, err)
+ }
+
+ // return
+
+ return nodeRef, nil
+}
+
+// ReadNode reads the node at the given logical address, additionally
+// requiring that the node's self-reported address match the address it
+// was read from.
+func (fs *FS) ReadNode(addr btrfsvol.LogicalAddr) (*util.Ref[btrfsvol.LogicalAddr, Node], error) {
+ sb, err := fs.Superblock()
+ if err != nil {
+ return nil, fmt.Errorf("btrfs.FS.ReadNode: %w", err)
+ }
+
+ return ReadNode[btrfsvol.LogicalAddr](fs, sb.Data, addr, func(claimAddr btrfsvol.LogicalAddr) error {
+ if claimAddr != addr {
+ return fmt.Errorf("read from laddr=%v but claims to be at laddr=%v",
+ addr, claimAddr)
+ }
+ return nil
+ })
+}
+
+// readNodeAtLevel reads the node at addr and additionally checks that
+// its level matches expLevel (used while descending a tree, where the
+// expected child level is known).  Like ReadNode, the node is returned
+// even alongside an error.
+func (fs *FS) readNodeAtLevel(addr btrfsvol.LogicalAddr, expLevel uint8) (*util.Ref[btrfsvol.LogicalAddr, Node], error) {
+ node, err := fs.ReadNode(addr)
+ if err != nil {
+ return node, err
+ }
+ if node.Data.Head.Level != expLevel {
+ return node, fmt.Errorf("btrfs.FS.ReadNode: node@%v: expected level %v but has level %v",
+ node.Addr, expLevel, node.Data.Head.Level)
+ }
+ return node, nil
+}
diff --git a/lib/btrfs/types_superblock.go b/lib/btrfs/types_superblock.go
new file mode 100644
index 0000000..373de48
--- /dev/null
+++ b/lib/btrfs/types_superblock.go
@@ -0,0 +1,233 @@
+package btrfs
+
+import (
+ "fmt"
+ "reflect"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+// Superblock is the btrfs on-disk superblock (0x1000 bytes including
+// padding).  Fields guarded by a "FeatureIncompat..." comment are only
+// meaningful when the corresponding IncompatFlags bit is set.
+type Superblock struct {
+ Checksum btrfssum.CSum `bin:"off=0x0, siz=0x20"` // Checksum of everything past this field (from 0x20 to 0x1000)
+ FSUUID UUID `bin:"off=0x20, siz=0x10"` // FS UUID
+ Self btrfsvol.PhysicalAddr `bin:"off=0x30, siz=0x8"` // physical address of this block (different for mirrors)
+ Flags uint64 `bin:"off=0x38, siz=0x8"` // flags
+ Magic [8]byte `bin:"off=0x40, siz=0x8"` // magic ('_BHRfS_M')
+ Generation Generation `bin:"off=0x48, siz=0x8"`
+
+ RootTree btrfsvol.LogicalAddr `bin:"off=0x50, siz=0x8"` // logical address of the root tree root
+ ChunkTree btrfsvol.LogicalAddr `bin:"off=0x58, siz=0x8"` // logical address of the chunk tree root
+ LogTree btrfsvol.LogicalAddr `bin:"off=0x60, siz=0x8"` // logical address of the log tree root
+
+ LogRootTransID uint64 `bin:"off=0x68, siz=0x8"` // log_root_transid
+ TotalBytes uint64 `bin:"off=0x70, siz=0x8"` // total_bytes
+ BytesUsed uint64 `bin:"off=0x78, siz=0x8"` // bytes_used
+ RootDirObjectID ObjID `bin:"off=0x80, siz=0x8"` // root_dir_objectid (usually 6)
+ NumDevices uint64 `bin:"off=0x88, siz=0x8"` // num_devices
+
+ SectorSize uint32 `bin:"off=0x90, siz=0x4"`
+ NodeSize uint32 `bin:"off=0x94, siz=0x4"`
+ LeafSize uint32 `bin:"off=0x98, siz=0x4"` // unused; must be the same as NodeSize
+ StripeSize uint32 `bin:"off=0x9c, siz=0x4"`
+ SysChunkArraySize uint32 `bin:"off=0xa0, siz=0x4"`
+
+ ChunkRootGeneration Generation `bin:"off=0xa4, siz=0x8"`
+ CompatFlags uint64 `bin:"off=0xac, siz=0x8"` // compat_flags
+ CompatROFlags uint64 `bin:"off=0xb4, siz=0x8"` // compat_ro_flags - only implementations that support the flags can write to the filesystem
+ IncompatFlags IncompatFlags `bin:"off=0xbc, siz=0x8"` // incompat_flags - only implementations that support the flags can use the filesystem
+ ChecksumType btrfssum.CSumType `bin:"off=0xc4, siz=0x2"`
+
+ RootLevel uint8 `bin:"off=0xc6, siz=0x1"` // root_level
+ ChunkLevel uint8 `bin:"off=0xc7, siz=0x1"` // chunk_root_level
+ LogLevel uint8 `bin:"off=0xc8, siz=0x1"` // log_root_level
+
+ DevItem btrfsitem.Dev `bin:"off=0xc9, siz=0x62"` // DEV_ITEM data for this device
+ Label [0x100]byte `bin:"off=0x12b, siz=0x100"` // label (may not contain '/' or '\\')
+ CacheGeneration Generation `bin:"off=0x22b, siz=0x8"`
+ UUIDTreeGeneration Generation `bin:"off=0x233, siz=0x8"`
+
+ // FeatureIncompatMetadataUUID
+ MetadataUUID UUID `bin:"off=0x23b, siz=0x10"`
+
+ // FeatureIncompatExtentTreeV2
+ NumGlobalRoots uint64 `bin:"off=0x24b, siz=0x8"`
+
+ // FeatureIncompatExtentTreeV2
+ BlockGroupRoot btrfsvol.LogicalAddr `bin:"off=0x253, siz=0x8"`
+ BlockGroupRootGeneration Generation `bin:"off=0x25b, siz=0x8"`
+ BlockGroupRootLevel uint8 `bin:"off=0x263, siz=0x1"`
+
+ Reserved [199]byte `bin:"off=0x264, siz=0xc7"` // future expansion
+
+ SysChunkArray [0x800]byte `bin:"off=0x32b, siz=0x800"` // sys_chunk_array:(n bytes valid) Contains (KEY . CHUNK_ITEM) pairs for all SYSTEM chunks. This is needed to bootstrap the mapping from logical addresses to physical.
+ SuperRoots [4]RootBackup `bin:"off=0xb2b, siz=0x2a0"`
+
+ // Padded to 4096 bytes
+ Padding [565]byte `bin:"off=0xdcb, siz=0x235"`
+ binstruct.End `bin:"off=0x1000"`
+}
+
+// CalculateChecksum re-marshals the superblock and sums everything
+// after the checksum field itself.
+func (sb Superblock) CalculateChecksum() (btrfssum.CSum, error) {
+ data, err := binstruct.Marshal(sb)
+ if err != nil {
+ return btrfssum.CSum{}, err
+ }
+ return sb.ChecksumType.Sum(data[binstruct.StaticSize(btrfssum.CSum{}):])
+}
+
+// ValidateChecksum compares the stored checksum against a freshly
+// calculated one.
+func (sb Superblock) ValidateChecksum() error {
+ stored := sb.Checksum
+ calced, err := sb.CalculateChecksum()
+ if err != nil {
+ return err
+ }
+ if calced != stored {
+ return fmt.Errorf("superblock checksum mismatch: stored=%v calculated=%v",
+ stored, calced)
+ }
+ return nil
+}
+
+// Equal compares two superblocks, ignoring the Checksum and Self
+// fields, which legitimately differ between the mirror copies of the
+// superblock.  (Receivers are values, so zeroing here does not mutate
+// the callers' copies.)
+func (a Superblock) Equal(b Superblock) bool {
+ a.Checksum = btrfssum.CSum{}
+ a.Self = 0
+
+ b.Checksum = btrfssum.CSum{}
+ b.Self = 0
+
+ return reflect.DeepEqual(a, b)
+}
+
+// EffectiveMetadataUUID returns the UUID that metadata blocks are
+// stamped with: MetadataUUID when the incompat flag is set, otherwise
+// the filesystem UUID.
+func (sb Superblock) EffectiveMetadataUUID() UUID {
+ if !sb.IncompatFlags.Has(FeatureIncompatMetadataUUID) {
+ return sb.FSUUID
+ }
+ return sb.MetadataUUID
+}
+
+// SysChunk is one (KEY . CHUNK_ITEM) pair from the superblock's
+// sys_chunk_array.
+type SysChunk struct {
+ Key Key
+ Chunk btrfsitem.Chunk
+}
+
+// MarshalBinary encodes the key followed immediately by the chunk.
+func (sc SysChunk) MarshalBinary() ([]byte, error) {
+ dat, err := binstruct.Marshal(sc.Key)
+ if err != nil {
+ return dat, fmt.Errorf("%T.MarshalBinary: %w", sc, err)
+ }
+ _dat, err := binstruct.Marshal(sc.Chunk)
+ dat = append(dat, _dat...)
+ if err != nil {
+ return dat, fmt.Errorf("%T.MarshalBinary: %w", sc, err)
+ }
+ return dat, nil
+}
+
+// UnmarshalBinary decodes a key followed immediately by a chunk,
+// returning the total number of bytes consumed.
+func (sc *SysChunk) UnmarshalBinary(dat []byte) (int, error) {
+ n, err := binstruct.Unmarshal(dat, &sc.Key)
+ if err != nil {
+ return n, fmt.Errorf("%T.UnmarshalBinary: %w", *sc, err)
+ }
+ _n, err := binstruct.Unmarshal(dat[n:], &sc.Chunk)
+ n += _n
+ if err != nil {
+ return n, fmt.Errorf("%T.UnmarshalBinary: %w", *sc, err)
+ }
+ return n, nil
+}
+
+// ParseSysChunkArray decodes the valid prefix (SysChunkArraySize
+// bytes) of the superblock's sys_chunk_array into its variable-length
+// (KEY . CHUNK_ITEM) pairs.
+func (sb Superblock) ParseSysChunkArray() ([]SysChunk, error) {
+ dat := sb.SysChunkArray[:sb.SysChunkArraySize]
+ var ret []SysChunk
+ for len(dat) > 0 {
+ var pair SysChunk
+ n, err := binstruct.Unmarshal(dat, &pair)
+ dat = dat[n:]
+ if err != nil {
+ return nil, err
+ }
+ ret = append(ret, pair)
+ }
+ return ret, nil
+}
+
+// RootBackup is one entry of Superblock.SuperRoots: a snapshot of the
+// principal tree roots (address-as-ObjID, generation, and level for
+// each) plus summary counters, kept for recovery.
+type RootBackup struct {
+ TreeRoot ObjID `bin:"off=0x0, siz=0x8"`
+ TreeRootGen Generation `bin:"off=0x8, siz=0x8"`
+
+ ChunkRoot ObjID `bin:"off=0x10, siz=0x8"`
+ ChunkRootGen Generation `bin:"off=0x18, siz=0x8"`
+
+ ExtentRoot ObjID `bin:"off=0x20, siz=0x8"`
+ ExtentRootGen Generation `bin:"off=0x28, siz=0x8"`
+
+ FSRoot ObjID `bin:"off=0x30, siz=0x8"`
+ FSRootGen Generation `bin:"off=0x38, siz=0x8"`
+
+ DevRoot ObjID `bin:"off=0x40, siz=0x8"`
+ DevRootGen Generation `bin:"off=0x48, siz=0x8"`
+
+ ChecksumRoot ObjID `bin:"off=0x50, siz=0x8"`
+ ChecksumRootGen Generation `bin:"off=0x58, siz=0x8"`
+
+ TotalBytes uint64 `bin:"off=0x60, siz=0x8"`
+ BytesUsed uint64 `bin:"off=0x68, siz=0x8"`
+ NumDevices uint64 `bin:"off=0x70, siz=0x8"`
+
+ Unused [8 * 4]byte `bin:"off=0x78, siz=0x20"`
+
+ TreeRootLevel uint8 `bin:"off=0x98, siz=0x1"`
+ ChunkRootLevel uint8 `bin:"off=0x99, siz=0x1"`
+ ExtentRootLevel uint8 `bin:"off=0x9a, siz=0x1"`
+ FSRootLevel uint8 `bin:"off=0x9b, siz=0x1"`
+ DevRootLevel uint8 `bin:"off=0x9c, siz=0x1"`
+ ChecksumRootLevel uint8 `bin:"off=0x9d, siz=0x1"`
+
+ Padding [10]byte `bin:"off=0x9e, siz=0xa"`
+ binstruct.End `bin:"off=0xa8"`
+}
+
+// IncompatFlags is the superblock's incompat_flags bitfield; an
+// implementation must understand every set bit to use the filesystem.
+type IncompatFlags uint64
+
+const (
+ FeatureIncompatMixedBackref = IncompatFlags(1 << iota)
+ FeatureIncompatDefaultSubvol
+ FeatureIncompatMixedGroups
+ FeatureIncompatCompressLZO
+ FeatureIncompatCompressZSTD
+ FeatureIncompatBigMetadata // buggy
+ FeatureIncompatExtendedIRef
+ FeatureIncompatRAID56
+ FeatureIncompatSkinnyMetadata
+ FeatureIncompatNoHoles
+ FeatureIncompatMetadataUUID
+ FeatureIncompatRAID1C34
+ FeatureIncompatZoned
+ FeatureIncompatExtentTreeV2
+)
+
+// incompatFlagNames parallels the flag constants above, for String().
+// NOTE(review): "FeatureIncompatBigMetadata " has a trailing space,
+// which will show up in String() output — looks accidental; confirm
+// and trim.
+var incompatFlagNames = []string{
+ "FeatureIncompatMixedBackref",
+ "FeatureIncompatDefaultSubvol",
+ "FeatureIncompatMixedGroups",
+ "FeatureIncompatCompressLZO",
+ "FeatureIncompatCompressZSTD",
+ "FeatureIncompatBigMetadata ",
+ "FeatureIncompatExtendedIRef",
+ "FeatureIncompatRAID56",
+ "FeatureIncompatSkinnyMetadata",
+ "FeatureIncompatNoHoles",
+ "FeatureIncompatMetadataUUID",
+ "FeatureIncompatRAID1C34",
+ "FeatureIncompatZoned",
+ "FeatureIncompatExtentTreeV2",
+}
+
+// Has reports whether all bits of req are set in f.
+func (f IncompatFlags) Has(req IncompatFlags) bool { return f&req == req }
+func (f IncompatFlags) String() string {
+ return util.BitfieldString(f, incompatFlagNames, util.HexLower)
+}