summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
Diffstat (limited to 'lib')
-rw-r--r--lib/binstruct/binint.go35
-rw-r--r--lib/binstruct/binint/builtins.go241
-rw-r--r--lib/binstruct/binstruct_test.go60
-rw-r--r--lib/binstruct/marshal.go42
-rw-r--r--lib/binstruct/size.go57
-rw-r--r--lib/binstruct/structs.go186
-rw-r--r--lib/binstruct/unmarshal.go54
-rw-r--r--lib/btrfs/Makefile79
-rw-r--r--lib/btrfs/aliases.go19
-rw-r--r--lib/btrfs/aliases_objid.go37
-rw-r--r--lib/btrfs/btrfsitem/item_blockgroup.go16
-rw-r--r--lib/btrfs/btrfsitem/item_chunk.go90
-rw-r--r--lib/btrfs/btrfsitem/item_dev.go33
-rw-r--r--lib/btrfs/btrfsitem/item_devextent.go32
-rw-r--r--lib/btrfs/btrfsitem/item_dir.go115
-rw-r--r--lib/btrfs/btrfsitem/item_empty.go9
-rw-r--r--lib/btrfs/btrfsitem/item_extent.go165
-rw-r--r--lib/btrfs/btrfsitem/item_extentcsum.go39
-rw-r--r--lib/btrfs/btrfsitem/item_extentdataref.go14
-rw-r--r--lib/btrfs/btrfsitem/item_fileextent.go137
-rw-r--r--lib/btrfs/btrfsitem/item_freespacebitmap.go12
-rw-r--r--lib/btrfs/btrfsitem/item_freespaceinfo.go11
-rw-r--r--lib/btrfs/btrfsitem/item_inode.go64
-rw-r--r--lib/btrfs/btrfsitem/item_inoderef.go35
-rw-r--r--lib/btrfs/btrfsitem/item_metadata.go44
-rw-r--r--lib/btrfs/btrfsitem/item_persistent.go19
-rw-r--r--lib/btrfs/btrfsitem/item_root.go51
-rw-r--r--lib/btrfs/btrfsitem/item_rootref.go34
-rw-r--r--lib/btrfs/btrfsitem/item_shareddataref.go10
-rw-r--r--lib/btrfs/btrfsitem/item_untyped.go14
-rw-r--r--lib/btrfs/btrfsitem/item_uuid.go16
-rw-r--r--lib/btrfs/btrfsitem/items.go77
-rw-r--r--lib/btrfs/btrfsitem/items.txt29
-rw-r--r--lib/btrfs/btrfsitem/items_gen.go97
-rw-r--r--lib/btrfs/btrfssum/csum.go78
-rw-r--r--lib/btrfs/btrfssum/csum_test.go35
-rw-r--r--lib/btrfs/btrfsvol/addr.go47
-rw-r--r--lib/btrfs/btrfsvol/addr_test.go36
-rw-r--r--lib/btrfs/btrfsvol/blockgroupflags.go51
-rw-r--r--lib/btrfs/btrfsvol/chunk.go96
-rw-r--r--lib/btrfs/btrfsvol/devext.go89
-rw-r--r--lib/btrfs/btrfsvol/lvm.go355
-rw-r--r--lib/btrfs/internal/itemtype.go77
-rw-r--r--lib/btrfs/internal/misc.go37
-rw-r--r--lib/btrfs/internal/objid.go144
-rw-r--r--lib/btrfs/io1_pv.go96
-rw-r--r--lib/btrfs/io2_lv.go187
-rw-r--r--lib/btrfs/io3_btree.go562
-rw-r--r--lib/btrfs/io4_fs.go404
-rw-r--r--lib/btrfs/types_node.go411
-rw-r--r--lib/btrfs/types_superblock.go233
-rw-r--r--lib/btrfsmisc/fsck.go51
-rw-r--r--lib/btrfsmisc/open.go24
-rw-r--r--lib/btrfsmisc/print_tree.go362
-rw-r--r--lib/btrfsmisc/walk.go115
-rw-r--r--lib/linux/stat.go95
-rw-r--r--lib/rbtree/print_test.go41
-rw-r--r--lib/rbtree/rbtree.go477
-rw-r--r--lib/rbtree/rbtree_test.go126
-rw-r--r--lib/rbtree/rbtree_util.go48
-rw-r--r--lib/rbtree/testdata/fuzz/FuzzTree/be408ce7760bc8ced841300ea7e6bac1a1e9505b1535810083d18db95d86f4892
-rw-r--r--lib/rbtree/testdata/fuzz/FuzzTree/f9e6421dacf921f7bb25d402bffbfdce114baad0b1c8b9a9189b5a97fda27e412
-rw-r--r--lib/util/bitfield.go50
-rw-r--r--lib/util/fmt.go67
-rw-r--r--lib/util/fmt_test.go99
-rw-r--r--lib/util/generic.go122
-rw-r--r--lib/util/int.go3
-rw-r--r--lib/util/lru.go73
-rw-r--r--lib/util/ref.go54
-rw-r--r--lib/util/uuid.go72
-rw-r--r--lib/util/uuid_test.go63
71 files changed, 6857 insertions, 0 deletions
diff --git a/lib/binstruct/binint.go b/lib/binstruct/binint.go
new file mode 100644
index 0000000..89bb4f6
--- /dev/null
+++ b/lib/binstruct/binint.go
@@ -0,0 +1,35 @@
+package binstruct
+
+import (
+ "reflect"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct/binint"
+)
+
+type (
+ U8 = binint.U8
+ U16le = binint.U16le
+ U32le = binint.U32le
+ U64le = binint.U64le
+ U16be = binint.U16be
+ U32be = binint.U32be
+ U64be = binint.U64be
+ I8 = binint.I8
+ I16le = binint.I16le
+ I32le = binint.I32le
+ I64le = binint.I64le
+ I16be = binint.I16be
+ I32be = binint.I32be
+ I64be = binint.I64be
+)
+
+var intKind2Type = map[reflect.Kind]reflect.Type{
+ reflect.Uint8: reflect.TypeOf(U8(0)),
+ reflect.Int8: reflect.TypeOf(I8(0)),
+ reflect.Uint16: reflect.TypeOf(U16le(0)),
+ reflect.Int16: reflect.TypeOf(I16le(0)),
+ reflect.Uint32: reflect.TypeOf(U32le(0)),
+ reflect.Int32: reflect.TypeOf(I32le(0)),
+ reflect.Uint64: reflect.TypeOf(U64le(0)),
+ reflect.Int64: reflect.TypeOf(I64le(0)),
+}
diff --git a/lib/binstruct/binint/builtins.go b/lib/binstruct/binint/builtins.go
new file mode 100644
index 0000000..04fc477
--- /dev/null
+++ b/lib/binstruct/binint/builtins.go
@@ -0,0 +1,241 @@
+package binint
+
+import (
+ "encoding/binary"
+ "fmt"
+)
+
+func needNBytes(t interface{}, dat []byte, n int) error {
+ if len(dat) < n {
+ return fmt.Errorf("%T.UnmarshalBinary: need at least %v bytes, only have %v", t, n, len(dat))
+ }
+ return nil
+}
+
+// unsigned
+
+type U8 uint8
+
+func (U8) BinaryStaticSize() int { return 1 }
+func (x U8) MarshalBinary() ([]byte, error) { return []byte{byte(x)}, nil }
+func (x *U8) UnmarshalBinary(dat []byte) (int, error) {
+ if err := needNBytes(*x, dat, 1); err != nil {
+ return 0, err
+ }
+ *x = U8(dat[0])
+ return 1, nil
+}
+
+// unsigned little endian
+
+type U16le uint16
+
+func (U16le) BinaryStaticSize() int { return 2 }
+func (x U16le) MarshalBinary() ([]byte, error) {
+ var buf [2]byte
+ binary.LittleEndian.PutUint16(buf[:], uint16(x))
+ return buf[:], nil
+}
+func (x *U16le) UnmarshalBinary(dat []byte) (int, error) {
+ if err := needNBytes(*x, dat, 2); err != nil {
+ return 0, err
+ }
+ *x = U16le(binary.LittleEndian.Uint16(dat))
+ return 2, nil
+}
+
+type U32le uint32
+
+func (U32le) BinaryStaticSize() int { return 4 }
+func (x U32le) MarshalBinary() ([]byte, error) {
+ var buf [4]byte
+ binary.LittleEndian.PutUint32(buf[:], uint32(x))
+ return buf[:], nil
+}
+func (x *U32le) UnmarshalBinary(dat []byte) (int, error) {
+ if err := needNBytes(*x, dat, 4); err != nil {
+ return 0, err
+ }
+ *x = U32le(binary.LittleEndian.Uint32(dat))
+ return 4, nil
+}
+
+type U64le uint64
+
+func (U64le) BinaryStaticSize() int { return 8 }
+func (x U64le) MarshalBinary() ([]byte, error) {
+ var buf [8]byte
+ binary.LittleEndian.PutUint64(buf[:], uint64(x))
+ return buf[:], nil
+}
+func (x *U64le) UnmarshalBinary(dat []byte) (int, error) {
+ if err := needNBytes(*x, dat, 8); err != nil {
+ return 0, err
+ }
+ *x = U64le(binary.LittleEndian.Uint64(dat))
+ return 8, nil
+}
+
+// unsigned big endian
+
+type U16be uint16
+
+func (U16be) BinaryStaticSize() int { return 2 }
+func (x U16be) MarshalBinary() ([]byte, error) {
+ var buf [2]byte
+ binary.BigEndian.PutUint16(buf[:], uint16(x))
+ return buf[:], nil
+}
+func (x *U16be) UnmarshalBinary(dat []byte) (int, error) {
+ if err := needNBytes(*x, dat, 2); err != nil {
+ return 0, err
+ }
+ *x = U16be(binary.BigEndian.Uint16(dat))
+ return 2, nil
+}
+
+type U32be uint32
+
+func (U32be) BinaryStaticSize() int { return 4 }
+func (x U32be) MarshalBinary() ([]byte, error) {
+ var buf [4]byte
+ binary.BigEndian.PutUint32(buf[:], uint32(x))
+ return buf[:], nil
+}
+func (x *U32be) UnmarshalBinary(dat []byte) (int, error) {
+ if err := needNBytes(*x, dat, 4); err != nil {
+ return 0, err
+ }
+ *x = U32be(binary.BigEndian.Uint32(dat))
+ return 4, nil
+}
+
+type U64be uint64
+
+func (U64be) BinaryStaticSize() int { return 8 }
+func (x U64be) MarshalBinary() ([]byte, error) {
+ var buf [8]byte
+ binary.BigEndian.PutUint64(buf[:], uint64(x))
+ return buf[:], nil
+}
+func (x *U64be) UnmarshalBinary(dat []byte) (int, error) {
+ if err := needNBytes(*x, dat, 8); err != nil {
+ return 0, err
+ }
+ *x = U64be(binary.BigEndian.Uint64(dat))
+ return 8, nil
+}
+
+// signed
+
+type I8 int8
+
+func (I8) BinaryStaticSize() int { return 1 }
+func (x I8) MarshalBinary() ([]byte, error) { return []byte{byte(x)}, nil }
+func (x *I8) UnmarshalBinary(dat []byte) (int, error) {
+ if err := needNBytes(*x, dat, 1); err != nil {
+ return 0, err
+ }
+ *x = I8(dat[0])
+ return 1, nil
+}
+
+// signed little endian
+
+type I16le int16
+
+func (I16le) BinaryStaticSize() int { return 2 }
+func (x I16le) MarshalBinary() ([]byte, error) {
+ var buf [2]byte
+ binary.LittleEndian.PutUint16(buf[:], uint16(x))
+ return buf[:], nil
+}
+func (x *I16le) UnmarshalBinary(dat []byte) (int, error) {
+ if err := needNBytes(*x, dat, 2); err != nil {
+ return 0, err
+ }
+ *x = I16le(binary.LittleEndian.Uint16(dat))
+ return 2, nil
+}
+
+type I32le int32
+
+func (I32le) BinaryStaticSize() int { return 4 }
+func (x I32le) MarshalBinary() ([]byte, error) {
+ var buf [4]byte
+ binary.LittleEndian.PutUint32(buf[:], uint32(x))
+ return buf[:], nil
+}
+func (x *I32le) UnmarshalBinary(dat []byte) (int, error) {
+ if err := needNBytes(*x, dat, 4); err != nil {
+ return 0, err
+ }
+ *x = I32le(binary.LittleEndian.Uint32(dat))
+ return 4, nil
+}
+
+type I64le int64
+
+func (I64le) BinaryStaticSize() int { return 8 }
+func (x I64le) MarshalBinary() ([]byte, error) {
+ var buf [8]byte
+ binary.LittleEndian.PutUint64(buf[:], uint64(x))
+ return buf[:], nil
+}
+func (x *I64le) UnmarshalBinary(dat []byte) (int, error) {
+ if err := needNBytes(*x, dat, 8); err != nil {
+ return 0, err
+ }
+ *x = I64le(binary.LittleEndian.Uint64(dat))
+ return 8, nil
+}
+
+// signed big endian
+
+type I16be int16
+
+func (I16be) BinaryStaticSize() int { return 2 }
+func (x I16be) MarshalBinary() ([]byte, error) {
+ var buf [2]byte
+ binary.BigEndian.PutUint16(buf[:], uint16(x))
+ return buf[:], nil
+}
+func (x *I16be) UnmarshalBinary(dat []byte) (int, error) {
+ if err := needNBytes(*x, dat, 2); err != nil {
+ return 0, err
+ }
+ *x = I16be(binary.BigEndian.Uint16(dat))
+ return 2, nil
+}
+
+type I32be int32
+
+func (I32be) BinaryStaticSize() int { return 4 }
+func (x I32be) MarshalBinary() ([]byte, error) {
+ var buf [4]byte
+ binary.BigEndian.PutUint32(buf[:], uint32(x))
+ return buf[:], nil
+}
+func (x *I32be) UnmarshalBinary(dat []byte) (int, error) {
+ if err := needNBytes(*x, dat, 4); err != nil {
+ return 0, err
+ }
+ *x = I32be(binary.BigEndian.Uint32(dat))
+ return 4, nil
+}
+
+type I64be int64
+
+func (I64be) BinaryStaticSize() int { return 8 }
+func (x I64be) MarshalBinary() ([]byte, error) {
+ var buf [8]byte
+ binary.BigEndian.PutUint64(buf[:], uint64(x))
+ return buf[:], nil
+}
+func (x *I64be) UnmarshalBinary(dat []byte) (int, error) {
+ if err := needNBytes(*x, dat, 8); err != nil {
+ return 0, err
+ }
+ *x = I64be(binary.BigEndian.Uint64(dat))
+ return 8, nil
+}
diff --git a/lib/binstruct/binstruct_test.go b/lib/binstruct/binstruct_test.go
new file mode 100644
index 0000000..542746f
--- /dev/null
+++ b/lib/binstruct/binstruct_test.go
@@ -0,0 +1,60 @@
+package binstruct_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+)
+
+func TestSmoke(t *testing.T) {
+ type UUID [16]byte
+ type PhysicalAddr int64
+ type DevItem struct {
+ DeviceID uint64 `bin:"off=0x0, siz=0x8"` // device id
+
+ NumBytes uint64 `bin:"off=0x8, siz=0x8"` // number of bytes
+ NumBytesUsed uint64 `bin:"off=0x10, siz=0x8"` // number of bytes used
+
+ IOOptimalAlign uint32 `bin:"off=0x18, siz=0x4"` // optimal I/O align
+ IOOptimalWidth uint32 `bin:"off=0x1c, siz=0x4"` // optimal I/O width
+ IOMinSize uint32 `bin:"off=0x20, siz=0x4"` // minimal I/O size (sector size)
+
+ Type uint64 `bin:"off=0x24, siz=0x8"` // type
+ Generation uint64 `bin:"off=0x2c, siz=0x8"` // generation
+ StartOffset uint64 `bin:"off=0x34, siz=0x8"` // start offset
+ DevGroup uint32 `bin:"off=0x3c, siz=0x4"` // dev group
+ SeekSpeed uint8 `bin:"off=0x40, siz=0x1"` // seek speed
+ Bandwidth uint8 `bin:"off=0x41, siz=0x1"` // bandwidth
+
+ DevUUID UUID `bin:"off=0x42, siz=0x10"` // device UUID
+ FSUUID UUID `bin:"off=0x52, siz=0x10"` // FS UUID
+
+ binstruct.End `bin:"off=0x62"`
+ }
+ type TestType struct {
+ Magic [5]byte `bin:"off=0x0,siz=0x5"`
+ Dev DevItem `bin:"off=0x5,siz=0x62"`
+ Addr PhysicalAddr `bin:"off=0x67, siz=0x8"`
+
+ binstruct.End `bin:"off=0x6F"`
+ }
+
+ assert.Equal(t, 0x6F, binstruct.StaticSize(TestType{}))
+
+ input := TestType{}
+ copy(input.Magic[:], "mAgIc")
+ input.Dev.DeviceID = 12
+ input.Addr = 0xBEEF
+
+ bs, err := binstruct.Marshal(input)
+ assert.NoError(t, err)
+ assert.Equal(t, 0x6F, len(bs))
+
+ var output TestType
+ n, err := binstruct.Unmarshal(bs, &output)
+ assert.NoError(t, err)
+ assert.Equal(t, 0x6F, n)
+ assert.Equal(t, input, output)
+}
diff --git a/lib/binstruct/marshal.go b/lib/binstruct/marshal.go
new file mode 100644
index 0000000..684d2f3
--- /dev/null
+++ b/lib/binstruct/marshal.go
@@ -0,0 +1,42 @@
+package binstruct
+
+import (
+ "encoding"
+ "fmt"
+ "reflect"
+)
+
+type Marshaler = encoding.BinaryMarshaler
+
+func Marshal(obj any) ([]byte, error) {
+ if mar, ok := obj.(Marshaler); ok {
+ return mar.MarshalBinary()
+ }
+ return MarshalWithoutInterface(obj)
+}
+
+func MarshalWithoutInterface(obj any) ([]byte, error) {
+ val := reflect.ValueOf(obj)
+ switch val.Kind() {
+ case reflect.Uint8, reflect.Int8, reflect.Uint16, reflect.Int16, reflect.Uint32, reflect.Int32, reflect.Uint64, reflect.Int64:
+ typ := intKind2Type[val.Kind()]
+ return val.Convert(typ).Interface().(Marshaler).MarshalBinary()
+ case reflect.Ptr:
+ return Marshal(val.Elem().Interface())
+ case reflect.Array:
+ var ret []byte
+ for i := 0; i < val.Len(); i++ {
+ bs, err := Marshal(val.Index(i).Interface())
+ ret = append(ret, bs...)
+ if err != nil {
+ return ret, err
+ }
+ }
+ return ret, nil
+ case reflect.Struct:
+ return getStructHandler(val.Type()).Marshal(val)
+ default:
+ panic(fmt.Errorf("type=%v does not implement binfmt.Marshaler and kind=%v is not a supported statically-sized kind",
+ val.Type(), val.Kind()))
+ }
+}
diff --git a/lib/binstruct/size.go b/lib/binstruct/size.go
new file mode 100644
index 0000000..6563455
--- /dev/null
+++ b/lib/binstruct/size.go
@@ -0,0 +1,57 @@
+package binstruct
+
+import (
+ "fmt"
+ "reflect"
+)
+
+type StaticSizer interface {
+ BinaryStaticSize() int
+}
+
+func StaticSize(obj any) int {
+ sz, err := staticSize(reflect.TypeOf(obj))
+ if err != nil {
+ panic(err)
+ }
+ return sz
+}
+
+var (
+ staticSizerType = reflect.TypeOf((*StaticSizer)(nil)).Elem()
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+func staticSize(typ reflect.Type) (int, error) {
+ if typ.Implements(staticSizerType) {
+ return reflect.New(typ).Elem().Interface().(StaticSizer).BinaryStaticSize(), nil
+ }
+ switch typ.Kind() {
+ case reflect.Uint8, reflect.Int8:
+ return 1, nil
+ case reflect.Uint16, reflect.Int16:
+ return 2, nil
+ case reflect.Uint32, reflect.Int32:
+ return 4, nil
+ case reflect.Uint64, reflect.Int64:
+ return 8, nil
+ case reflect.Ptr:
+ return staticSize(typ.Elem())
+ case reflect.Array:
+ elemSize, err := staticSize(typ.Elem())
+ if err != nil {
+ return 0, err
+ }
+ return elemSize * typ.Len(), nil
+ case reflect.Struct:
+ if !(typ.Implements(marshalerType) || typ.Implements(unmarshalerType)) {
+ return getStructHandler(typ).Size, nil
+ }
+ return 0, fmt.Errorf("type=%v (kind=%v) does not implement binfmt.StaticSizer but does implement binfmt.Marshaler or binfmt.Unmarshaler",
+ typ, typ.Kind())
+ default:
+ return 0, fmt.Errorf("type=%v does not implement binfmt.StaticSizer and kind=%v is not a supported statically-sized kind",
+ typ, typ.Kind())
+ }
+}
diff --git a/lib/binstruct/structs.go b/lib/binstruct/structs.go
new file mode 100644
index 0000000..ec2bb7d
--- /dev/null
+++ b/lib/binstruct/structs.go
@@ -0,0 +1,186 @@
+package binstruct
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+type End struct{}
+
+var endType = reflect.TypeOf(End{})
+
+type tag struct {
+ skip bool
+
+ off int
+ siz int
+}
+
+func parseStructTag(str string) (tag, error) {
+ var ret tag
+ for _, part := range strings.Split(str, ",") {
+ part = strings.TrimSpace(part)
+ if part == "" {
+ continue
+ }
+ if part == "-" {
+ return tag{skip: true}, nil
+ }
+ keyval := strings.SplitN(part, "=", 2)
+ if len(keyval) != 2 {
+ return tag{}, fmt.Errorf("option is not a key=value pair: %q", part)
+ }
+ key := keyval[0]
+ val := keyval[1]
+ switch key {
+ case "off":
+ vint, err := strconv.ParseInt(val, 0, 0)
+ if err != nil {
+ return tag{}, err
+ }
+ ret.off = int(vint)
+ case "siz":
+ vint, err := strconv.ParseInt(val, 0, 0)
+ if err != nil {
+ return tag{}, err
+ }
+ ret.siz = int(vint)
+ default:
+ return tag{}, fmt.Errorf("unrecognized option %q", key)
+ }
+ }
+ return ret, nil
+}
+
+type structHandler struct {
+ name string
+ Size int
+ fields []structField
+}
+
+type structField struct {
+ name string
+ tag
+}
+
+func (sh structHandler) Unmarshal(dat []byte, dst reflect.Value) (int, error) {
+ var n int
+ for i, field := range sh.fields {
+ if field.skip {
+ continue
+ }
+ _n, err := Unmarshal(dat[n:], dst.Field(i).Addr().Interface())
+ if err != nil {
+ if _n >= 0 {
+ n += _n
+ }
+ return n, fmt.Errorf("struct %q field %v %q: %w",
+ sh.name, i, field.name, err)
+ }
+ if _n != field.siz {
+ return n, fmt.Errorf("struct %q field %v %q: consumed %v bytes but should have consumed %v bytes",
+ sh.name, i, field.name, _n, field.siz)
+ }
+ n += _n
+ }
+ return n, nil
+}
+
+func (sh structHandler) Marshal(val reflect.Value) ([]byte, error) {
+ ret := make([]byte, 0, sh.Size)
+ for i, field := range sh.fields {
+ if field.skip {
+ continue
+ }
+ bs, err := Marshal(val.Field(i).Interface())
+ ret = append(ret, bs...)
+ if err != nil {
+ return ret, fmt.Errorf("struct %q field %v %q: %w",
+ sh.name, i, field.name, err)
+ }
+ }
+ return ret, nil
+}
+
+func genStructHandler(structInfo reflect.Type) (structHandler, error) {
+ var ret structHandler
+
+ ret.name = structInfo.String()
+
+ var curOffset, endOffset int
+ for i := 0; i < structInfo.NumField(); i++ {
+ var fieldInfo reflect.StructField = structInfo.Field(i)
+
+ if fieldInfo.Anonymous && fieldInfo.Type != endType {
+ err := fmt.Errorf("binstruct does not support embedded fields")
+ return ret, fmt.Errorf("struct %q field %v %q: %w",
+ ret.name, i, fieldInfo.Name, err)
+ }
+
+ fieldTag, err := parseStructTag(fieldInfo.Tag.Get("bin"))
+ if err != nil {
+ return ret, fmt.Errorf("struct %q field %v %q: %w",
+ ret.name, i, fieldInfo.Name, err)
+ }
+ if fieldTag.skip {
+ ret.fields = append(ret.fields, structField{
+ tag: fieldTag,
+ name: fieldInfo.Name,
+ })
+ continue
+ }
+
+ if fieldTag.off != curOffset {
+ err := fmt.Errorf("tag says off=%#x but curOffset=%#x", fieldTag.off, curOffset)
+ return ret, fmt.Errorf("struct %q field %v %q: %w",
+ ret.name, i, fieldInfo.Name, err)
+ }
+ if fieldInfo.Type == endType {
+ endOffset = curOffset
+ }
+
+ fieldSize, err := staticSize(fieldInfo.Type)
+ if err != nil {
+ return ret, fmt.Errorf("struct %q field %v %q: %w",
+ ret.name, i, fieldInfo.Name, err)
+ }
+
+ if fieldTag.siz != fieldSize {
+ err := fmt.Errorf("tag says siz=%#x but StaticSize(typ)=%#x", fieldTag.siz, fieldSize)
+ return ret, fmt.Errorf("struct %q field %v %q: %w",
+ ret.name, i, fieldInfo.Name, err)
+ }
+ curOffset += fieldTag.siz
+
+ ret.fields = append(ret.fields, structField{
+ name: fieldInfo.Name,
+ tag: fieldTag,
+ })
+ }
+ ret.Size = curOffset
+
+ if ret.Size != endOffset {
+ return ret, fmt.Errorf("struct %q: .Size=%v but endOffset=%v",
+ ret.name, ret.Size, endOffset)
+ }
+
+ return ret, nil
+}
+
+var structCache = make(map[reflect.Type]structHandler)
+
+func getStructHandler(typ reflect.Type) structHandler {
+ h, ok := structCache[typ]
+ if ok {
+ return h
+ }
+
+ h, err := genStructHandler(typ)
+ if err != nil {
+ panic(err)
+ }
+ structCache[typ] = h
+ return h
+}
diff --git a/lib/binstruct/unmarshal.go b/lib/binstruct/unmarshal.go
new file mode 100644
index 0000000..1959d45
--- /dev/null
+++ b/lib/binstruct/unmarshal.go
@@ -0,0 +1,54 @@
+package binstruct
+
+import (
+ "fmt"
+ "reflect"
+)
+
+type Unmarshaler interface {
+ UnmarshalBinary([]byte) (int, error)
+}
+
+func Unmarshal(dat []byte, dstPtr any) (int, error) {
+ if unmar, ok := dstPtr.(Unmarshaler); ok {
+ return unmar.UnmarshalBinary(dat)
+ }
+ return UnmarshalWithoutInterface(dat, dstPtr)
+}
+
+func UnmarshalWithoutInterface(dat []byte, dstPtr any) (int, error) {
+ _dstPtr := reflect.ValueOf(dstPtr)
+ if _dstPtr.Kind() != reflect.Ptr {
+ return 0, fmt.Errorf("not a pointer: %v", _dstPtr.Type())
+ }
+ dst := _dstPtr.Elem()
+
+ switch dst.Kind() {
+ case reflect.Uint8, reflect.Int8, reflect.Uint16, reflect.Int16, reflect.Uint32, reflect.Int32, reflect.Uint64, reflect.Int64:
+ typ := intKind2Type[dst.Kind()]
+ newDstPtr := reflect.New(typ)
+ n, err := Unmarshal(dat, newDstPtr.Interface())
+ dst.Set(newDstPtr.Elem().Convert(dst.Type()))
+ return n, err
+ case reflect.Ptr:
+ elemPtr := reflect.New(dst.Type().Elem())
+ n, err := Unmarshal(dat, elemPtr.Interface())
+ dst.Set(elemPtr.Convert(dst.Type()))
+ return n, err
+ case reflect.Array:
+ var n int
+ for i := 0; i < dst.Len(); i++ {
+ _n, err := Unmarshal(dat[n:], dst.Index(i).Addr().Interface())
+ n += _n
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ case reflect.Struct:
+ return getStructHandler(dst.Type()).Unmarshal(dat, dst)
+ default:
+ panic(fmt.Errorf("type=%v does not implement binfmt.Unmarshaler and kind=%v is not a supported statically-sized kind",
+ dst.Type(), dst.Kind()))
+ }
+}
diff --git a/lib/btrfs/Makefile b/lib/btrfs/Makefile
new file mode 100644
index 0000000..43d665f
--- /dev/null
+++ b/lib/btrfs/Makefile
@@ -0,0 +1,79 @@
+.DEFAULT_GOAL = all
+.SECONDARY:
+.DELETE_ON_ERROR:
+
+btrfsitem/items.txt: btrfsitem $(wildcard btrfsitem/item_*.go) $(MAKEFILE_LIST)
+ { \
+ sed -En 's,^type (\S+) .* // (.*=.*),\1 \2,p' $(filter btrfsitem/item_%.go,$^) | while read -r typ keys; do \
+ for key in $$keys; do \
+ echo "$$key" "$$typ"; \
+ done; \
+ done; \
+ } | LC_COLLATE=C sort >$@
+files += btrfsitem/items.txt
+
+btrfsitem/items_gen.go: btrfsitem/items.txt $(MAKEFILE_LIST)
+ { \
+ echo '// Code generated by Make. DO NOT EDIT.'; \
+ echo; \
+ echo 'package $(@D)'; \
+ echo 'import ('; \
+ echo '"reflect"'; \
+ echo; \
+ echo '"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"'; \
+ echo ')'; \
+ echo 'const ('; \
+ sed -E 's,(.*)=(.*) (.*),\1_KEY=internal.\1_KEY,' $<; \
+ echo ')'; \
+ echo 'var keytype2gotype = map[Type]reflect.Type{'; \
+ sed -En 's|(.*)=([^:]*) (.*)|\1_KEY: reflect.TypeOf(\3{}),|p' $<; \
+ echo '}'; \
+ echo 'var untypedObjID2gotype = map[internal.ObjID]reflect.Type{'; \
+ sed -En 's|UNTYPED=0:(.*) (.*)|internal.\1: reflect.TypeOf(\2{}),|p' $<; \
+ echo '}'; \
+ sed -En 's,(.*)=(.*) (.+),\3,p' $< | LC_COLLATE=C sort -u | sed 's,.*,func (&) isItem() {},'; \
+ } | gofmt >$@
+files += btrfsitem/items_gen.go
+
+internal/itemtype.go: btrfsitem/items.txt $(MAKEFILE_LIST)
+ { \
+ echo '// Code generated by Make. DO NOT EDIT.'; \
+ echo; \
+ echo 'package $(@D)'; \
+ echo 'import "fmt"'; \
+ echo 'type ItemType uint8'; \
+ echo 'const ('; \
+ sed -E 's,(.*)=([^:]*)(:.*)? (.*),\1_KEY=ItemType(\2),' $< | uniq; \
+ echo ')'; \
+ echo 'func (t ItemType) String() string {'; \
+ echo ' names := map[ItemType]string{'; \
+ sed -E 's@(.*)=(.*) (.*)@\1_KEY: "\1",@' $< | sed 's/"UUID_/&KEY_/'; \
+ echo ' }'; \
+ echo ' if name, ok := names[t]; ok {'; \
+ echo ' return name'; \
+ echo ' }'; \
+ echo ' return fmt.Sprintf("%d", t)'; \
+ echo '}'; \
+ } | gofmt >$@
+files += internal/itemtype.go
+
+aliases_objid.go: internal/objid.go $(MAKEFILE_LIST)
+ { \
+ echo '// Code generated by Make. DO NOT EDIT.'; \
+ echo; \
+ echo 'package btrfs'; \
+ echo 'import ('; \
+ echo '"git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"'; \
+ echo ')'; \
+ echo 'const('; \
+ sed -En 's/^\s*(\S*_OBJECTIDS?)\s*=.*/\1 = internal.\1/p' <$<; \
+ echo ')'; \
+ } | gofmt >$@
+files += aliases_objid.go
+
+all: $(files)
+.PHONY: all
+
+clean:
+ rm -f -- $(files)
+.PHONY: clean
diff --git a/lib/btrfs/aliases.go b/lib/btrfs/aliases.go
new file mode 100644
index 0000000..75cbf8b
--- /dev/null
+++ b/lib/btrfs/aliases.go
@@ -0,0 +1,19 @@
+package btrfs
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+type (
+ // (u)int64 types
+
+ Generation = internal.Generation
+ ObjID = internal.ObjID
+
+ // complex types
+
+ Key = internal.Key
+ Time = internal.Time
+ UUID = util.UUID
+)
diff --git a/lib/btrfs/aliases_objid.go b/lib/btrfs/aliases_objid.go
new file mode 100644
index 0000000..6ccbbad
--- /dev/null
+++ b/lib/btrfs/aliases_objid.go
@@ -0,0 +1,37 @@
+// Code generated by Make. DO NOT EDIT.
+
+package btrfs
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+)
+
+const (
+ ROOT_TREE_OBJECTID = internal.ROOT_TREE_OBJECTID
+ EXTENT_TREE_OBJECTID = internal.EXTENT_TREE_OBJECTID
+ CHUNK_TREE_OBJECTID = internal.CHUNK_TREE_OBJECTID
+ DEV_TREE_OBJECTID = internal.DEV_TREE_OBJECTID
+ FS_TREE_OBJECTID = internal.FS_TREE_OBJECTID
+ ROOT_TREE_DIR_OBJECTID = internal.ROOT_TREE_DIR_OBJECTID
+ CSUM_TREE_OBJECTID = internal.CSUM_TREE_OBJECTID
+ QUOTA_TREE_OBJECTID = internal.QUOTA_TREE_OBJECTID
+ UUID_TREE_OBJECTID = internal.UUID_TREE_OBJECTID
+ FREE_SPACE_TREE_OBJECTID = internal.FREE_SPACE_TREE_OBJECTID
+ BLOCK_GROUP_TREE_OBJECTID = internal.BLOCK_GROUP_TREE_OBJECTID
+ DEV_STATS_OBJECTID = internal.DEV_STATS_OBJECTID
+ BALANCE_OBJECTID = internal.BALANCE_OBJECTID
+ ORPHAN_OBJECTID = internal.ORPHAN_OBJECTID
+ TREE_LOG_OBJECTID = internal.TREE_LOG_OBJECTID
+ TREE_LOG_FIXUP_OBJECTID = internal.TREE_LOG_FIXUP_OBJECTID
+ TREE_RELOC_OBJECTID = internal.TREE_RELOC_OBJECTID
+ DATA_RELOC_TREE_OBJECTID = internal.DATA_RELOC_TREE_OBJECTID
+ EXTENT_CSUM_OBJECTID = internal.EXTENT_CSUM_OBJECTID
+ FREE_SPACE_OBJECTID = internal.FREE_SPACE_OBJECTID
+ FREE_INO_OBJECTID = internal.FREE_INO_OBJECTID
+ MULTIPLE_OBJECTIDS = internal.MULTIPLE_OBJECTIDS
+ FIRST_FREE_OBJECTID = internal.FIRST_FREE_OBJECTID
+ LAST_FREE_OBJECTID = internal.LAST_FREE_OBJECTID
+ FIRST_CHUNK_TREE_OBJECTID = internal.FIRST_CHUNK_TREE_OBJECTID
+ DEV_ITEMS_OBJECTID = internal.DEV_ITEMS_OBJECTID
+ EMPTY_SUBVOL_DIR_OBJECTID = internal.EMPTY_SUBVOL_DIR_OBJECTID
+)
diff --git a/lib/btrfs/btrfsitem/item_blockgroup.go b/lib/btrfs/btrfsitem/item_blockgroup.go
new file mode 100644
index 0000000..71d960d
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_blockgroup.go
@@ -0,0 +1,16 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+)
+
+// key.objectid = logical_addr
+// key.offset = size of chunk
+type BlockGroup struct { // BLOCK_GROUP_ITEM=192
+ Used int64 `bin:"off=0, siz=8"`
+ ChunkObjectID internal.ObjID `bin:"off=8, siz=8"` // always BTRFS_FIRST_CHUNK_TREE_OBJECTID
+ Flags btrfsvol.BlockGroupFlags `bin:"off=16, siz=8"`
+ binstruct.End `bin:"off=24"`
+}
diff --git a/lib/btrfs/btrfsitem/item_chunk.go b/lib/btrfs/btrfsitem/item_chunk.go
new file mode 100644
index 0000000..9256651
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_chunk.go
@@ -0,0 +1,90 @@
+package btrfsitem
+
+import (
+ "fmt"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+// Maps logical address to physical.
+//
+// key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID
+// key.offset = logical_addr
+type Chunk struct { // CHUNK_ITEM=228
+ Head ChunkHeader
+ Stripes []ChunkStripe
+}
+
+type ChunkHeader struct {
+ Size btrfsvol.AddrDelta `bin:"off=0x0, siz=0x8"`
+ Owner internal.ObjID `bin:"off=0x8, siz=0x8"` // root referencing this chunk (always EXTENT_TREE_OBJECTID=2)
+ StripeLen uint64 `bin:"off=0x10, siz=0x8"` // ???
+ Type btrfsvol.BlockGroupFlags `bin:"off=0x18, siz=0x8"`
+ IOOptimalAlign uint32 `bin:"off=0x20, siz=0x4"`
+ IOOptimalWidth uint32 `bin:"off=0x24, siz=0x4"`
+ IOMinSize uint32 `bin:"off=0x28, siz=0x4"` // sector size
+ NumStripes uint16 `bin:"off=0x2c, siz=0x2"` // [ignored-when-writing]
+ SubStripes uint16 `bin:"off=0x2e, siz=0x2"` // ???
+ binstruct.End `bin:"off=0x30"`
+}
+
+type ChunkStripe struct {
+ DeviceID btrfsvol.DeviceID `bin:"off=0x0, siz=0x8"`
+ Offset btrfsvol.PhysicalAddr `bin:"off=0x8, siz=0x8"`
+ DeviceUUID util.UUID `bin:"off=0x10, siz=0x10"`
+ binstruct.End `bin:"off=0x20"`
+}
+
+func (chunk Chunk) Mappings(key internal.Key) []btrfsvol.Mapping {
+ ret := make([]btrfsvol.Mapping, 0, len(chunk.Stripes))
+ for _, stripe := range chunk.Stripes {
+ ret = append(ret, btrfsvol.Mapping{
+ LAddr: btrfsvol.LogicalAddr(key.Offset),
+ PAddr: btrfsvol.QualifiedPhysicalAddr{
+ Dev: stripe.DeviceID,
+ Addr: stripe.Offset,
+ },
+ Size: chunk.Head.Size,
+ SizeLocked: true,
+ Flags: &chunk.Head.Type,
+ })
+ }
+ return ret
+}
+
+func (chunk *Chunk) UnmarshalBinary(dat []byte) (int, error) {
+ n, err := binstruct.Unmarshal(dat, &chunk.Head)
+ if err != nil {
+ return n, err
+ }
+ chunk.Stripes = nil
+ for i := 0; i < int(chunk.Head.NumStripes); i++ {
+ var stripe ChunkStripe
+ _n, err := binstruct.Unmarshal(dat[n:], &stripe)
+ n += _n
+ if err != nil {
+ return n, fmt.Errorf("%T.UnmarshalBinary: %w", *chunk, err)
+ }
+ chunk.Stripes = append(chunk.Stripes, stripe)
+ }
+ return n, nil
+}
+
+func (chunk Chunk) MarshalBinary() ([]byte, error) {
+ chunk.Head.NumStripes = uint16(len(chunk.Stripes))
+ ret, err := binstruct.Marshal(chunk.Head)
+ if err != nil {
+ return ret, err
+ }
+ for _, stripe := range chunk.Stripes {
+ _ret, err := binstruct.Marshal(stripe)
+ ret = append(ret, _ret...)
+ if err != nil {
+ return ret, fmt.Errorf("%T.MarshalBinary: %w", chunk, err)
+ }
+ }
+ return ret, nil
+}
diff --git a/lib/btrfs/btrfsitem/item_dev.go b/lib/btrfs/btrfsitem/item_dev.go
new file mode 100644
index 0000000..d3fe582
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_dev.go
@@ -0,0 +1,33 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+// key.objectid = BTRFS_DEV_ITEMS_OBJECTID
+// key.offset = device_id (starting at 1)
+type Dev struct { // DEV_ITEM=216
+ DevID btrfsvol.DeviceID `bin:"off=0x0, siz=0x8"`
+
+ NumBytes uint64 `bin:"off=0x8, siz=0x8"`
+ NumBytesUsed uint64 `bin:"off=0x10, siz=0x8"`
+
+ IOOptimalAlign uint32 `bin:"off=0x18, siz=0x4"`
+ IOOptimalWidth uint32 `bin:"off=0x1c, siz=0x4"`
+ IOMinSize uint32 `bin:"off=0x20, siz=0x4"` // sector size
+
+ Type uint64 `bin:"off=0x24, siz=0x8"`
+ Generation internal.Generation `bin:"off=0x2c, siz=0x8"`
+ StartOffset uint64 `bin:"off=0x34, siz=0x8"`
+ DevGroup uint32 `bin:"off=0x3c, siz=0x4"`
+ SeekSpeed uint8 `bin:"off=0x40, siz=0x1"`
+ Bandwidth uint8 `bin:"off=0x41, siz=0x1"`
+
+ DevUUID util.UUID `bin:"off=0x42, siz=0x10"`
+ FSUUID util.UUID `bin:"off=0x52, siz=0x10"`
+
+ binstruct.End `bin:"off=0x62"`
+}
diff --git a/lib/btrfs/btrfsitem/item_devextent.go b/lib/btrfs/btrfsitem/item_devextent.go
new file mode 100644
index 0000000..c346d85
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_devextent.go
@@ -0,0 +1,32 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+// key.objectid = device_id
+// key.offset = physical_addr
+type DevExtent struct { // DEV_EXTENT=204
+ ChunkTree int64 `bin:"off=0, siz=8"`
+ ChunkObjectID internal.ObjID `bin:"off=8, siz=8"`
+ ChunkOffset btrfsvol.LogicalAddr `bin:"off=16, siz=8"`
+ Length btrfsvol.AddrDelta `bin:"off=24, siz=8"`
+ ChunkTreeUUID util.UUID `bin:"off=32, siz=16"`
+ binstruct.End `bin:"off=48"`
+}
+
+func (devext DevExtent) Mapping(key internal.Key) btrfsvol.Mapping {
+ return btrfsvol.Mapping{
+ LAddr: devext.ChunkOffset,
+ PAddr: btrfsvol.QualifiedPhysicalAddr{
+ Dev: btrfsvol.DeviceID(key.ObjectID),
+ Addr: btrfsvol.PhysicalAddr(key.Offset),
+ },
+ Size: devext.Length,
+ SizeLocked: true,
+ Flags: nil,
+ }
+}
diff --git a/lib/btrfs/btrfsitem/item_dir.go b/lib/btrfs/btrfsitem/item_dir.go
new file mode 100644
index 0000000..57f6d6d
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_dir.go
@@ -0,0 +1,115 @@
+package btrfsitem
+
+import (
+ "fmt"
+ "hash/crc32"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+)
+
// DirEntries is a packed sequence of directory entries sharing a single
// item (DIR_ITEM and XATTR_ITEM may pack several entries whose names
// hash to the same value).
//
// key.objectid = inode of directory containing this entry
// key.offset =
//	for DIR_ITEM and XATTR_ITEM = NameHash(name)
//	for DIR_INDEX = index id in the directory (starting at 2, because "." and "..")
type DirEntries []DirEntry // DIR_ITEM=84 DIR_INDEX=96 XATTR_ITEM=24
+
// NameHash returns the btrfs directory-entry name hash for dat: the
// bit-inverted CRC32c of the name, seeded with 1 (which, given Go's
// internal pre/post-inversion, matches the kernel's crc32c((u32)~1, …)).
func NameHash(dat []byte) uint64 {
	castagnoli := crc32.MakeTable(crc32.Castagnoli)
	return uint64(^crc32.Update(1, castagnoli, dat))
}
+
+func (o *DirEntries) UnmarshalBinary(dat []byte) (int, error) {
+ *o = nil
+ n := 0
+ for n < len(dat) {
+ var ref DirEntry
+ _n, err := binstruct.Unmarshal(dat, &ref)
+ n += _n
+ if err != nil {
+ return n, err
+ }
+ *o = append(*o, ref)
+ }
+ return n, nil
+}
+
+func (o DirEntries) MarshalBinary() ([]byte, error) {
+ var ret []byte
+ for _, ref := range o {
+ bs, err := binstruct.Marshal(ref)
+ ret = append(ret, bs...)
+ if err != nil {
+ return ret, err
+ }
+ }
+ return ret, nil
+}
+
// DirEntry is one directory entry: a fixed-size header followed by a
// variable-length data payload and name (handled by the custom
// UnmarshalBinary/MarshalBinary below).
type DirEntry struct {
	Location      internal.Key `bin:"off=0x0, siz=0x11"`
	TransID       int64        `bin:"off=0x11, siz=8"`
	DataLen       uint16       `bin:"off=0x19, siz=2"` // [ignored-when-writing] recomputed from len(.Data)
	NameLen       uint16       `bin:"off=0x1b, siz=2"` // [ignored-when-writing] recomputed from len(.Name)
	Type          FileType     `bin:"off=0x1d, siz=1"`
	binstruct.End `bin:"off=0x1e"`
	Data          []byte       `bin:"-"` // .DataLen bytes following the header
	Name          []byte       `bin:"-"` // .NameLen bytes following .Data
}
+
+func (o *DirEntry) UnmarshalBinary(dat []byte) (int, error) {
+ n, err := binstruct.UnmarshalWithoutInterface(dat, o)
+ if err != nil {
+ return n, err
+ }
+ o.Data = dat[n : n+int(o.DataLen)]
+ n += int(o.DataLen)
+ o.Name = dat[n : n+int(o.NameLen)]
+ n += int(o.NameLen)
+ return n, nil
+}
+
+func (o DirEntry) MarshalBinary() ([]byte, error) {
+ o.DataLen = uint16(len(o.Data))
+ o.NameLen = uint16(len(o.Name))
+ dat, err := binstruct.MarshalWithoutInterface(o)
+ if err != nil {
+ return dat, err
+ }
+ dat = append(dat, o.Data...)
+ dat = append(dat, o.Name...)
+ return dat, nil
+}
+
// FileType is the type tag carried in a DirEntry (file, directory,
// symlink, …).
type FileType uint8

const (
	FT_UNKNOWN  = FileType(0)
	FT_REG_FILE = FileType(1)
	FT_DIR      = FileType(2)
	FT_CHRDEV   = FileType(3)
	FT_BLKDEV   = FileType(4)
	FT_FIFO     = FileType(5)
	FT_SOCK     = FileType(6)
	FT_SYMLINK  = FileType(7)
	FT_XATTR    = FileType(8)

	FT_MAX = FileType(9)
)

// String returns a short name for the file type, or "DIR_ITEM.<n>" for
// values with no known name.
func (ft FileType) String() string {
	switch ft {
	case FT_UNKNOWN:
		return "UNKNOWN"
	case FT_REG_FILE:
		return "FILE" // XXX
	case FT_DIR:
		return "DIR"
	case FT_CHRDEV:
		return "CHRDEV"
	case FT_BLKDEV:
		return "BLKDEV"
	case FT_FIFO:
		return "FIFO"
	case FT_SOCK:
		return "SOCK"
	case FT_SYMLINK:
		return "SYMLINK"
	case FT_XATTR:
		return "XATTR"
	default:
		return fmt.Sprintf("DIR_ITEM.%d", uint8(ft))
	}
}
diff --git a/lib/btrfs/btrfsitem/item_empty.go b/lib/btrfs/btrfsitem/item_empty.go
new file mode 100644
index 0000000..209b2fc
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_empty.go
@@ -0,0 +1,9 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+)
+
// Empty is the zero-length body shared by every item type whose
// presence (key) is the entire payload.
type Empty struct { // ORPHAN_ITEM=48 TREE_BLOCK_REF=176 SHARED_BLOCK_REF=182 FREE_SPACE_EXTENT=199 QGROUP_RELATION=246
	binstruct.End `bin:"off=0"`
}
diff --git a/lib/btrfs/btrfsitem/item_extent.go b/lib/btrfs/btrfsitem/item_extent.go
new file mode 100644
index 0000000..9a9ea55
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_extent.go
@@ -0,0 +1,165 @@
+package btrfsitem
+
+import (
+ "fmt"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
// Extent is the body of an EXTENT_ITEM: a fixed header, an optional
// TreeBlockInfo (only for tree-block extents), then inline
// backreferences until the end of the item.
type Extent struct { // EXTENT_ITEM=168
	Head ExtentHeader
	Info TreeBlockInfo // only if .Head.Flags.Has(EXTENT_FLAG_TREE_BLOCK)
	Refs []ExtentInlineRef
}

// UnmarshalBinary parses .Head, then .Info if the TREE_BLOCK flag is
// set, then inline refs until the input is exhausted.
func (o *Extent) UnmarshalBinary(dat []byte) (int, error) {
	n, err := binstruct.Unmarshal(dat, &o.Head)
	if err != nil {
		return n, err
	}
	if o.Head.Flags.Has(EXTENT_FLAG_TREE_BLOCK) {
		_n, err := binstruct.Unmarshal(dat[n:], &o.Info)
		n += _n
		if err != nil {
			return n, err
		}
	}
	o.Refs = nil // discard refs from any previous use of o
	for n < len(dat) {
		var ref ExtentInlineRef
		_n, err := binstruct.Unmarshal(dat[n:], &ref)
		n += _n
		// The partially-parsed ref is appended even on error, so the
		// caller can see how far parsing got.
		o.Refs = append(o.Refs, ref)
		if err != nil {
			return n, err
		}
	}
	return n, nil
}
+
+func (o Extent) MarshalBinary() ([]byte, error) {
+ dat, err := binstruct.Marshal(o.Head)
+ if err != nil {
+ return dat, err
+ }
+ if o.Head.Flags.Has(EXTENT_FLAG_TREE_BLOCK) {
+ bs, err := binstruct.Marshal(o.Info)
+ dat = append(dat, bs...)
+ if err != nil {
+ return dat, err
+ }
+ }
+ for _, ref := range o.Refs {
+ bs, err := binstruct.Marshal(ref)
+ dat = append(dat, bs...)
+ if err != nil {
+ return dat, err
+ }
+ }
+ return dat, nil
+}
+
// ExtentHeader is the fixed-size leading portion shared by EXTENT_ITEM
// and METADATA_ITEM bodies.
type ExtentHeader struct {
	Refs          int64               `bin:"off=0, siz=8"`
	Generation    internal.Generation `bin:"off=8, siz=8"`
	Flags         ExtentFlags         `bin:"off=16, siz=8"`
	binstruct.End `bin:"off=24"`
}
+
+type TreeBlockInfo struct {
+ Key internal.Key `bin:"off=0, siz=0x11"`
+ Level uint8 `bin:"off=0x11, siz=0x8"`
+ binstruct.End `bin:"off=0x19"`
+}
+
// ExtentFlags is the bitfield type of ExtentHeader.Flags.
type ExtentFlags uint64

const (
	EXTENT_FLAG_DATA = ExtentFlags(1 << iota)
	EXTENT_FLAG_TREE_BLOCK // when set, a TreeBlockInfo follows the header in an EXTENT_ITEM
)

// extentFlagNames is indexed by bit position, for BitfieldString.
var extentFlagNames = []string{
	"DATA",
	"TREE_BLOCK",
}

// Has reports whether every bit of req is set in f.
func (f ExtentFlags) Has(req ExtentFlags) bool { return f&req == req }

// String formats the flags via util.BitfieldString using the names above.
func (f ExtentFlags) String() string { return util.BitfieldString(f, extentFlagNames, util.HexNone) }

// ExtentInlineRef is one backreference stored inline in an EXTENT_ITEM
// or METADATA_ITEM body; which of .Offset/.Body are present depends on
// .Type (see UnmarshalBinary/MarshalBinary).
type ExtentInlineRef struct {
	Type   Type   // only 4 valid values: {TREE,SHARED}_BLOCK_REF_KEY, {EXTENT,SHARED}_DATA_REF_KEY
	Offset uint64 // only when Type != EXTENT_DATA_REF_KEY
	Body   Item   // only when Type == *_DATA_REF_KEY
}
+
+func (o *ExtentInlineRef) UnmarshalBinary(dat []byte) (int, error) {
+ o.Type = Type(dat[0])
+ n := 1
+ switch o.Type {
+ case TREE_BLOCK_REF_KEY, SHARED_BLOCK_REF_KEY:
+ _n, err := binstruct.Unmarshal(dat[n:], &o.Offset)
+ n += _n
+ if err != nil {
+ return n, err
+ }
+ case EXTENT_DATA_REF_KEY:
+ var dref ExtentDataRef
+ _n, err := binstruct.Unmarshal(dat[n:], &dref)
+ n += _n
+ o.Body = dref
+ if err != nil {
+ return n, err
+ }
+ case SHARED_DATA_REF_KEY:
+ _n, err := binstruct.Unmarshal(dat[n:], &o.Offset)
+ n += _n
+ if err != nil {
+ return n, err
+ }
+ var sref SharedDataRef
+ _n, err = binstruct.Unmarshal(dat[n:], &sref)
+ n += _n
+ o.Body = sref
+ if err != nil {
+ return n, err
+ }
+ default:
+ return n, fmt.Errorf("btrfsitem.ExtentInlineRef.UnmarshalBinary: unexpected item type %v", o.Type)
+ }
+ return n, nil
+}
+
+func (o ExtentInlineRef) MarshalBinary() ([]byte, error) {
+ dat := []byte{byte(o.Type)}
+ switch o.Type {
+ case TREE_BLOCK_REF_KEY, SHARED_BLOCK_REF_KEY:
+ _dat, err := binstruct.Marshal(o.Offset)
+ dat = append(dat, _dat...)
+ if err != nil {
+ return dat, err
+ }
+ case EXTENT_DATA_REF_KEY:
+ _dat, err := binstruct.Marshal(o.Body)
+ dat = append(dat, _dat...)
+ if err != nil {
+ return dat, err
+ }
+ case SHARED_DATA_REF_KEY:
+ _dat, err := binstruct.Marshal(o.Offset)
+ dat = append(dat, _dat...)
+ if err != nil {
+ return dat, err
+ }
+ _dat, err = binstruct.Marshal(o.Body)
+ dat = append(dat, _dat...)
+ if err != nil {
+ return dat, err
+ }
+ default:
+ return dat, fmt.Errorf("btrfsitem.ExtentInlineRef.MarshalBinary: unexpected item type %v", o.Type)
+ }
+ return dat, nil
+}
diff --git a/lib/btrfs/btrfsitem/item_extentcsum.go b/lib/btrfs/btrfsitem/item_extentcsum.go
new file mode 100644
index 0000000..b27dbde
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_extentcsum.go
@@ -0,0 +1,39 @@
+package btrfsitem
+
+import (
+ "fmt"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
+)
+
// key.objectid = BTRFS_EXTENT_CSUM_OBJECTID
// key.offset = laddr of checksummed region
type ExtentCSum struct { // EXTENT_CSUM=128
	// ChecksumSize is not part of the on-disk data; it must be set
	// (from the filesystem's checksum type) before calling
	// UnmarshalBinary or MarshalBinary.  UnmarshalItem sets it
	// automatically from the superblock's csum type.
	ChecksumSize int
	// Checksum of each sector starting at key.offset
	Sums []btrfssum.CSum
}
+
+func (o *ExtentCSum) UnmarshalBinary(dat []byte) (int, error) {
+ if o.ChecksumSize == 0 {
+ return 0, fmt.Errorf("btrfs.ExtentCSum.UnmarshalBinary: .ChecksumSize must be set")
+ }
+ for len(dat) >= o.ChecksumSize {
+ var csum btrfssum.CSum
+ copy(csum[:], dat[:o.ChecksumSize])
+ dat = dat[o.ChecksumSize:]
+ o.Sums = append(o.Sums, csum)
+ }
+ return len(o.Sums) * o.ChecksumSize, nil
+}
+
+func (o ExtentCSum) MarshalBinary() ([]byte, error) {
+ if o.ChecksumSize == 0 {
+ return nil, fmt.Errorf("btrfs.ExtentCSum.MarshalBinary: .ChecksumSize must be set")
+ }
+ var dat []byte
+ for _, csum := range o.Sums {
+ dat = append(dat, csum[:o.ChecksumSize]...)
+ }
+ return dat, nil
+}
diff --git a/lib/btrfs/btrfsitem/item_extentdataref.go b/lib/btrfs/btrfsitem/item_extentdataref.go
new file mode 100644
index 0000000..aab5426
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_extentdataref.go
@@ -0,0 +1,14 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+)
+
// ExtentDataRef is the body of an EXTENT_DATA_REF item (and also
// appears inline inside EXTENT_ITEM/METADATA_ITEM backreferences): a
// reference count for a data extent from a given (root, objectid,
// offset).
type ExtentDataRef struct { // EXTENT_DATA_REF=178
	Root          internal.ObjID `bin:"off=0, siz=8"`
	ObjectID      internal.ObjID `bin:"off=8, siz=8"`
	Offset        int64          `bin:"off=16, siz=8"`
	Count         int32          `bin:"off=24, siz=4"`
	binstruct.End `bin:"off=28"`
}
diff --git a/lib/btrfs/btrfsitem/item_fileextent.go b/lib/btrfs/btrfsitem/item_fileextent.go
new file mode 100644
index 0000000..2f3ac2b
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_fileextent.go
@@ -0,0 +1,137 @@
+package btrfsitem
+
+import (
+ "fmt"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+)
+
// FileExtent is the body of an EXTENT_DATA item: one extent of a file's
// contents, stored either inline in the item or in a separate extent.
//
// key.objectid = inode
// key.offset = offset within file
type FileExtent struct { // EXTENT_DATA=108
	Generation internal.Generation `bin:"off=0x0, siz=0x8"` // transaction ID that created this extent
	RAMBytes   int64               `bin:"off=0x8, siz=0x8"` // upper bound of what compressed data will decompress to

	// 32 bits describing the data encoding
	Compression   CompressionType `bin:"off=0x10, siz=0x1"`
	Encryption    uint8           `bin:"off=0x11, siz=0x1"`
	OtherEncoding uint16          `bin:"off=0x12, siz=0x2"` // reserved for later use

	Type FileExtentType `bin:"off=0x14, siz=0x1"` // inline data or real extent

	binstruct.End `bin:"off=0x15"`

	// only one of these, depending on .Type
	BodyInline []byte `bin:"-"` // .Type == FILE_EXTENT_INLINE
	BodyExtent struct { // .Type == FILE_EXTENT_REG or FILE_EXTENT_PREALLOC
		// Position and size of extent within the device
		DiskByteNr   btrfsvol.LogicalAddr `bin:"off=0x0, siz=0x8"`
		DiskNumBytes btrfsvol.AddrDelta   `bin:"off=0x8, siz=0x8"`

		// Position of data within the extent
		Offset btrfsvol.AddrDelta `bin:"off=0x10, siz=0x8"`

		// Decompressed/unencrypted size
		NumBytes int64 `bin:"off=0x18, siz=0x8"`

		binstruct.End `bin:"off=0x20"`
	} `bin:"-"`
}

// UnmarshalBinary parses the fixed header, then — depending on .Type —
// takes the rest of the input as the inline body, or parses the extent
// descriptor.
func (o *FileExtent) UnmarshalBinary(dat []byte) (int, error) {
	n, err := binstruct.UnmarshalWithoutInterface(dat, o)
	if err != nil {
		return n, err
	}
	switch o.Type {
	case FILE_EXTENT_INLINE:
		o.BodyInline = dat[n:]
		n += len(o.BodyInline)
	case FILE_EXTENT_REG, FILE_EXTENT_PREALLOC:
		_n, err := binstruct.Unmarshal(dat[n:], &o.BodyExtent)
		n += _n
		if err != nil {
			return n, err
		}
	default:
		return n, fmt.Errorf("unknown file extent type %v", o.Type)
	}
	return n, nil
}
+
+func (o FileExtent) MarshalBinary() ([]byte, error) {
+ dat, err := binstruct.MarshalWithoutInterface(o)
+ if err != nil {
+ return dat, err
+ }
+ switch o.Type {
+ case FILE_EXTENT_INLINE:
+ dat = append(dat, o.BodyInline...)
+ case FILE_EXTENT_REG, FILE_EXTENT_PREALLOC:
+ bs, err := binstruct.Marshal(o.BodyExtent)
+ dat = append(dat, bs...)
+ if err != nil {
+ return dat, err
+ }
+ default:
+ return dat, fmt.Errorf("unknown file extent type %v", o.Type)
+ }
+ return dat, nil
+}
+
// FileExtentType says how a FileExtent's data is stored.
type FileExtentType uint8

const (
	FILE_EXTENT_INLINE = FileExtentType(iota)
	FILE_EXTENT_REG
	FILE_EXTENT_PREALLOC
)

// Size returns the number of (decompressed) bytes of file data this
// extent covers: the inline body length for inline extents, or
// .BodyExtent.NumBytes otherwise.
func (o FileExtent) Size() (int64, error) {
	switch o.Type {
	case FILE_EXTENT_INLINE:
		return int64(len(o.BodyInline)), nil
	case FILE_EXTENT_REG, FILE_EXTENT_PREALLOC:
		return o.BodyExtent.NumBytes, nil
	default:
		return 0, fmt.Errorf("unknown file extent type %v", o.Type)
	}
}

// String renders the numeric value plus a human-readable name, e.g.
// "1 (regular)".
func (fet FileExtentType) String() string {
	names := map[FileExtentType]string{
		FILE_EXTENT_INLINE:   "inline",
		FILE_EXTENT_REG:      "regular",
		FILE_EXTENT_PREALLOC: "prealloc",
	}
	name, ok := names[fet]
	if !ok {
		name = "unknown"
	}
	return fmt.Sprintf("%d (%s)", fet, name)
}
+
// CompressionType identifies the compression algorithm applied to a
// FileExtent's data.
type CompressionType uint8

const (
	COMPRESS_NONE = CompressionType(iota)
	COMPRESS_ZLIB
	COMPRESS_LZO
	COMPRESS_ZSTD
)

// String renders the numeric value plus a human-readable name, e.g.
// "1 (zlib)"; unrecognized values render as "<n> (unknown)".
func (ct CompressionType) String() string {
	var name string
	switch ct {
	case COMPRESS_NONE:
		name = "none"
	case COMPRESS_ZLIB:
		name = "zlib"
	case COMPRESS_LZO:
		name = "lzo"
	case COMPRESS_ZSTD:
		name = "zstd"
	default:
		name = "unknown"
	}
	return fmt.Sprintf("%d (%s)", ct, name)
}
diff --git a/lib/btrfs/btrfsitem/item_freespacebitmap.go b/lib/btrfs/btrfsitem/item_freespacebitmap.go
new file mode 100644
index 0000000..6158eb0
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_freespacebitmap.go
@@ -0,0 +1,12 @@
+package btrfsitem
+
// FreeSpaceBitmap is the raw bitmap payload of a FREE_SPACE_BITMAP item.
type FreeSpaceBitmap []byte // FREE_SPACE_BITMAP=200

// UnmarshalBinary takes the entire input as the bitmap (aliasing dat,
// not copying it).
func (o *FreeSpaceBitmap) UnmarshalBinary(dat []byte) (int, error) {
	*o = FreeSpaceBitmap(dat)
	return len(dat), nil
}

// MarshalBinary returns the raw bitmap bytes.
func (o FreeSpaceBitmap) MarshalBinary() ([]byte, error) {
	return o, nil
}
diff --git a/lib/btrfs/btrfsitem/item_freespaceinfo.go b/lib/btrfs/btrfsitem/item_freespaceinfo.go
new file mode 100644
index 0000000..89f555e
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_freespaceinfo.go
@@ -0,0 +1,11 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+)
+
// FreeSpaceInfo is the body of a FREE_SPACE_INFO item: an extent count
// and a flags word.
type FreeSpaceInfo struct { // FREE_SPACE_INFO=198
	ExtentCount   int32  `bin:"off=0, siz=4"`
	Flags         uint32 `bin:"off=4, siz=4"`
	binstruct.End `bin:"off=8"`
}
diff --git a/lib/btrfs/btrfsitem/item_inode.go b/lib/btrfs/btrfsitem/item_inode.go
new file mode 100644
index 0000000..9b1b91b
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_inode.go
@@ -0,0 +1,64 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+ "git.lukeshu.com/btrfs-progs-ng/lib/linux"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
// Inode is the body of an INODE_ITEM; fields marked "stat" correspond
// to the information surfaced by stat(2)/statx(2).
type Inode struct { // INODE_ITEM=1
	Generation    internal.Generation `bin:"off=0x00, siz=0x08"`
	TransID       int64               `bin:"off=0x08, siz=0x08"`
	Size          int64               `bin:"off=0x10, siz=0x08"` // stat
	NumBytes      int64               `bin:"off=0x18, siz=0x08"`
	BlockGroup    int64               `bin:"off=0x20, siz=0x08"`
	NLink         int32               `bin:"off=0x28, siz=0x04"` // stat
	UID           int32               `bin:"off=0x2C, siz=0x04"` // stat
	GID           int32               `bin:"off=0x30, siz=0x04"` // stat
	Mode          linux.StatMode      `bin:"off=0x34, siz=0x04"` // stat
	RDev          int64               `bin:"off=0x38, siz=0x08"` // stat
	Flags         InodeFlags          `bin:"off=0x40, siz=0x08"` // statx.stx_attributes, sorta
	Sequence      int64               `bin:"off=0x48, siz=0x08"` // NFS
	Reserved      [4]int64            `bin:"off=0x50, siz=0x20"`
	ATime         internal.Time       `bin:"off=0x70, siz=0x0c"` // stat
	CTime         internal.Time       `bin:"off=0x7c, siz=0x0c"` // stat
	MTime         internal.Time       `bin:"off=0x88, siz=0x0c"` // stat
	OTime         internal.Time       `bin:"off=0x94, siz=0x0c"` // statx.stx_btime (why is this called "otime" instead of "btime"?)
	binstruct.End `bin:"off=0xa0"`
}

// InodeFlags is the bitfield type of Inode.Flags.
type InodeFlags uint64

const (
	INODE_NODATASUM = InodeFlags(1 << iota)
	INODE_NODATACOW
	INODE_READONLY
	INODE_NOCOMPRESS
	INODE_PREALLOC
	INODE_SYNC
	INODE_IMMUTABLE
	INODE_APPEND
	INODE_NODUMP
	INODE_NOATIME
	INODE_DIRSYNC
	INODE_COMPRESS
)

// inodeFlagNames is indexed by bit position, for BitfieldString.
var inodeFlagNames = []string{
	"NODATASUM",
	"NODATACOW",
	"READONLY",
	"NOCOMPRESS",
	"PREALLOC",
	"SYNC",
	"IMMUTABLE",
	"APPEND",
	"NODUMP",
	"NOATIME",
	"DIRSYNC",
	"COMPRESS",
}

// Has reports whether every bit of req is set in f.
func (f InodeFlags) Has(req InodeFlags) bool { return f&req == req }

// String formats the flags via util.BitfieldString using the names above.
func (f InodeFlags) String() string { return util.BitfieldString(f, inodeFlagNames, util.HexLower) }
diff --git a/lib/btrfs/btrfsitem/item_inoderef.go b/lib/btrfs/btrfsitem/item_inoderef.go
new file mode 100644
index 0000000..80d70e1
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_inoderef.go
@@ -0,0 +1,35 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+)
+
// InodeRef is the body of an INODE_REF item: one name of a file within
// a parent directory.
//
// key.objectid = inode number of the file
// key.offset = inode number of the parent file
type InodeRef struct { // INODE_REF=12
	Index         int64  `bin:"off=0x0, siz=0x8"`
	NameLen       uint16 `bin:"off=0x8, siz=0x2"` // [ignored-when-writing] recomputed from len(.Name)
	binstruct.End `bin:"off=0xa"`
	Name          []byte `bin:"-"` // .NameLen bytes following the header
}

// UnmarshalBinary parses the fixed-size header, then takes the
// following .NameLen bytes as .Name.
//
// NOTE(review): there is no bounds check on .NameLen, so a corrupt item
// whose NameLen exceeds the remaining bytes will panic rather than
// return an error; consider adding a length check.
func (o *InodeRef) UnmarshalBinary(dat []byte) (int, error) {
	n, err := binstruct.UnmarshalWithoutInterface(dat, o)
	if err != nil {
		return n, err
	}
	dat = dat[n:]
	o.Name = dat[:o.NameLen]
	n += int(o.NameLen)
	return n, nil
}

// MarshalBinary recomputes .NameLen from .Name, serializes the header,
// and appends the name bytes.
func (o InodeRef) MarshalBinary() ([]byte, error) {
	o.NameLen = uint16(len(o.Name))
	dat, err := binstruct.MarshalWithoutInterface(o)
	if err != nil {
		return dat, err
	}
	dat = append(dat, o.Name...)
	return dat, nil
}
diff --git a/lib/btrfs/btrfsitem/item_metadata.go b/lib/btrfs/btrfsitem/item_metadata.go
new file mode 100644
index 0000000..d51a340
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_metadata.go
@@ -0,0 +1,44 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+)
+
+// Metadata is like Extent, but doesn't have .Info.
+type Metadata struct { // METADATA_ITEM=169
+ Head ExtentHeader
+ Refs []ExtentInlineRef
+}
+
+func (o *Metadata) UnmarshalBinary(dat []byte) (int, error) {
+ n, err := binstruct.Unmarshal(dat, &o.Head)
+ if err != nil {
+ return n, err
+ }
+ o.Refs = nil
+ for n < len(dat) {
+ var ref ExtentInlineRef
+ _n, err := binstruct.Unmarshal(dat[n:], &ref)
+ n += _n
+ o.Refs = append(o.Refs, ref)
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+}
+
+func (o Metadata) MarshalBinary() ([]byte, error) {
+ dat, err := binstruct.Marshal(o.Head)
+ if err != nil {
+ return dat, err
+ }
+ for _, ref := range o.Refs {
+ bs, err := binstruct.Marshal(ref)
+ dat = append(dat, bs...)
+ if err != nil {
+ return dat, err
+ }
+ }
+ return dat, nil
+}
diff --git a/lib/btrfs/btrfsitem/item_persistent.go b/lib/btrfs/btrfsitem/item_persistent.go
new file mode 100644
index 0000000..cbbae76
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_persistent.go
@@ -0,0 +1,19 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+)
+
// Indices into DevStats.Values, one per tracked error counter.
const (
	DEV_STAT_WRITE_ERRS = iota
	DEV_STAT_READ_ERRS
	DEV_STAT_FLUSH_ERRS
	DEV_STAT_CORRUPTION_ERRS
	DEV_STAT_GENERATION_ERRS
	DEV_STAT_VALUES_MAX
)

// DevStats is the body of a PERSISTENT_ITEM: per-device error counters,
// indexed by the DEV_STAT_* constants above.
type DevStats struct { // PERSISTENT_ITEM=249
	Values        [DEV_STAT_VALUES_MAX]int64 `bin:"off=0, siz=40"`
	binstruct.End `bin:"off=40"`
}
diff --git a/lib/btrfs/btrfsitem/item_root.go b/lib/btrfs/btrfsitem/item_root.go
new file mode 100644
index 0000000..ff9311f
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_root.go
@@ -0,0 +1,51 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
// Root is the body of a ROOT_ITEM: the description of a tree root,
// beginning with an embedded Inode item.
type Root struct { // ROOT_ITEM=132
	Inode         Inode                `bin:"off=0x000, siz=0xa0"`
	Generation    internal.Generation  `bin:"off=0x0a0, siz=0x08"`
	RootDirID     internal.ObjID       `bin:"off=0x0a8, siz=0x08"`
	ByteNr        btrfsvol.LogicalAddr `bin:"off=0x0b0, siz=0x08"` // logical addr of the tree's root node
	ByteLimit     int64                `bin:"off=0x0b8, siz=0x08"`
	BytesUsed     int64                `bin:"off=0x0c0, siz=0x08"`
	LastSnapshot  int64                `bin:"off=0x0c8, siz=0x08"`
	Flags         RootFlags            `bin:"off=0x0d0, siz=0x08"`
	Refs          int32                `bin:"off=0x0d8, siz=0x04"`
	DropProgress  internal.Key         `bin:"off=0x0dc, siz=0x11"`
	DropLevel     uint8                `bin:"off=0x0ed, siz=0x01"`
	Level         uint8                `bin:"off=0x0ee, siz=0x01"`
	GenerationV2  internal.Generation  `bin:"off=0x0ef, siz=0x08"`
	UUID          util.UUID            `bin:"off=0x0f7, siz=0x10"`
	ParentUUID    util.UUID            `bin:"off=0x107, siz=0x10"`
	ReceivedUUID  util.UUID            `bin:"off=0x117, siz=0x10"`
	CTransID      int64                `bin:"off=0x127, siz=0x08"`
	OTransID      int64                `bin:"off=0x12f, siz=0x08"`
	STransID      int64                `bin:"off=0x137, siz=0x08"`
	RTransID      int64                `bin:"off=0x13f, siz=0x08"`
	CTime         internal.Time        `bin:"off=0x147, siz=0x0c"`
	OTime         internal.Time        `bin:"off=0x153, siz=0x0c"`
	STime         internal.Time        `bin:"off=0x15f, siz=0x0c"`
	RTime         internal.Time        `bin:"off=0x16b, siz=0x0c"`
	GlobalTreeID  internal.ObjID       `bin:"off=0x177, siz=0x08"`
	Reserved      [7]int64             `bin:"off=0x17f, siz=0x38"`
	binstruct.End `bin:"off=0x1b7"`
}

// RootFlags is the bitfield type of Root.Flags.
type RootFlags uint64

const (
	ROOT_SUBVOL_RDONLY = RootFlags(1 << iota)
)

// rootFlagNames is indexed by bit position, for BitfieldString.
var rootFlagNames = []string{
	"SUBVOL_RDONLY",
}

// Has reports whether every bit of req is set in f.
func (f RootFlags) Has(req RootFlags) bool { return f&req == req }

// String formats the flags via util.BitfieldString using the names above.
func (f RootFlags) String() string { return util.BitfieldString(f, rootFlagNames, util.HexLower) }
diff --git a/lib/btrfs/btrfsitem/item_rootref.go b/lib/btrfs/btrfsitem/item_rootref.go
new file mode 100644
index 0000000..c851474
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_rootref.go
@@ -0,0 +1,34 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+)
+
// RootRef is the body of both ROOT_REF and ROOT_BACKREF items: a
// fixed-size header followed by a variable-length name.
type RootRef struct { // ROOT_REF=156 ROOT_BACKREF=144
	DirID         internal.ObjID `bin:"off=0x00, siz=0x8"`
	Sequence      int64          `bin:"off=0x08, siz=0x8"`
	NameLen       uint16         `bin:"off=0x10, siz=0x2"` // [ignored-when-writing] recomputed from len(.Name)
	binstruct.End `bin:"off=0x12"`
	Name          []byte         `bin:"-"` // .NameLen bytes following the header
}

// UnmarshalBinary parses the fixed-size header, then takes the
// following .NameLen bytes as .Name.
//
// NOTE(review): there is no bounds check on .NameLen, so a corrupt item
// whose NameLen exceeds the remaining bytes will panic rather than
// return an error; consider adding a length check.
func (o *RootRef) UnmarshalBinary(dat []byte) (int, error) {
	n, err := binstruct.UnmarshalWithoutInterface(dat, o)
	if err != nil {
		return n, err
	}
	o.Name = dat[n : n+int(o.NameLen)]
	n += int(o.NameLen)
	return n, nil
}

// MarshalBinary recomputes .NameLen from .Name, serializes the header,
// and appends the name bytes.
func (o RootRef) MarshalBinary() ([]byte, error) {
	o.NameLen = uint16(len(o.Name))
	dat, err := binstruct.MarshalWithoutInterface(o)
	if err != nil {
		return dat, err
	}
	dat = append(dat, o.Name...)
	return dat, nil
}
diff --git a/lib/btrfs/btrfsitem/item_shareddataref.go b/lib/btrfs/btrfsitem/item_shareddataref.go
new file mode 100644
index 0000000..63897aa
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_shareddataref.go
@@ -0,0 +1,10 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+)
+
// SharedDataRef is the body of a SHARED_DATA_REF item (and also appears
// inline inside EXTENT_ITEM/METADATA_ITEM backreferences): a bare
// reference count.
type SharedDataRef struct { // SHARED_DATA_REF=184
	Count         int32 `bin:"off=0, siz=4"`
	binstruct.End `bin:"off=4"`
}
diff --git a/lib/btrfs/btrfsitem/item_untyped.go b/lib/btrfs/btrfsitem/item_untyped.go
new file mode 100644
index 0000000..71a9af4
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_untyped.go
@@ -0,0 +1,14 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+)
+
// FreeSpaceHeader is the body of the UNTYPED (type=0) item whose
// objectid is FREE_SPACE_OBJECTID; see UnmarshalItem's handling of
// UNTYPED_KEY.
type FreeSpaceHeader struct { // UNTYPED=0:FREE_SPACE_OBJECTID
	Location      internal.Key        `bin:"off=0x00, siz=0x11"`
	Generation    internal.Generation `bin:"off=0x11, siz=0x8"`
	NumEntries    int64               `bin:"off=0x19, siz=0x8"`
	NumBitmaps    int64               `bin:"off=0x21, siz=0x8"`
	binstruct.End `bin:"off=0x29"`
}
diff --git a/lib/btrfs/btrfsitem/item_uuid.go b/lib/btrfs/btrfsitem/item_uuid.go
new file mode 100644
index 0000000..6c7d4f0
--- /dev/null
+++ b/lib/btrfs/btrfsitem/item_uuid.go
@@ -0,0 +1,16 @@
+package btrfsitem
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+)
+
// UUIDMap is the body of a UUID_SUBVOL or UUID_RECEIVED_SUBVOL item.
// The Key for this item is a UUID, and the item is the subvolume ID
// that that UUID maps to.
//
// key.objectid = first half of UUID
// key.offset = second half of UUID
type UUIDMap struct { // UUID_SUBVOL=251 UUID_RECEIVED_SUBVOL=252
	ObjID         internal.ObjID `bin:"off=0, siz=8"`
	binstruct.End `bin:"off=8"`
}
diff --git a/lib/btrfs/btrfsitem/items.go b/lib/btrfs/btrfsitem/items.go
new file mode 100644
index 0000000..33ff390
--- /dev/null
+++ b/lib/btrfs/btrfsitem/items.go
@@ -0,0 +1,77 @@
+package btrfsitem
+
+import (
+ "fmt"
+ "reflect"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+)
+
// Type is an alias for the tree-item type discriminant.
type Type = internal.ItemType

// Item is the marker interface implemented by every item-body type in
// this package (see the generated isItem methods in items_gen.go).
type Item interface {
	isItem()
}

// Error is the Item produced when an item body cannot be decoded; it
// preserves the raw bytes alongside the decode error.
type Error struct {
	Dat []byte
	Err error
}

func (Error) isItem() {}

// MarshalBinary writes back the original raw bytes, unchanged.
func (o Error) MarshalBinary() ([]byte, error) {
	return o.Dat, nil
}

// UnmarshalBinary captures the raw bytes (aliasing dat) without
// interpreting them.
func (o *Error) UnmarshalBinary(dat []byte) (int, error) {
	o.Dat = dat
	return len(dat), nil
}
+
+// Rather than returning a separate error value, return an Error item.
+func UnmarshalItem(key internal.Key, csumType btrfssum.CSumType, dat []byte) Item {
+ var gotyp reflect.Type
+ if key.ItemType == UNTYPED_KEY {
+ var ok bool
+ gotyp, ok = untypedObjID2gotype[key.ObjectID]
+ if !ok {
+ return Error{
+ Dat: dat,
+ Err: fmt.Errorf("btrfsitem.UnmarshalItem({ItemType:%v, ObjectID:%v}, dat): unknown object ID for untyped item",
+ key.ItemType, key.ObjectID),
+ }
+ }
+ } else {
+ var ok bool
+ gotyp, ok = keytype2gotype[key.ItemType]
+ if !ok {
+ return Error{
+ Dat: dat,
+ Err: fmt.Errorf("btrfsitem.UnmarshalItem({ItemType:%v}, dat): unknown item type", key.ItemType),
+ }
+ }
+ }
+ retPtr := reflect.New(gotyp)
+ if csums, ok := retPtr.Interface().(*ExtentCSum); ok {
+ csums.ChecksumSize = csumType.Size()
+ }
+ n, err := binstruct.Unmarshal(dat, retPtr.Interface())
+ if err != nil {
+ return Error{
+ Dat: dat,
+ Err: fmt.Errorf("btrfsitem.UnmarshalItem({ItemType:%v}, dat): %w", key.ItemType, err),
+ }
+
+ }
+ if n < len(dat) {
+ return Error{
+ Dat: dat,
+ Err: fmt.Errorf("btrfsitem.UnmarshalItem({ItemType:%v}, dat): left over data: got %v bytes but only consumed %v",
+ key.ItemType, len(dat), n),
+ }
+ }
+ return retPtr.Elem().Interface().(Item)
+}
diff --git a/lib/btrfs/btrfsitem/items.txt b/lib/btrfs/btrfsitem/items.txt
new file mode 100644
index 0000000..7898775
--- /dev/null
+++ b/lib/btrfs/btrfsitem/items.txt
@@ -0,0 +1,29 @@
+BLOCK_GROUP_ITEM=192 BlockGroup
+CHUNK_ITEM=228 Chunk
+DEV_EXTENT=204 DevExtent
+DEV_ITEM=216 Dev
+DIR_INDEX=96 DirEntries
+DIR_ITEM=84 DirEntries
+EXTENT_CSUM=128 ExtentCSum
+EXTENT_DATA=108 FileExtent
+EXTENT_DATA_REF=178 ExtentDataRef
+EXTENT_ITEM=168 Extent
+FREE_SPACE_BITMAP=200 FreeSpaceBitmap
+FREE_SPACE_EXTENT=199 Empty
+FREE_SPACE_INFO=198 FreeSpaceInfo
+INODE_ITEM=1 Inode
+INODE_REF=12 InodeRef
+METADATA_ITEM=169 Metadata
+ORPHAN_ITEM=48 Empty
+PERSISTENT_ITEM=249 DevStats
+QGROUP_RELATION=246 Empty
+ROOT_BACKREF=144 RootRef
+ROOT_ITEM=132 Root
+ROOT_REF=156 RootRef
+SHARED_BLOCK_REF=182 Empty
+SHARED_DATA_REF=184 SharedDataRef
+TREE_BLOCK_REF=176 Empty
+UNTYPED=0:FREE_SPACE_OBJECTID FreeSpaceHeader
+UUID_RECEIVED_SUBVOL=252 UUIDMap
+UUID_SUBVOL=251 UUIDMap
+XATTR_ITEM=24 DirEntries
diff --git a/lib/btrfs/btrfsitem/items_gen.go b/lib/btrfs/btrfsitem/items_gen.go
new file mode 100644
index 0000000..3b84d60
--- /dev/null
+++ b/lib/btrfs/btrfsitem/items_gen.go
@@ -0,0 +1,97 @@
+// Code generated by Make. DO NOT EDIT.
+
+package btrfsitem
+
+import (
+ "reflect"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/internal"
+)
+
+const (
+ BLOCK_GROUP_ITEM_KEY = internal.BLOCK_GROUP_ITEM_KEY
+ CHUNK_ITEM_KEY = internal.CHUNK_ITEM_KEY
+ DEV_EXTENT_KEY = internal.DEV_EXTENT_KEY
+ DEV_ITEM_KEY = internal.DEV_ITEM_KEY
+ DIR_INDEX_KEY = internal.DIR_INDEX_KEY
+ DIR_ITEM_KEY = internal.DIR_ITEM_KEY
+ EXTENT_CSUM_KEY = internal.EXTENT_CSUM_KEY
+ EXTENT_DATA_KEY = internal.EXTENT_DATA_KEY
+ EXTENT_DATA_REF_KEY = internal.EXTENT_DATA_REF_KEY
+ EXTENT_ITEM_KEY = internal.EXTENT_ITEM_KEY
+ FREE_SPACE_BITMAP_KEY = internal.FREE_SPACE_BITMAP_KEY
+ FREE_SPACE_EXTENT_KEY = internal.FREE_SPACE_EXTENT_KEY
+ FREE_SPACE_INFO_KEY = internal.FREE_SPACE_INFO_KEY
+ INODE_ITEM_KEY = internal.INODE_ITEM_KEY
+ INODE_REF_KEY = internal.INODE_REF_KEY
+ METADATA_ITEM_KEY = internal.METADATA_ITEM_KEY
+ ORPHAN_ITEM_KEY = internal.ORPHAN_ITEM_KEY
+ PERSISTENT_ITEM_KEY = internal.PERSISTENT_ITEM_KEY
+ QGROUP_RELATION_KEY = internal.QGROUP_RELATION_KEY
+ ROOT_BACKREF_KEY = internal.ROOT_BACKREF_KEY
+ ROOT_ITEM_KEY = internal.ROOT_ITEM_KEY
+ ROOT_REF_KEY = internal.ROOT_REF_KEY
+ SHARED_BLOCK_REF_KEY = internal.SHARED_BLOCK_REF_KEY
+ SHARED_DATA_REF_KEY = internal.SHARED_DATA_REF_KEY
+ TREE_BLOCK_REF_KEY = internal.TREE_BLOCK_REF_KEY
+ UNTYPED_KEY = internal.UNTYPED_KEY
+ UUID_RECEIVED_SUBVOL_KEY = internal.UUID_RECEIVED_SUBVOL_KEY
+ UUID_SUBVOL_KEY = internal.UUID_SUBVOL_KEY
+ XATTR_ITEM_KEY = internal.XATTR_ITEM_KEY
+)
+
+var keytype2gotype = map[Type]reflect.Type{
+ BLOCK_GROUP_ITEM_KEY: reflect.TypeOf(BlockGroup{}),
+ CHUNK_ITEM_KEY: reflect.TypeOf(Chunk{}),
+ DEV_EXTENT_KEY: reflect.TypeOf(DevExtent{}),
+ DEV_ITEM_KEY: reflect.TypeOf(Dev{}),
+ DIR_INDEX_KEY: reflect.TypeOf(DirEntries{}),
+ DIR_ITEM_KEY: reflect.TypeOf(DirEntries{}),
+ EXTENT_CSUM_KEY: reflect.TypeOf(ExtentCSum{}),
+ EXTENT_DATA_KEY: reflect.TypeOf(FileExtent{}),
+ EXTENT_DATA_REF_KEY: reflect.TypeOf(ExtentDataRef{}),
+ EXTENT_ITEM_KEY: reflect.TypeOf(Extent{}),
+ FREE_SPACE_BITMAP_KEY: reflect.TypeOf(FreeSpaceBitmap{}),
+ FREE_SPACE_EXTENT_KEY: reflect.TypeOf(Empty{}),
+ FREE_SPACE_INFO_KEY: reflect.TypeOf(FreeSpaceInfo{}),
+ INODE_ITEM_KEY: reflect.TypeOf(Inode{}),
+ INODE_REF_KEY: reflect.TypeOf(InodeRef{}),
+ METADATA_ITEM_KEY: reflect.TypeOf(Metadata{}),
+ ORPHAN_ITEM_KEY: reflect.TypeOf(Empty{}),
+ PERSISTENT_ITEM_KEY: reflect.TypeOf(DevStats{}),
+ QGROUP_RELATION_KEY: reflect.TypeOf(Empty{}),
+ ROOT_BACKREF_KEY: reflect.TypeOf(RootRef{}),
+ ROOT_ITEM_KEY: reflect.TypeOf(Root{}),
+ ROOT_REF_KEY: reflect.TypeOf(RootRef{}),
+ SHARED_BLOCK_REF_KEY: reflect.TypeOf(Empty{}),
+ SHARED_DATA_REF_KEY: reflect.TypeOf(SharedDataRef{}),
+ TREE_BLOCK_REF_KEY: reflect.TypeOf(Empty{}),
+ UUID_RECEIVED_SUBVOL_KEY: reflect.TypeOf(UUIDMap{}),
+ UUID_SUBVOL_KEY: reflect.TypeOf(UUIDMap{}),
+ XATTR_ITEM_KEY: reflect.TypeOf(DirEntries{}),
+}
+var untypedObjID2gotype = map[internal.ObjID]reflect.Type{
+ internal.FREE_SPACE_OBJECTID: reflect.TypeOf(FreeSpaceHeader{}),
+}
+
+func (BlockGroup) isItem() {}
+func (Chunk) isItem() {}
+func (Dev) isItem() {}
+func (DevExtent) isItem() {}
+func (DevStats) isItem() {}
+func (DirEntries) isItem() {}
+func (Empty) isItem() {}
+func (Extent) isItem() {}
+func (ExtentCSum) isItem() {}
+func (ExtentDataRef) isItem() {}
+func (FileExtent) isItem() {}
+func (FreeSpaceBitmap) isItem() {}
+func (FreeSpaceHeader) isItem() {}
+func (FreeSpaceInfo) isItem() {}
+func (Inode) isItem() {}
+func (InodeRef) isItem() {}
+func (Metadata) isItem() {}
+func (Root) isItem() {}
+func (RootRef) isItem() {}
+func (SharedDataRef) isItem() {}
+func (UUIDMap) isItem() {}
diff --git a/lib/btrfs/btrfssum/csum.go b/lib/btrfs/btrfssum/csum.go
new file mode 100644
index 0000000..11a8385
--- /dev/null
+++ b/lib/btrfs/btrfssum/csum.go
@@ -0,0 +1,78 @@
+package btrfssum
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "fmt"
+ "hash/crc32"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
// CSum is a checksum value, zero-padded out to the largest size of any
// supported checksum type (32 bytes).
type CSum [0x20]byte

// String renders all 32 bytes (including padding) as lowercase hex.
func (csum CSum) String() string {
	var out [2 * len(CSum{})]byte
	hex.Encode(out[:], csum[:])
	return string(out[:])
}
+
// Fmt renders only the bytes meaningful for the given checksum type
// (e.g. 4 bytes for crc32c) as lowercase hex.
func (csum CSum) Fmt(typ CSumType) string {
	return hex.EncodeToString(csum[:typ.Size()])
}

// Format implements fmt.Formatter, deferring to the util helper so that
// string-ish verbs use String() while %#v shows the raw byte array.
func (csum CSum) Format(f fmt.State, verb rune) {
	util.FormatByteArrayStringer(csum, csum[:], f, verb)
}
+
// CSumType identifies which checksum algorithm a filesystem uses.
type CSumType uint16

const (
	TYPE_CRC32 = CSumType(iota)
	TYPE_XXHASH
	TYPE_SHA256
	TYPE_BLAKE2
)

// String returns the conventional short name of the algorithm, or the
// bare number for unknown values.
func (typ CSumType) String() string {
	switch typ {
	case TYPE_CRC32:
		return "crc32c"
	case TYPE_XXHASH:
		return "xxhash64"
	case TYPE_SHA256:
		return "sha256"
	case TYPE_BLAKE2:
		return "blake2"
	default:
		return fmt.Sprintf("%d", typ)
	}
}
+
// Size returns the width in bytes of a checksum of this type; unknown
// types fall back to the full padded width of CSum (32 bytes).
func (typ CSumType) Size() int {
	sizes := map[CSumType]int{
		TYPE_CRC32:  4,
		TYPE_XXHASH: 8,
		TYPE_SHA256: 32,
		TYPE_BLAKE2: 32,
	}
	if size, ok := sizes[typ]; ok {
		return size
	}
	return len(CSum{})
}
+
+func (typ CSumType) Sum(data []byte) (CSum, error) {
+ switch typ {
+ case TYPE_CRC32:
+ crc := crc32.Update(0, crc32.MakeTable(crc32.Castagnoli), data)
+
+ var ret CSum
+ binary.LittleEndian.PutUint32(ret[:], crc)
+ return ret, nil
+ case TYPE_XXHASH:
+ panic("not implemented")
+ case TYPE_SHA256:
+ panic("not implemented")
+ case TYPE_BLAKE2:
+ panic("not implemented")
+ default:
+ return CSum{}, fmt.Errorf("unknown checksum type: %v", typ)
+ }
+}
diff --git a/lib/btrfs/btrfssum/csum_test.go b/lib/btrfs/btrfssum/csum_test.go
new file mode 100644
index 0000000..755ecc1
--- /dev/null
+++ b/lib/btrfs/btrfssum/csum_test.go
@@ -0,0 +1,35 @@
+package btrfssum_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
+)
+
+// TestCSumFormat pins CSum's fmt.Formatter behavior: the plain verbs
+// (%s/%x/%v) render the full 0x20 bytes as hex, width/space flags are
+// honored, and %#v produces the padded Go-syntax form.
+func TestCSumFormat(t *testing.T) {
+	t.Parallel()
+	type TestCase struct {
+		InputSum btrfssum.CSum
+		InputFmt string
+		Output   string
+	}
+	csum := btrfssum.CSum{0xbd, 0x7b, 0x41, 0xf4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
+	testcases := map[string]TestCase{
+		"s":     {InputSum: csum, InputFmt: "%s", Output: "bd7b41f400000000000000000000000000000000000000000000000000000000"},
+		"x":     {InputSum: csum, InputFmt: "%x", Output: "bd7b41f400000000000000000000000000000000000000000000000000000000"},
+		"v":     {InputSum: csum, InputFmt: "%v", Output: "bd7b41f400000000000000000000000000000000000000000000000000000000"},
+		"70s":   {InputSum: csum, InputFmt: "|% 70s", Output: "|      bd7b41f400000000000000000000000000000000000000000000000000000000"},
+		"#180v": {InputSum: csum, InputFmt: "%#180v", Output: "                    btrfssum.CSum{0xbd, 0x7b, 0x41, 0xf4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}"},
+	}
+	for tcName, tc := range testcases {
+		tc := tc // capture loop variable (pre-Go-1.22 semantics)
+		t.Run(tcName, func(t *testing.T) {
+			t.Parallel()
+			actual := fmt.Sprintf(tc.InputFmt, tc.InputSum)
+			assert.Equal(t, tc.Output, actual)
+		})
+	}
+}
diff --git a/lib/btrfs/btrfsvol/addr.go b/lib/btrfs/btrfsvol/addr.go
new file mode 100644
index 0000000..6c67826
--- /dev/null
+++ b/lib/btrfs/btrfsvol/addr.go
@@ -0,0 +1,47 @@
+package btrfsvol
+
+import (
+ "fmt"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+type (
+	PhysicalAddr int64 // byte offset within one device
+	LogicalAddr  int64 // byte offset within the logical volume
+	AddrDelta    int64 // difference between two addresses of the same kind
+)
+
+// formatAddr renders an address for the string-ish verbs as
+// zero-padded "0x"-prefixed hex.  Note the Go fmt quirk that with the
+// '0' flag the width counts digits only (the "0x" prefix and sign are
+// added outside it), so "%#016x" yields "0x" plus 16 hex digits — see
+// the expectations in TestAddrFormat.  Other verbs pass through with
+// the caller's flags intact.
+func formatAddr(addr int64, f fmt.State, verb rune) {
+	switch verb {
+	case 'v', 's', 'q':
+		str := fmt.Sprintf("%#016x", addr)
+		fmt.Fprintf(f, util.FmtStateString(f, verb), str)
+	default:
+		fmt.Fprintf(f, util.FmtStateString(f, verb), addr)
+	}
+}
+
+func (a PhysicalAddr) Format(f fmt.State, verb rune) { formatAddr(int64(a), f, verb) }
+func (a LogicalAddr) Format(f fmt.State, verb rune)  { formatAddr(int64(a), f, verb) }
+func (d AddrDelta) Format(f fmt.State, verb rune)    { formatAddr(int64(d), f, verb) }
+
+func (a PhysicalAddr) Sub(b PhysicalAddr) AddrDelta { return AddrDelta(a - b) }
+func (a LogicalAddr) Sub(b LogicalAddr) AddrDelta   { return AddrDelta(a - b) }
+
+func (a PhysicalAddr) Add(b AddrDelta) PhysicalAddr { return a + PhysicalAddr(b) }
+func (a LogicalAddr) Add(b AddrDelta) LogicalAddr   { return a + LogicalAddr(b) }
+
+// DeviceID is btrfs's numeric ID for a member device.
+type DeviceID uint64
+
+// QualifiedPhysicalAddr is a physical address qualified by which
+// device it lives on.
+type QualifiedPhysicalAddr struct {
+	Dev  DeviceID
+	Addr PhysicalAddr
+}
+
+// Cmp totally orders addresses by device, then by offset, returning
+// <0, 0, or >0 in the usual comparator convention.
+//
+// The original computed `int(a.Dev - b.Dev)`: a uint64 difference
+// truncated to int wraps for a.Dev < b.Dev and can truncate to the
+// wrong sign (always on 32-bit ints, and for large deltas on 64-bit);
+// likewise `int(a.Addr - b.Addr)` can overflow.  Explicit comparisons
+// are overflow-free.
+func (a QualifiedPhysicalAddr) Cmp(b QualifiedPhysicalAddr) int {
+	switch {
+	case a.Dev < b.Dev:
+		return -1
+	case a.Dev > b.Dev:
+		return 1
+	case a.Addr < b.Addr:
+		return -1
+	case a.Addr > b.Addr:
+		return 1
+	default:
+		return 0
+	}
+}
diff --git a/lib/btrfs/btrfsvol/addr_test.go b/lib/btrfs/btrfsvol/addr_test.go
new file mode 100644
index 0000000..aae6378
--- /dev/null
+++ b/lib/btrfs/btrfsvol/addr_test.go
@@ -0,0 +1,36 @@
+package btrfsvol_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+)
+
+// TestAddrFormat pins LogicalAddr's fmt.Formatter behavior.  The
+// 18-character "0x"+16-digit outputs (and the negative case's
+// "-0x"+15 digits) encode the fmt quirk that with the '0' flag the
+// width of "%#016x" counts digits only, minus one for the sign.
+func TestAddrFormat(t *testing.T) {
+	t.Parallel()
+	type TestCase struct {
+		InputAddr btrfsvol.LogicalAddr
+		InputFmt  string
+		Output    string
+	}
+	addr := btrfsvol.LogicalAddr(0x3a41678000)
+	testcases := map[string]TestCase{
+		"v":   {InputAddr: addr, InputFmt: "%v", Output: "0x0000003a41678000"},
+		"s":   {InputAddr: addr, InputFmt: "%s", Output: "0x0000003a41678000"},
+		"q":   {InputAddr: addr, InputFmt: "%q", Output: `"0x0000003a41678000"`},
+		"x":   {InputAddr: addr, InputFmt: "%x", Output: "3a41678000"},
+		"d":   {InputAddr: addr, InputFmt: "%d", Output: "250205405184"},
+		"neg": {InputAddr: -1, InputFmt: "%v", Output: "-0x000000000000001"},
+	}
+	for tcName, tc := range testcases {
+		tc := tc // capture loop variable (pre-Go-1.22 semantics)
+		t.Run(tcName, func(t *testing.T) {
+			t.Parallel()
+			actual := fmt.Sprintf(tc.InputFmt, tc.InputAddr)
+			assert.Equal(t, tc.Output, actual)
+		})
+	}
+}
diff --git a/lib/btrfs/btrfsvol/blockgroupflags.go b/lib/btrfs/btrfsvol/blockgroupflags.go
new file mode 100644
index 0000000..1a194d7
--- /dev/null
+++ b/lib/btrfs/btrfsvol/blockgroupflags.go
@@ -0,0 +1,51 @@
+package btrfsvol
+
+import (
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+// BlockGroupFlags is the bitfield describing a block group's payload
+// type (data/system/metadata) and replication profile.
+type BlockGroupFlags uint64
+
+const (
+	BLOCK_GROUP_DATA = BlockGroupFlags(1 << iota)
+	BLOCK_GROUP_SYSTEM
+	BLOCK_GROUP_METADATA
+	BLOCK_GROUP_RAID0
+	BLOCK_GROUP_RAID1
+	BLOCK_GROUP_DUP
+	BLOCK_GROUP_RAID10
+	BLOCK_GROUP_RAID5
+	BLOCK_GROUP_RAID6
+	BLOCK_GROUP_RAID1C3
+	BLOCK_GROUP_RAID1C4
+
+	// NOTE(review): RAID0 is not part of this mask, so a RAID0-only
+	// value still gets "|single" appended by String() — confirm that
+	// is intended (the kernel's profile mask includes RAID0).
+	BLOCK_GROUP_RAID_MASK = (BLOCK_GROUP_RAID1 | BLOCK_GROUP_DUP | BLOCK_GROUP_RAID10 | BLOCK_GROUP_RAID5 | BLOCK_GROUP_RAID6 | BLOCK_GROUP_RAID1C3 | BLOCK_GROUP_RAID1C4)
+)
+
+// blockGroupFlagNames must stay in the same order as the bit
+// definitions above (index == bit position).
+var blockGroupFlagNames = []string{
+	"DATA",
+	"SYSTEM",
+	"METADATA",
+
+	"RAID0",
+	"RAID1",
+	"DUP",
+	"RAID10",
+	"RAID5",
+	"RAID6",
+	"RAID1C3",
+	"RAID1C4",
+}
+
+// Has reports whether every bit in req is set in f.
+func (f BlockGroupFlags) Has(req BlockGroupFlags) bool { return f&req == req }
+
+// String renders the set bits "|"-separated; when no replication
+// profile bit is set, the implicit "single" profile is appended.
+func (f BlockGroupFlags) String() string {
+	str := util.BitfieldString(f, blockGroupFlagNames, util.HexNone)
+	if f&BLOCK_GROUP_RAID_MASK != 0 {
+		return str
+	}
+	if str == "" {
+		return "single"
+	}
+	return str + "|single"
+}
diff --git a/lib/btrfs/btrfsvol/chunk.go b/lib/btrfs/btrfsvol/chunk.go
new file mode 100644
index 0000000..6aea483
--- /dev/null
+++ b/lib/btrfs/btrfsvol/chunk.go
@@ -0,0 +1,96 @@
+package btrfsvol
+
+import (
+ "fmt"
+ "sort"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+// chunkMapping maps one logical-address range onto the physical
+// stripe(s) backing it (logical => []physical).
+type chunkMapping struct {
+	LAddr      LogicalAddr             // first logical byte covered
+	PAddrs     []QualifiedPhysicalAddr // one entry per stripe/mirror, sorted (see union)
+	Size       AddrDelta               // length of the range in bytes
+	SizeLocked bool                    // if set, union() must not change Size
+	Flags      *BlockGroupFlags        // nil means "unknown" (see union)
+}
+
+// ChunkMapping is the exported alias of chunkMapping.
+type ChunkMapping = chunkMapping
+
+// cmpRange compares the logical ranges of 'a' and 'b':
+//
+//	-1 if 'a' ends at or before 'b' begins (wholly to the left)
+//	 1 if 'a' begins at or after 'b' ends (wholly to the right)
+//	 0 if the two ranges overlap
+func (a chunkMapping) cmpRange(b chunkMapping) int {
+	if a.LAddr.Add(a.Size) <= b.LAddr {
+		return -1
+	}
+	if b.LAddr.Add(b.Size) <= a.LAddr {
+		return 1
+	}
+	return 0
+}
+
+// union merges 'a' with 'rest' (each of which must overlap 'a') into
+// one chunkMapping covering their combined logical range.  Stripe
+// addresses are rebased to the union's start and deduplicated; a
+// member with a locked size must already span the whole union, and
+// all members with known flags must agree on them.
+func (a chunkMapping) union(rest ...chunkMapping) (chunkMapping, error) {
+	// sanity check
+	for _, chunk := range rest {
+		if a.cmpRange(chunk) != 0 {
+			return chunkMapping{}, fmt.Errorf("chunks don't overlap")
+		}
+	}
+	chunks := append([]chunkMapping{a}, rest...)
+	// figure out the logical range (.LAddr and .Size)
+	beg := chunks[0].LAddr
+	end := chunks[0].LAddr.Add(chunks[0].Size)
+	for _, chunk := range chunks {
+		beg = util.Min(beg, chunk.LAddr)
+		end = util.Max(end, chunk.LAddr.Add(chunk.Size))
+	}
+	ret := chunkMapping{
+		LAddr: beg,
+		Size:  end.Sub(beg),
+	}
+	for _, chunk := range chunks {
+		if chunk.SizeLocked {
+			ret.SizeLocked = true
+			if ret.Size != chunk.Size {
+				return chunkMapping{}, fmt.Errorf("member chunk has locked size=%v, but union would have size=%v",
+					chunk.Size, ret.Size)
+			}
+		}
+	}
+	// figure out the physical stripes (.PAddrs)
+	paddrs := make(map[QualifiedPhysicalAddr]struct{})
+	for _, chunk := range chunks {
+		offsetWithinRet := chunk.LAddr.Sub(ret.LAddr)
+		for _, stripe := range chunk.PAddrs {
+			// Rebase each stripe so it names where the union's
+			// LAddr lands on that device; the map dedupes stripes
+			// contributed by multiple overlapping members.
+			paddrs[QualifiedPhysicalAddr{
+				Dev:  stripe.Dev,
+				Addr: stripe.Addr.Add(-offsetWithinRet),
+			}] = struct{}{}
+		}
+	}
+	ret.PAddrs = util.MapKeys(paddrs)
+	sort.Slice(ret.PAddrs, func(i, j int) bool {
+		return ret.PAddrs[i].Cmp(ret.PAddrs[j]) < 0
+	})
+	// figure out the flags (.Flags)
+	for _, chunk := range chunks {
+		if chunk.Flags == nil {
+			continue
+		}
+		if ret.Flags == nil {
+			val := *chunk.Flags
+			ret.Flags = &val
+		}
+		if *ret.Flags != *chunk.Flags {
+			return ret, fmt.Errorf("mismatch flags: %v != %v", *ret.Flags, *chunk.Flags)
+		}
+	}
+	// done
+	return ret, nil
+}
diff --git a/lib/btrfs/btrfsvol/devext.go b/lib/btrfs/btrfsvol/devext.go
new file mode 100644
index 0000000..43c6255
--- /dev/null
+++ b/lib/btrfs/btrfsvol/devext.go
@@ -0,0 +1,89 @@
+package btrfsvol
+
+import (
+ "fmt"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+// devextMapping maps one physical-address range on a single device
+// back to its logical address (physical => logical).
+type devextMapping struct {
+	PAddr      PhysicalAddr     // first physical byte covered
+	LAddr      LogicalAddr      // logical address that PAddr maps to
+	Size       AddrDelta        // length of the range in bytes
+	SizeLocked bool             // if set, union() must not change Size
+	Flags      *BlockGroupFlags // nil means "unknown" (see union)
+}
+
+// cmpRange compares the physical ranges of 'a' and 'b':
+//
+//	-1 if 'a' is wholly to the left of 'b'
+//	 0 if there is some overlap between 'a' and 'b'
+//	 1 if 'a' is wholly to the right of 'b'
+func (a devextMapping) cmpRange(b devextMapping) int {
+	switch {
+	case a.PAddr.Add(a.Size) <= b.PAddr:
+		// 'a' is wholly to the left of 'b'.
+		return -1
+	case b.PAddr.Add(b.Size) <= a.PAddr:
+		// 'a' is wholly to the right of 'b'.
+		return 1
+	default:
+		// There is some overlap.
+		return 0
+	}
+}
+
+// union merges 'a' with 'rest' (each of which must overlap 'a') into
+// one devextMapping covering their combined physical range.  All
+// members must agree on the implied logical address, a member with a
+// locked size must already span the whole union, and all members
+// with known flags must agree on them.
+//
+// Bug fix: `first` was never cleared, so every member overwrote
+// ret.LAddr and the laddr-disagreement check was dead code.
+func (a devextMapping) union(rest ...devextMapping) (devextMapping, error) {
+	// sanity check
+	for _, ext := range rest {
+		if a.cmpRange(ext) != 0 {
+			return devextMapping{}, fmt.Errorf("devexts don't overlap")
+		}
+	}
+	exts := append([]devextMapping{a}, rest...)
+	// figure out the physical range (.PAddr and .Size)
+	beg := exts[0].PAddr
+	end := beg.Add(exts[0].Size)
+	for _, ext := range exts {
+		beg = util.Min(beg, ext.PAddr)
+		end = util.Max(end, ext.PAddr.Add(ext.Size))
+	}
+	ret := devextMapping{
+		PAddr: beg,
+		Size:  end.Sub(beg),
+	}
+	for _, ext := range exts {
+		if ext.SizeLocked {
+			ret.SizeLocked = true
+			if ret.Size != ext.Size {
+				return devextMapping{}, fmt.Errorf("member devext has locked size=%v, but union would have size=%v",
+					ext.Size, ret.Size)
+			}
+		}
+	}
+	// figure out the logical range (.LAddr): every member, rebased to
+	// the union's start, must imply the same logical address.
+	first := true
+	for _, ext := range exts {
+		offsetWithinRet := ext.PAddr.Sub(ret.PAddr)
+		laddr := ext.LAddr.Add(-offsetWithinRet)
+		if first {
+			ret.LAddr = laddr
+			first = false
+		} else if laddr != ret.LAddr {
+			return ret, fmt.Errorf("devexts don't agree on laddr: %v != %v", ret.LAddr, laddr)
+		}
+	}
+	// figure out the flags (.Flags)
+	for _, ext := range exts {
+		if ext.Flags == nil {
+			continue
+		}
+		if ret.Flags == nil {
+			val := *ext.Flags
+			ret.Flags = &val
+		}
+		if *ret.Flags != *ext.Flags {
+			return ret, fmt.Errorf("mismatch flags: %v != %v", *ret.Flags, *ext.Flags)
+		}
+	}
+	// done
+	return ret, nil
+}
diff --git a/lib/btrfs/btrfsvol/lvm.go b/lib/btrfs/btrfsvol/lvm.go
new file mode 100644
index 0000000..3b9ccf6
--- /dev/null
+++ b/lib/btrfs/btrfsvol/lvm.go
@@ -0,0 +1,355 @@
+package btrfsvol
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "reflect"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/rbtree"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+// LogicalVolume assembles one btrfs logical address space from one or
+// more backing physical volumes, maintaining mutually-inverse
+// logical->physical (chunk) and physical->logical (devext) indexes.
+type LogicalVolume[PhysicalVolume util.File[PhysicalAddr]] struct {
+	name string
+
+	id2pv map[DeviceID]PhysicalVolume // keyed by on-disk device ID
+
+	// Inverse indexes; kept in sync by AddMapping.
+	logical2physical *rbtree.Tree[LogicalAddr, chunkMapping]
+	physical2logical map[DeviceID]*rbtree.Tree[PhysicalAddr, devextMapping]
+}
+
+var _ util.File[LogicalAddr] = (*LogicalVolume[util.File[PhysicalAddr]])(nil)
+
+// init lazily allocates the internal maps/trees so the zero
+// LogicalVolume is usable; methods that touch the indexes call it
+// first.  It is idempotent.
+func (lv *LogicalVolume[PhysicalVolume]) init() {
+	if lv.id2pv == nil {
+		lv.id2pv = make(map[DeviceID]PhysicalVolume)
+	}
+	if lv.logical2physical == nil {
+		lv.logical2physical = &rbtree.Tree[LogicalAddr, chunkMapping]{
+			KeyFn: func(chunk chunkMapping) LogicalAddr {
+				return chunk.LAddr
+			},
+		}
+	}
+	if lv.physical2logical == nil {
+		lv.physical2logical = make(map[DeviceID]*rbtree.Tree[PhysicalAddr, devextMapping], len(lv.id2pv))
+	}
+	// Make sure every registered device has a devext tree (covers
+	// trees dropped by ClearMappings).
+	for devid := range lv.id2pv {
+		if _, ok := lv.physical2logical[devid]; !ok {
+			lv.physical2logical[devid] = &rbtree.Tree[PhysicalAddr, devextMapping]{
+				KeyFn: func(ext devextMapping) PhysicalAddr {
+					return ext.PAddr
+				},
+			}
+		}
+	}
+}
+
+// SetName sets the volume's display name (see Name).
+func (lv *LogicalVolume[PhysicalVolume]) SetName(name string) {
+	lv.name = name
+}
+
+// Name returns the display name set by SetName; may be empty.
+func (lv *LogicalVolume[PhysicalVolume]) Name() string {
+	return lv.name
+}
+
+// Size returns the end of the highest mapped chunk (0 if nothing is
+// mapped); holes below that point are not subtracted.
+func (lv *LogicalVolume[PhysicalVolume]) Size() (LogicalAddr, error) {
+	lv.init()
+	lastChunk := lv.logical2physical.Max()
+	if lastChunk == nil {
+		return 0, nil
+	}
+	return lastChunk.Value.LAddr.Add(lastChunk.Value.Size), nil
+}
+
+// AddPhysicalVolume registers dev as the backing device for the given
+// ID; the ID must not already be in use.
+func (lv *LogicalVolume[PhysicalVolume]) AddPhysicalVolume(id DeviceID, dev PhysicalVolume) error {
+	lv.init()
+	if other, exists := lv.id2pv[id]; exists {
+		return fmt.Errorf("(%p).AddPhysicalVolume: cannot add physical volume %q: already have physical volume %q with id=%v",
+			lv, dev.Name(), other.Name(), id)
+	}
+	lv.id2pv[id] = dev
+	lv.physical2logical[id] = &rbtree.Tree[PhysicalAddr, devextMapping]{
+		KeyFn: func(ext devextMapping) PhysicalAddr {
+			return ext.PAddr
+		},
+	}
+	return nil
+}
+
+// PhysicalVolumes returns a defensive copy of the device map.
+func (lv *LogicalVolume[PhysicalVolume]) PhysicalVolumes() map[DeviceID]PhysicalVolume {
+	dup := make(map[DeviceID]PhysicalVolume, len(lv.id2pv))
+	for k, v := range lv.id2pv {
+		dup[k] = v
+	}
+	return dup
+}
+
+// ClearMappings discards all chunk/devext mappings (the next init()
+// lazily re-creates empty trees); registered devices are kept.
+func (lv *LogicalVolume[PhysicalVolume]) ClearMappings() {
+	lv.logical2physical = nil
+	lv.physical2logical = nil
+}
+
+// Mapping is the public, flattened form of one (logical range,
+// physical stripe) pairing; see AddMapping and Mappings.
+type Mapping struct {
+	LAddr      LogicalAddr
+	PAddr      QualifiedPhysicalAddr
+	Size       AddrDelta
+	SizeLocked bool
+	Flags      *BlockGroupFlags
+}
+
+// AddMapping records one logical<->physical mapping, merging it (via
+// union) with any overlapping entries in both the chunk tree and the
+// per-device devext tree so the two indexes stay mutually inverse.
+func (lv *LogicalVolume[PhysicalVolume]) AddMapping(m Mapping) error {
+	lv.init()
+	// sanity check
+	if _, haveDev := lv.id2pv[m.PAddr.Dev]; !haveDev {
+		return fmt.Errorf("(%p).AddMapping: do not have a physical volume with id=%v",
+			lv, m.PAddr.Dev)
+	}
+
+	// logical2physical: merge with all overlapping chunks.
+	newChunk := chunkMapping{
+		LAddr:      m.LAddr,
+		PAddrs:     []QualifiedPhysicalAddr{m.PAddr},
+		Size:       m.Size,
+		SizeLocked: m.SizeLocked,
+		Flags:      m.Flags,
+	}
+	logicalOverlaps := lv.logical2physical.SearchRange(newChunk.cmpRange)
+	var err error
+	newChunk, err = newChunk.union(logicalOverlaps...)
+	if err != nil {
+		return fmt.Errorf("(%p).AddMapping: %w", lv, err)
+	}
+
+	// physical2logical: merge with all overlapping devexts.
+	newExt := devextMapping{
+		PAddr:      m.PAddr.Addr,
+		LAddr:      m.LAddr,
+		Size:       m.Size,
+		SizeLocked: m.SizeLocked,
+		Flags:      m.Flags,
+	}
+	physicalOverlaps := lv.physical2logical[m.PAddr.Dev].SearchRange(newExt.cmpRange)
+	newExt, err = newExt.union(physicalOverlaps...)
+	if err != nil {
+		return fmt.Errorf("(%p).AddMapping: %w", lv, err)
+	}
+
+	// optimize: if the merge changed nothing, skip the tree rewrites.
+	if len(logicalOverlaps) == 1 && reflect.DeepEqual(newChunk, logicalOverlaps[0]) &&
+		len(physicalOverlaps) == 1 && reflect.DeepEqual(newExt, physicalOverlaps[0]) {
+		return nil
+	}
+
+	// logical2physical: replace the merged-away chunks.
+	for _, chunk := range logicalOverlaps {
+		lv.logical2physical.Delete(chunk.LAddr)
+	}
+	lv.logical2physical.Insert(newChunk)
+
+	// physical2logical: replace the merged-away devexts.
+	for _, ext := range physicalOverlaps {
+		lv.physical2logical[m.PAddr.Dev].Delete(ext.PAddr)
+	}
+	lv.physical2logical[m.PAddr.Dev].Insert(newExt)
+
+	// sanity check
+	//
+	// This is in-theory unnecessary, but that assumes that I
+	// made no mistakes in my algorithm above.
+	if os.Getenv("PARANOID") != "" {
+		if err := lv.fsck(); err != nil {
+			return err
+		}
+	}
+
+	// done
+	return nil
+}
+
+// fsck verifies that physical2logical is exactly the inverse of
+// logical2physical by rebuilding the devext trees from the chunk tree
+// and comparing.  Only run when the PARANOID env var is set (see
+// AddMapping).
+func (lv *LogicalVolume[PhysicalVolume]) fsck() error {
+	physical2logical := make(map[DeviceID]*rbtree.Tree[PhysicalAddr, devextMapping])
+	if err := lv.logical2physical.Walk(func(node *rbtree.Node[chunkMapping]) error {
+		chunk := node.Value
+		for _, stripe := range chunk.PAddrs {
+			if _, devOK := lv.id2pv[stripe.Dev]; !devOK {
+				return fmt.Errorf("(%p).fsck: chunk references physical volume %v which does not exist",
+					lv, stripe.Dev)
+			}
+			if _, exists := physical2logical[stripe.Dev]; !exists {
+				physical2logical[stripe.Dev] = &rbtree.Tree[PhysicalAddr, devextMapping]{
+					KeyFn: func(ext devextMapping) PhysicalAddr {
+						return ext.PAddr
+					},
+				}
+			}
+			// NOTE(review): SizeLocked is not reconstructed here,
+			// while AddMapping does store it on devexts; if
+			// rbtree Equal compares whole values this could report
+			// a false skew for locked mappings — confirm.
+			physical2logical[stripe.Dev].Insert(devextMapping{
+				PAddr: stripe.Addr,
+				LAddr: chunk.LAddr,
+				Size:  chunk.Size,
+				Flags: chunk.Flags,
+			})
+		}
+		return nil
+	}); err != nil {
+		return err
+	}
+
+	if len(lv.physical2logical) != len(physical2logical) {
+		return fmt.Errorf("(%p).fsck: skew between chunk tree and devext tree",
+			lv)
+	}
+	for devid := range lv.physical2logical {
+		if !lv.physical2logical[devid].Equal(physical2logical[devid]) {
+			return fmt.Errorf("(%p).fsck: skew between chunk tree and devext tree",
+				lv)
+		}
+	}
+
+	return nil
+}
+
+// Mappings flattens the chunk tree into one Mapping per (chunk,
+// stripe) pair, in logical-address order.
+func (lv *LogicalVolume[PhysicalVolume]) Mappings() []Mapping {
+	var ret []Mapping
+	_ = lv.logical2physical.Walk(func(node *rbtree.Node[chunkMapping]) error {
+		chunk := node.Value
+		// Copy the flags so callers can't mutate tree state through
+		// the shared pointer.
+		var flags *BlockGroupFlags
+		if chunk.Flags != nil {
+			val := *chunk.Flags
+			flags = &val
+		}
+		// NOTE(review): chunk.SizeLocked is not propagated into the
+		// returned Mapping — confirm callers don't rely on it.
+		for _, slice := range chunk.PAddrs {
+			ret = append(ret, Mapping{
+				LAddr: chunk.LAddr,
+				PAddr: slice,
+				Size:  chunk.Size,
+				Flags: flags,
+			})
+		}
+		return nil
+	})
+	return ret
+}
+
+// Resolve maps a logical address to the set of physical addresses
+// (one per stripe/mirror) backing it, and how many bytes from laddr
+// remain in that chunk.  Unmapped addresses yield (nil, 0).
+func (lv *LogicalVolume[PhysicalVolume]) Resolve(laddr LogicalAddr) (paddrs map[QualifiedPhysicalAddr]struct{}, maxlen AddrDelta) {
+	node := lv.logical2physical.Search(func(chunk chunkMapping) int {
+		// Probe with a 1-byte range containing laddr.
+		return chunkMapping{LAddr: laddr, Size: 1}.cmpRange(chunk)
+	})
+	if node == nil {
+		return nil, 0
+	}
+
+	chunk := node.Value
+
+	offsetWithinChunk := laddr.Sub(chunk.LAddr)
+	paddrs = make(map[QualifiedPhysicalAddr]struct{})
+	maxlen = chunk.Size - offsetWithinChunk
+	for _, stripe := range chunk.PAddrs {
+		paddrs[QualifiedPhysicalAddr{
+			Dev:  stripe.Dev,
+			Addr: stripe.Addr.Add(offsetWithinChunk),
+		}] = struct{}{}
+	}
+
+	return paddrs, maxlen
+}
+
+// ResolveAny returns the start of any chunk overlapping
+// [laddr, laddr+size) together with that chunk's first stripe, or
+// (-1, {0, -1}) if none overlaps.
+func (lv *LogicalVolume[PhysicalVolume]) ResolveAny(laddr LogicalAddr, size AddrDelta) (LogicalAddr, QualifiedPhysicalAddr) {
+	node := lv.logical2physical.Search(func(chunk chunkMapping) int {
+		return chunkMapping{LAddr: laddr, Size: size}.cmpRange(chunk)
+	})
+	if node == nil {
+		return -1, QualifiedPhysicalAddr{0, -1}
+	}
+	return node.Value.LAddr, node.Value.PAddrs[0]
+}
+
+// UnResolve maps a physical address back to its logical address, or
+// -1 if unmapped.
+//
+// NOTE(review): an unknown paddr.Dev makes this call Search on a nil
+// tree pointer — confirm rbtree tolerates a nil receiver.
+func (lv *LogicalVolume[PhysicalVolume]) UnResolve(paddr QualifiedPhysicalAddr) LogicalAddr {
+	node := lv.physical2logical[paddr.Dev].Search(func(ext devextMapping) int {
+		return devextMapping{PAddr: paddr.Addr, Size: 1}.cmpRange(ext)
+	})
+	if node == nil {
+		return -1
+	}
+
+	ext := node.Value
+
+	offsetWithinExt := paddr.Addr.Sub(ext.PAddr)
+	return ext.LAddr.Add(offsetWithinExt)
+}
+
+// ReadAt implements util.File: it fills all of dat starting at laddr,
+// looping over chunk boundaries; maybeShortReadAt either makes
+// progress or returns an error, so the loop terminates.
+func (lv *LogicalVolume[PhysicalVolume]) ReadAt(dat []byte, laddr LogicalAddr) (int, error) {
+	done := 0
+	for done < len(dat) {
+		n, err := lv.maybeShortReadAt(dat[done:], laddr+LogicalAddr(done))
+		done += n
+		if err != nil {
+			return done, err
+		}
+	}
+	return done, nil
+}
+
+// maybeShortReadAt reads from laddr into dat, returning a short read
+// if laddr's chunk ends before len(dat).  When the chunk has multiple
+// stripes (mirrors), every stripe is read and all must agree.
+//
+// Bug fix: `first` was never cleared, so every stripe just
+// re-overwrote dat and the "inconsistent stripes" check was dead
+// code; mirror mismatches went undetected.
+func (lv *LogicalVolume[PhysicalVolume]) maybeShortReadAt(dat []byte, laddr LogicalAddr) (int, error) {
+	paddrs, maxlen := lv.Resolve(laddr)
+	if len(paddrs) == 0 {
+		return 0, fmt.Errorf("read: could not map logical address %v", laddr)
+	}
+	if AddrDelta(len(dat)) > maxlen {
+		dat = dat[:maxlen]
+	}
+
+	buf := make([]byte, len(dat))
+	first := true
+	for paddr := range paddrs {
+		dev, ok := lv.id2pv[paddr.Dev]
+		if !ok {
+			return 0, fmt.Errorf("device=%v does not exist", paddr.Dev)
+		}
+		if _, err := dev.ReadAt(buf, paddr.Addr); err != nil {
+			return 0, fmt.Errorf("read device=%v paddr=%v: %w", paddr.Dev, paddr.Addr, err)
+		}
+		if first {
+			copy(dat, buf)
+			first = false
+		} else if !bytes.Equal(dat, buf) {
+			return 0, fmt.Errorf("inconsistent stripes at laddr=%v len=%v", laddr, len(dat))
+		}
+	}
+	return len(dat), nil
+}
+
+// WriteAt implements util.File: it writes all of dat starting at
+// laddr, looping over chunk boundaries.
+func (lv *LogicalVolume[PhysicalVolume]) WriteAt(dat []byte, laddr LogicalAddr) (int, error) {
+	done := 0
+	for done < len(dat) {
+		n, err := lv.maybeShortWriteAt(dat[done:], laddr+LogicalAddr(done))
+		done += n
+		if err != nil {
+			return done, err
+		}
+	}
+	return done, nil
+}
+
+// maybeShortWriteAt writes dat at laddr, possibly short if laddr's
+// chunk ends before len(dat).  The data is written to every stripe
+// (mirror) of the chunk.
+func (lv *LogicalVolume[PhysicalVolume]) maybeShortWriteAt(dat []byte, laddr LogicalAddr) (int, error) {
+	paddrs, maxlen := lv.Resolve(laddr)
+	if len(paddrs) == 0 {
+		return 0, fmt.Errorf("write: could not map logical address %v", laddr)
+	}
+	if AddrDelta(len(dat)) > maxlen {
+		dat = dat[:maxlen]
+	}
+
+	for paddr := range paddrs {
+		dev, ok := lv.id2pv[paddr.Dev]
+		if !ok {
+			return 0, fmt.Errorf("device=%v does not exist", paddr.Dev)
+		}
+		if _, err := dev.WriteAt(dat, paddr.Addr); err != nil {
+			return 0, fmt.Errorf("write device=%v paddr=%v: %w", paddr.Dev, paddr.Addr, err)
+		}
+	}
+	return len(dat), nil
+}
diff --git a/lib/btrfs/internal/itemtype.go b/lib/btrfs/internal/itemtype.go
new file mode 100644
index 0000000..60731aa
--- /dev/null
+++ b/lib/btrfs/internal/itemtype.go
@@ -0,0 +1,77 @@
+// Code generated by Make. DO NOT EDIT.
+
+package internal
+
+import "fmt"
+
+// ItemType is the type field of a btrfs key.  This file is generated
+// (see the "Code generated by Make" header) — changes belong in the
+// generator input, not here; the values presumably mirror the
+// kernel's BTRFS_*_KEY constants (confirm against ctree.h).
+type ItemType uint8
+
+const (
+	BLOCK_GROUP_ITEM_KEY     = ItemType(192)
+	CHUNK_ITEM_KEY           = ItemType(228)
+	DEV_EXTENT_KEY           = ItemType(204)
+	DEV_ITEM_KEY             = ItemType(216)
+	DIR_INDEX_KEY            = ItemType(96)
+	DIR_ITEM_KEY             = ItemType(84)
+	EXTENT_CSUM_KEY          = ItemType(128)
+	EXTENT_DATA_KEY          = ItemType(108)
+	EXTENT_DATA_REF_KEY      = ItemType(178)
+	EXTENT_ITEM_KEY          = ItemType(168)
+	FREE_SPACE_BITMAP_KEY    = ItemType(200)
+	FREE_SPACE_EXTENT_KEY    = ItemType(199)
+	FREE_SPACE_INFO_KEY      = ItemType(198)
+	INODE_ITEM_KEY           = ItemType(1)
+	INODE_REF_KEY            = ItemType(12)
+	METADATA_ITEM_KEY        = ItemType(169)
+	ORPHAN_ITEM_KEY          = ItemType(48)
+	PERSISTENT_ITEM_KEY      = ItemType(249)
+	QGROUP_RELATION_KEY      = ItemType(246)
+	ROOT_BACKREF_KEY         = ItemType(144)
+	ROOT_ITEM_KEY            = ItemType(132)
+	ROOT_REF_KEY             = ItemType(156)
+	SHARED_BLOCK_REF_KEY     = ItemType(182)
+	SHARED_DATA_REF_KEY      = ItemType(184)
+	TREE_BLOCK_REF_KEY       = ItemType(176)
+	UNTYPED_KEY              = ItemType(0)
+	UUID_RECEIVED_SUBVOL_KEY = ItemType(252)
+	UUID_SUBVOL_KEY          = ItemType(251)
+	XATTR_ITEM_KEY           = ItemType(24)
+)
+
+// String returns the item type's symbolic name, falling back to the
+// raw number for unknown values.
+func (t ItemType) String() string {
+	names := map[ItemType]string{
+		BLOCK_GROUP_ITEM_KEY:     "BLOCK_GROUP_ITEM",
+		CHUNK_ITEM_KEY:           "CHUNK_ITEM",
+		DEV_EXTENT_KEY:           "DEV_EXTENT",
+		DEV_ITEM_KEY:             "DEV_ITEM",
+		DIR_INDEX_KEY:            "DIR_INDEX",
+		DIR_ITEM_KEY:             "DIR_ITEM",
+		EXTENT_CSUM_KEY:          "EXTENT_CSUM",
+		EXTENT_DATA_KEY:          "EXTENT_DATA",
+		EXTENT_DATA_REF_KEY:      "EXTENT_DATA_REF",
+		EXTENT_ITEM_KEY:          "EXTENT_ITEM",
+		FREE_SPACE_BITMAP_KEY:    "FREE_SPACE_BITMAP",
+		FREE_SPACE_EXTENT_KEY:    "FREE_SPACE_EXTENT",
+		FREE_SPACE_INFO_KEY:      "FREE_SPACE_INFO",
+		INODE_ITEM_KEY:           "INODE_ITEM",
+		INODE_REF_KEY:            "INODE_REF",
+		METADATA_ITEM_KEY:        "METADATA_ITEM",
+		ORPHAN_ITEM_KEY:          "ORPHAN_ITEM",
+		PERSISTENT_ITEM_KEY:      "PERSISTENT_ITEM",
+		QGROUP_RELATION_KEY:      "QGROUP_RELATION",
+		ROOT_BACKREF_KEY:         "ROOT_BACKREF",
+		ROOT_ITEM_KEY:            "ROOT_ITEM",
+		ROOT_REF_KEY:             "ROOT_REF",
+		SHARED_BLOCK_REF_KEY:     "SHARED_BLOCK_REF",
+		SHARED_DATA_REF_KEY:      "SHARED_DATA_REF",
+		TREE_BLOCK_REF_KEY:       "TREE_BLOCK_REF",
+		UNTYPED_KEY:              "UNTYPED",
+		UUID_RECEIVED_SUBVOL_KEY: "UUID_KEY_RECEIVED_SUBVOL",
+		UUID_SUBVOL_KEY:          "UUID_KEY_SUBVOL",
+		XATTR_ITEM_KEY:           "XATTR_ITEM",
+	}
+	if name, ok := names[t]; ok {
+		return name
+	}
+	return fmt.Sprintf("%d", t)
+}
diff --git a/lib/btrfs/internal/misc.go b/lib/btrfs/internal/misc.go
new file mode 100644
index 0000000..fba1d38
--- /dev/null
+++ b/lib/btrfs/internal/misc.go
@@ -0,0 +1,37 @@
+package internal
+
+import (
+ "time"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+// Generation is a btrfs transaction/generation number.
+type Generation uint64
+
+// Key is the on-disk item key (objectid, type, offset).
+type Key struct {
+	ObjectID      ObjID    `bin:"off=0x0, siz=0x8"` // Each tree has its own set of Object IDs.
+	ItemType      ItemType `bin:"off=0x8, siz=0x1"`
+	Offset        uint64   `bin:"off=0x9, siz=0x8"` // The meaning depends on the item type.
+	binstruct.End `bin:"off=0x11"`
+}
+
+// Cmp totally orders keys by ObjectID, then ItemType, then Offset.
+func (a Key) Cmp(b Key) int {
+	if d := util.CmpUint(a.ObjectID, b.ObjectID); d != 0 {
+		return d
+	}
+	if d := util.CmpUint(a.ItemType, b.ItemType); d != 0 {
+		return d
+	}
+	return util.CmpUint(a.Offset, b.Offset)
+}
+
+// Time is the on-disk timestamp: whole seconds plus nanoseconds.
+type Time struct {
+	Sec           int64  `bin:"off=0x0, siz=0x8"` // Number of seconds since 1970-01-01T00:00:00Z.
+	NSec          uint32 `bin:"off=0x8, siz=0x4"` // Number of nanoseconds since the beginning of the second.
+	binstruct.End `bin:"off=0xc"`
+}
+
+// ToStd converts to a standard-library time.Time.
+func (t Time) ToStd() time.Time {
+	return time.Unix(t.Sec, int64(t.NSec))
+}
diff --git a/lib/btrfs/internal/objid.go b/lib/btrfs/internal/objid.go
new file mode 100644
index 0000000..8c9c002
--- /dev/null
+++ b/lib/btrfs/internal/objid.go
@@ -0,0 +1,144 @@
+package internal
+
+import (
+ "fmt"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+// ObjID is the objectid field of a btrfs key; its interpretation
+// depends on which tree it appears in and on the item type (see
+// Format).
+type ObjID uint64
+
+const (
+	// The IDs of the various trees
+	ROOT_TREE_OBJECTID        = ObjID(1)  // holds pointers to all of the tree roots
+	EXTENT_TREE_OBJECTID      = ObjID(2)  // stores information about which extents are in use, and reference counts
+	CHUNK_TREE_OBJECTID       = ObjID(3)  // chunk tree stores translations from logical -> physical block numbering
+	DEV_TREE_OBJECTID         = ObjID(4)  // stores info about which areas of a given device are in use; one per device
+	FS_TREE_OBJECTID          = ObjID(5)  // one per subvolume, storing files and directories
+	ROOT_TREE_DIR_OBJECTID    = ObjID(6)  // directory objectid inside the root tree
+	CSUM_TREE_OBJECTID        = ObjID(7)  // holds checksums of all the data extents
+	QUOTA_TREE_OBJECTID       = ObjID(8)
+	UUID_TREE_OBJECTID        = ObjID(9)  // for storing items that use the UUID_*_KEY
+	FREE_SPACE_TREE_OBJECTID  = ObjID(10) // tracks free space in block groups.
+	BLOCK_GROUP_TREE_OBJECTID = ObjID(11) // hold the block group items.
+
+	// Objects in the DEV_TREE
+	DEV_STATS_OBJECTID = ObjID(0) // device stats in the device tree
+
+	// "Special" IDs near the top of the uint64 range.
+	// util.MaxUint64pp is presumably 2^64 ("max uint64 plus one"),
+	// making these the two's-complement -4, -5, ... — confirm in
+	// lib/util.
+	BALANCE_OBJECTID         = ObjID(util.MaxUint64pp - 4)  // for storing balance parameters in the root tree
+	ORPHAN_OBJECTID          = ObjID(util.MaxUint64pp - 5)  // orphan objectid for tracking unlinked/truncated files
+	TREE_LOG_OBJECTID        = ObjID(util.MaxUint64pp - 6)  // does write ahead logging to speed up fsyncs
+	TREE_LOG_FIXUP_OBJECTID  = ObjID(util.MaxUint64pp - 7)
+	TREE_RELOC_OBJECTID      = ObjID(util.MaxUint64pp - 8)  // space balancing
+	DATA_RELOC_TREE_OBJECTID = ObjID(util.MaxUint64pp - 9)
+	EXTENT_CSUM_OBJECTID     = ObjID(util.MaxUint64pp - 10) // extent checksums all have this objectid
+	FREE_SPACE_OBJECTID      = ObjID(util.MaxUint64pp - 11) // For storing free space cache
+	FREE_INO_OBJECTID        = ObjID(util.MaxUint64pp - 12) // stores the inode number for the free-ino cache
+
+	MULTIPLE_OBJECTIDS = ObjID(util.MaxUint64pp - 255) // dummy objectid represents multiple objectids
+
+	// All files have objectids in this range.
+	FIRST_FREE_OBJECTID = ObjID(256)
+	LAST_FREE_OBJECTID  = ObjID(util.MaxUint64pp - 256)
+
+	FIRST_CHUNK_TREE_OBJECTID = ObjID(256)
+
+	// Objects in the CHUNK_TREE
+	DEV_ITEMS_OBJECTID = ObjID(1)
+
+	// ???
+	EMPTY_SUBVOL_DIR_OBJECTID = ObjID(2)
+)
+
+func (id ObjID) Format(typ ItemType) string {
+ switch typ {
+ case PERSISTENT_ITEM_KEY:
+ names := map[ObjID]string{
+ DEV_STATS_OBJECTID: "DEV_STATS",
+ }
+ if name, ok := names[id]; ok {
+ return name
+ }
+ return fmt.Sprintf("%d", int64(id))
+ case DEV_EXTENT_KEY:
+ return fmt.Sprintf("%d", int64(id))
+ case QGROUP_RELATION_KEY:
+ return fmt.Sprintf("%d/%d",
+ uint64(id)>>48,
+ uint64(id)&((1<<48)-1))
+ case UUID_SUBVOL_KEY, UUID_RECEIVED_SUBVOL_KEY:
+ return fmt.Sprintf("%#016x", uint64(id))
+ case DEV_ITEM_KEY:
+ names := map[ObjID]string{
+ BALANCE_OBJECTID: "BALANCE",
+ ORPHAN_OBJECTID: "ORPHAN",
+ TREE_LOG_OBJECTID: "TREE_LOG",
+ TREE_LOG_FIXUP_OBJECTID: "TREE_LOG_FIXUP",
+ TREE_RELOC_OBJECTID: "TREE_RELOC",
+ DATA_RELOC_TREE_OBJECTID: "DATA_RELOC_TREE",
+ EXTENT_CSUM_OBJECTID: "EXTENT_CSUM",
+ FREE_SPACE_OBJECTID: "FREE_SPACE",
+ FREE_INO_OBJECTID: "FREE_INO",
+ MULTIPLE_OBJECTIDS: "MULTIPLE",
+
+ DEV_ITEMS_OBJECTID: "DEV_ITEMS",
+ }
+ if name, ok := names[id]; ok {
+ return name
+ }
+ return fmt.Sprintf("%d", int64(id))
+ case CHUNK_ITEM_KEY:
+ names := map[ObjID]string{
+ BALANCE_OBJECTID: "BALANCE",
+ ORPHAN_OBJECTID: "ORPHAN",
+ TREE_LOG_OBJECTID: "TREE_LOG",
+ TREE_LOG_FIXUP_OBJECTID: "TREE_LOG_FIXUP",
+ TREE_RELOC_OBJECTID: "TREE_RELOC",
+ DATA_RELOC_TREE_OBJECTID: "DATA_RELOC_TREE",
+ EXTENT_CSUM_OBJECTID: "EXTENT_CSUM",
+ FREE_SPACE_OBJECTID: "FREE_SPACE",
+ FREE_INO_OBJECTID: "FREE_INO",
+ MULTIPLE_OBJECTIDS: "MULTIPLE",
+
+ FIRST_CHUNK_TREE_OBJECTID: "FIRST_CHUNK_TREE",
+ }
+ if name, ok := names[id]; ok {
+ return name
+ }
+ return fmt.Sprintf("%d", int64(id))
+ default:
+ names := map[ObjID]string{
+ BALANCE_OBJECTID: "BALANCE",
+ ORPHAN_OBJECTID: "ORPHAN",
+ TREE_LOG_OBJECTID: "TREE_LOG",
+ TREE_LOG_FIXUP_OBJECTID: "TREE_LOG_FIXUP",
+ TREE_RELOC_OBJECTID: "TREE_RELOC",
+ DATA_RELOC_TREE_OBJECTID: "DATA_RELOC_TREE",
+ EXTENT_CSUM_OBJECTID: "EXTENT_CSUM",
+ FREE_SPACE_OBJECTID: "FREE_SPACE",
+ FREE_INO_OBJECTID: "FREE_INO",
+ MULTIPLE_OBJECTIDS: "MULTIPLE",
+
+ ROOT_TREE_OBJECTID: "ROOT_TREE",
+ EXTENT_TREE_OBJECTID: "EXTENT_TREE",
+ CHUNK_TREE_OBJECTID: "CHUNK_TREE",
+ DEV_TREE_OBJECTID: "DEV_TREE",
+ FS_TREE_OBJECTID: "FS_TREE",
+ ROOT_TREE_DIR_OBJECTID: "ROOT_TREE_DIR",
+ CSUM_TREE_OBJECTID: "CSUM_TREE",
+ QUOTA_TREE_OBJECTID: "QUOTA_TREE",
+ UUID_TREE_OBJECTID: "UUID_TREE",
+ FREE_SPACE_TREE_OBJECTID: "FREE_SPACE_TREE",
+ BLOCK_GROUP_TREE_OBJECTID: "BLOCK_GROUP_TREE",
+ }
+ if name, ok := names[id]; ok {
+ return name
+ }
+ return fmt.Sprintf("%d", int64(id))
+ }
+}
+
+func (id ObjID) String() string {
+ return id.Format(UNTYPED_KEY)
+}
diff --git a/lib/btrfs/io1_pv.go b/lib/btrfs/io1_pv.go
new file mode 100644
index 0000000..cababaf
--- /dev/null
+++ b/lib/btrfs/io1_pv.go
@@ -0,0 +1,96 @@
+package btrfs
+
+import (
+ "fmt"
+ "os"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+// Device wraps an *os.File for use as a btrfs physical volume,
+// adding typed addressing and superblock caching.
+type Device struct {
+	*os.File
+
+	// Lazily populated by Superblocks()/Superblock().
+	cacheSuperblocks []*util.Ref[btrfsvol.PhysicalAddr, Superblock]
+	cacheSuperblock  *util.Ref[btrfsvol.PhysicalAddr, Superblock]
+}
+
+var _ util.File[btrfsvol.PhysicalAddr] = (*Device)(nil)
+
+// Size returns the device size in bytes, via stat.
+//
+// NOTE(review): value receiver (copies the cache fields) while every
+// other method uses a pointer receiver — confirm intentional.
+func (dev Device) Size() (btrfsvol.PhysicalAddr, error) {
+	fi, err := dev.Stat()
+	if err != nil {
+		return 0, err
+	}
+	return btrfsvol.PhysicalAddr(fi.Size()), nil
+}
+
+// ReadAt reads at a typed physical address.
+func (dev *Device) ReadAt(dat []byte, paddr btrfsvol.PhysicalAddr) (int, error) {
+	return dev.File.ReadAt(dat, int64(paddr))
+}
+
+// WriteAt writes at a typed physical address.
+func (dev *Device) WriteAt(dat []byte, paddr btrfsvol.PhysicalAddr) (int, error) {
+	return dev.File.WriteAt(dat, int64(paddr))
+}
+
+// SuperblockAddrs are the fixed physical addresses of the superblock
+// mirrors; mirrors past the end of a small device simply don't exist.
+var SuperblockAddrs = []btrfsvol.PhysicalAddr{
+	0x00_0001_0000, // 64KiB
+	0x00_0400_0000, // 64MiB
+	0x40_0000_0000, // 256GiB
+}
+
+// Superblocks reads (and caches) every superblock mirror whose fixed
+// address fits entirely on the device.  Checksums are NOT verified
+// here; see Superblock.
+func (dev *Device) Superblocks() ([]*util.Ref[btrfsvol.PhysicalAddr, Superblock], error) {
+	if dev.cacheSuperblocks != nil {
+		return dev.cacheSuperblocks, nil
+	}
+	superblockSize := btrfsvol.PhysicalAddr(binstruct.StaticSize(Superblock{}))
+
+	sz, err := dev.Size()
+	if err != nil {
+		return nil, err
+	}
+
+	var ret []*util.Ref[btrfsvol.PhysicalAddr, Superblock]
+	for i, addr := range SuperblockAddrs {
+		// Skip mirrors that would run past the end of the device.
+		if addr+superblockSize <= sz {
+			superblock := &util.Ref[btrfsvol.PhysicalAddr, Superblock]{
+				File: dev,
+				Addr: addr,
+			}
+			if err := superblock.Read(); err != nil {
+				return nil, fmt.Errorf("superblock %v: %w", i, err)
+			}
+			ret = append(ret, superblock)
+		}
+	}
+	if len(ret) == 0 {
+		return nil, fmt.Errorf("no superblocks")
+	}
+	dev.cacheSuperblocks = ret
+	return ret, nil
+}
+
+// Superblock validates the checksum of every mirror, requires all
+// mirrors to agree, and returns (and caches) the primary mirror.
+func (dev *Device) Superblock() (*util.Ref[btrfsvol.PhysicalAddr, Superblock], error) {
+	if dev.cacheSuperblock != nil {
+		return dev.cacheSuperblock, nil
+	}
+	sbs, err := dev.Superblocks()
+	if err != nil {
+		return nil, err
+	}
+
+	for i, sb := range sbs {
+		if err := sb.Data.ValidateChecksum(); err != nil {
+			return nil, fmt.Errorf("superblock %v: %w", i, err)
+		}
+		if i > 0 {
+			if !sb.Data.Equal(sbs[0].Data) {
+				return nil, fmt.Errorf("superblock %v and superblock %v disagree", 0, i)
+			}
+		}
+	}
+
+	dev.cacheSuperblock = sbs[0]
+	return sbs[0], nil
+}
diff --git a/lib/btrfs/io2_lv.go b/lib/btrfs/io2_lv.go
new file mode 100644
index 0000000..c6ef9e0
--- /dev/null
+++ b/lib/btrfs/io2_lv.go
@@ -0,0 +1,187 @@
+package btrfs
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/datawire/dlib/derror"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+type FS struct {
+ // You should probably not access .LV directly, except when
+ // implementing special things like fsck.
+ LV btrfsvol.LogicalVolume[*Device]
+
+ cacheSuperblocks []*util.Ref[btrfsvol.PhysicalAddr, Superblock]
+ cacheSuperblock *util.Ref[btrfsvol.PhysicalAddr, Superblock]
+}
+
+var _ util.File[btrfsvol.LogicalAddr] = (*FS)(nil)
+
+// AddDevice registers dev as a physical volume of the filesystem
+// (keyed by the DevID in the device's superblock), invalidates the
+// FS-level superblock caches, and loads the device's chunk mappings.
+func (fs *FS) AddDevice(dev *Device) error {
+ sb, err := dev.Superblock()
+ if err != nil {
+ return err
+ }
+ if err := fs.LV.AddPhysicalVolume(sb.Data.DevItem.DevID, dev); err != nil {
+ return err
+ }
+ // The cached aggregate superblock list is stale once a new
+ // device joins.
+ fs.cacheSuperblocks = nil
+ fs.cacheSuperblock = nil
+ if err := fs.initDev(sb); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Name returns a human-readable name for the filesystem, lazily
+// derived from the superblock's FSUUID and memoized on the LV.
+func (fs *FS) Name() string {
+ if name := fs.LV.Name(); name != "" {
+ return name
+ }
+ sb, err := fs.Superblock()
+ if err != nil {
+ return fmt.Sprintf("fs_uuid=%v", "(unreadable)")
+ }
+ name := fmt.Sprintf("fs_uuid=%v", sb.Data.FSUUID)
+ fs.LV.SetName(name)
+ return name
+}
+
+// Size, ReadAt, WriteAt, and Resolve delegate to the underlying
+// logical volume, addressing by logical address.
+func (fs *FS) Size() (btrfsvol.LogicalAddr, error) {
+ return fs.LV.Size()
+}
+
+func (fs *FS) ReadAt(p []byte, off btrfsvol.LogicalAddr) (int, error) {
+ return fs.LV.ReadAt(p, off)
+}
+func (fs *FS) WriteAt(p []byte, off btrfsvol.LogicalAddr) (int, error) {
+ return fs.LV.WriteAt(p, off)
+}
+
+func (fs *FS) Resolve(laddr btrfsvol.LogicalAddr) (paddrs map[btrfsvol.QualifiedPhysicalAddr]struct{}, maxlen btrfsvol.AddrDelta) {
+ return fs.LV.Resolve(laddr)
+}
+
+// Superblocks returns the concatenated superblock mirrors from every
+// physical volume in the filesystem.  The result is cached.
+func (fs *FS) Superblocks() ([]*util.Ref[btrfsvol.PhysicalAddr, Superblock], error) {
+ if fs.cacheSuperblocks != nil {
+ return fs.cacheSuperblocks, nil
+ }
+ var ret []*util.Ref[btrfsvol.PhysicalAddr, Superblock]
+ devs := fs.LV.PhysicalVolumes()
+ if len(devs) == 0 {
+ return nil, fmt.Errorf("no devices")
+ }
+ for _, dev := range devs {
+ sbs, err := dev.Superblocks()
+ if err != nil {
+ return nil, fmt.Errorf("file %q: %w", dev.Name(), err)
+ }
+ ret = append(ret, sbs...)
+ }
+ fs.cacheSuperblocks = ret
+ return ret, nil
+}
+
+// Superblock returns the filesystem's primary superblock, after
+// checksum-validating every mirror on every device and verifying
+// that they all agree with the first.  The result is cached.
+func (fs *FS) Superblock() (*util.Ref[btrfsvol.PhysicalAddr, Superblock], error) {
+ if fs.cacheSuperblock != nil {
+ return fs.cacheSuperblock, nil
+ }
+ sbs, err := fs.Superblocks()
+ if err != nil {
+ return nil, err
+ }
+ if len(sbs) == 0 {
+ return nil, fmt.Errorf("no superblocks")
+ }
+
+ // Track a per-device mirror index (sbi) purely for error
+ // messages; mirrors from the same file are contiguous in sbs.
+ fname := ""
+ sbi := 0
+ for i, sb := range sbs {
+ if sb.File.Name() != fname {
+ fname = sb.File.Name()
+ sbi = 0
+ } else {
+ sbi++
+ }
+
+ if err := sb.Data.ValidateChecksum(); err != nil {
+ return nil, fmt.Errorf("file %q superblock %v: %w", sb.File.Name(), sbi, err)
+ }
+ if i > 0 {
+ // FIXME(lukeshu): This is probably wrong, but
+ // lots of my multi-device code is probably
+ // wrong.
+ if !sb.Data.Equal(sbs[0].Data) {
+ return nil, fmt.Errorf("file %q superblock %v and file %q superblock %v disagree",
+ sbs[0].File.Name(), 0,
+ sb.File.Name(), sbi)
+ }
+ }
+ }
+
+ fs.cacheSuperblock = sbs[0]
+ return sbs[0], nil
+}
+
+// ReInit discards all chunk mappings and rebuilds them from each
+// device's superblock and chunk tree.
+func (fs *FS) ReInit() error {
+ fs.LV.ClearMappings()
+ for _, dev := range fs.LV.PhysicalVolumes() {
+ sb, err := dev.Superblock()
+ if err != nil {
+ return fmt.Errorf("file %q: %w", dev.Name(), err)
+ }
+ if err := fs.initDev(sb); err != nil {
+ return fmt.Errorf("file %q: %w", dev.Name(), err)
+ }
+ }
+ return nil
+}
+
+// initDev loads logical→physical chunk mappings: first the bootstrap
+// mappings from the superblock's sys-chunk array (needed to be able
+// to read the chunk tree at all), then the full set from walking the
+// chunk tree itself.
+func (fs *FS) initDev(sb *util.Ref[btrfsvol.PhysicalAddr, Superblock]) error {
+ syschunks, err := sb.Data.ParseSysChunkArray()
+ if err != nil {
+ return err
+ }
+ for _, chunk := range syschunks {
+ for _, mapping := range chunk.Chunk.Mappings(chunk.Key) {
+ if err := fs.LV.AddMapping(mapping); err != nil {
+ return err
+ }
+ }
+ }
+ if err := fs.TreeWalk(CHUNK_TREE_OBJECTID, TreeWalkHandler{
+ Item: func(_ TreePath, item Item) error {
+ if item.Head.Key.ItemType != btrfsitem.CHUNK_ITEM_KEY {
+ return nil
+ }
+ for _, mapping := range item.Body.(btrfsitem.Chunk).Mappings(item.Head.Key) {
+ if err := fs.LV.AddMapping(mapping); err != nil {
+ return err
+ }
+ }
+ return nil
+ },
+ }); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Close closes every physical volume of the filesystem, collecting
+// all failures into a single MultiError.
+func (fs *FS) Close() error {
+ var errs derror.MultiError
+ for _, dev := range fs.LV.PhysicalVolumes() {
+ // BUGFIX: the condition was `err != nil && err == nil`, which
+ // is always false, so close errors were silently dropped.
+ if err := dev.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if errs != nil {
+ return errs
+ }
+ return nil
+}
+
+var _ io.Closer = (*FS)(nil)
diff --git a/lib/btrfs/io3_btree.go b/lib/btrfs/io3_btree.go
new file mode 100644
index 0000000..dbc2ac1
--- /dev/null
+++ b/lib/btrfs/io3_btree.go
@@ -0,0 +1,562 @@
+package btrfs
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ iofs "io/fs"
+ "strings"
+
+ "github.com/datawire/dlib/derror"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+// - The first element will always have an ItemIdx of -1.
+//
+// - For .Item() callbacks, the last element will always have a
+// NodeAddr of 0.
+//
+// For example, given the tree structure
+//
+// [superblock]
+// |
+// | <------------------------------------------ pathElem={idx:-1, addr:0x01, lvl:3}
+// |
+// +[0x01]-----------+
+// | lvl=3 |
+// +-+-+-+-+-+-+-+-+-+
+// |1|2|3|4|5|6|7|8|9|
+// +---+---+---+---+-+
+// |
+// | <------------------------------ pathElem={idx:8, addr:0x02, lvl:2}
+// |
+// +[0x02]-----------+
+// | lvl=2 |
+// +-+-+-+-+-+-+-+-+-+
+// |1|2|3|4|5|6|7|8|9|
+// +---+---+---+---+-+
+// |
+// | <-------------------- pathElem={idx:7, addr:0x03, lvl:1}
+// |
+// +[0x03]-----------+
+// | lvl=1 |
+// +-+-+-+-+-+-+-+-+-+
+// |1|2|3|4|5|6|7|8|9|
+// +---+---+---+---+-+
+// |
+// | <---------------- pathElem={idx:4, addr:0x04, lvl:0}
+// |
+// +[0x04]-----------+
+// | lvl=0 |
+// +-+-+-+-+-+-+-+-+-+
+// |1|2|3|4|5|6|7|8|9|
+// +---+---+---+---+-+
+// |
+// | <--------------- pathElem={idx:5, addr:0, lvl:0}
+// |
+// [item]
+//
+// the path would be
+//
+// {-1, 0x01, 3}→{8, 0x02, 2}→{7, 0x03, 1}→{4, 0x04, 0}→{2, 0, 0}
+type TreePath []TreePathElem
+
+// A TreePathElem essentially represents a KeyPointer.
+type TreePathElem struct {
+ // ItemIdx is the index of this KeyPointer in the parent Node;
+ // or -1 if this is the root and there is no KeyPointer.
+ ItemIdx int
+ // NodeAddr is the address of the node that the KeyPointer
+ // points at, or 0 if this is a leaf item and nothing is
+ // being pointed at.
+ NodeAddr btrfsvol.LogicalAddr
+ // NodeLevel is the expected or actual level of the node at
+ // NodeAddr.
+ NodeLevel uint8
+}
+
+func (elem TreePathElem) writeNodeTo(w io.Writer) {
+ fmt.Fprintf(w, "node:%d@%v", elem.NodeLevel, elem.NodeAddr)
+}
+
+func (path TreePath) String() string {
+ if len(path) == 0 {
+ return "(empty-path)"
+ }
+ var ret strings.Builder
+ path[0].writeNodeTo(&ret)
+ for _, elem := range path[1:] {
+ fmt.Fprintf(&ret, "[%v]", elem.ItemIdx)
+ if elem.NodeAddr != 0 {
+ ret.WriteString("->")
+ elem.writeNodeTo(&ret)
+ }
+ }
+ return ret.String()
+}
+
+// A treeRoot is more-or-less a btrfsitem.Root, but simpler and generalized for
+type treeRoot struct {
+ TreeID ObjID
+ RootNode btrfsvol.LogicalAddr
+ Level uint8
+ Generation Generation
+}
+
+// lookupTree resolves a tree ID to its root-node location.  The four
+// trees whose roots live directly in the superblock are handled
+// specially; every other tree is found via its ROOT_ITEM in the root
+// tree.
+func (fs *FS) lookupTree(treeID ObjID) (*treeRoot, error) {
+ sb, err := fs.Superblock()
+ if err != nil {
+ return nil, err
+ }
+ switch treeID {
+ case ROOT_TREE_OBJECTID:
+ return &treeRoot{
+ TreeID: treeID,
+ RootNode: sb.Data.RootTree,
+ Level: sb.Data.RootLevel,
+ Generation: sb.Data.Generation, // XXX: same generation as LOG_TREE?
+ }, nil
+ case CHUNK_TREE_OBJECTID:
+ return &treeRoot{
+ TreeID: treeID,
+ RootNode: sb.Data.ChunkTree,
+ Level: sb.Data.ChunkLevel,
+ Generation: sb.Data.ChunkRootGeneration,
+ }, nil
+ case TREE_LOG_OBJECTID:
+ return &treeRoot{
+ TreeID: treeID,
+ RootNode: sb.Data.LogTree,
+ Level: sb.Data.LogLevel,
+ Generation: sb.Data.Generation, // XXX: same generation as ROOT_TREE?
+ }, nil
+ case BLOCK_GROUP_TREE_OBJECTID:
+ return &treeRoot{
+ TreeID: treeID,
+ RootNode: sb.Data.BlockGroupRoot,
+ Level: sb.Data.BlockGroupRootLevel,
+ Generation: sb.Data.BlockGroupRootGeneration,
+ }, nil
+ default:
+ // Offset is ignored in the comparison (only ObjectID and
+ // ItemType must match), so any generation of the ROOT_ITEM
+ // is accepted.
+ rootItem, err := fs.TreeSearch(ROOT_TREE_OBJECTID, func(key Key) int {
+ if key.ObjectID == treeID && key.ItemType == btrfsitem.ROOT_ITEM_KEY {
+ return 0
+ }
+ return Key{
+ ObjectID: treeID,
+ ItemType: btrfsitem.ROOT_ITEM_KEY,
+ Offset: 0,
+ }.Cmp(key)
+ })
+ if err != nil {
+ return nil, err
+ }
+ rootItemBody, ok := rootItem.Body.(btrfsitem.Root)
+ if !ok {
+ return nil, fmt.Errorf("malformed ROOT_ITEM for tree %v", treeID)
+ }
+ return &treeRoot{
+ TreeID: treeID,
+ RootNode: rootItemBody.ByteNr,
+ Level: rootItemBody.Level,
+ Generation: rootItemBody.Generation,
+ }, nil
+ }
+}
+
+type TreeWalkHandler struct {
+ // Callbacks for entire nodes
+ PreNode func(TreePath) error
+ Node func(TreePath, *util.Ref[btrfsvol.LogicalAddr, Node], error) error
+ PostNode func(TreePath, *util.Ref[btrfsvol.LogicalAddr, Node]) error
+ // Callbacks for items on internal nodes
+ PreKeyPointer func(TreePath, KeyPointer) error
+ PostKeyPointer func(TreePath, KeyPointer) error
+ // Callbacks for items on leaf nodes
+ Item func(TreePath, Item) error
+}
+
+// The lifecycle of callbacks is:
+//
+// 001 .PreNode()
+// 002 (read node)
+// 003 .Node()
+// for item in node.items:
+// if internal:
+// 004 .PreKeyPointer()
+// 005 (recurse)
+// 006 .PostKeyPointer()
+// else:
+// 004 .Item()
+// 007 .PostNode()
+// TreeWalk performs a depth-first walk of the tree identified by
+// treeID, invoking the callbacks in cbs as described above.
+func (fs *FS) TreeWalk(treeID ObjID, cbs TreeWalkHandler) error {
+ rootInfo, err := fs.lookupTree(treeID)
+ if err != nil {
+ return err
+ }
+ path := TreePath{
+ TreePathElem{
+ ItemIdx: -1,
+ NodeAddr: rootInfo.RootNode,
+ NodeLevel: rootInfo.Level,
+ },
+ }
+ return fs.treeWalk(path, cbs)
+}
+
+// treeWalk recursively walks the node at the end of path.  A callback
+// returning io/fs.SkipDir prunes the walk at that point rather than
+// aborting it; any other error aborts the whole walk.
+func (fs *FS) treeWalk(path TreePath, cbs TreeWalkHandler) error {
+ // NodeAddr of 0 means "leaf item, nothing to descend into".
+ if path[len(path)-1].NodeAddr == 0 {
+ return nil
+ }
+
+ if cbs.PreNode != nil {
+ if err := cbs.PreNode(path); err != nil {
+ if errors.Is(err, iofs.SkipDir) {
+ return nil
+ }
+ return err
+ }
+ }
+ node, err := fs.readNodeAtLevel(path[len(path)-1].NodeAddr, path[len(path)-1].NodeLevel)
+ if node != nil && err == nil {
+ // Record the actual (vs expected) level of the node we read.
+ path[len(path)-1].NodeLevel = node.Data.Head.Level
+ }
+ if cbs.Node != nil {
+ // The Node callback gets the read error (if any) and may
+ // replace or swallow it.
+ err = cbs.Node(path, node, err)
+ }
+ if err != nil {
+ if errors.Is(err, iofs.SkipDir) {
+ return nil
+ }
+ return fmt.Errorf("btrfs.FS.TreeWalk: %w", err)
+ }
+ if node != nil {
+ for i, item := range node.Data.BodyInternal {
+ itemPath := append(path, TreePathElem{
+ ItemIdx: i,
+ NodeAddr: item.BlockPtr,
+ NodeLevel: node.Data.Head.Level - 1,
+ })
+ if cbs.PreKeyPointer != nil {
+ if err := cbs.PreKeyPointer(itemPath, item); err != nil {
+ if errors.Is(err, iofs.SkipDir) {
+ continue
+ }
+ return err
+ }
+ }
+ if err := fs.treeWalk(itemPath, cbs); err != nil {
+ return err
+ }
+ if cbs.PostKeyPointer != nil {
+ if err := cbs.PostKeyPointer(itemPath, item); err != nil {
+ if errors.Is(err, iofs.SkipDir) {
+ continue
+ }
+ return err
+ }
+ }
+ }
+ for i, item := range node.Data.BodyLeaf {
+ if cbs.Item != nil {
+ itemPath := append(path, TreePathElem{
+ ItemIdx: i,
+ })
+ if err := cbs.Item(itemPath, item); err != nil {
+ if errors.Is(err, iofs.SkipDir) {
+ continue
+ }
+ return fmt.Errorf("btrfs.FS.TreeWalk: callback: %w", err)
+ }
+ }
+ }
+ }
+ if cbs.PostNode != nil {
+ if err := cbs.PostNode(path, node); err != nil {
+ if errors.Is(err, iofs.SkipDir) {
+ return nil
+ }
+ return err
+ }
+ }
+ return nil
+}
+
+// treeSearch descends from the given root looking for an item for
+// which fn returns 0.  fn must be monotonic over the tree's key
+// order: positive for keys before the target, 0 at the target,
+// negative after.  Returns io/fs.ErrNotExist if no item matches.
+func (fs *FS) treeSearch(treeRoot treeRoot, fn func(Key) int) (TreePath, *util.Ref[btrfsvol.LogicalAddr, Node], error) {
+ path := TreePath{
+ TreePathElem{
+ ItemIdx: -1,
+ NodeAddr: treeRoot.RootNode,
+ NodeLevel: treeRoot.Level,
+ },
+ }
+ for {
+ if path[len(path)-1].NodeAddr == 0 {
+ return nil, nil, iofs.ErrNotExist
+ }
+ node, err := fs.readNodeAtLevel(path[len(path)-1].NodeAddr, path[len(path)-1].NodeLevel)
+ if err != nil {
+ return nil, nil, err
+ }
+ path[len(path)-1].NodeLevel = node.Data.Head.Level
+
+ if node.Data.Head.Level > 0 {
+ // internal node
+
+ // Search for the right-most node.Data.BodyInternal item for which
+ // `fn(item.Key) >= 0`.
+ //
+ // + + + + 0 - - - -
+ //
+ // There may or may not be a value that returns '0'.
+ //
+ // Implement this search as a binary search.
+ lastGood := -1
+ firstBad := len(node.Data.BodyInternal)
+ for firstBad > lastGood+1 {
+ midpoint := (lastGood + firstBad) / 2
+ direction := fn(node.Data.BodyInternal[midpoint].Key)
+ if direction < 0 {
+ firstBad = midpoint
+ } else {
+ lastGood = midpoint
+ }
+ }
+ if lastGood < 0 {
+ return nil, nil, iofs.ErrNotExist
+ }
+ path = append(path, TreePathElem{
+ ItemIdx: lastGood,
+ NodeAddr: node.Data.BodyInternal[lastGood].BlockPtr,
+ NodeLevel: node.Data.Head.Level - 1,
+ })
+ } else {
+ // leaf node
+
+ // Search for a member of node.Data.BodyLeaf for which
+ // `fn(item.Head.Key) == 0`.
+ //
+ // + + + + 0 - - - -
+ //
+ // Such an item might not exist; in this case, return nil/ErrNotExist.
+ // Multiple such items might exist; in this case, it does not matter which
+ // is returned.
+ //
+ // Implement this search as a binary search.
+ beg := 0
+ end := len(node.Data.BodyLeaf)
+ for beg < end {
+ midpoint := (beg + end) / 2
+ direction := fn(node.Data.BodyLeaf[midpoint].Head.Key)
+ switch {
+ case direction < 0:
+ end = midpoint
+ case direction > 0:
+ beg = midpoint + 1
+ case direction == 0:
+ path = append(path, TreePathElem{
+ ItemIdx: midpoint,
+ })
+ return path, node, nil
+ }
+ }
+ return nil, nil, iofs.ErrNotExist
+ }
+ }
+}
+
+// prev returns the path/node of the item immediately before the item
+// at the given path, or (nil, nil, nil) if that item is the first in
+// the tree.  The input path is copied, not mutated.  node is a hint
+// for the currently-loaded node, to avoid redundant reads.
+func (fs *FS) prev(path TreePath, node *util.Ref[btrfsvol.LogicalAddr, Node]) (TreePath, *util.Ref[btrfsvol.LogicalAddr, Node], error) {
+ var err error
+ path = append(TreePath(nil), path...)
+
+ // go up
+ for path[len(path)-1].ItemIdx < 1 {
+ path = path[:len(path)-1]
+ if len(path) == 0 {
+ return nil, nil, nil
+ }
+ }
+ // go left
+ path[len(path)-1].ItemIdx--
+ if path[len(path)-1].NodeAddr != 0 {
+ if node.Addr != path[len(path)-2].NodeAddr {
+ node, err = fs.readNodeAtLevel(path[len(path)-2].NodeAddr, path[len(path)-2].NodeLevel)
+ if err != nil {
+ return nil, nil, err
+ }
+ path[len(path)-1].NodeAddr = node.Data.BodyInternal[path[len(path)-1].ItemIdx].BlockPtr
+ }
+ }
+ // go down: descend to the right-most leaf item of the subtree.
+ for path[len(path)-1].NodeAddr != 0 {
+ if node.Addr != path[len(path)-1].NodeAddr {
+ node, err = fs.readNodeAtLevel(path[len(path)-1].NodeAddr, path[len(path)-1].NodeLevel)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ if node.Data.Head.Level > 0 {
+ path = append(path, TreePathElem{
+ ItemIdx: len(node.Data.BodyInternal) - 1,
+ NodeAddr: node.Data.BodyInternal[len(node.Data.BodyInternal)-1].BlockPtr,
+ NodeLevel: node.Data.Head.Level - 1,
+ })
+ } else {
+ path = append(path, TreePathElem{
+ ItemIdx: len(node.Data.BodyLeaf) - 1,
+ })
+ }
+ }
+ // return: make sure the returned node is the one containing the item.
+ if node.Addr != path[len(path)-2].NodeAddr {
+ node, err = fs.readNodeAtLevel(path[len(path)-2].NodeAddr, path[len(path)-2].NodeLevel)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ return path, node, nil
+}
+
+// next returns the path/node of the item immediately after the item
+// at the given path, or (nil, nil, nil) if that item is the last in
+// the tree.  The input path is copied, not mutated.  node is a hint
+// for the currently-loaded node, to avoid redundant reads.
+func (fs *FS) next(path TreePath, node *util.Ref[btrfsvol.LogicalAddr, Node]) (TreePath, *util.Ref[btrfsvol.LogicalAddr, Node], error) {
+ var err error
+ path = append(TreePath(nil), path...)
+
+ // go up
+ if node.Addr != path[len(path)-2].NodeAddr {
+ node, err = fs.readNodeAtLevel(path[len(path)-2].NodeAddr, path[len(path)-2].NodeLevel)
+ if err != nil {
+ return nil, nil, err
+ }
+ path[len(path)-2].NodeLevel = node.Data.Head.Level
+ }
+ for path[len(path)-1].ItemIdx+1 >= int(node.Data.Head.NumItems) {
+ path = path[:len(path)-1]
+ if len(path) == 1 {
+ return nil, nil, nil
+ }
+ if node.Addr != path[len(path)-2].NodeAddr {
+ node, err = fs.readNodeAtLevel(path[len(path)-2].NodeAddr, path[len(path)-2].NodeLevel)
+ if err != nil {
+ return nil, nil, err
+ }
+ path[len(path)-2].NodeLevel = node.Data.Head.Level
+ }
+ }
+ // go right
+ path[len(path)-1].ItemIdx++
+ if path[len(path)-1].NodeAddr != 0 {
+ if node.Addr != path[len(path)-2].NodeAddr {
+ node, err = fs.readNodeAtLevel(path[len(path)-2].NodeAddr, path[len(path)-2].NodeLevel)
+ if err != nil {
+ return nil, nil, err
+ }
+ path[len(path)-1].NodeAddr = node.Data.BodyInternal[path[len(path)-1].ItemIdx].BlockPtr
+ }
+ }
+ // go down: descend to the left-most leaf item of the subtree.
+ for path[len(path)-1].NodeAddr != 0 {
+ if node.Addr != path[len(path)-1].NodeAddr {
+ node, err = fs.readNodeAtLevel(path[len(path)-1].NodeAddr, path[len(path)-1].NodeLevel)
+ if err != nil {
+ return nil, nil, err
+ }
+ path[len(path)-1].NodeLevel = node.Data.Head.Level
+ }
+ if node.Data.Head.Level > 0 {
+ path = append(path, TreePathElem{
+ ItemIdx: 0,
+ // BUGFIX: descend via the first (left-most) KeyPointer, to
+ // match ItemIdx 0; this previously used the *last* child
+ // (BodyInternal[len-1]), descending into the wrong subtree.
+ // Compare the symmetric right-most descent in prev().
+ NodeAddr: node.Data.BodyInternal[0].BlockPtr,
+ NodeLevel: node.Data.Head.Level - 1,
+ })
+ } else {
+ path = append(path, TreePathElem{
+ ItemIdx: 0,
+ })
+ }
+ }
+ // return: make sure the returned node is the one containing the item.
+ if node.Addr != path[len(path)-2].NodeAddr {
+ node, err = fs.readNodeAtLevel(path[len(path)-2].NodeAddr, path[len(path)-2].NodeLevel)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ return path, node, nil
+}
+
+// TreeSearch finds one item in the given tree for which fn returns 0
+// (see treeSearch for the monotonicity requirement on fn).
+func (fs *FS) TreeSearch(treeID ObjID, fn func(Key) int) (Item, error) {
+ rootInfo, err := fs.lookupTree(treeID)
+ if err != nil {
+ return Item{}, err
+ }
+ path, node, err := fs.treeSearch(*rootInfo, fn)
+ if err != nil {
+ return Item{}, err
+ }
+ return node.Data.BodyLeaf[path[len(path)-1].ItemIdx], nil
+}
+
+// TreeLookup finds the item with the given key in the given tree.
+func (fs *FS) TreeLookup(treeID ObjID, key Key) (Item, error) {
+ item, err := fs.TreeSearch(treeID, key.Cmp)
+ if err != nil {
+ err = fmt.Errorf("item with key=%v: %w", key, err)
+ }
+ return item, err
+}
+
+// TreeSearchAll returns all items in the given tree for which fn
+// returns 0, in key order.  It finds one match with treeSearch, then
+// scans backward and forward with prev/next until fn stops matching.
+//
+// If some items are able to be read, but there is an error reading the full set, then it might
+// return *both* a list of items and an error.
+//
+// If no such item is found, an error that is io/fs.ErrNotExist is returned.
+func (fs *FS) TreeSearchAll(treeID ObjID, fn func(Key) int) ([]Item, error) {
+ rootInfo, err := fs.lookupTree(treeID)
+ if err != nil {
+ return nil, err
+ }
+ middlePath, middleNode, err := fs.treeSearch(*rootInfo, fn)
+ if err != nil {
+ return nil, err
+ }
+ middleItem := middleNode.Data.BodyLeaf[middlePath[len(middlePath)-1].ItemIdx]
+
+ var ret = []Item{middleItem}
+ var errs derror.MultiError
+ for prevPath, prevNode := middlePath, middleNode; true; {
+ prevPath, prevNode, err = fs.prev(prevPath, prevNode)
+ if err != nil {
+ errs = append(errs, err)
+ break
+ }
+ if prevPath == nil {
+ break
+ }
+ prevItem := prevNode.Data.BodyLeaf[prevPath[len(prevPath)-1].ItemIdx]
+ if fn(prevItem.Head.Key) != 0 {
+ break
+ }
+ ret = append(ret, prevItem)
+ }
+ // The backward scan appended in reverse order; flip it.
+ util.ReverseSlice(ret)
+ for nextPath, nextNode := middlePath, middleNode; true; {
+ nextPath, nextNode, err = fs.next(nextPath, nextNode)
+ if err != nil {
+ errs = append(errs, err)
+ break
+ }
+ if nextPath == nil {
+ break
+ }
+ nextItem := nextNode.Data.BodyLeaf[nextPath[len(nextPath)-1].ItemIdx]
+ if fn(nextItem.Head.Key) != 0 {
+ break
+ }
+ ret = append(ret, nextItem)
+ }
+ if errs != nil {
+ err = errs
+ }
+ return ret, err
+}
diff --git a/lib/btrfs/io4_fs.go b/lib/btrfs/io4_fs.go
new file mode 100644
index 0000000..7ae30cb
--- /dev/null
+++ b/lib/btrfs/io4_fs.go
@@ -0,0 +1,404 @@
+package btrfs
+
+import (
+ "fmt"
+ "io"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "sync"
+
+ "github.com/datawire/dlib/derror"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+type BareInode struct {
+ Inode ObjID
+ InodeItem *btrfsitem.Inode
+ Errs derror.MultiError
+}
+
+type FullInode struct {
+ BareInode
+ OtherItems []Item
+}
+
+type InodeRef struct {
+ Inode ObjID
+ btrfsitem.InodeRef
+}
+
+type Dir struct {
+ FullInode
+ DotDot *InodeRef
+ ChildrenByName map[string]btrfsitem.DirEntry
+ ChildrenByIndex map[uint64]btrfsitem.DirEntry
+ SV *Subvolume
+}
+
+type FileExtent struct {
+ OffsetWithinFile int64
+ btrfsitem.FileExtent
+}
+
+type File struct {
+ FullInode
+ Extents []FileExtent
+ SV *Subvolume
+}
+
+type Subvolume struct {
+ FS *FS
+ TreeID ObjID
+
+ rootOnce sync.Once
+ rootVal btrfsitem.Root
+ rootErr error
+
+ bareInodeCache util.LRUCache[ObjID, *BareInode]
+ fullInodeCache util.LRUCache[ObjID, *FullInode]
+ dirCache util.LRUCache[ObjID, *Dir]
+ fileCache util.LRUCache[ObjID, *File]
+}
+
+// init lazily (once) looks up this subvolume's ROOT_ITEM in the root
+// tree, populating rootVal or rootErr.
+func (sv *Subvolume) init() {
+ sv.rootOnce.Do(func() {
+ root, err := sv.FS.TreeLookup(ROOT_TREE_OBJECTID, Key{
+ ObjectID: sv.TreeID,
+ ItemType: btrfsitem.ROOT_ITEM_KEY,
+ Offset: 0,
+ })
+ if err != nil {
+ sv.rootErr = err
+ return
+ }
+
+ rootBody, ok := root.Body.(btrfsitem.Root)
+ if !ok {
+ sv.rootErr = fmt.Errorf("FS_TREE ROOT_ITEM has malformed body")
+ return
+ }
+
+ sv.rootVal = rootBody
+ })
+}
+
+// GetRootInode returns the object ID of the subvolume's root
+// directory inode.
+func (sv *Subvolume) GetRootInode() (ObjID, error) {
+ sv.init()
+ return sv.rootVal.RootDirID, sv.rootErr
+}
+
+// LoadBareInode looks up (with caching) just the INODE_ITEM for the
+// given inode.  On failure the accumulated errors are returned and
+// InodeItem is left nil.
+func (sv *Subvolume) LoadBareInode(inode ObjID) (*BareInode, error) {
+ val := sv.bareInodeCache.GetOrElse(inode, func() (val *BareInode) {
+ val = &BareInode{
+ Inode: inode,
+ }
+ item, err := sv.FS.TreeLookup(sv.TreeID, Key{
+ ObjectID: inode,
+ ItemType: btrfsitem.INODE_ITEM_KEY,
+ Offset: 0,
+ })
+ if err != nil {
+ val.Errs = append(val.Errs, err)
+ return
+ }
+
+ itemBody, ok := item.Body.(btrfsitem.Inode)
+ if !ok {
+ val.Errs = append(val.Errs, fmt.Errorf("malformed inode"))
+ return
+ }
+ val.InodeItem = &itemBody
+
+ return
+ })
+ // A nil InodeItem signals that the cached load failed.
+ if val.InodeItem == nil {
+ return nil, val.Errs
+ }
+ return val, nil
+}
+
+// LoadFullInode looks up (with caching) all items belonging to the
+// given inode: its INODE_ITEM plus everything else keyed by that
+// object ID (refs, dir entries, extents, ...).
+func (sv *Subvolume) LoadFullInode(inode ObjID) (*FullInode, error) {
+ val := sv.fullInodeCache.GetOrElse(inode, func() (val *FullInode) {
+ val = &FullInode{
+ BareInode: BareInode{
+ Inode: inode,
+ },
+ }
+ items, err := sv.FS.TreeSearchAll(sv.TreeID, func(key Key) int {
+ return util.CmpUint(inode, key.ObjectID)
+ })
+ if err != nil {
+ val.Errs = append(val.Errs, err)
+ // TreeSearchAll may return partial results alongside an
+ // error; keep going if we got anything.
+ if len(items) == 0 {
+ return
+ }
+ }
+ for _, item := range items {
+ switch item.Head.Key.ItemType {
+ case btrfsitem.INODE_ITEM_KEY:
+ itemBody := item.Body.(btrfsitem.Inode)
+ if val.InodeItem != nil {
+ if !reflect.DeepEqual(itemBody, *val.InodeItem) {
+ val.Errs = append(val.Errs, fmt.Errorf("multiple inodes"))
+ }
+ continue
+ }
+ val.InodeItem = &itemBody
+ default:
+ val.OtherItems = append(val.OtherItems, item)
+ }
+ }
+ return
+ })
+ if val.InodeItem == nil && val.OtherItems == nil {
+ return nil, val.Errs
+ }
+ return val, nil
+}
+
+// LoadDir loads (with caching) the given inode as a directory,
+// cross-checking its DIR_ITEM/DIR_INDEX/INODE_REF items (see
+// Dir.populate).
+func (sv *Subvolume) LoadDir(inode ObjID) (*Dir, error) {
+ val := sv.dirCache.GetOrElse(inode, func() (val *Dir) {
+ val = new(Dir)
+ fullInode, err := sv.LoadFullInode(inode)
+ if err != nil {
+ val.Errs = append(val.Errs, err)
+ return
+ }
+ val.FullInode = *fullInode
+ val.SV = sv
+ val.populate()
+ return
+ })
+ // Inode stays 0 when LoadFullInode failed above.
+ if val.Inode == 0 {
+ return nil, val.Errs
+ }
+ return val, nil
+}
+
+// populate builds ChildrenByName/ChildrenByIndex from the directory's
+// items, recording (not failing on) inconsistencies in ret.Errs and
+// patching up missing by-name/by-index entries so the two maps agree.
+func (ret *Dir) populate() {
+ ret.ChildrenByName = make(map[string]btrfsitem.DirEntry)
+ ret.ChildrenByIndex = make(map[uint64]btrfsitem.DirEntry)
+ for _, item := range ret.OtherItems {
+ switch item.Head.Key.ItemType {
+ case btrfsitem.INODE_REF_KEY:
+ // The INODE_REF on a directory names its parent (“..”).
+ ref := InodeRef{
+ Inode: ObjID(item.Head.Key.Offset),
+ InodeRef: item.Body.(btrfsitem.InodeRef),
+ }
+ if ret.DotDot != nil {
+ if !reflect.DeepEqual(ref, *ret.DotDot) {
+ ret.Errs = append(ret.Errs, fmt.Errorf("multiple INODE_REF items on a directory"))
+ }
+ continue
+ }
+ ret.DotDot = &ref
+ case btrfsitem.DIR_ITEM_KEY:
+ body := item.Body.(btrfsitem.DirEntries)
+ if len(body) != 1 {
+ ret.Errs = append(ret.Errs, fmt.Errorf("multiple direntries in single DIR_ITEM?"))
+ continue
+ }
+ for _, entry := range body {
+ // DIR_ITEMs are keyed by the crc32c of the name.
+ namehash := btrfsitem.NameHash(entry.Name)
+ if namehash != item.Head.Key.Offset {
+ ret.Errs = append(ret.Errs, fmt.Errorf("direntry crc32c mismatch: key=%#x crc32c(%q)=%#x",
+ item.Head.Key.Offset, entry.Name, namehash))
+ continue
+ }
+ if other, exists := ret.ChildrenByName[string(entry.Name)]; exists {
+ if !reflect.DeepEqual(entry, other) {
+ ret.Errs = append(ret.Errs, fmt.Errorf("multiple instances of direntry name %q", entry.Name))
+ }
+ continue
+ }
+ ret.ChildrenByName[string(entry.Name)] = entry
+ }
+ case btrfsitem.DIR_INDEX_KEY:
+ index := item.Head.Key.Offset
+ body := item.Body.(btrfsitem.DirEntries)
+ if len(body) != 1 {
+ ret.Errs = append(ret.Errs, fmt.Errorf("multiple direntries in single DIR_INDEX?"))
+ continue
+ }
+ for _, entry := range body {
+ if other, exists := ret.ChildrenByIndex[index]; exists {
+ if !reflect.DeepEqual(entry, other) {
+ ret.Errs = append(ret.Errs, fmt.Errorf("multiple instances of direntry index %v", index))
+ }
+ continue
+ }
+ ret.ChildrenByIndex[index] = entry
+ }
+ //case btrfsitem.XATTR_ITEM_KEY:
+ default:
+ panic(fmt.Errorf("TODO: handle item type %v", item.Head.Key.ItemType))
+ }
+ }
+ // Reconcile the two maps: every indexed entry must exist by
+ // name, and vice versa; repairs get fresh indexes past the max.
+ entriesWithIndexes := make(map[string]struct{})
+ nextIndex := uint64(2)
+ for index, entry := range ret.ChildrenByIndex {
+ if index+1 > nextIndex {
+ nextIndex = index + 1
+ }
+ entriesWithIndexes[string(entry.Name)] = struct{}{}
+ if other, exists := ret.ChildrenByName[string(entry.Name)]; !exists {
+ ret.Errs = append(ret.Errs, fmt.Errorf("missing by-name direntry for %q", entry.Name))
+ ret.ChildrenByName[string(entry.Name)] = entry
+ } else if !reflect.DeepEqual(entry, other) {
+ ret.Errs = append(ret.Errs, fmt.Errorf("direntry index %v and direntry name %q disagree", index, entry.Name))
+ ret.ChildrenByName[string(entry.Name)] = entry
+ }
+ }
+ for _, name := range util.SortedMapKeys(ret.ChildrenByName) {
+ if _, exists := entriesWithIndexes[name]; !exists {
+ ret.Errs = append(ret.Errs, fmt.Errorf("missing by-index direntry for %q", name))
+ ret.ChildrenByIndex[nextIndex] = ret.ChildrenByName[name]
+ nextIndex++
+ }
+ }
+}
+
+// AbsPath returns the directory's absolute path within its
+// subvolume, by recursing up the ".." (INODE_REF) chain to the
+// subvolume root.
+func (dir *Dir) AbsPath() (string, error) {
+ rootInode, err := dir.SV.GetRootInode()
+ if err != nil {
+ return "", err
+ }
+ if rootInode == dir.Inode {
+ return "/", nil
+ }
+ if dir.DotDot == nil {
+ return "", fmt.Errorf("missing .. entry in dir inode %v", dir.Inode)
+ }
+ parent, err := dir.SV.LoadDir(dir.DotDot.Inode)
+ if err != nil {
+ return "", err
+ }
+ parentName, err := parent.AbsPath()
+ if err != nil {
+ return "", err
+ }
+ // The INODE_REF stores this directory's name within the parent.
+ return filepath.Join(parentName, string(dir.DotDot.Name)), nil
+}
+
+// LoadFile loads (with caching) the given inode as a regular file,
+// collecting and sanity-checking its extents (see File.populate).
+func (sv *Subvolume) LoadFile(inode ObjID) (*File, error) {
+ val := sv.fileCache.GetOrElse(inode, func() (val *File) {
+ val = new(File)
+ fullInode, err := sv.LoadFullInode(inode)
+ if err != nil {
+ val.Errs = append(val.Errs, err)
+ return
+ }
+ val.FullInode = *fullInode
+ val.SV = sv
+ val.populate()
+ return
+ })
+ // Inode stays 0 when LoadFullInode failed above.
+ if val.Inode == 0 {
+ return nil, val.Errs
+ }
+ return val, nil
+}
+
+// populate collects the file's EXTENT_DATA items into ret.Extents,
+// sorts them by file offset, and records (without failing) any gaps,
+// overlaps, or size mismatches against the inode's recorded size.
+func (ret *File) populate() {
+ for _, item := range ret.OtherItems {
+ switch item.Head.Key.ItemType {
+ case btrfsitem.INODE_REF_KEY:
+ // TODO
+ case btrfsitem.EXTENT_DATA_KEY:
+ // The key offset is the extent's byte offset within the file.
+ ret.Extents = append(ret.Extents, FileExtent{
+ OffsetWithinFile: int64(item.Head.Key.Offset),
+ FileExtent: item.Body.(btrfsitem.FileExtent),
+ })
+ default:
+ panic(fmt.Errorf("TODO: handle item type %v", item.Head.Key.ItemType))
+ }
+ }
+
+ // These should already be sorted, because of the nature of
+ // the btree; but this is a recovery tool for corrupt
+ // filesystems, so go ahead and ensure that it's sorted.
+ sort.Slice(ret.Extents, func(i, j int) bool {
+ return ret.Extents[i].OffsetWithinFile < ret.Extents[j].OffsetWithinFile
+ })
+
+ // Verify that the extents tile the file contiguously from 0 to
+ // the inode's size.
+ pos := int64(0)
+ for _, extent := range ret.Extents {
+ if extent.OffsetWithinFile != pos {
+ if extent.OffsetWithinFile > pos {
+ ret.Errs = append(ret.Errs, fmt.Errorf("extent gap from %v to %v",
+ pos, extent.OffsetWithinFile))
+ } else {
+ ret.Errs = append(ret.Errs, fmt.Errorf("extent overlap from %v to %v",
+ extent.OffsetWithinFile, pos))
+ }
+ }
+ size, err := extent.Size()
+ if err != nil {
+ ret.Errs = append(ret.Errs, fmt.Errorf("extent %v: %w", extent.OffsetWithinFile, err))
+ }
+ pos += size
+ }
+ if ret.InodeItem != nil && pos != ret.InodeItem.Size {
+ if ret.InodeItem.Size > pos {
+ ret.Errs = append(ret.Errs, fmt.Errorf("extent gap from %v to %v",
+ pos, ret.InodeItem.Size))
+ } else {
+ ret.Errs = append(ret.Errs, fmt.Errorf("extent mapped past end of file from %v to %v",
+ ret.InodeItem.Size, pos))
+ }
+ }
+}
+
+// ReadAt implements io.ReaderAt, looping over maybeShortReadAt until
+// dat is full or an error occurs.
+func (file *File) ReadAt(dat []byte, off int64) (int, error) {
+ // These stateless maybe-short-reads each do an O(n) extent
+ // lookup, so reading a file is O(n^2), but we expect n to be
+ // small, so whatev. Turn file.Extents into an rbtree if
+ // it becomes a problem.
+ done := 0
+ for done < len(dat) {
+ n, err := file.maybeShortReadAt(dat[done:], off+int64(done))
+ done += n
+ if err != nil {
+ return done, err
+ }
+ }
+ return done, nil
+}
+
+// maybeShortReadAt reads as much of dat as can be satisfied by the
+// single extent containing off (so it may return fewer bytes than
+// requested).  Extents are scanned in offset order; unreadable
+// extents are skipped.  Reads at or past the inode's recorded size
+// yield io.EOF; an unmapped position inside the file is an error.
+func (file *File) maybeShortReadAt(dat []byte, off int64) (int, error) {
+ for _, extent := range file.Extents {
+ extBeg := extent.OffsetWithinFile
+ if extBeg > off {
+ break
+ }
+ extLen, err := extent.Size()
+ if err != nil {
+ continue
+ }
+ extEnd := extBeg + extLen
+ if extEnd <= off {
+ continue
+ }
+ offsetWithinExt := off - extent.OffsetWithinFile
+ readSize := util.Min(int64(len(dat)), extLen-offsetWithinExt)
+ switch extent.Type {
+ case btrfsitem.FILE_EXTENT_INLINE:
+ // Inline extents carry their data directly in the item.
+ return copy(dat, extent.BodyInline[offsetWithinExt:offsetWithinExt+readSize]), nil
+ case btrfsitem.FILE_EXTENT_REG, btrfsitem.FILE_EXTENT_PREALLOC:
+ // Regular extents point at logical addresses in the FS.
+ return file.SV.FS.ReadAt(dat[:readSize],
+ extent.BodyExtent.DiskByteNr.
+ Add(extent.BodyExtent.Offset).
+ Add(btrfsvol.AddrDelta(offsetWithinExt)))
+ }
+ }
+ if file.InodeItem != nil && off >= file.InodeItem.Size {
+ return 0, io.EOF
+ }
+ return 0, fmt.Errorf("read: could not map position %v", off)
+}
+
+var _ io.ReaderAt = (*File)(nil)
diff --git a/lib/btrfs/types_node.go b/lib/btrfs/types_node.go
new file mode 100644
index 0000000..5934f40
--- /dev/null
+++ b/lib/btrfs/types_node.go
@@ -0,0 +1,411 @@
+package btrfs
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
type NodeFlags uint64

// BinaryStaticSize implements binstruct.StaticSizer: node flags are
// stored on disk as the low 7 bytes of a little-endian uint64 (the
// eighth byte of that region holds the backref revision, per the
// NodeHeader layout).
func (NodeFlags) BinaryStaticSize() int {
	return 7
}

// MarshalBinary implements binstruct.Marshaler, emitting the low 7
// bytes of the flags in little-endian order.
func (f NodeFlags) MarshalBinary() ([]byte, error) {
	var bs [8]byte
	binary.LittleEndian.PutUint64(bs[:], uint64(f))
	return bs[:7], nil
}

// UnmarshalBinary implements binstruct.Unmarshaler, reading 7
// little-endian bytes into the flags.  It returns an error (rather
// than panicking, as it previously did) if dat is shorter than 7
// bytes.
func (f *NodeFlags) UnmarshalBinary(dat []byte) (int, error) {
	if len(dat) < 7 {
		return 0, fmt.Errorf("NodeFlags.UnmarshalBinary: need at least 7 bytes, only have %v", len(dat))
	}
	var bs [8]byte
	copy(bs[:7], dat[:7])
	*f = NodeFlags(binary.LittleEndian.Uint64(bs[:]))
	return 7, nil
}
+
+var (
+ _ binstruct.StaticSizer = NodeFlags(0)
+ _ binstruct.Marshaler = NodeFlags(0)
+ _ binstruct.Unmarshaler = (*NodeFlags)(nil)
+)
+
+const (
+ NodeWritten = NodeFlags(1 << iota)
+ NodeReloc
+)
+
+var nodeFlagNames = []string{
+ "WRITTEN",
+ "RELOC",
+}
+
+func (f NodeFlags) Has(req NodeFlags) bool { return f&req == req }
+func (f NodeFlags) String() string { return util.BitfieldString(f, nodeFlagNames, util.HexLower) }
+
+type BackrefRev uint8
+
+const (
+ OldBackrefRev = BackrefRev(iota)
+ MixedBackrefRev = BackrefRev(iota)
+)
+
+// Node: main //////////////////////////////////////////////////////////////////////////////////////
+
// Node is an in-memory representation of one btrfs tree node
// (internal or leaf), along with enough filesystem context to
// (un)marshal and checksum it.
type Node struct {
	// Some context from the parent filesystem
	Size         uint32            // superblock.NodeSize
	ChecksumType btrfssum.CSumType // superblock.ChecksumType

	// The node's header (always present)
	Head NodeHeader

	// The node's body (which one of these is present depends on
	// the node's type, as specified in the header)
	BodyInternal []KeyPointer // for internal nodes
	BodyLeaf     []Item       // for leaf nodes

	Padding []byte
}
+
+type NodeHeader struct {
+ Checksum btrfssum.CSum `bin:"off=0x0, siz=0x20"`
+ MetadataUUID UUID `bin:"off=0x20, siz=0x10"`
+ Addr btrfsvol.LogicalAddr `bin:"off=0x30, siz=0x8"` // Logical address of this node
+ Flags NodeFlags `bin:"off=0x38, siz=0x7"`
+ BackrefRev BackrefRev `bin:"off=0x3f, siz=0x1"`
+ ChunkTreeUUID UUID `bin:"off=0x40, siz=0x10"`
+ Generation Generation `bin:"off=0x50, siz=0x8"`
+ Owner ObjID `bin:"off=0x58, siz=0x8"` // The ID of the tree that contains this node
+ NumItems uint32 `bin:"off=0x60, siz=0x4"` // [ignored-when-writing]
+ Level uint8 `bin:"off=0x64, siz=0x1"` // 0 for leaf nodes, >=1 for internal nodes
+ binstruct.End `bin:"off=0x65"`
+}
+
+// MaxItems returns the maximum possible valid value of
+// .Head.NumItems.
+func (node Node) MaxItems() uint32 {
+ bodyBytes := node.Size - uint32(binstruct.StaticSize(NodeHeader{}))
+ if node.Head.Level > 0 {
+ return bodyBytes / uint32(binstruct.StaticSize(KeyPointer{}))
+ } else {
+ return bodyBytes / uint32(binstruct.StaticSize(ItemHeader{}))
+ }
+}
+
// CalculateChecksum re-computes the node's checksum by marshalling
// the node and summing everything after the checksum field itself
// (which occupies the first binstruct.StaticSize(btrfssum.CSum{})
// bytes).
func (node Node) CalculateChecksum() (btrfssum.CSum, error) {
	data, err := binstruct.Marshal(node)
	if err != nil {
		return btrfssum.CSum{}, err
	}
	return node.ChecksumType.Sum(data[binstruct.StaticSize(btrfssum.CSum{}):])
}
+
+func (node Node) ValidateChecksum() error {
+ stored := node.Head.Checksum
+ calced, err := node.CalculateChecksum()
+ if err != nil {
+ return err
+ }
+ if calced != stored {
+ return fmt.Errorf("node checksum mismatch: stored=%v calculated=%v",
+ stored, calced)
+ }
+ return nil
+}
+
// UnmarshalBinary parses a full node (header plus internal or leaf
// body) from nodeBuf.  The caller must set .ChecksumType beforehand;
// .Size is taken from len(nodeBuf).  Leftover unconsumed bytes are an
// error.
func (node *Node) UnmarshalBinary(nodeBuf []byte) (int, error) {
	// Reset the node, preserving only the caller-supplied context.
	*node = Node{
		Size:         uint32(len(nodeBuf)),
		ChecksumType: node.ChecksumType,
	}
	n, err := binstruct.Unmarshal(nodeBuf, &node.Head)
	if err != nil {
		return n, fmt.Errorf("btrfs.Node.UnmarshalBinary: %w", err)
	}
	// Level 0 is a leaf; anything greater is an internal node.
	if node.Head.Level > 0 {
		_n, err := node.unmarshalInternal(nodeBuf[n:])
		n += _n
		if err != nil {
			return n, fmt.Errorf("btrfs.Node.UnmarshalBinary: internal: %w", err)
		}
	} else {
		_n, err := node.unmarshalLeaf(nodeBuf[n:])
		n += _n
		if err != nil {
			return n, fmt.Errorf("btrfs.Node.UnmarshalBinary: leaf: %w", err)
		}
	}
	if n != len(nodeBuf) {
		return n, fmt.Errorf("btrfs.Node.UnmarshalBinary: left over data: got %v bytes but only consumed %v",
			len(nodeBuf), n)
	}
	return n, nil
}
+
// MarshalBinary serializes the node (header plus body, padded out to
// .Size bytes).  .Size must be set and be larger than the header.
// .Head.NumItems is recomputed from the in-memory body, overriding
// whatever value is stored in the header.
func (node Node) MarshalBinary() ([]byte, error) {
	if node.Size == 0 {
		return nil, fmt.Errorf("btrfs.Node.MarshalBinary: .Size must be set")
	}
	if node.Size <= uint32(binstruct.StaticSize(NodeHeader{})) {
		return nil, fmt.Errorf("btrfs.Node.MarshalBinary: .Size must be greater than %v",
			binstruct.StaticSize(NodeHeader{}))
	}
	// NumItems is derived, not trusted (see the
	// [ignored-when-writing] note on NodeHeader.NumItems).
	if node.Head.Level > 0 {
		node.Head.NumItems = uint32(len(node.BodyInternal))
	} else {
		node.Head.NumItems = uint32(len(node.BodyLeaf))
	}

	buf := make([]byte, node.Size)

	if bs, err := binstruct.Marshal(node.Head); err != nil {
		return buf, err
	} else if len(bs) != binstruct.StaticSize(NodeHeader{}) {
		return nil, fmt.Errorf("btrfs.Node.MarshalBinary: header is %v bytes but expected %v",
			len(bs), binstruct.StaticSize(NodeHeader{}))
	} else {
		copy(buf, bs)
	}

	if node.Head.Level > 0 {
		if err := node.marshalInternalTo(buf[binstruct.StaticSize(NodeHeader{}):]); err != nil {
			return buf, err
		}
	} else {
		if err := node.marshalLeafTo(buf[binstruct.StaticSize(NodeHeader{}):]); err != nil {
			return buf, err
		}
	}

	return buf, nil
}
+
+// Node: "internal" ////////////////////////////////////////////////////////////////////////////////
+
+type KeyPointer struct {
+ Key Key `bin:"off=0x0, siz=0x11"`
+ BlockPtr btrfsvol.LogicalAddr `bin:"off=0x11, siz=0x8"`
+ Generation Generation `bin:"off=0x19, siz=0x8"`
+ binstruct.End `bin:"off=0x21"`
+}
+
+func (node *Node) unmarshalInternal(bodyBuf []byte) (int, error) {
+ n := 0
+ for i := uint32(0); i < node.Head.NumItems; i++ {
+ var item KeyPointer
+ _n, err := binstruct.Unmarshal(bodyBuf[n:], &item)
+ n += _n
+ if err != nil {
+ return n, fmt.Errorf("item %v: %w", i, err)
+ }
+ node.BodyInternal = append(node.BodyInternal, item)
+ }
+ node.Padding = bodyBuf[n:]
+ return len(bodyBuf), nil
+}
+
+func (node *Node) marshalInternalTo(bodyBuf []byte) error {
+ n := 0
+ for i, item := range node.BodyInternal {
+ bs, err := binstruct.Marshal(item)
+ if err != nil {
+ return fmt.Errorf("item %v: %w", i, err)
+ }
+ if copy(bodyBuf[n:], bs) < len(bs) {
+ return fmt.Errorf("item %v: not enough space: need at least %v+%v=%v bytes, but only have %v",
+ i, n, len(bs), n+len(bs), len(bodyBuf))
+ }
+ n += len(bs)
+ }
+ if copy(bodyBuf[n:], node.Padding) < len(node.Padding) {
+ return fmt.Errorf("padding: not enough space: need at least %v+%v=%v bytes, but only have %v",
+ n, len(node.Padding), n+len(node.Padding), len(bodyBuf))
+ }
+ return nil
+}
+
+// Node: "leaf" ////////////////////////////////////////////////////////////////////////////////////
+
+type Item struct {
+ Head ItemHeader
+ Body btrfsitem.Item
+}
+
+type ItemHeader struct {
+ Key Key `bin:"off=0x0, siz=0x11"`
+ DataOffset uint32 `bin:"off=0x11, siz=0x4"` // [ignored-when-writing] relative to the end of the header (0x65)
+ DataSize uint32 `bin:"off=0x15, siz=0x4"` // [ignored-when-writing]
+ binstruct.End `bin:"off=0x19"`
+}
+
+func (node *Node) unmarshalLeaf(bodyBuf []byte) (int, error) {
+ head := 0
+ tail := len(bodyBuf)
+ for i := uint32(0); i < node.Head.NumItems; i++ {
+ var item Item
+
+ n, err := binstruct.Unmarshal(bodyBuf[head:], &item.Head)
+ head += n
+ if err != nil {
+ return 0, fmt.Errorf("item %v: head: %w", i, err)
+ }
+ if head > tail {
+ return 0, fmt.Errorf("item %v: head: end_offset=%#x is in the body section (offset>%#x)",
+ i, head, tail)
+ }
+
+ dataOff := int(item.Head.DataOffset)
+ if dataOff < head {
+ return 0, fmt.Errorf("item %v: body: beg_offset=%#x is in the head section (offset<%#x)",
+ i, dataOff, head)
+ }
+ dataSize := int(item.Head.DataSize)
+ if dataOff+dataSize != tail {
+ return 0, fmt.Errorf("item %v: body: end_offset=%#x is not cur_tail=%#x)",
+ i, dataOff+dataSize, tail)
+ }
+ tail = dataOff
+ dataBuf := bodyBuf[dataOff : dataOff+dataSize]
+ item.Body = btrfsitem.UnmarshalItem(item.Head.Key, node.ChecksumType, dataBuf)
+
+ node.BodyLeaf = append(node.BodyLeaf, item)
+ }
+
+ node.Padding = bodyBuf[head:tail]
+ return len(bodyBuf), nil
+}
+
// marshalLeafTo serializes the items of a leaf node into bodyBuf:
// item headers are written left-to-right from the front, item bodies
// right-to-left from the back, with the node's recorded padding in
// the gap between them.  Each header's DataOffset/DataSize is
// recomputed from the body being written (the stored values are
// ignored, per the notes on ItemHeader).
func (node *Node) marshalLeafTo(bodyBuf []byte) error {
	head := 0
	tail := len(bodyBuf)
	for i, item := range node.BodyLeaf {
		itemBodyBuf, err := binstruct.Marshal(item.Body)
		if err != nil {
			return fmt.Errorf("item %v: body: %w", i, err)
		}
		// item is a copy, so mutating its header here does not
		// modify node.BodyLeaf.
		item.Head.DataSize = uint32(len(itemBodyBuf))
		item.Head.DataOffset = uint32(tail - len(itemBodyBuf))
		itemHeadBuf, err := binstruct.Marshal(item.Head)
		if err != nil {
			return fmt.Errorf("item %v: head: %w", i, err)
		}

		if tail-head < len(itemHeadBuf)+len(itemBodyBuf) {
			return fmt.Errorf("item %v: not enough space: need at least (head_len:%v)+(body_len:%v)=%v free bytes, but only have %v",
				i, len(itemHeadBuf), len(itemBodyBuf), len(itemHeadBuf)+len(itemBodyBuf), tail-head)
		}

		copy(bodyBuf[head:], itemHeadBuf)
		head += len(itemHeadBuf)
		tail -= len(itemBodyBuf)
		copy(bodyBuf[tail:], itemBodyBuf)
	}
	if copy(bodyBuf[head:tail], node.Padding) < len(node.Padding) {
		return fmt.Errorf("padding: not enough space: need at least %v free bytes, but only have %v",
			len(node.Padding), tail-head)
	}
	return nil
}
+
+func (node *Node) LeafFreeSpace() uint32 {
+ if node.Head.Level > 0 {
+ panic(fmt.Errorf("Node.LeafFreeSpace: not a leaf node"))
+ }
+ freeSpace := node.Size
+ freeSpace -= uint32(binstruct.StaticSize(NodeHeader{}))
+ for _, item := range node.BodyLeaf {
+ freeSpace -= uint32(binstruct.StaticSize(ItemHeader{}))
+ freeSpace -= item.Head.DataSize
+ }
+ return freeSpace
+}
+
+// Tie Nodes in to the FS //////////////////////////////////////////////////////////////////////////
+
+var ErrNotANode = errors.New("does not look like a node")
+
// ReadNode reads and parses the node at addr from fs, validating its
// metadata UUID and checksum before fully parsing it.  If laddrCB is
// non-nil, it is called with the logical address claimed in the
// node's header and may veto the node by returning an error.  On
// error, the partially-parsed ref is returned alongside the error
// whenever possible.
func ReadNode[Addr ~int64](fs util.File[Addr], sb Superblock, addr Addr, laddrCB func(btrfsvol.LogicalAddr) error) (*util.Ref[Addr, Node], error) {
	nodeBuf := make([]byte, sb.NodeSize)
	if _, err := fs.ReadAt(nodeBuf, addr); err != nil {
		return nil, err
	}

	// parse (early)

	nodeRef := &util.Ref[Addr, Node]{
		File: fs,
		Addr: addr,
		Data: Node{
			Size:         sb.NodeSize,
			ChecksumType: sb.ChecksumType,
		},
	}
	if _, err := binstruct.Unmarshal(nodeBuf, &nodeRef.Data.Head); err != nil {
		return nodeRef, fmt.Errorf("btrfs.ReadNode: node@%v: %w", addr, err)
	}

	// sanity checking

	// A mismatched metadata UUID means this isn't a node at all
	// (ErrNotANode), as opposed to a node that is merely corrupt.
	if nodeRef.Data.Head.MetadataUUID != sb.EffectiveMetadataUUID() {
		return nodeRef, fmt.Errorf("btrfs.ReadNode: node@%v: %w", addr, ErrNotANode)
	}

	stored := nodeRef.Data.Head.Checksum
	calced, err := nodeRef.Data.ChecksumType.Sum(nodeBuf[binstruct.StaticSize(btrfssum.CSum{}):])
	if err != nil {
		return nodeRef, fmt.Errorf("btrfs.ReadNode: node@%v: %w", addr, err)
	}
	if stored != calced {
		return nodeRef, fmt.Errorf("btrfs.ReadNode: node@%v: looks like a node but is corrupt: checksum mismatch: stored=%v calculated=%v",
			addr, stored, calced)
	}

	if laddrCB != nil {
		if err := laddrCB(nodeRef.Data.Head.Addr); err != nil {
			return nodeRef, fmt.Errorf("btrfs.ReadNode: node@%v: %w", addr, err)
		}
	}

	// parse (main)

	if _, err := nodeRef.Data.UnmarshalBinary(nodeBuf); err != nil {
		return nodeRef, fmt.Errorf("btrfs.ReadNode: node@%v: %w", addr, err)
	}

	// return

	return nodeRef, nil
}
+
// ReadNode reads and parses the node at logical address addr,
// additionally verifying that the address claimed in the node's
// header matches the address it was read from.
func (fs *FS) ReadNode(addr btrfsvol.LogicalAddr) (*util.Ref[btrfsvol.LogicalAddr, Node], error) {
	sb, err := fs.Superblock()
	if err != nil {
		return nil, fmt.Errorf("btrfs.FS.ReadNode: %w", err)
	}

	return ReadNode[btrfsvol.LogicalAddr](fs, sb.Data, addr, func(claimAddr btrfsvol.LogicalAddr) error {
		if claimAddr != addr {
			return fmt.Errorf("read from laddr=%v but claims to be at laddr=%v",
				addr, claimAddr)
		}
		return nil
	})
}
+
+func (fs *FS) readNodeAtLevel(addr btrfsvol.LogicalAddr, expLevel uint8) (*util.Ref[btrfsvol.LogicalAddr, Node], error) {
+ node, err := fs.ReadNode(addr)
+ if err != nil {
+ return node, err
+ }
+ if node.Data.Head.Level != expLevel {
+ return node, fmt.Errorf("btrfs.FS.ReadNode: node@%v: expected level %v but has level %v",
+ node.Addr, expLevel, node.Data.Head.Level)
+ }
+ return node, nil
+}
diff --git a/lib/btrfs/types_superblock.go b/lib/btrfs/types_superblock.go
new file mode 100644
index 0000000..373de48
--- /dev/null
+++ b/lib/btrfs/types_superblock.go
@@ -0,0 +1,233 @@
+package btrfs
+
+import (
+ "fmt"
+ "reflect"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+type Superblock struct {
+ Checksum btrfssum.CSum `bin:"off=0x0, siz=0x20"` // Checksum of everything past this field (from 0x20 to 0x1000)
+ FSUUID UUID `bin:"off=0x20, siz=0x10"` // FS UUID
+ Self btrfsvol.PhysicalAddr `bin:"off=0x30, siz=0x8"` // physical address of this block (different for mirrors)
+ Flags uint64 `bin:"off=0x38, siz=0x8"` // flags
+ Magic [8]byte `bin:"off=0x40, siz=0x8"` // magic ('_BHRfS_M')
+ Generation Generation `bin:"off=0x48, siz=0x8"`
+
+ RootTree btrfsvol.LogicalAddr `bin:"off=0x50, siz=0x8"` // logical address of the root tree root
+ ChunkTree btrfsvol.LogicalAddr `bin:"off=0x58, siz=0x8"` // logical address of the chunk tree root
+ LogTree btrfsvol.LogicalAddr `bin:"off=0x60, siz=0x8"` // logical address of the log tree root
+
+ LogRootTransID uint64 `bin:"off=0x68, siz=0x8"` // log_root_transid
+ TotalBytes uint64 `bin:"off=0x70, siz=0x8"` // total_bytes
+ BytesUsed uint64 `bin:"off=0x78, siz=0x8"` // bytes_used
+ RootDirObjectID ObjID `bin:"off=0x80, siz=0x8"` // root_dir_objectid (usually 6)
+ NumDevices uint64 `bin:"off=0x88, siz=0x8"` // num_devices
+
+ SectorSize uint32 `bin:"off=0x90, siz=0x4"`
+ NodeSize uint32 `bin:"off=0x94, siz=0x4"`
+ LeafSize uint32 `bin:"off=0x98, siz=0x4"` // unused; must be the same as NodeSize
+ StripeSize uint32 `bin:"off=0x9c, siz=0x4"`
+ SysChunkArraySize uint32 `bin:"off=0xa0, siz=0x4"`
+
+ ChunkRootGeneration Generation `bin:"off=0xa4, siz=0x8"`
+ CompatFlags uint64 `bin:"off=0xac, siz=0x8"` // compat_flags
+ CompatROFlags uint64 `bin:"off=0xb4, siz=0x8"` // compat_ro_flags - only implementations that support the flags can write to the filesystem
+ IncompatFlags IncompatFlags `bin:"off=0xbc, siz=0x8"` // incompat_flags - only implementations that support the flags can use the filesystem
+ ChecksumType btrfssum.CSumType `bin:"off=0xc4, siz=0x2"`
+
+ RootLevel uint8 `bin:"off=0xc6, siz=0x1"` // root_level
+ ChunkLevel uint8 `bin:"off=0xc7, siz=0x1"` // chunk_root_level
+ LogLevel uint8 `bin:"off=0xc8, siz=0x1"` // log_root_level
+
+ DevItem btrfsitem.Dev `bin:"off=0xc9, siz=0x62"` // DEV_ITEM data for this device
+ Label [0x100]byte `bin:"off=0x12b, siz=0x100"` // label (may not contain '/' or '\\')
+ CacheGeneration Generation `bin:"off=0x22b, siz=0x8"`
+ UUIDTreeGeneration Generation `bin:"off=0x233, siz=0x8"`
+
+ // FeatureIncompatMetadataUUID
+ MetadataUUID UUID `bin:"off=0x23b, siz=0x10"`
+
+ // FeatureIncompatExtentTreeV2
+ NumGlobalRoots uint64 `bin:"off=0x24b, siz=0x8"`
+
+ // FeatureIncompatExtentTreeV2
+ BlockGroupRoot btrfsvol.LogicalAddr `bin:"off=0x253, siz=0x8"`
+ BlockGroupRootGeneration Generation `bin:"off=0x25b, siz=0x8"`
+ BlockGroupRootLevel uint8 `bin:"off=0x263, siz=0x1"`
+
+ Reserved [199]byte `bin:"off=0x264, siz=0xc7"` // future expansion
+
+ SysChunkArray [0x800]byte `bin:"off=0x32b, siz=0x800"` // sys_chunk_array:(n bytes valid) Contains (KEY . CHUNK_ITEM) pairs for all SYSTEM chunks. This is needed to bootstrap the mapping from logical addresses to physical.
+ SuperRoots [4]RootBackup `bin:"off=0xb2b, siz=0x2a0"`
+
+ // Padded to 4096 bytes
+ Padding [565]byte `bin:"off=0xdcb, siz=0x235"`
+ binstruct.End `bin:"off=0x1000"`
+}
+
+func (sb Superblock) CalculateChecksum() (btrfssum.CSum, error) {
+ data, err := binstruct.Marshal(sb)
+ if err != nil {
+ return btrfssum.CSum{}, err
+ }
+ return sb.ChecksumType.Sum(data[binstruct.StaticSize(btrfssum.CSum{}):])
+}
+
+func (sb Superblock) ValidateChecksum() error {
+ stored := sb.Checksum
+ calced, err := sb.CalculateChecksum()
+ if err != nil {
+ return err
+ }
+ if calced != stored {
+ return fmt.Errorf("superblock checksum mismatch: stored=%v calculated=%v",
+ stored, calced)
+ }
+ return nil
+}
+
+func (a Superblock) Equal(b Superblock) bool {
+ a.Checksum = btrfssum.CSum{}
+ a.Self = 0
+
+ b.Checksum = btrfssum.CSum{}
+ b.Self = 0
+
+ return reflect.DeepEqual(a, b)
+}
+
+func (sb Superblock) EffectiveMetadataUUID() UUID {
+ if !sb.IncompatFlags.Has(FeatureIncompatMetadataUUID) {
+ return sb.FSUUID
+ }
+ return sb.MetadataUUID
+}
+
+type SysChunk struct {
+ Key Key
+ Chunk btrfsitem.Chunk
+}
+
+func (sc SysChunk) MarshalBinary() ([]byte, error) {
+ dat, err := binstruct.Marshal(sc.Key)
+ if err != nil {
+ return dat, fmt.Errorf("%T.MarshalBinary: %w", sc, err)
+ }
+ _dat, err := binstruct.Marshal(sc.Chunk)
+ dat = append(dat, _dat...)
+ if err != nil {
+ return dat, fmt.Errorf("%T.MarshalBinary: %w", sc, err)
+ }
+ return dat, nil
+}
+
+func (sc *SysChunk) UnmarshalBinary(dat []byte) (int, error) {
+ n, err := binstruct.Unmarshal(dat, &sc.Key)
+ if err != nil {
+ return n, fmt.Errorf("%T.UnmarshalBinary: %w", *sc, err)
+ }
+ _n, err := binstruct.Unmarshal(dat[n:], &sc.Chunk)
+ n += _n
+ if err != nil {
+ return n, fmt.Errorf("%T.UnmarshalBinary: %w", *sc, err)
+ }
+ return n, nil
+}
+
+func (sb Superblock) ParseSysChunkArray() ([]SysChunk, error) {
+ dat := sb.SysChunkArray[:sb.SysChunkArraySize]
+ var ret []SysChunk
+ for len(dat) > 0 {
+ var pair SysChunk
+ n, err := binstruct.Unmarshal(dat, &pair)
+ dat = dat[n:]
+ if err != nil {
+ return nil, err
+ }
+ ret = append(ret, pair)
+ }
+ return ret, nil
+}
+
+type RootBackup struct {
+ TreeRoot ObjID `bin:"off=0x0, siz=0x8"`
+ TreeRootGen Generation `bin:"off=0x8, siz=0x8"`
+
+ ChunkRoot ObjID `bin:"off=0x10, siz=0x8"`
+ ChunkRootGen Generation `bin:"off=0x18, siz=0x8"`
+
+ ExtentRoot ObjID `bin:"off=0x20, siz=0x8"`
+ ExtentRootGen Generation `bin:"off=0x28, siz=0x8"`
+
+ FSRoot ObjID `bin:"off=0x30, siz=0x8"`
+ FSRootGen Generation `bin:"off=0x38, siz=0x8"`
+
+ DevRoot ObjID `bin:"off=0x40, siz=0x8"`
+ DevRootGen Generation `bin:"off=0x48, siz=0x8"`
+
+ ChecksumRoot ObjID `bin:"off=0x50, siz=0x8"`
+ ChecksumRootGen Generation `bin:"off=0x58, siz=0x8"`
+
+ TotalBytes uint64 `bin:"off=0x60, siz=0x8"`
+ BytesUsed uint64 `bin:"off=0x68, siz=0x8"`
+ NumDevices uint64 `bin:"off=0x70, siz=0x8"`
+
+ Unused [8 * 4]byte `bin:"off=0x78, siz=0x20"`
+
+ TreeRootLevel uint8 `bin:"off=0x98, siz=0x1"`
+ ChunkRootLevel uint8 `bin:"off=0x99, siz=0x1"`
+ ExtentRootLevel uint8 `bin:"off=0x9a, siz=0x1"`
+ FSRootLevel uint8 `bin:"off=0x9b, siz=0x1"`
+ DevRootLevel uint8 `bin:"off=0x9c, siz=0x1"`
+ ChecksumRootLevel uint8 `bin:"off=0x9d, siz=0x1"`
+
+ Padding [10]byte `bin:"off=0x9e, siz=0xa"`
+ binstruct.End `bin:"off=0xa8"`
+}
+
+type IncompatFlags uint64
+
+const (
+ FeatureIncompatMixedBackref = IncompatFlags(1 << iota)
+ FeatureIncompatDefaultSubvol
+ FeatureIncompatMixedGroups
+ FeatureIncompatCompressLZO
+ FeatureIncompatCompressZSTD
+ FeatureIncompatBigMetadata // buggy
+ FeatureIncompatExtendedIRef
+ FeatureIncompatRAID56
+ FeatureIncompatSkinnyMetadata
+ FeatureIncompatNoHoles
+ FeatureIncompatMetadataUUID
+ FeatureIncompatRAID1C34
+ FeatureIncompatZoned
+ FeatureIncompatExtentTreeV2
+)
+
// incompatFlagNames maps IncompatFlags bit positions (in declaration
// order) to human-readable names for IncompatFlags.String().
var incompatFlagNames = []string{
	"FeatureIncompatMixedBackref",
	"FeatureIncompatDefaultSubvol",
	"FeatureIncompatMixedGroups",
	"FeatureIncompatCompressLZO",
	"FeatureIncompatCompressZSTD",
	"FeatureIncompatBigMetadata", // fixed: had a stray trailing space
	"FeatureIncompatExtendedIRef",
	"FeatureIncompatRAID56",
	"FeatureIncompatSkinnyMetadata",
	"FeatureIncompatNoHoles",
	"FeatureIncompatMetadataUUID",
	"FeatureIncompatRAID1C34",
	"FeatureIncompatZoned",
	"FeatureIncompatExtentTreeV2",
}
+
// Has reports whether all of the flag bits in req are set in f.
func (f IncompatFlags) Has(req IncompatFlags) bool { return f&req == req }

// String renders the flags using the names in incompatFlagNames
// (formatting delegated to util.BitfieldString).
func (f IncompatFlags) String() string {
	return util.BitfieldString(f, incompatFlagNames, util.HexLower)
}
diff --git a/lib/btrfsmisc/fsck.go b/lib/btrfsmisc/fsck.go
new file mode 100644
index 0000000..9567bdf
--- /dev/null
+++ b/lib/btrfsmisc/fsck.go
@@ -0,0 +1,51 @@
+package btrfsmisc
+
+import (
+ "errors"
+ "fmt"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
// ScanForNodes mimics btrfs-progs
// cmds/rescue-chunk-recover.c:scan_one_device(), except rather than
// doing something itself when it finds a node, it simply calls a
// callback function.
//
// The device is scanned one sector at a time; fn is invoked for
// every position that parses as a node, including ones that fail
// later validation (the error is passed through to fn).  prog, if
// non-nil, is called with the current position as the scan advances,
// and once more with devSize at the end.
func ScanForNodes(dev *btrfs.Device, sb btrfs.Superblock, fn func(*util.Ref[btrfsvol.PhysicalAddr, btrfs.Node], error), prog func(btrfsvol.PhysicalAddr)) error {
	devSize, err := dev.Size()
	if err != nil {
		return err
	}

	if sb.NodeSize < sb.SectorSize {
		return fmt.Errorf("node_size(%v) < sector_size(%v)",
			sb.NodeSize, sb.SectorSize)
	}

	for pos := btrfsvol.PhysicalAddr(0); pos+btrfsvol.PhysicalAddr(sb.NodeSize) < devSize; pos += btrfsvol.PhysicalAddr(sb.SectorSize) {
		// Superblock locations can't be nodes; skip them.
		if util.InSlice(pos, btrfs.SuperblockAddrs) {
			//fmt.Printf("sector@%v is a superblock\n", pos)
			continue
		}

		if prog != nil {
			prog(pos)
		}

		nodeRef, err := btrfs.ReadNode[btrfsvol.PhysicalAddr](dev, sb, pos, nil)
		if err != nil && errors.Is(err, btrfs.ErrNotANode) {
			continue
		}
		fn(nodeRef, err)

		// Found a node (or a non-ErrNotANode error): skip ahead past
		// the rest of the node; the loop's own increment supplies the
		// final SectorSize of the advance.
		pos += btrfsvol.PhysicalAddr(sb.NodeSize) - btrfsvol.PhysicalAddr(sb.SectorSize)
	}

	if prog != nil {
		prog(devSize)
	}

	return nil
}
diff --git a/lib/btrfsmisc/open.go b/lib/btrfsmisc/open.go
new file mode 100644
index 0000000..a52926f
--- /dev/null
+++ b/lib/btrfsmisc/open.go
@@ -0,0 +1,24 @@
+package btrfsmisc
+
+import (
+ "fmt"
+ "os"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
+)
+
+func Open(flag int, filenames ...string) (*btrfs.FS, error) {
+ fs := new(btrfs.FS)
+ for _, filename := range filenames {
+ fh, err := os.OpenFile(filename, flag, 0)
+ if err != nil {
+ _ = fs.Close()
+ return nil, fmt.Errorf("file %q: %w", filename, err)
+ }
+ if err := fs.AddDevice(&btrfs.Device{File: fh}); err != nil {
+ _ = fs.Close()
+ return nil, fmt.Errorf("file %q: %w", filename, err)
+ }
+ }
+ return fs, nil
+}
diff --git a/lib/btrfsmisc/print_tree.go b/lib/btrfsmisc/print_tree.go
new file mode 100644
index 0000000..69692e7
--- /dev/null
+++ b/lib/btrfsmisc/print_tree.go
@@ -0,0 +1,362 @@
+package btrfsmisc
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfssum"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
// PrintTree mimics btrfs-progs
// kernel-shared/print-tree.c:btrfs_print_tree() and
// kernel-shared/print-tree.c:btrfs_print_leaf()
//
// Tree contents are printed to stdout; walk errors are printed to
// stderr and do not abort the walk.
func PrintTree(fs *btrfs.FS, treeID btrfs.ObjID) error {
	return fs.TreeWalk(treeID, btrfs.TreeWalkHandler{
		Node: func(path btrfs.TreePath, nodeRef *util.Ref[btrfsvol.LogicalAddr, btrfs.Node], err error) error {
			if err != nil {
				fmt.Fprintf(os.Stderr, "error: %v: %v\n", path, err)
			}
			if nodeRef != nil {
				printHeaderInfo(nodeRef.Data)
			}
			return nil
		},
		PreKeyPointer: func(_ btrfs.TreePath, item btrfs.KeyPointer) error {
			fmt.Printf("\t%v block %v gen %v\n",
				FmtKey(item.Key),
				item.BlockPtr,
				item.Generation)
			return nil
		},
		// One case per item type; commented-out cases are not yet
		// implemented.
		Item: func(path btrfs.TreePath, item btrfs.Item) error {
			i := path[len(path)-1].ItemIdx
			fmt.Printf("\titem %v %v itemoff %v itemsize %v\n",
				i,
				FmtKey(item.Head.Key),
				item.Head.DataOffset,
				item.Head.DataSize)
			switch body := item.Body.(type) {
			case btrfsitem.FreeSpaceHeader:
				fmt.Printf("\t\tlocation %v\n", FmtKey(body.Location))
				fmt.Printf("\t\tcache generation %v entries %v bitmaps %v\n",
					body.Generation, body.NumEntries, body.NumBitmaps)
			case btrfsitem.Inode:
				fmt.Printf(""+
					"\t\tgeneration %v transid %v size %v nbytes %v\n"+
					"\t\tblock group %v mode %o links %v uid %v gid %v rdev %v\n"+
					"\t\tsequence %v flags %v\n",
					body.Generation, body.TransID, body.Size, body.NumBytes,
					body.BlockGroup, body.Mode, body.NLink, body.UID, body.GID, body.RDev,
					body.Sequence, body.Flags)
				fmt.Printf("\t\tatime %v\n", fmtTime(body.ATime))
				fmt.Printf("\t\tctime %v\n", fmtTime(body.CTime))
				fmt.Printf("\t\tmtime %v\n", fmtTime(body.MTime))
				fmt.Printf("\t\totime %v\n", fmtTime(body.OTime))
			case btrfsitem.InodeRef:
				fmt.Printf("\t\tindex %v namelen %v name: %s\n",
					body.Index, body.NameLen, body.Name)
			//case btrfsitem.INODE_EXTREF_KEY:
			//	// TODO
			case btrfsitem.DirEntries:
				for _, dir := range body {
					fmt.Printf("\t\tlocation %v type %v\n",
						FmtKey(dir.Location), dir.Type)
					fmt.Printf("\t\ttransid %v data_len %v name_len %v\n",
						dir.TransID, dir.DataLen, dir.NameLen)
					fmt.Printf("\t\tname: %s\n", dir.Name)
					if len(dir.Data) > 0 {
						fmt.Printf("\t\tdata %v\n", dir.Data)
					}
				}
			//case btrfsitem.DIR_LOG_INDEX_KEY, btrfsitem.DIR_LOG_ITEM_KEY:
			//	// TODO
			case btrfsitem.Root:
				fmt.Printf("\t\tgeneration %v root_dirid %v bytenr %d byte_limit %v bytes_used %v\n",
					body.Generation, body.RootDirID, body.ByteNr, body.ByteLimit, body.BytesUsed)
				fmt.Printf("\t\tlast_snapshot %v flags %v refs %v\n",
					body.LastSnapshot, body.Flags, body.Refs)
				fmt.Printf("\t\tdrop_progress %v drop_level %v\n",
					FmtKey(body.DropProgress), body.DropLevel)
				fmt.Printf("\t\tlevel %v generation_v2 %v\n",
					body.Level, body.GenerationV2)
				// The v2 fields are only meaningful when the v2
				// generation matches.
				if body.Generation == body.GenerationV2 {
					fmt.Printf("\t\tuuid %v\n", body.UUID)
					fmt.Printf("\t\tparent_uuid %v\n", body.ParentUUID)
					fmt.Printf("\t\treceived_uuid %v\n", body.ReceivedUUID)
					fmt.Printf("\t\tctransid %v otransid %v stransid %v rtransid %v\n",
						body.CTransID, body.OTransID, body.STransID, body.RTransID)
					fmt.Printf("\t\tctime %v\n", fmtTime(body.CTime))
					fmt.Printf("\t\totime %v\n", fmtTime(body.OTime))
					fmt.Printf("\t\tstime %v\n", fmtTime(body.STime))
					fmt.Printf("\t\trtime %v\n", fmtTime(body.RTime))
				}
			case btrfsitem.RootRef:
				// RootRef is used for both forward and backward
				// references; distinguish by the item type.
				var tag string
				switch item.Head.Key.ItemType {
				case btrfsitem.ROOT_REF_KEY:
					tag = "ref"
				case btrfsitem.ROOT_BACKREF_KEY:
					tag = "backref"
				default:
					tag = fmt.Sprintf("(error: unhandled RootRef item type: %v)", item.Head.Key.ItemType)
				}
				fmt.Printf("\t\troot %v key dirid %v sequence %v name %s\n",
					tag, body.DirID, body.Sequence, body.Name)
			case btrfsitem.Extent:
				fmt.Printf("\t\trefs %v gen %v flags %v\n",
					body.Head.Refs, body.Head.Generation, body.Head.Flags)
				if body.Head.Flags.Has(btrfsitem.EXTENT_FLAG_TREE_BLOCK) {
					fmt.Printf("\t\ttree block %v level %v\n",
						FmtKey(body.Info.Key), body.Info.Level)
				}
				printExtentInlineRefs(body.Refs)
			case btrfsitem.Metadata:
				fmt.Printf("\t\trefs %v gen %v flags %v\n",
					body.Head.Refs, body.Head.Generation, body.Head.Flags)
				fmt.Printf("\t\ttree block skinny level %v\n", item.Head.Key.Offset)
				printExtentInlineRefs(body.Refs)
			//case btrfsitem.EXTENT_DATA_REF_KEY:
			//	// TODO
			//case btrfsitem.SHARED_DATA_REF_KEY:
			//	// TODO
			case btrfsitem.ExtentCSum:
				// NOTE(review): the Superblock() error is discarded; a
				// failure here would leave sb unusable below — confirm
				// this cannot happen mid-walk.
				sb, _ := fs.Superblock()
				sectorSize := btrfsvol.AddrDelta(sb.Data.SectorSize)

				start := btrfsvol.LogicalAddr(item.Head.Key.Offset)
				itemSize := btrfsvol.AddrDelta(len(body.Sums)) * sectorSize
				fmt.Printf("\t\trange start %d end %d length %d",
					start, start.Add(itemSize), itemSize)
				sumsPerLine := util.Max(1, len(btrfssum.CSum{})/body.ChecksumSize/2)

				pos := start
				for i, sum := range body.Sums {
					if i%sumsPerLine == 0 {
						fmt.Printf("\n\t\t")
					} else {
						fmt.Printf(" ")
					}
					fmt.Printf("[%d] 0x%s", pos, sum.Fmt(sb.Data.ChecksumType))
					pos = pos.Add(sectorSize)
				}
				fmt.Printf("\n")
			case btrfsitem.FileExtent:
				fmt.Printf("\t\tgeneration %v type %v\n",
					body.Generation, body.Type)
				switch body.Type {
				case btrfsitem.FILE_EXTENT_INLINE:
					fmt.Printf("\t\tinline extent data size %v ram_bytes %v compression %v\n",
						len(body.BodyInline), body.RAMBytes, body.Compression)
				case btrfsitem.FILE_EXTENT_PREALLOC:
					fmt.Printf("\t\tprealloc data disk byte %v nr %v\n",
						body.BodyExtent.DiskByteNr,
						body.BodyExtent.DiskNumBytes)
					fmt.Printf("\t\tprealloc data offset %v nr %v\n",
						body.BodyExtent.Offset,
						body.BodyExtent.NumBytes)
				case btrfsitem.FILE_EXTENT_REG:
					fmt.Printf("\t\textent data disk byte %d nr %d\n",
						body.BodyExtent.DiskByteNr,
						body.BodyExtent.DiskNumBytes)
					fmt.Printf("\t\textent data offset %d nr %d ram %v\n",
						body.BodyExtent.Offset,
						body.BodyExtent.NumBytes,
						body.RAMBytes)
					fmt.Printf("\t\textent compression %v\n",
						body.Compression)
				default:
					fmt.Printf("\t\t(error) unknown file extent type %v", body.Type)
				}
			case btrfsitem.BlockGroup:
				fmt.Printf("\t\tblock group used %v chunk_objectid %v flags %v\n",
					body.Used, body.ChunkObjectID, body.Flags)
			case btrfsitem.FreeSpaceInfo:
				fmt.Printf("\t\tfree space info extent count %v flags %v\n",
					body.ExtentCount, body.Flags)
			case btrfsitem.FreeSpaceBitmap:
				fmt.Printf("\t\tfree space bitmap\n")
			case btrfsitem.Chunk:
				fmt.Printf("\t\tlength %d owner %d stripe_len %v type %v\n",
					body.Head.Size, body.Head.Owner, body.Head.StripeLen, body.Head.Type)
				fmt.Printf("\t\tio_align %v io_width %v sector_size %v\n",
					body.Head.IOOptimalAlign, body.Head.IOOptimalWidth, body.Head.IOMinSize)
				fmt.Printf("\t\tnum_stripes %v sub_stripes %v\n",
					body.Head.NumStripes, body.Head.SubStripes)
				for i, stripe := range body.Stripes {
					fmt.Printf("\t\t\tstripe %v devid %d offset %d\n",
						i, stripe.DeviceID, stripe.Offset)
					fmt.Printf("\t\t\tdev_uuid %v\n",
						stripe.DeviceUUID)
				}
			case btrfsitem.Dev:
				fmt.Printf(""+
					"\t\tdevid %d total_bytes %v bytes_used %v\n"+
					"\t\tio_align %v io_width %v sector_size %v type %v\n"+
					"\t\tgeneration %v start_offset %v dev_group %v\n"+
					"\t\tseek_speed %v bandwidth %v\n"+
					"\t\tuuid %v\n"+
					"\t\tfsid %v\n",
					body.DevID, body.NumBytes, body.NumBytesUsed,
					body.IOOptimalAlign, body.IOOptimalWidth, body.IOMinSize, body.Type,
					body.Generation, body.StartOffset, body.DevGroup,
					body.SeekSpeed, body.Bandwidth,
					body.DevUUID,
					body.FSUUID)
			case btrfsitem.DevExtent:
				fmt.Printf(""+
					"\t\tdev extent chunk_tree %v\n"+
					"\t\tchunk_objectid %v chunk_offset %d length %d\n"+
					"\t\tchunk_tree_uuid %v\n",
					body.ChunkTree, body.ChunkObjectID, body.ChunkOffset, body.Length,
					body.ChunkTreeUUID)
			//case btrfsitem.QGROUP_STATUS_KEY:
			//	// TODO
			//case btrfsitem.QGROUP_INFO_KEY:
			//	// TODO
			//case btrfsitem.QGROUP_LIMIT_KEY:
			//	// TODO
			case btrfsitem.UUIDMap:
				fmt.Printf("\t\tsubvol_id %d\n", body.ObjID)
			//case btrfsitem.STRING_ITEM_KEY:
			//	// TODO
			case btrfsitem.DevStats:
				fmt.Printf("\t\tpersistent item objectid %v offset %v\n",
					item.Head.Key.ObjectID.Format(item.Head.Key.ItemType), item.Head.Key.Offset)
				switch item.Head.Key.ObjectID {
				case btrfs.DEV_STATS_OBJECTID:
					fmt.Printf("\t\tdevice stats\n")
					fmt.Printf("\t\twrite_errs %v read_errs %v flush_errs %v corruption_errs %v generation %v\n",
						body.Values[btrfsitem.DEV_STAT_WRITE_ERRS],
						body.Values[btrfsitem.DEV_STAT_READ_ERRS],
						body.Values[btrfsitem.DEV_STAT_FLUSH_ERRS],
						body.Values[btrfsitem.DEV_STAT_CORRUPTION_ERRS],
						body.Values[btrfsitem.DEV_STAT_GENERATION_ERRS])
				default:
					fmt.Printf("\t\tunknown persistent item objectid %v\n", item.Head.Key.ObjectID)
				}
			//case btrfsitem.TEMPORARY_ITEM_KEY:
			//	// TODO
			case btrfsitem.Empty:
				// Several item types carry no payload; distinguish
				// them by the key's item type.
				switch item.Head.Key.ItemType {
				case btrfsitem.ORPHAN_ITEM_KEY: // 48
					fmt.Printf("\t\torphan item\n")
				case btrfsitem.TREE_BLOCK_REF_KEY: // 176
					fmt.Printf("\t\ttree block backref\n")
				case btrfsitem.SHARED_BLOCK_REF_KEY: // 182
					fmt.Printf("\t\tshared block backref\n")
				case btrfsitem.FREE_SPACE_EXTENT_KEY: // 199
					fmt.Printf("\t\tfree space extent\n")
				case btrfsitem.QGROUP_RELATION_KEY: // 246
					// do nothing
				//case btrfsitem.EXTENT_REF_V0_KEY:
				//	fmt.Printf("\t\textent ref v0 (deprecated)\n")
				//case btrfsitem.CSUM_ITEM_KEY:
				//	fmt.Printf("\t\tcsum item\n")
				default:
					fmt.Printf("\t\t(error) unhandled empty item type: %v\n", item.Head.Key.ItemType)
				}
			case btrfsitem.Error:
				fmt.Printf("\t\t(error) error item: %v\n", body.Err)
			default:
				fmt.Printf("\t\t(error) unhandled item type: %T\n", body)
			}
			return nil
		},
	})
}
+
+// printHeaderInfo mimics btrfs-progs kernel-shared/print-tree.c:print_header_info()
+func printHeaderInfo(node btrfs.Node) {
+ var typename string
+ if node.Head.Level > 0 { // internal node
+ typename = "node"
+ fmt.Printf("node %v level %v items %v free space %v",
+ node.Head.Addr,
+ node.Head.Level,
+ node.Head.NumItems,
+ node.MaxItems()-node.Head.NumItems)
+ } else { // leaf node
+ typename = "leaf"
+ fmt.Printf("leaf %d items %v free space %v",
+ node.Head.Addr,
+ node.Head.NumItems,
+ node.LeafFreeSpace())
+ }
+ fmt.Printf(" generation %v owner %v\n",
+ node.Head.Generation,
+ node.Head.Owner)
+
+ fmt.Printf("%v %d flags %v backref revision %v\n",
+ typename,
+ node.Head.Addr,
+ node.Head.Flags,
+ node.Head.BackrefRev)
+
+ fmt.Printf("checksum stored %v\n", node.Head.Checksum.Fmt(node.ChecksumType))
+ if calcSum, err := node.CalculateChecksum(); err != nil {
+ fmt.Printf("checksum calced %v\n", err)
+ } else {
+ fmt.Printf("checksum calced %v\n", calcSum.Fmt(node.ChecksumType))
+ }
+
+ fmt.Printf("fs uuid %v\n", node.Head.MetadataUUID)
+ fmt.Printf("chunk uuid %v\n", node.Head.ChunkTreeUUID)
+}
+
+// printExtentInlineRefs mimics part of btrfs-progs kernel-shared/print-tree.c:print_extent_item()
+func printExtentInlineRefs(refs []btrfsitem.ExtentInlineRef) {
+ for _, ref := range refs {
+ switch subitem := ref.Body.(type) {
+ case nil:
+ switch ref.Type {
+ case btrfsitem.TREE_BLOCK_REF_KEY:
+ fmt.Printf("\t\ttree block backref root %v\n",
+ btrfs.ObjID(ref.Offset))
+ case btrfsitem.SHARED_BLOCK_REF_KEY:
+ fmt.Printf("\t\tshared block backref parent %v\n",
+ ref.Offset)
+ default:
+ fmt.Printf("\t\t(error) unexpected empty sub-item type: %v\n", ref.Type)
+ }
+ case btrfsitem.ExtentDataRef:
+ fmt.Printf("\t\textent data backref root %v objectid %v offset %v count %v\n",
+ subitem.Root, subitem.ObjectID, subitem.Offset, subitem.Count)
+ case btrfsitem.SharedDataRef:
+ fmt.Printf("\t\tshared data backref parent %v count %v\n",
+ ref.Offset, subitem.Count)
+ default:
+ fmt.Printf("\t\t(error) unexpected sub-item type: %T\n", subitem)
+ }
+ }
+}
+
// FmtKey formats a key for display; it mimics
// print-tree.c:btrfs_print_key().  How the offset is rendered
// depends on the item type.
func FmtKey(key btrfs.Key) string {
	var out strings.Builder
	fmt.Fprintf(&out, "key (%v %v", key.ObjectID.Format(key.ItemType), key.ItemType)
	switch key.ItemType {
	case btrfsitem.QGROUP_RELATION_KEY: //TODO, btrfsitem.QGROUP_INFO_KEY, btrfsitem.QGROUP_LIMIT_KEY:
		panic("not implemented")
	case btrfsitem.UUID_SUBVOL_KEY, btrfsitem.UUID_RECEIVED_SUBVOL_KEY:
		fmt.Fprintf(&out, " %#08x)", key.Offset)
	case btrfsitem.ROOT_ITEM_KEY:
		fmt.Fprintf(&out, " %v)", btrfs.ObjID(key.Offset))
	default:
		// Presumably util.MaxUint64pp is 2^64 (as a wider type), so
		// this matches offset == math.MaxUint64, conventionally
		// printed as -1 — confirm against util.
		if key.Offset == util.MaxUint64pp-1 {
			fmt.Fprintf(&out, " -1)")
		} else {
			fmt.Fprintf(&out, " %v)", key.Offset)
		}
	}
	return out.String()
}
+
+func fmtTime(t btrfs.Time) string {
+ return fmt.Sprintf("%v.%v (%v)",
+ t.Sec, t.NSec, t.ToStd().Format("2006-01-02 15:04:05"))
+}
diff --git a/lib/btrfsmisc/walk.go b/lib/btrfsmisc/walk.go
new file mode 100644
index 0000000..ba0444f
--- /dev/null
+++ b/lib/btrfsmisc/walk.go
@@ -0,0 +1,115 @@
+package btrfsmisc
+
+import (
+ "fmt"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsitem"
+ "git.lukeshu.com/btrfs-progs-ng/lib/btrfs/btrfsvol"
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
// WalkErr annotates an error encountered during a tree walk with the
// tree's name and the path within the tree at which it occurred.
type WalkErr struct {
	TreeName string
	Path     btrfs.TreePath
	Err      error
}

// Unwrap returns the wrapped error, for errors.Is/errors.As.
func (e WalkErr) Unwrap() error { return e.Err }

// Error implements the error interface.  The path component is omitted
// when it is empty (e.g. when the tree's root could not be read at all).
func (e WalkErr) Error() string {
	if len(e.Path) == 0 {
		return fmt.Sprintf("%v: %v", e.TreeName, e.Err)
	}
	return fmt.Sprintf("%v: %v: %v", e.TreeName, e.Path, e.Err)
}
+
// WalkAllTreesHandler bundles the callbacks used by WalkAllTrees.
type WalkAllTreesHandler struct {
	// Err is called each time a walk error is encountered; the error
	// is always of type WalkErr.
	Err func(error)
	// Callbacks for entire trees
	PreTree  func(name string, id btrfs.ObjID)
	PostTree func(name string, id btrfs.ObjID)
	// Callbacks for nodes or smaller
	//
	// If UnsafeNodes is false (the default), the embedded Node
	// callback is wrapped so that node-read errors are routed to Err
	// instead of being passed through to Node.
	UnsafeNodes bool
	btrfs.TreeWalkHandler
}
+
// WalkAllTrees walks all trees in a *btrfs.FS. Rather than returning
// an error, it calls errCb each time an error is encountered. The
// error will always be of type WalkErr.
func WalkAllTrees(fs *btrfs.FS, cbs WalkAllTreesHandler) {
	// treeName is captured by handleErr and updated as each tree is
	// walked, so errors are attributed to the tree being walked.
	var treeName string
	handleErr := func(path btrfs.TreePath, err error) {
		cbs.Err(WalkErr{
			TreeName: treeName,
			Path:     path,
			Err:      err,
		})
	}

	// Seed the work list with the well-known trees; roots of further
	// trees are discovered while walking the root tree (see the Item
	// wrapper below).
	trees := []struct {
		Name string
		ID   btrfs.ObjID
	}{
		{
			Name: "root tree",
			ID:   btrfs.ROOT_TREE_OBJECTID,
		},
		{
			Name: "chunk tree",
			ID:   btrfs.CHUNK_TREE_OBJECTID,
		},
		{
			Name: "log tree",
			ID:   btrfs.TREE_LOG_OBJECTID,
		},
		{
			Name: "block group tree",
			ID:   btrfs.BLOCK_GROUP_TREE_OBJECTID,
		},
	}
	// Wrap the caller's Item callback so that every ROOT_ITEM found
	// appends the tree it describes to the work list.
	origItem := cbs.Item
	cbs.Item = func(path btrfs.TreePath, item btrfs.Item) error {
		if item.Head.Key.ItemType == btrfsitem.ROOT_ITEM_KEY {
			trees = append(trees, struct {
				Name string
				ID   btrfs.ObjID
			}{
				Name: fmt.Sprintf("tree %v (via %v %v)",
					item.Head.Key.ObjectID.Format(0), treeName, path),
				ID: item.Head.Key.ObjectID,
			})
		}
		if origItem != nil {
			return origItem(path, item)
		}
		return nil
	}

	// Unless the caller opted in to unsafe nodes, wrap the Node
	// callback: errors go to handleErr, and the caller's callback only
	// ever sees successfully-read (non-nil) nodes.
	if !cbs.UnsafeNodes {
		origNode := cbs.Node
		cbs.Node = func(path btrfs.TreePath, node *util.Ref[btrfsvol.LogicalAddr, btrfs.Node], err error) error {
			if err != nil {
				handleErr(path, err)
			}
			if node != nil && origNode != nil {
				return origNode(path, node, nil)
			}
			return nil
		}
	}

	// An indexed loop (not range) is deliberate: the Item wrapper
	// above appends to 'trees' while we iterate, and range over a
	// slice would not observe the growth.
	for i := 0; i < len(trees); i++ {
		tree := trees[i]
		treeName = tree.Name
		if cbs.PreTree != nil {
			cbs.PreTree(treeName, tree.ID)
		}
		if err := fs.TreeWalk(tree.ID, cbs.TreeWalkHandler); err != nil {
			handleErr(nil, err)
		}
		if cbs.PostTree != nil {
			cbs.PostTree(treeName, tree.ID)
		}
	}
}
diff --git a/lib/linux/stat.go b/lib/linux/stat.go
new file mode 100644
index 0000000..c4d4ad9
--- /dev/null
+++ b/lib/linux/stat.go
@@ -0,0 +1,95 @@
+// Based on https://github.com/datawire/ocibuild/blob/master/pkg/python/stat.go
+
+package linux
+
// StatMode is a Linux mode_t-style value: a file's type and permission
// bits packed into the low 16 bits.
type StatMode uint32

//nolint:deadcode,varcheck // not all of these modes will be used
const (
	// 16 bits = 5⅓ octal characters

	ModeFmt StatMode = 0o17_0000 // mask for the type bits

	_ModeFmtUnused000  StatMode = 0o00_0000
	ModeFmtNamedPipe   StatMode = 0o01_0000 // type: named pipe (FIFO)
	ModeFmtCharDevice  StatMode = 0o02_0000 // type: character device
	_ModeFmtUnused003  StatMode = 0o03_0000
	ModeFmtDir         StatMode = 0o04_0000 // type: directory
	_ModeFmtUnused005  StatMode = 0o05_0000
	ModeFmtBlockDevice StatMode = 0o06_0000 // type: block device
	_ModeFmtUnused007  StatMode = 0o07_0000
	ModeFmtRegular     StatMode = 0o10_0000 // type: regular file
	_ModeFmtUnused011  StatMode = 0o11_0000
	ModeFmtSymlink     StatMode = 0o12_0000 // type: symbolic link
	_ModeFmtUnused013  StatMode = 0o13_0000
	ModeFmtSocket      StatMode = 0o14_0000 // type: socket file
	_ModeFmtUnused015  StatMode = 0o15_0000
	_ModeFmtUnused016  StatMode = 0o16_0000
	_ModeFmtUnused017  StatMode = 0o17_0000

	ModePerm StatMode = 0o00_7777 // mask for permission bits

	ModePermSetUID StatMode = 0o00_4000 // permission: set user id
	ModePermSetGID StatMode = 0o00_2000 // permission: set group ID
	ModePermSticky StatMode = 0o00_1000 // permission: sticky bit

	ModePermUsrR StatMode = 0o00_0400 // permission: user: read
	ModePermUsrW StatMode = 0o00_0200 // permission: user: write
	ModePermUsrX StatMode = 0o00_0100 // permission: user: execute

	ModePermGrpR StatMode = 0o00_0040 // permission: group: read
	ModePermGrpW StatMode = 0o00_0020 // permission: group: write
	ModePermGrpX StatMode = 0o00_0010 // permission: group: execute

	ModePermOthR StatMode = 0o00_0004 // permission: other: read
	ModePermOthW StatMode = 0o00_0002 // permission: other: write
	ModePermOthX StatMode = 0o00_0001 // permission: other: execute
)

// IsDir reports whether mode describes a directory.
//
// That is, it tests that the ModeFmt bits are set to ModeFmtDir.
func (mode StatMode) IsDir() bool {
	return mode&ModeFmt == ModeFmtDir
}

// IsRegular reports whether m describes a regular file.
//
// That is, it tests that the ModeFmt bits are set to ModeFmtRegular.
func (mode StatMode) IsRegular() bool {
	return mode&ModeFmt == ModeFmtRegular
}

// String returns a textual representation of the mode.
//
// This is the format that POSIX specifies for showing the mode in the
// output of the `ls -l` command.  POSIX does not specify the
// character to use to indicate a ModeFmtSocket file; this method uses
// 's' (GNU `ls` behavior; though POSIX notes that many
// implementations use '=' for sockets).
//
// NOTE(review): this assumes the mode fits in 16 bits; a value with
// bits set at or above 0o20_0000 would make the type-character index
// exceed the lookup string and panic — confirm that callers mask.
func (mode StatMode) String() string {
	buf := [10]byte{
		// type: This string is easy; it directly pairs with
		// the above ModeFmtXXX list above; the character in
		// the string left-to-right corresponds with the
		// constant in the list top-to-bottom.
		"?pc?d?b?-?l?s???"[mode>>12],

		// owner
		"-r"[(mode>>8)&0o1],
		"-w"[(mode>>7)&0o1],
		"-xSs"[((mode>>6)&0o1)|((mode>>10)&0o2)],

		// group
		"-r"[(mode>>5)&0o1],
		"-w"[(mode>>4)&0o1],
		"-xSs"[((mode>>3)&0o1)|((mode>>9)&0o2)],

		// other
		"-r"[(mode>>2)&0o1],
		"-w"[(mode>>1)&0o1],
		"-xTt"[((mode>>0)&0o1)|((mode>>8)&0o2)],
	}

	return string(buf[:])
}
diff --git a/lib/rbtree/print_test.go b/lib/rbtree/print_test.go
new file mode 100644
index 0000000..3e37cf2
--- /dev/null
+++ b/lib/rbtree/print_test.go
@@ -0,0 +1,41 @@
+package rbtree
+
+import (
+ "fmt"
+ "io"
+ "strings"
+)
+
+func (t *Tree[K, V]) ASCIIArt() string {
+ var out strings.Builder
+ t.root.asciiArt(&out, "", "", "")
+ return out.String()
+}
+
+func (node *Node[V]) String() string {
+ switch {
+ case node == nil:
+ return "nil"
+ case node.Color == Red:
+ return fmt.Sprintf("R(%v)", node.Value)
+ default:
+ return fmt.Sprintf("B(%v)", node.Value)
+ }
+}
+
+func (node *Node[V]) asciiArt(w io.Writer, u, m, l string) {
+ if node == nil {
+ fmt.Fprintf(w, "%snil\n", m)
+ return
+ }
+
+ node.Right.asciiArt(w, u+" ", u+" ,--", u+" | ")
+
+ if node.Color == Red {
+ fmt.Fprintf(w, "%s%v\n", m, node)
+ } else {
+ fmt.Fprintf(w, "%s%v\n", m, node)
+ }
+
+ node.Left.asciiArt(w, l+" | ", l+" `--", l+" ")
+}
diff --git a/lib/rbtree/rbtree.go b/lib/rbtree/rbtree.go
new file mode 100644
index 0000000..7927307
--- /dev/null
+++ b/lib/rbtree/rbtree.go
@@ -0,0 +1,477 @@
+package rbtree
+
+import (
+ "fmt"
+
+ "golang.org/x/exp/constraints"
+)
+
// Color is the red/black color of a node.
type Color bool

const (
	Black = Color(false)
	Red   = Color(true)
)

// Node is a single node in a red-black Tree.
type Node[V any] struct {
	Parent, Left, Right *Node[V]

	Color Color

	Value V
}

// getColor returns the node's color, treating nil (a leaf sentinel) as
// Black — which is what lets the balancing code treat nil children
// uniformly.
func (node *Node[V]) getColor() Color {
	if node == nil {
		return Black
	}
	return node.Color
}

// Tree is a red-black tree of V values, ordered by the K key that
// KeyFn extracts from each value.  KeyFn must be set before use; the
// zero value of Tree is otherwise an empty tree.
type Tree[K constraints.Ordered, V any] struct {
	KeyFn func(V) K
	root  *Node[V]
}
+
// Walk performs an in-order traversal of the tree, calling fn on each
// node.  Traversal stops early, returning fn's error, if fn returns
// non-nil.
func (t *Tree[K, V]) Walk(fn func(*Node[V]) error) error {
	return t.root.walk(fn)
}

// walk recursively in-order-traverses the subtree rooted at node.
func (node *Node[V]) walk(fn func(*Node[V]) error) error {
	if node == nil {
		return nil
	}
	if err := node.Left.walk(fn); err != nil {
		return err
	}
	if err := fn(node); err != nil {
		return err
	}
	if err := node.Right.walk(fn); err != nil {
		return err
	}
	return nil
}
+
// Search the tree for a value that satisfies the given callback
// function.  A return value of 0 means to return this value; <0
// means to go left on the tree (the value is too high), >0 means to
// go right on the tree (the value is too low).
//
//	           +-----+
//	           | v=8 | == 0 : this is it
//	           +-----+
//	          /       \
//	         /         \
//	<0 : go left       >0 : go right
//	       /             \
//	    +---+          +---+
//	    | 7 |          | 9 |
//	    +---+          +---+
//
// Returns nil if no such value is found.
//
// Search is good for advanced lookup, like when a range of values is
// acceptable.  For simple exact-value lookup, use Lookup.
func (t *Tree[K, V]) Search(fn func(V) int) *Node[V] {
	ret, _ := t.root.search(fn)
	return ret
}

// search walks from node in the direction chosen by fn at each step.
//
// If a matching value is found it is returned as 'exact' (and
// 'nearest' is nil).  Otherwise 'exact' is nil and 'nearest' is the
// last node visited — the node that would become the parent of the
// sought value if it were inserted.
func (node *Node[V]) search(fn func(V) int) (exact, nearest *Node[V]) {
	var prev *Node[V]
	for {
		if node == nil {
			return nil, prev
		}
		direction := fn(node.Value)
		prev = node
		switch {
		case direction < 0:
			node = node.Left
		case direction == 0:
			return node, nil
		case direction > 0:
			node = node.Right
		}
	}
}
+
// exactKey returns a Search callback that matches exactly the value
// whose key equals key.
func (t *Tree[K, V]) exactKey(key K) func(V) int {
	return func(val V) int {
		valKey := t.KeyFn(val)
		switch {
		case key < valKey:
			return -1
		case key > valKey:
			return 1
		default: // key == valKey:
			return 0
		}
	}
}

// Lookup looks up the value for an exact key. If no such value
// exists, nil is returned.
func (t *Tree[K, V]) Lookup(key K) *Node[V] {
	return t.Search(t.exactKey(key))
}
+
+// Min returns the minimum value stored in the tree, or nil if the
+// tree is empty.
+func (t *Tree[K, V]) Min() *Node[V] {
+ return t.root.min()
+}
+
+func (node *Node[V]) min() *Node[V] {
+ if node == nil {
+ return nil
+ }
+ for {
+ if node.Left == nil {
+ return node
+ }
+ node = node.Left
+ }
+}
+
+// Max returns the maximum value stored in the tree, or nil if the
+// tree is empty.
+func (t *Tree[K, V]) Max() *Node[V] {
+ return t.root.max()
+}
+
+func (node *Node[V]) max() *Node[V] {
+ if node == nil {
+ return nil
+ }
+ for {
+ if node.Right == nil {
+ return node
+ }
+ node = node.Right
+ }
+}
+
// Next returns the in-order successor of cur, or nil if cur holds the
// maximum value in the tree.
func (t *Tree[K, V]) Next(cur *Node[V]) *Node[V] {
	return cur.next()
}

// next returns the in-order successor: the minimum of the right
// subtree if there is one; otherwise the nearest ancestor of which cur
// lies in the left subtree.
func (cur *Node[V]) next() *Node[V] {
	if cur.Right != nil {
		return cur.Right.min()
	}
	child, parent := cur, cur.Parent
	for parent != nil && child == parent.Right {
		child, parent = parent, parent.Parent
	}
	return parent
}

// Prev returns the in-order predecessor of cur, or nil if cur holds
// the minimum value in the tree.
func (t *Tree[K, V]) Prev(cur *Node[V]) *Node[V] {
	return cur.prev()
}

// prev is the mirror image of next: the maximum of the left subtree if
// there is one; otherwise the nearest ancestor of which cur lies in
// the right subtree.
func (cur *Node[V]) prev() *Node[V] {
	if cur.Left != nil {
		return cur.Left.max()
	}
	child, parent := cur, cur.Parent
	for parent != nil && child == parent.Left {
		child, parent = parent, parent.Parent
	}
	return parent
}
+
+func (t *Tree[K, V]) parentChild(node *Node[V]) **Node[V] {
+ switch {
+ case node.Parent == nil:
+ return &t.root
+ case node.Parent.Left == node:
+ return &node.Parent.Left
+ case node.Parent.Right == node:
+ return &node.Parent.Right
+ default:
+ panic(fmt.Errorf("node %p is not a child of its parent %p", node, node.Parent))
+ }
+}
+
// leftRotate rotates the subtree rooted at x to the left, preserving
// the in-order ordering of values.
func (t *Tree[K, V]) leftRotate(x *Node[V]) {
	//        p                        p
	//        |                        |
	//      +---+                    +---+
	//      | x |                    | y |
	//      +---+                    +---+
	//     /     \         =>      /     \
	//    a      +---+          +---+     c
	//           | y |          | x |
	//           +---+          +---+
	//          /     \        /     \
	//         b       c      a       b

	// Define 'p', 'x', 'y', and 'b' per the above diagram.
	p := x.Parent
	pChild := t.parentChild(x) // the link that must end up pointing at y
	y := x.Right
	b := y.Left

	// Move things around

	y.Parent = p
	*pChild = y

	x.Parent = y
	y.Left = x

	if b != nil {
		b.Parent = x
	}
	x.Right = b
}

// rightRotate is the mirror image of leftRotate: it rotates the
// subtree rooted at y to the right, preserving in-order ordering.
func (t *Tree[K, V]) rightRotate(y *Node[V]) {
	//          |                |
	//        +---+            +---+
	//        | y |            | x |
	//        +---+            +---+
	//       /     \    =>    /     \
	//    +---+     c        a     +---+
	//    | x |                    | y |
	//    +---+                    +---+
	//   /     \                  /     \
	//  a       b                b       c

	// Define 'p', 'x', 'y', and 'b' per the above diagram.
	p := y.Parent
	pChild := t.parentChild(y) // the link that must end up pointing at x
	x := y.Left
	b := x.Right

	// Move things around

	x.Parent = p
	*pChild = x

	y.Parent = x
	x.Right = y

	if b != nil {
		b.Parent = y
	}
	y.Left = b
}
+
// Insert adds val to the tree, replacing (in place) any existing value
// with the same key, then restores the red-black invariants.
//
// The re-balancing follows CLRS's RB-INSERT-FIXUP.
func (t *Tree[K, V]) Insert(val V) {
	// Naive-insert

	key := t.KeyFn(val)
	exact, parent := t.root.search(t.exactKey(key))
	if exact != nil {
		// Same key already present: overwrite the value.  The tree's
		// shape is unchanged, so no re-balancing is needed.
		exact.Value = val
		return
	}

	// 'parent' is the node the new leaf hangs off of.  New nodes are
	// red so that black-heights are preserved; only the "no red node
	// has a red child" property can be violated, and the loop below
	// repairs that.
	node := &Node[V]{
		Color:  Red,
		Parent: parent,
		Value:  val,
	}
	if parent == nil {
		t.root = node
	} else if key < t.KeyFn(parent.Value) {
		parent.Left = node
	} else {
		parent.Right = node
	}

	// Re-balance

	for node.Parent.getColor() == Red {
		// node's grandparent exists and is black (parent is red, and
		// a red node is never the root).
		if node.Parent == node.Parent.Parent.Left {
			uncle := node.Parent.Parent.Right
			if uncle.getColor() == Red {
				// Case 1: red uncle — recolor and push the possible
				// violation two levels up.
				node.Parent.Color = Black
				uncle.Color = Black
				node.Parent.Parent.Color = Red
				node = node.Parent.Parent
			} else {
				if node == node.Parent.Right {
					// Case 2: black uncle, node is an inner child —
					// rotate to reduce to case 3.
					node = node.Parent
					t.leftRotate(node)
				}
				// Case 3: black uncle, node is an outer child —
				// recolor and rotate the grandparent; this terminates
				// the loop.
				node.Parent.Color = Black
				node.Parent.Parent.Color = Red
				t.rightRotate(node.Parent.Parent)
			}
		} else {
			// Mirror image of the above.
			uncle := node.Parent.Parent.Left
			if uncle.getColor() == Red {
				node.Parent.Color = Black
				uncle.Color = Black
				node.Parent.Parent.Color = Red
				node = node.Parent.Parent
			} else {
				if node == node.Parent.Left {
					node = node.Parent
					t.rightRotate(node)
				}
				node.Parent.Color = Black
				node.Parent.Parent.Color = Red
				t.leftRotate(node.Parent.Parent)
			}
		}
	}
	// The root is always black; the loop may have left it red.
	t.root.Color = Black
}
+
+func (t *Tree[K, V]) transplant(old, new *Node[V]) {
+ *t.parentChild(old) = new
+ if new != nil {
+ new.Parent = old.Parent
+ }
+}
+
// Delete removes the value with the given key from the tree (a no-op
// if no such value exists), then restores the red-black invariants.
//
// The fix-up loop follows CLRS's RB-DELETE-FIXUP.
func (t *Tree[K, V]) Delete(key K) {
	nodeToDelete := t.Lookup(key)
	if nodeToDelete == nil {
		return
	}

	var nodeToRebalance *Node[V]
	var nodeToRebalanceParent *Node[V] // in case 'nodeToRebalance' is nil, which it can be
	needsRebalance := nodeToDelete.Color == Black

	switch {
	case nodeToDelete.Left == nil:
		// At most one (right) child: splice it in directly.
		nodeToRebalance = nodeToDelete.Right
		nodeToRebalanceParent = nodeToDelete.Parent
		t.transplant(nodeToDelete, nodeToDelete.Right)
	case nodeToDelete.Right == nil:
		// Only a left child: splice it in directly.
		nodeToRebalance = nodeToDelete.Left
		nodeToRebalanceParent = nodeToDelete.Parent
		t.transplant(nodeToDelete, nodeToDelete.Left)
	default:
		// The node being deleted has a child on both sides,
		// so we've got to reshuffle the parents a bit to make
		// room for those children.  'next' is the in-order
		// successor, which has no left child.
		next := nodeToDelete.next()
		if next.Parent == nodeToDelete {
			//         p                  p
			//         |                  |
			//      +-----+            +-----+
			//      | ntd |            | nxt |
			//      +-----+            +-----+
			//      /     \       =>   /     \
			//     a    +-----+       a       b
			//          | nxt |
			//          +-----+
			//          /     \
			//        nil      b
			nodeToRebalance = next.Right
			nodeToRebalanceParent = next

			*t.parentChild(nodeToDelete) = next
			next.Parent = nodeToDelete.Parent

			next.Left = nodeToDelete.Left
			next.Left.Parent = next
		} else {
			//         p                 p
			//         |                 |
			//      +-----+           +-----+
			//      | ntd |           | nxt |
			//      +-----+           +-----+
			//      /     \           /     \
			//     a       x         a       x
			//            / \   =>          / \
			//           y   z             y   z
			//          / \               / \
			//   +-----+   c             b   c
			//   | nxt |
			//   +-----+
			//   /     \
			// nil      b
			y := next.Parent
			b := next.Right
			nodeToRebalance = b
			nodeToRebalanceParent = y

			*t.parentChild(nodeToDelete) = next
			next.Parent = nodeToDelete.Parent

			next.Left = nodeToDelete.Left
			next.Left.Parent = next

			next.Right = nodeToDelete.Right
			next.Right.Parent = next

			y.Left = b
			if b != nil {
				b.Parent = y
			}
		}

		// The spliced-in successor takes over nodeToDelete's color, so
		// the color actually removed from the tree is next's original
		// color; re-balance only if that was black (removing a black
		// node changes black-heights).
		needsRebalance = next.Color == Black
		next.Color = nodeToDelete.Color
	}

	if needsRebalance {
		// 'node' carries an extra unit of blackness that must be
		// pushed up or absorbed; nodeParent is tracked separately
		// because node may be nil.
		node := nodeToRebalance
		nodeParent := nodeToRebalanceParent
		for node != t.root && node.getColor() == Black {
			if node == nodeParent.Left {
				sibling := nodeParent.Right
				if sibling.getColor() == Red {
					// Case 1: red sibling — rotate to get a black
					// sibling, reducing to one of the cases below.
					sibling.Color = Black
					nodeParent.Color = Red
					t.leftRotate(nodeParent)
					sibling = nodeParent.Right
				}
				if sibling.Left.getColor() == Black && sibling.Right.getColor() == Black {
					// Case 2: black sibling with black children —
					// recolor and move the problem up the tree.
					sibling.Color = Red
					node, nodeParent = nodeParent, nodeParent.Parent
				} else {
					if sibling.Right.getColor() == Black {
						// Case 3: sibling's far child is black —
						// rotate to reduce to case 4.
						sibling.Left.Color = Black
						sibling.Color = Red
						t.rightRotate(sibling)
						sibling = nodeParent.Right
					}
					// Case 4: sibling's far child is red — recolor
					// and rotate; this terminates the loop.
					sibling.Color = nodeParent.Color
					nodeParent.Color = Black
					sibling.Right.Color = Black
					t.leftRotate(nodeParent)
					node, nodeParent = t.root, nil
				}
			} else {
				// Mirror image of the above.
				sibling := nodeParent.Left
				if sibling.getColor() == Red {
					sibling.Color = Black
					nodeParent.Color = Red
					t.rightRotate(nodeParent)
					sibling = nodeParent.Left
				}
				if sibling.Right.getColor() == Black && sibling.Left.getColor() == Black {
					sibling.Color = Red
					node, nodeParent = nodeParent, nodeParent.Parent
				} else {
					if sibling.Left.getColor() == Black {
						sibling.Right.Color = Black
						sibling.Color = Red
						t.leftRotate(sibling)
						sibling = nodeParent.Left
					}
					sibling.Color = nodeParent.Color
					nodeParent.Color = Black
					sibling.Left.Color = Black
					t.rightRotate(nodeParent)
					node, nodeParent = t.root, nil
				}
			}
		}
		if node != nil {
			node.Color = Black
		}
	}
}
diff --git a/lib/rbtree/rbtree_test.go b/lib/rbtree/rbtree_test.go
new file mode 100644
index 0000000..9b8e02c
--- /dev/null
+++ b/lib/rbtree/rbtree_test.go
@@ -0,0 +1,126 @@
+package rbtree
+
+import (
+ "sort"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "golang.org/x/exp/constraints"
+)
+
+func checkTree[K constraints.Ordered, V any](t *testing.T, expectedSet map[K]struct{}, tree *Tree[K, V]) {
+ // 1. Every node is either red or black
+
+ // 2. The root is black.
+ require.Equal(t, Black, tree.root.getColor())
+
+ // 3. Every nil is black.
+
+ // 4. If a node is red, then both its children are black.
+ require.NoError(t, tree.Walk(func(node *Node[V]) error {
+ if node.getColor() == Red {
+ require.Equal(t, Black, node.Left.getColor())
+ require.Equal(t, Black, node.Right.getColor())
+ }
+ return nil
+ }))
+
+ // 5. For each node, all simple paths from the node to
+ // descendent leaves contain the same number of black
+ // nodes.
+ var walkCnt func(node *Node[V], cnt int, leafFn func(int))
+ walkCnt = func(node *Node[V], cnt int, leafFn func(int)) {
+ if node.getColor() == Black {
+ cnt++
+ }
+ if node == nil {
+ leafFn(cnt)
+ return
+ }
+ walkCnt(node.Left, cnt, leafFn)
+ walkCnt(node.Right, cnt, leafFn)
+ }
+ require.NoError(t, tree.Walk(func(node *Node[V]) error {
+ var cnts []int
+ walkCnt(node, 0, func(cnt int) {
+ cnts = append(cnts, cnt)
+ })
+ for i := range cnts {
+ if cnts[0] != cnts[i] {
+ require.Truef(t, false, "node %v: not all leafs have same black-count: %v", node.Value, cnts)
+ break
+ }
+ }
+ return nil
+ }))
+
+ // expected contents
+ expectedOrder := make([]K, 0, len(expectedSet))
+ for k := range expectedSet {
+ expectedOrder = append(expectedOrder, k)
+ node := tree.Lookup(k)
+ require.NotNil(t, tree, node)
+ require.Equal(t, k, tree.KeyFn(node.Value))
+ }
+ sort.Slice(expectedOrder, func(i, j int) bool {
+ return expectedOrder[i] < expectedOrder[j]
+ })
+ actOrder := make([]K, 0, len(expectedSet))
+ require.NoError(t, tree.Walk(func(node *Node[V]) error {
+ actOrder = append(actOrder, tree.KeyFn(node.Value))
+ return nil
+ }))
+ require.Equal(t, expectedOrder, actOrder)
+}
+
// FuzzTree drives the tree with a fuzz-generated sequence of
// operations, verifying the red-black invariants after every step.
//
// Each input byte encodes one operation: bit 0b0100_0000 selects
// Insert (set) vs Delete (clear), and the low 6 bits are the value.
func FuzzTree(f *testing.F) {
	Ins := uint8(0b0100_0000)
	Del := uint8(0)

	f.Add([]uint8{})
	f.Add([]uint8{Ins | 5, Del | 5})
	f.Add([]uint8{Ins | 5, Del | 6})
	f.Add([]uint8{Del | 6})

	f.Add([]uint8{ // CLRS Figure 14.4
		Ins | 1,
		Ins | 2,
		Ins | 5,
		Ins | 7,
		Ins | 8,
		Ins | 11,
		Ins | 14,
		Ins | 15,

		Ins | 4,
	})

	f.Fuzz(func(t *testing.T, dat []uint8) {
		tree := &Tree[uint8, uint8]{
			KeyFn: func(x uint8) uint8 { return x },
		}
		// 'set' mirrors the tree's expected contents.
		set := make(map[uint8]struct{})
		checkTree(t, set, tree)
		t.Logf("\n%s\n", tree.ASCIIArt())
		for _, b := range dat {
			ins := (b & 0b0100_0000) != 0
			val := (b & 0b0011_1111)
			if ins {
				t.Logf("Insert(%v)", val)
				tree.Insert(val)
				set[val] = struct{}{}
				t.Logf("\n%s\n", tree.ASCIIArt())
				node := tree.Lookup(val)
				require.NotNil(t, node)
				require.Equal(t, val, node.Value)
			} else {
				t.Logf("Delete(%v)", val)
				tree.Delete(val)
				delete(set, val)
				t.Logf("\n%s\n", tree.ASCIIArt())
				require.Nil(t, tree.Lookup(val))
			}
			checkTree(t, set, tree)
		}
	})
}
diff --git a/lib/rbtree/rbtree_util.go b/lib/rbtree/rbtree_util.go
new file mode 100644
index 0000000..252d8fe
--- /dev/null
+++ b/lib/rbtree/rbtree_util.go
@@ -0,0 +1,48 @@
+package rbtree
+
+import (
+ "reflect"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
// SearchRange is like Search, but returns all nodes that match the
// function; assuming that they are contiguous.
func (t *Tree[K, V]) SearchRange(fn func(V) int) []V {
	// Find an arbitrary node inside the matching run.
	middle := t.Search(fn)
	if middle == nil {
		return nil
	}
	ret := []V{middle.Value}
	// Gather matches to the left; they come out nearest-first, so
	// reverse them to restore ascending order.
	for node := t.Prev(middle); node != nil && fn(node.Value) == 0; node = t.Prev(node) {
		ret = append(ret, node.Value)
	}
	util.ReverseSlice(ret)
	// Then gather matches to the right, already in ascending order.
	for node := t.Next(middle); node != nil && fn(node.Value) == 0; node = t.Next(node) {
		ret = append(ret, node.Value)
	}
	return ret
}
+
+func (t *Tree[K, V]) Equal(u *Tree[K, V]) bool {
+ if (t == nil) != (u == nil) {
+ return false
+ }
+ if t == nil {
+ return true
+ }
+
+ var tSlice []V
+ _ = t.Walk(func(node *Node[V]) error {
+ tSlice = append(tSlice, node.Value)
+ return nil
+ })
+
+ var uSlice []V
+ _ = u.Walk(func(node *Node[V]) error {
+ uSlice = append(uSlice, node.Value)
+ return nil
+ })
+
+ return reflect.DeepEqual(tSlice, uSlice)
+}
diff --git a/lib/rbtree/testdata/fuzz/FuzzTree/be408ce7760bc8ced841300ea7e6bac1a1e9505b1535810083d18db95d86f489 b/lib/rbtree/testdata/fuzz/FuzzTree/be408ce7760bc8ced841300ea7e6bac1a1e9505b1535810083d18db95d86f489
new file mode 100644
index 0000000..40318c6
--- /dev/null
+++ b/lib/rbtree/testdata/fuzz/FuzzTree/be408ce7760bc8ced841300ea7e6bac1a1e9505b1535810083d18db95d86f489
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("aAm0BCrb0x00!0000000000000")
diff --git a/lib/rbtree/testdata/fuzz/FuzzTree/f9e6421dacf921f7bb25d402bffbfdce114baad0b1c8b9a9189b5a97fda27e41 b/lib/rbtree/testdata/fuzz/FuzzTree/f9e6421dacf921f7bb25d402bffbfdce114baad0b1c8b9a9189b5a97fda27e41
new file mode 100644
index 0000000..238e44f
--- /dev/null
+++ b/lib/rbtree/testdata/fuzz/FuzzTree/f9e6421dacf921f7bb25d402bffbfdce114baad0b1c8b9a9189b5a97fda27e41
@@ -0,0 +1,2 @@
+go test fuzz v1
+[]byte("YZAB\x990")
diff --git a/lib/util/bitfield.go b/lib/util/bitfield.go
new file mode 100644
index 0000000..23da17a
--- /dev/null
+++ b/lib/util/bitfield.go
@@ -0,0 +1,50 @@
+package util
+
+import (
+ "fmt"
+ "strings"
+)
+
// BitfieldFormat selects how BitfieldString renders the numeric value
// of a bitfield alongside its bit names.
type BitfieldFormat uint8

const (
	HexNone  = BitfieldFormat(iota) // names only, e.g. "A|B"
	HexLower // lowercase-hex prefix, e.g. "0xc(A|B)"
	HexUpper // uppercase-hex prefix, e.g. "0xC(A|B)"
)

// BitfieldString renders a bitfield as a '|'-separated list of bit
// names (optionally prefixed with the raw value in hex, per cfg).
//
// bitnames[i] names bit i (LSB first); set bits beyond the named range
// render as "(1<<i)".  A zero bitfield renders as "none".
func BitfieldString[T ~uint8 | ~uint16 | ~uint32 | ~uint64](bitfield T, bitnames []string, cfg BitfieldFormat) string {
	var out strings.Builder
	switch cfg {
	case HexNone:
		// no numeric prefix
	case HexLower:
		// (fixed: was "0x%0x(" — the '0' flag is a no-op without a
		// width, and produced identical output)
		fmt.Fprintf(&out, "0x%x(", uint64(bitfield))
	case HexUpper:
		fmt.Fprintf(&out, "0x%X(", uint64(bitfield))
	}
	if bitfield == 0 {
		out.WriteString("none")
	} else {
		// Clear bits one at a time so the loop ends as soon as the
		// highest set bit has been emitted.
		rest := bitfield
		first := true
		for i := 0; rest != 0; i++ {
			if rest&(1<<i) != 0 {
				if !first {
					out.WriteRune('|')
				}
				if i < len(bitnames) {
					out.WriteString(bitnames[i])
				} else {
					fmt.Fprintf(&out, "(1<<%v)", i)
				}
				first = false
			}
			rest &^= 1 << i
		}
	}
	if cfg != HexNone {
		out.WriteRune(')')
	}
	return out.String()
}
diff --git a/lib/util/fmt.go b/lib/util/fmt.go
new file mode 100644
index 0000000..af7404c
--- /dev/null
+++ b/lib/util/fmt.go
@@ -0,0 +1,67 @@
+package util
+
+import (
+ "fmt"
+ "strings"
+)
+
// FmtStateString returns the fmt.Printf directive string that produced
// a given fmt.State and verb (e.g. flags '-' and '#', width 4, verb
// 'x' round-trips to "%-#4x").
func FmtStateString(st fmt.State, verb rune) string {
	var out strings.Builder
	out.WriteByte('%')
	// Flags, in fmt's canonical order.
	for _, flag := range "-+# 0" {
		if st.Flag(int(flag)) {
			out.WriteRune(flag)
		}
	}
	if width, ok := st.Width(); ok {
		fmt.Fprintf(&out, "%v", width)
	}
	if prec, ok := st.Precision(); ok {
		// A precision of 0 is spelled as a bare ".".
		out.WriteByte('.')
		if prec != 0 {
			fmt.Fprintf(&out, "%v", prec)
		}
	}
	out.WriteRune(verb)
	return out.String()
}
+
// FormatByteArrayStringer is a function for helping to implement
// fmt.Formatter for []byte or [n]byte types that have a custom string
// representation.  Use it like:
//
//	type MyType [16]byte
//
//	func (val MyType) String() string {
//		…
//	}
//
//	func (val MyType) Format(f fmt.State, verb rune) {
//		util.FormatByteArrayStringer(val, val[:], f, verb)
//	}
func FormatByteArrayStringer(
	obj interface {
		fmt.Stringer
		fmt.Formatter
	},
	objBytes []byte,
	f fmt.State, verb rune) {
	switch verb {
	case 'v':
		if !f.Flag('#') {
			// Plain %v: render via the type's String method.
			FormatByteArrayStringer(obj, objBytes, f, 's') // as a string
		} else {
			// %#v: Go-syntax, but with the concrete type name in
			// place of "[]byte".
			byteStr := fmt.Sprintf("%#v", objBytes)
			objType := fmt.Sprintf("%T", obj)
			objStr := objType + strings.TrimPrefix(byteStr, "[]byte")
			fmt.Fprintf(f, FmtStateString(f, 's'), objStr)
		}
	case 's', 'q': // string
		fmt.Fprintf(f, FmtStateString(f, verb), obj.String())
	default:
		// Numeric/other verbs apply to the raw bytes, preserving the
		// caller's flags/width/precision.
		fmt.Fprintf(f, FmtStateString(f, verb), objBytes)
	}
}
diff --git a/lib/util/fmt_test.go b/lib/util/fmt_test.go
new file mode 100644
index 0000000..4251ecf
--- /dev/null
+++ b/lib/util/fmt_test.go
@@ -0,0 +1,99 @@
+package util_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
// FmtState is a concrete, inspectable implementation of fmt.State,
// used to exercise util.FmtStateString by round-tripping a state
// through the directive string it produces.
type FmtState struct {
	MWidth     int
	MPrec      int
	MFlagMinus bool
	MFlagPlus  bool
	MFlagSharp bool
	MFlagSpace bool
	MFlagZero  bool
}

// Width implements fmt.State; widths < 1 count as "not set".
func (st FmtState) Width() (int, bool) {
	if st.MWidth < 1 {
		return 0, false
	}
	return st.MWidth, true
}

// Precision implements fmt.State; precisions < 1 count as "not set".
func (st FmtState) Precision() (int, bool) {
	if st.MPrec < 1 {
		return 0, false
	}
	return st.MPrec, true
}

// Flag implements fmt.State, reporting the corresponding MFlagXxx
// field for each of fmt's five flag characters.
func (st FmtState) Flag(b int) bool {
	switch b {
	case '-':
		return st.MFlagMinus
	case '+':
		return st.MFlagPlus
	case '#':
		return st.MFlagSharp
	case ' ':
		return st.MFlagSpace
	case '0':
		return st.MFlagZero
	}
	return false
}

// Write implements fmt.State (io.Writer); the round-trip tests never
// format through a FmtState, so reaching this is a test bug.
func (st FmtState) Write([]byte) (int, error) {
	panic("not implemented")
}

// Format implements fmt.Formatter by recording the incoming state
// into dst, which lets a Sprintf call "parse" a directive string back
// into a FmtState for comparison.
func (dst *FmtState) Format(src fmt.State, verb rune) {
	if width, ok := src.Width(); ok {
		dst.MWidth = width
	}
	if prec, ok := src.Precision(); ok {
		dst.MPrec = prec
	}
	dst.MFlagMinus = src.Flag('-')
	dst.MFlagPlus = src.Flag('+')
	dst.MFlagSharp = src.Flag('#')
	dst.MFlagSpace = src.Flag(' ')
	dst.MFlagZero = src.Flag('0')
}
+
// Verbs eligible for fuzzing: letters only — no 'p', 'T', or 'w'.
const verbs = "abcdefghijklmnoqrstuvxyzABCDEFGHIJKLMNOPQRSUVWXYZ"

// FuzzFmtStateString checks that util.FmtStateString round-trips: a
// synthetic FmtState formatted through the directive string it
// produces must parse back (via FmtState.Format) into an equal state.
func FuzzFmtStateString(f *testing.F) {
	f.Fuzz(func(t *testing.T,
		width, prec uint8,
		flagMinus, flagPlus, flagSharp, flagSpace, flagZero bool,
		verbIdx uint8,
	) {
		// fmt drops the '0' flag when '-' is present, so that
		// combination would never round-trip; normalize it away.
		if flagMinus {
			flagZero = false
		}
		input := FmtState{
			MWidth:     int(width),
			MPrec:      int(prec),
			MFlagMinus: flagMinus,
			MFlagPlus:  flagPlus,
			MFlagSharp: flagSharp,
			MFlagSpace: flagSpace,
			MFlagZero:  flagZero,
		}
		verb := rune(verbs[int(verbIdx)%len(verbs)])

		t.Logf("(%#v, %c) => %q", input, verb, util.FmtStateString(input, verb))

		var output FmtState
		assert.Equal(t, "", fmt.Sprintf(util.FmtStateString(input, verb), &output))
		assert.Equal(t, input, output)
	})
}
diff --git a/lib/util/generic.go b/lib/util/generic.go
new file mode 100644
index 0000000..6882724
--- /dev/null
+++ b/lib/util/generic.go
@@ -0,0 +1,122 @@
+package util
+
+import (
+ "sort"
+ "sync"
+
+ "golang.org/x/exp/constraints"
+)
+
// InSlice reports whether needle occurs anywhere in haystack.
func InSlice[T comparable](needle T, haystack []T) bool {
	for i := range haystack {
		if haystack[i] == needle {
			return true
		}
	}
	return false
}
+
// RemoveAllFromSlice returns haystack with every occurrence of needle
// removed, preserving order.  The result reuses (and therefore
// mutates) haystack's backing array.
func RemoveAllFromSlice[T comparable](haystack []T, needle T) []T {
	kept := haystack[:0]
	for _, straw := range haystack {
		if straw != needle {
			kept = append(kept, straw)
		}
	}
	return kept
}
+
// RemoveAllFromSliceFunc returns haystack with every element for which
// f returns true removed, preserving order.  The result reuses (and
// therefore mutates) haystack's backing array.
func RemoveAllFromSliceFunc[T any](haystack []T, f func(T) bool) []T {
	kept := haystack[:0]
	for _, straw := range haystack {
		if !f(straw) {
			kept = append(kept, straw)
		}
	}
	return kept
}
+
// ReverseSlice reverses slice in place.
func ReverseSlice[T any](slice []T) {
	for lo, hi := 0, len(slice)-1; lo < hi; lo, hi = lo+1, hi-1 {
		slice[lo], slice[hi] = slice[hi], slice[lo]
	}
}
+
+func Max[T constraints.Ordered](a, b T) T {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func Min[T constraints.Ordered](a, b T) T {
+ if a < b {
+ return a
+ }
+ return b
+}
+
// MapKeys returns the keys of m, in unspecified (map-iteration) order.
func MapKeys[K comparable, V any](m map[K]V) []K {
	keys := make([]K, 0, len(m))
	for key := range m {
		keys = append(keys, key)
	}
	return keys
}
+
// SortSlice sorts slice in place in ascending order.
func SortSlice[T constraints.Ordered](slice []T) {
	sort.Slice(slice, func(i, j int) bool {
		return slice[i] < slice[j]
	})
}

// SortedMapKeys returns the keys of m in ascending order.
func SortedMapKeys[K constraints.Ordered, V any](m map[K]V) []K {
	ret := MapKeys(m)
	SortSlice(ret)
	return ret
}
+
+func CmpUint[T constraints.Unsigned](a, b T) int {
+ switch {
+ case a < b:
+ return -1
+ case a == b:
+ return 0
+ default:
+ return 1
+ }
+}
+
// SyncMap is a type-safe wrapper around sync.Map.  The zero value is
// an empty map ready for use.
type SyncMap[K comparable, V any] struct {
	inner sync.Map
}

// Delete removes the entry for key, if one exists.
func (m *SyncMap[K, V]) Delete(key K) { m.inner.Delete(key) }

// Load returns the value stored for key; ok reports whether an entry
// was present.
func (m *SyncMap[K, V]) Load(key K) (value V, ok bool) {
	untyped, ok := m.inner.Load(key)
	if !ok {
		return value, false
	}
	return untyped.(V), true
}

// LoadAndDelete atomically loads and removes the entry for key; loaded
// reports whether an entry was present.
func (m *SyncMap[K, V]) LoadAndDelete(key K) (value V, loaded bool) {
	untyped, loaded := m.inner.LoadAndDelete(key)
	if !loaded {
		return value, false
	}
	return untyped.(V), true
}

// LoadOrStore returns the existing value for key if present; otherwise
// it stores and returns the given value.  loaded is true if a value
// was already present.
func (m *SyncMap[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
	untyped, loaded := m.inner.LoadOrStore(key, value)
	return untyped.(V), loaded
}

// Range calls f for each entry in the map, stopping early if f returns
// false.
func (m *SyncMap[K, V]) Range(f func(key K, value V) bool) {
	m.inner.Range(func(key, value any) bool {
		return f(key.(K), value.(V))
	})
}

// Store sets the value for key.
func (m *SyncMap[K, V]) Store(key K, value V) { m.inner.Store(key, value) }
diff --git a/lib/util/int.go b/lib/util/int.go
new file mode 100644
index 0000000..fab553d
--- /dev/null
+++ b/lib/util/int.go
@@ -0,0 +1,3 @@
+package util
+
+const MaxUint64pp = 0x1_00000000_00000000
diff --git a/lib/util/lru.go b/lib/util/lru.go
new file mode 100644
index 0000000..2b62e69
--- /dev/null
+++ b/lib/util/lru.go
@@ -0,0 +1,73 @@
+package util
+
+import (
+ "sync"
+
+ lru "github.com/hashicorp/golang-lru"
+)
+
// LRUCache is a type-safe wrapper around an ARC cache
// (github.com/hashicorp/golang-lru) with a fixed capacity of 128
// entries.  The zero value is ready to use; the underlying cache is
// created lazily on first access.
type LRUCache[K comparable, V any] struct {
	initOnce sync.Once
	inner    *lru.ARCCache
}

// init lazily allocates the underlying cache, exactly once.
func (c *LRUCache[K, V]) init() {
	c.initOnce.Do(func() {
		// NOTE(review): the error is discarded; presumably NewARC
		// only fails for a non-positive size — confirm against the
		// golang-lru docs.
		c.inner, _ = lru.NewARC(128)
	})
}

// Add stores value under key, possibly evicting an older entry.
func (c *LRUCache[K, V]) Add(key K, value V) {
	c.init()
	c.inner.Add(key, value)
}

// Contains reports whether key is present, without updating recency.
func (c *LRUCache[K, V]) Contains(key K) bool {
	c.init()
	return c.inner.Contains(key)
}

// Get returns the value cached under key, marking it recently used.
func (c *LRUCache[K, V]) Get(key K) (value V, ok bool) {
	c.init()
	_value, ok := c.inner.Get(key)
	if ok {
		value = _value.(V)
	}
	return value, ok
}

// Keys returns the cached keys.
func (c *LRUCache[K, V]) Keys() []K {
	c.init()
	untyped := c.inner.Keys()
	typed := make([]K, len(untyped))
	for i := range untyped {
		typed[i] = untyped[i].(K)
	}
	return typed
}

// Len returns the number of cached entries.
func (c *LRUCache[K, V]) Len() int {
	c.init()
	return c.inner.Len()
}

// Peek returns the value cached under key without updating recency.
func (c *LRUCache[K, V]) Peek(key K) (value V, ok bool) {
	c.init()
	_value, ok := c.inner.Peek(key)
	if ok {
		value = _value.(V)
	}
	return value, ok
}

// Purge removes all entries.
func (c *LRUCache[K, V]) Purge() {
	c.init()
	c.inner.Purge()
}

// Remove removes the entry for key, if any.
func (c *LRUCache[K, V]) Remove(key K) {
	c.init()
	c.inner.Remove(key)
}
+
+func (c *LRUCache[K, V]) GetOrElse(key K, fn func() V) V {
+ var value V
+ var ok bool
+ for value, ok = c.Get(key); !ok; value, ok = c.Get(key) {
+ c.Add(key, fn())
+ }
+ return value
+}
diff --git a/lib/util/ref.go b/lib/util/ref.go
new file mode 100644
index 0000000..1ac48c9
--- /dev/null
+++ b/lib/util/ref.go
@@ -0,0 +1,54 @@
+package util
+
+import (
+ "fmt"
+ "io"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/binstruct"
+)
+
// File is a named, random-access byte store whose offsets are of the
// address type A (some ~int64 address space, e.g. a logical or
// physical btrfs address).
type File[A ~int64] interface {
	Name() string
	Size() (A, error)
	ReadAt(p []byte, off A) (n int, err error)
	WriteAt(p []byte, off A) (n int, err error)
}

// Compile-time checks that File (with a plain int64 address) satisfies
// the standard io interfaces.
var (
	_ io.WriterAt = File[int64](nil)
	_ io.ReaderAt = File[int64](nil)
)

// Ref is a handle to a binstruct-(un)marshalable value of type T
// stored in File at offset Addr; Data is the in-memory copy.
type Ref[A ~int64, T any] struct {
	File File[A]
	Addr A
	Data T
}
+
// Read (re-)populates r.Data from r.File at r.Addr.
//
// The read size is binstruct.StaticSize(r.Data); it is an error for
// unmarshalling to consume fewer bytes than that.
func (r *Ref[A, T]) Read() error {
	size := binstruct.StaticSize(r.Data)
	buf := make([]byte, size)
	if _, err := r.File.ReadAt(buf, r.Addr); err != nil {
		return err
	}
	n, err := binstruct.Unmarshal(buf, &r.Data)
	if err != nil {
		return err
	}
	if n != size {
		return fmt.Errorf("util.Ref[%T].Read: left over data: read %v bytes but only consumed %v",
			r.Data, size, n)
	}
	return nil
}
+
+func (r *Ref[A, T]) Write() error {
+ buf, err := binstruct.Marshal(r.Data)
+ if err != nil {
+ return err
+ }
+ if _, err = r.File.WriteAt(buf, r.Addr); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/lib/util/uuid.go b/lib/util/uuid.go
new file mode 100644
index 0000000..3b4cbaf
--- /dev/null
+++ b/lib/util/uuid.go
@@ -0,0 +1,72 @@
+package util
+
+import (
+ "encoding/hex"
+ "fmt"
+ "strings"
+)
+
+// UUID is a 128-bit universally unique identifier, stored as raw bytes.
+type UUID [16]byte
+
+// String renders the UUID in the canonical dashed 8-4-4-4-12
+// lowercase-hex form.
+func (uuid UUID) String() string {
+	full := hex.EncodeToString(uuid[:])
+	return full[:8] + "-" + full[8:12] + "-" + full[12:16] + "-" + full[16:20] + "-" + full[20:]
+}
+
+// Cmp compares a and b byte-by-byte, returning the (signed) difference
+// of the first unequal byte pair, or 0 if the UUIDs are equal.
+func (a UUID) Cmp(b UUID) int {
+	for i := 0; i < len(a); i++ {
+		if a[i] != b[i] {
+			return int(a[i]) - int(b[i])
+		}
+	}
+	return 0
+}
+
+// Format implements fmt.Formatter by delegating to
+// FormatByteArrayStringer with both the Stringer (uuid) and the raw
+// bytes (uuid[:]).  NOTE(review): presumably this routes %s/%v through
+// String() and %x/%X through the raw bytes — confirm against
+// FormatByteArrayStringer's implementation.
+func (uuid UUID) Format(f fmt.State, verb rune) {
+	FormatByteArrayStringer(uuid, uuid[:], f, verb)
+}
+
+// ParseUUID parses the hexadecimal representation of a UUID.  Dashes
+// are permitted anywhere (and in any number) and are ignored; the hex
+// digits may be upper- or lowercase.  It returns an error if str
+// contains any other byte, or if it holds more or fewer than the 32
+// hex digits of a full UUID.
+func ParseUUID(str string) (UUID, error) {
+	var ret UUID
+	j := 0 // count of hex digits consumed so far
+	for i := 0; i < len(str); i++ {
+		if j >= len(ret)*2 {
+			return UUID{}, fmt.Errorf("too long to be a UUID: %q|%q", str[:i], str[i:])
+		}
+		c := str[i]
+		var v byte
+		switch {
+		case '0' <= c && c <= '9':
+			v = c - '0'
+		case 'a' <= c && c <= 'f':
+			v = c - 'a' + 10
+		case 'A' <= c && c <= 'F':
+			v = c - 'A' + 10
+		case c == '-':
+			continue
+		default:
+			return UUID{}, fmt.Errorf("illegal byte in UUID: %q|%q|%q", str[:i], str[i:i+1], str[i+1:])
+		}
+		if j%2 == 0 {
+			ret[j/2] = v << 4
+		} else {
+			ret[j/2] |= v
+		}
+		j++
+	}
+	// Previously a truncated string (fewer than 32 hex digits) was
+	// silently accepted, yielding a partially-zeroed UUID with a nil
+	// error; reject it explicitly instead.
+	if j != len(ret)*2 {
+		return UUID{}, fmt.Errorf("too short to be a UUID: %q", str)
+	}
+	return ret, nil
+}
+
+// MustParseUUID is like ParseUUID, but panics instead of returning an
+// error; intended for hard-coded UUID literals.
+func MustParseUUID(str string) UUID {
+	uuid, err := ParseUUID(str)
+	if err != nil {
+		panic(err)
+	}
+	return uuid
+}
diff --git a/lib/util/uuid_test.go b/lib/util/uuid_test.go
new file mode 100644
index 0000000..7e0e07a
--- /dev/null
+++ b/lib/util/uuid_test.go
@@ -0,0 +1,63 @@
+package util_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "git.lukeshu.com/btrfs-progs-ng/lib/util"
+)
+
+// TestParseUUID table-tests ParseUUID's success and error paths.
+func TestParseUUID(t *testing.T) {
+	t.Parallel()
+	type testcase struct {
+		Input     string
+		OutputVal util.UUID
+		OutputErr string
+	}
+	testcases := map[string]testcase{
+		"basic":    {Input: "a0dd94ed-e60c-42e8-8632-64e8d4765a43", OutputVal: util.UUID{0xa0, 0xdd, 0x94, 0xed, 0xe6, 0x0c, 0x42, 0xe8, 0x86, 0x32, 0x64, 0xe8, 0xd4, 0x76, 0x5a, 0x43}},
+		"too-long": {Input: "a0dd94ed-e60c-42e8-8632-64e8d4765a43a", OutputErr: `too long to be a UUID: "a0dd94ed-e60c-42e8-8632-64e8d4765a43"|"a"`},
+		"bad char": {Input: "a0dd94ej-e60c-42e8-8632-64e8d4765a43a", OutputErr: `illegal byte in UUID: "a0dd94e"|"j"|"-e60c-42e8-8632-64e8d4765a43a"`},
+	}
+	for tcName, tc := range testcases {
+		tc := tc
+		t.Run(tcName, func(t *testing.T) {
+			t.Parallel()
+			actualVal, actualErr := util.ParseUUID(tc.Input)
+			assert.Equal(t, tc.OutputVal, actualVal)
+			switch tc.OutputErr {
+			case "":
+				assert.NoError(t, actualErr)
+			default:
+				assert.EqualError(t, actualErr, tc.OutputErr)
+			}
+		})
+	}
+}
+
+// TestUUIDFormat table-tests UUID.Format across fmt verbs, widths, and
+// flags.
+func TestUUIDFormat(t *testing.T) {
+	t.Parallel()
+	type testcase struct {
+		InputUUID util.UUID
+		InputFmt  string
+		Output    string
+	}
+	refUUID := util.MustParseUUID("a0dd94ed-e60c-42e8-8632-64e8d4765a43")
+	testcases := map[string]testcase{
+		"s":     {InputUUID: refUUID, InputFmt: "%s", Output: "a0dd94ed-e60c-42e8-8632-64e8d4765a43"},
+		"x":     {InputUUID: refUUID, InputFmt: "%x", Output: "a0dd94ede60c42e8863264e8d4765a43"},
+		"X":     {InputUUID: refUUID, InputFmt: "%X", Output: "A0DD94EDE60C42E8863264E8D4765A43"},
+		"v":     {InputUUID: refUUID, InputFmt: "%v", Output: "a0dd94ed-e60c-42e8-8632-64e8d4765a43"},
+		"40s":   {InputUUID: refUUID, InputFmt: "|% 40s", Output: "|    a0dd94ed-e60c-42e8-8632-64e8d4765a43"},
+		"#115v": {InputUUID: refUUID, InputFmt: "|%#115v", Output: "|  util.UUID{0xa0, 0xdd, 0x94, 0xed, 0xe6, 0xc, 0x42, 0xe8, 0x86, 0x32, 0x64, 0xe8, 0xd4, 0x76, 0x5a, 0x43}"},
+	}
+	for tcName, tc := range testcases {
+		tc := tc
+		t.Run(tcName, func(t *testing.T) {
+			t.Parallel()
+			assert.Equal(t, tc.Output, fmt.Sprintf(tc.InputFmt, tc.InputUUID))
+		})
+	}
+}