From d91f8ce17a6fc165fafd9dc921911233a69c34d2 Mon Sep 17 00:00:00 2001
From: Luke Shumaker
Date: Mon, 9 Jan 2023 14:23:23 -0700
Subject: tree-wide: Migrate to the new ARCache

---
 lib/diskio/file_blockbuf.go | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

(limited to 'lib/diskio/file_blockbuf.go')

diff --git a/lib/diskio/file_blockbuf.go b/lib/diskio/file_blockbuf.go
index 77b823c..15ae13b 100644
--- a/lib/diskio/file_blockbuf.go
+++ b/lib/diskio/file_blockbuf.go
@@ -1,4 +1,4 @@
-// Copyright (C) 2022 Luke Shumaker
+// Copyright (C) 2022-2023 Luke Shumaker
 //
 // SPDX-License-Identifier: GPL-2.0-or-later
 
@@ -19,16 +19,18 @@ type bufferedFile[A ~int64] struct {
 	inner      File[A]
 	mu         sync.RWMutex
 	blockSize  A
-	blockCache *containers.LRUCache[A, bufferedBlock]
+	blockCache containers.ARCache[A, bufferedBlock]
 }
 
 var _ File[assertAddr] = (*bufferedFile[assertAddr])(nil)
 
 func NewBufferedFile[A ~int64](file File[A], blockSize A, cacheSize int) *bufferedFile[A] {
 	return &bufferedFile[A]{
-		inner:      file,
-		blockSize:  blockSize,
-		blockCache: containers.NewLRUCache[A, bufferedBlock](cacheSize),
+		inner:     file,
+		blockSize: blockSize,
+		blockCache: containers.ARCache[A, bufferedBlock]{
+			MaxLen: cacheSize,
+		},
 	}
 }
 
@@ -53,13 +55,13 @@ func (bf *bufferedFile[A]) maybeShortReadAt(dat []byte, off A) (n int, err error
 	defer bf.mu.RUnlock()
 	offsetWithinBlock := off % bf.blockSize
 	blockOffset := off - offsetWithinBlock
-	cachedBlock, ok := bf.blockCache.Get(blockOffset)
+	cachedBlock, ok := bf.blockCache.Load(blockOffset)
 	if !ok {
 		cachedBlock.Dat = make([]byte, bf.blockSize)
 		n, err := bf.inner.ReadAt(cachedBlock.Dat, blockOffset)
 		cachedBlock.Dat = cachedBlock.Dat[:n]
 		cachedBlock.Err = err
-		bf.blockCache.Add(blockOffset, cachedBlock)
+		bf.blockCache.Store(blockOffset, cachedBlock)
 	}
 	n = copy(dat, cachedBlock.Dat[offsetWithinBlock:])
 	if n < len(dat) {
@@ -77,7 +79,7 @@ func (bf *bufferedFile[A]) WriteAt(dat []byte, off A) (n int, err error) {
 
 	// Cache invalidation
 	for blockOffset := off - (off % bf.blockSize); blockOffset < off+A(n); blockOffset += bf.blockSize {
-		bf.blockCache.Remove(blockOffset)
+		bf.blockCache.Delete(blockOffset)
 	}
 
 	return
-- 
cgit v1.2.3-54-g00ecf

From fd89ca3095fc93f503d0cff6e0c380b2b67502f4 Mon Sep 17 00:00:00 2001
From: Luke Shumaker
Date: Mon, 9 Jan 2023 14:35:41 -0700
Subject: diskio: BufferedFile: Add a buffer pool to avoid allocations

---
 lib/diskio/file_blockbuf.go | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

(limited to 'lib/diskio/file_blockbuf.go')

diff --git a/lib/diskio/file_blockbuf.go b/lib/diskio/file_blockbuf.go
index 15ae13b..3db3105 100644
--- a/lib/diskio/file_blockbuf.go
+++ b/lib/diskio/file_blockbuf.go
@@ -7,6 +7,8 @@ package diskio
 
 import (
 	"sync"
 
+	"git.lukeshu.com/go/typedsync"
+
 	"git.lukeshu.com/btrfs-progs-ng/lib/containers"
 )
 
@@ -20,18 +22,28 @@ type bufferedFile[A ~int64] struct {
 	mu         sync.RWMutex
 	blockSize  A
 	blockCache containers.ARCache[A, bufferedBlock]
+	blockPool  typedsync.Pool[[]byte]
 }
 
 var _ File[assertAddr] = (*bufferedFile[assertAddr])(nil)
 
 func NewBufferedFile[A ~int64](file File[A], blockSize A, cacheSize int) *bufferedFile[A] {
-	return &bufferedFile[A]{
+	ret := &bufferedFile[A]{
 		inner:     file,
 		blockSize: blockSize,
 		blockCache: containers.ARCache[A, bufferedBlock]{
 			MaxLen: cacheSize,
 		},
+		blockPool: typedsync.Pool[[]byte]{
+			New: func() []byte {
+				return make([]byte, blockSize)
+			},
+		},
+	}
+	ret.blockCache.OnRemove = func(_ A, buf bufferedBlock) {
+		ret.blockPool.Put(buf.Dat)
 	}
+	return ret
 }
 
 func (bf *bufferedFile[A]) Name() string { return bf.inner.Name() }
@@ -57,7 +69,7 @@ func (bf *bufferedFile[A]) maybeShortReadAt(dat []byte, off A) (n int, err error
 	blockOffset := off - offsetWithinBlock
 	cachedBlock, ok := bf.blockCache.Load(blockOffset)
 	if !ok {
-		cachedBlock.Dat = make([]byte, bf.blockSize)
+		cachedBlock.Dat, _ = bf.blockPool.Get()
 		n, err := bf.inner.ReadAt(cachedBlock.Dat, blockOffset)
 		cachedBlock.Dat = cachedBlock.Dat[:n]
 		cachedBlock.Err = err
-- 
cgit v1.2.3-54-g00ecf
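The buffer pool added in the patch above follows the usual recycle-a-fixed-size-buffer pattern: block-sized []byte slices are handed back to the pool when the cache evicts a block, so steady-state reads stop allocating. The patch itself uses the generic typedsync.Pool wrapper from git.lukeshu.com/go/typedsync; the sketch below shows only the same general idea with the standard library's sync.Pool (not the actual API used in the patch), and the block size and values are made up for illustration.

	// Sketch only: illustrates the pooling pattern with the standard
	// library's sync.Pool rather than the typedsync.Pool used in the patch.
	package main

	import (
		"fmt"
		"sync"
	)

	const blockSize = 4096 // hypothetical block size

	func main() {
		pool := sync.Pool{
			// New is only called when the pool is empty, so once buffers
			// start being returned, Get reuses them instead of allocating.
			New: func() any {
				return make([]byte, blockSize)
			},
		}

		buf := pool.Get().([]byte) // first Get allocates via New
		copy(buf, "some block data")
		fmt.Println(len(buf)) // 4096

		// When a cached block is evicted (OnRemove in the patch), its
		// buffer goes back to the pool for the next read to reuse.
		pool.Put(buf)
	}
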
From f6d5cc3e1ea4a9bb67b86e561f23e609f00727ec Mon Sep 17 00:00:00 2001
From: Luke Shumaker
Date: Wed, 25 Jan 2023 10:49:42 -0700
Subject: diskio: BufferedFile: Refactor to avoid closures and parallel reads

---
 lib/diskio/file_blockbuf.go | 37 +++++++++++++++++++++----------------
 1 file changed, 21 insertions(+), 16 deletions(-)

(limited to 'lib/diskio/file_blockbuf.go')

diff --git a/lib/diskio/file_blockbuf.go b/lib/diskio/file_blockbuf.go
index 3db3105..b7db849 100644
--- a/lib/diskio/file_blockbuf.go
+++ b/lib/diskio/file_blockbuf.go
@@ -34,18 +34,30 @@ func NewBufferedFile[A ~int64](file File[A], blockSize A, cacheSize int) *buffer
 		blockCache: containers.ARCache[A, bufferedBlock]{
 			MaxLen: cacheSize,
 		},
-		blockPool: typedsync.Pool[[]byte]{
-			New: func() []byte {
-				return make([]byte, blockSize)
-			},
-		},
-	}
-	ret.blockCache.OnRemove = func(_ A, buf bufferedBlock) {
-		ret.blockPool.Put(buf.Dat)
 	}
+	ret.blockPool.New = ret.malloc
+	ret.blockCache.OnRemove = ret.free
+	ret.blockCache.New = ret.readBlock
 	return ret
 }
 
+func (bf *bufferedFile[A]) malloc() []byte {
+	return make([]byte, bf.blockSize)
+}
+
+func (bf *bufferedFile[A]) free(_ A, buf bufferedBlock) {
+	bf.blockPool.Put(buf.Dat)
+}
+
+func (bf *bufferedFile[A]) readBlock(blockOffset A) bufferedBlock {
+	dat, _ := bf.blockPool.Get()
+	n, err := bf.inner.ReadAt(dat, blockOffset)
+	return bufferedBlock{
+		Dat: dat[:n],
+		Err: err,
+	}
+}
+
 func (bf *bufferedFile[A]) Name() string { return bf.inner.Name() }
 func (bf *bufferedFile[A]) Size() A { return bf.inner.Size() }
 func (bf *bufferedFile[A]) Close() error { return bf.inner.Close() }
@@ -67,14 +79,7 @@ func (bf *bufferedFile[A]) maybeShortReadAt(dat []byte, off A) (n int, err error
 	defer bf.mu.RUnlock()
 	offsetWithinBlock := off % bf.blockSize
 	blockOffset := off - offsetWithinBlock
-	cachedBlock, ok := bf.blockCache.Load(blockOffset)
-	if !ok {
-		cachedBlock.Dat, _ = bf.blockPool.Get()
-		n, err := bf.inner.ReadAt(cachedBlock.Dat, blockOffset)
-		cachedBlock.Dat = cachedBlock.Dat[:n]
-		cachedBlock.Err = err
-		bf.blockCache.Store(blockOffset, cachedBlock)
-	}
+	cachedBlock, _ := bf.blockCache.Load(blockOffset)
 	n = copy(dat, cachedBlock.Dat[offsetWithinBlock:])
 	if n < len(dat) {
 		return n, cachedBlock.Err
-- 
cgit v1.2.3-54-g00ecf
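The last patch moves the read-and-fill logic out of maybeShortReadAt and into named methods wired up as cache callbacks: blockPool.New allocates, blockCache.OnRemove recycles, and blockCache.New reads a block on a cache miss, so a single Load is enough on the read path. The toy cache below (the miniCache name and its map-backed implementation are invented here, a much-simplified stand-in for containers.ARCache, which additionally handles MaxLen eviction and concurrent readers) only illustrates that fill-on-miss shape.

	// Sketch only: a toy map-backed cache with a New callback, standing in
	// for containers.ARCache purely to show the fill-on-miss pattern the
	// refactor relies on.
	package main

	import "fmt"

	type miniCache[K comparable, V any] struct {
		New   func(K) V // invoked on a miss to compute the value
		items map[K]V
	}

	// Load returns the cached value, calling New to fill a miss first.
	func (c *miniCache[K, V]) Load(k K) (V, bool) {
		if c.items == nil {
			c.items = make(map[K]V)
		}
		v, hit := c.items[k]
		if !hit && c.New != nil {
			v = c.New(k)
			c.items[k] = v
		}
		return v, hit
	}

	func main() {
		reads := 0
		cache := miniCache[int64, string]{
			New: func(off int64) string {
				reads++ // stands in for bf.inner.ReadAt in readBlock
				return fmt.Sprintf("block@%d", off)
			},
		}
		cache.Load(0)
		cache.Load(0) // second Load is a hit; New is not called again
		fmt.Println(reads) // prints 1
	}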