path: root/lib/diskio/file_blockbuf.go
// Copyright (C) 2022-2023  Luke Shumaker <lukeshu@lukeshu.com>
//
// SPDX-License-Identifier: GPL-2.0-or-later

package diskio

import (
	"sync"

	"git.lukeshu.com/go/typedsync"

	"git.lukeshu.com/btrfs-progs-ng/lib/containers"
)

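// bufferedBlock is a single cached block: the bytes that were read at
// a block-aligned offset, plus the error (if any) that the read
// returned.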
type bufferedBlock struct {
	Dat []byte
	Err error
}

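// bufferedFile wraps a File with a read cache of fixed-size blocks.
// Cached blocks are tracked in an ARC (adaptive replacement cache),
// and their backing buffers are recycled through a pool when cache
// entries are evicted.  The RWMutex allows reads to proceed
// concurrently while writes (and their cache invalidation) are
// exclusive.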
type bufferedFile[A ~int64] struct {
	inner      File[A]
	mu         sync.RWMutex
	blockSize  A
	blockCache containers.ARCache[A, bufferedBlock]
	blockPool  typedsync.Pool[[]byte]
}

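// Compile-time check that bufferedFile implements the File interface.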
var _ File[assertAddr] = (*bufferedFile[assertAddr])(nil)

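// NewBufferedFile wraps 'file' with a read cache that holds up to
// 'cacheSize' blocks of 'blockSize' bytes each.  Reads are served from
// the cache where possible; writes go straight through to 'file' and
// invalidate any cached blocks that they overlap.
//
// A minimal usage sketch; the backing file, block size, and cache size
// below are hypothetical, not anything this package prescribes:
//
//	inner := openBackingFile() // hypothetical helper returning a diskio.File[int64]
//	bf := diskio.NewBufferedFile[int64](inner, 4096, 128)
//	buf := make([]byte, 16)
//	if _, err := bf.ReadAt(buf, 0); err != nil {
//		// handle the error
//	}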
func NewBufferedFile[A ~int64](file File[A], blockSize A, cacheSize int) *bufferedFile[A] {
	ret := &bufferedFile[A]{
		inner:     file,
		blockSize: blockSize,
		blockCache: containers.ARCache[A, bufferedBlock]{
			MaxLen: cacheSize,
		},
		blockPool: typedsync.Pool[[]byte]{
			New: func() []byte {
				return make([]byte, blockSize)
			},
		},
	}
	ret.blockCache.OnRemove = func(_ A, buf bufferedBlock) {
		ret.blockPool.Put(buf.Dat)
	}
	return ret
}

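// Name, Size, and Close are passed through to the inner file unchanged.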
func (bf *bufferedFile[A]) Name() string { return bf.inner.Name() }
func (bf *bufferedFile[A]) Size() A      { return bf.inner.Size() }
func (bf *bufferedFile[A]) Close() error { return bf.inner.Close() }

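// ReadAt implements File.  It loops over maybeShortReadAt so that a
// single call may span multiple cached blocks, stopping early only if
// a block's cached read error is hit.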
func (bf *bufferedFile[A]) ReadAt(dat []byte, off A) (n int, err error) {
	done := 0
	for done < len(dat) {
		n, err := bf.maybeShortReadAt(dat[done:], off+A(done))
		done += n
		if err != nil {
			return done, err
		}
	}
	return done, nil
}

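// maybeShortReadAt reads from the single cached block containing off,
// loading (and caching) that block from the inner file on a cache
// miss.  It may return fewer bytes than requested when the request
// crosses a block boundary or the cached block is short; the block's
// cached read error is returned only when the block could not satisfy
// the full request.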
func (bf *bufferedFile[A]) maybeShortReadAt(dat []byte, off A) (n int, err error) {
	bf.mu.RLock()
	defer bf.mu.RUnlock()
	offsetWithinBlock := off % bf.blockSize
	blockOffset := off - offsetWithinBlock
	cachedBlock, ok := bf.blockCache.Load(blockOffset)
	if !ok {
		// Cache miss: grab a buffer from the pool and fill it from
		// the inner file.
		cachedBlock.Dat, _ = bf.blockPool.Get()
		// A buffer that went back to the pool after a short read may
		// have been truncated; restore it to the full block size
		// before reusing it.
		cachedBlock.Dat = cachedBlock.Dat[:bf.blockSize]
		n, err := bf.inner.ReadAt(cachedBlock.Dat, blockOffset)
		cachedBlock.Dat = cachedBlock.Dat[:n]
		cachedBlock.Err = err
		bf.blockCache.Store(blockOffset, cachedBlock)
	}
	n = copy(dat, cachedBlock.Dat[offsetWithinBlock:])
	if n < len(dat) {
		return n, cachedBlock.Err
	}
	return n, nil
}

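// WriteAt implements File.  Writes are not cached: they are passed
// straight to the inner file, and every cached block overlapping the
// written range is invalidated so that subsequent reads observe the
// new data.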
func (bf *bufferedFile[A]) WriteAt(dat []byte, off A) (n int, err error) {
	bf.mu.Lock()
	defer bf.mu.Unlock()

	// Do the work
	n, err = bf.inner.WriteAt(dat, off)

	// Cache invalidation
	for blockOffset := off - (off % bf.blockSize); blockOffset < off+A(n); blockOffset += bf.blockSize {
		bf.blockCache.Delete(blockOffset)
	}

	return
}