refactor: golang-lru → golang-lru/v2
+ remove mentions of ARC

Closes #366
lidel authored and hacdias committed Jun 27, 2023
1 parent a87f9ed commit 9b8dea9
Showing 11 changed files with 139 additions and 128 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -28,6 +28,8 @@ The following emojis are used to highlight certain changes:

### Fixed

- Removed mentions of unused ARC algorithm ([#366](https://github.com/ipfs/boxo/issues/366#issuecomment-1597253540))

### Security

## [0.10.1] - 2023-06-19
2 changes: 1 addition & 1 deletion blockstore/bloom_cache_test.go
@@ -21,7 +21,7 @@ func testBloomCached(ctx context.Context, bs Blockstore) (*bloomcache, error) {
ctx = context.Background()
}
opts := DefaultCacheOpts()
opts.HasARCCacheSize = 0
opts.HasTwoQueueCacheSize = 0
bbs, err := CachedBlockstore(ctx, bs, opts)
if err == nil {
return bbs.(*bloomcache), nil
12 changes: 6 additions & 6 deletions blockstore/caching.go
@@ -12,19 +12,19 @@ import (
type CacheOpts struct {
HasBloomFilterSize int // 1 byte
HasBloomFilterHashes int // No size, 7 is usually best, consult bloom papers
HasARCCacheSize int // 32 bytes
HasTwoQueueCacheSize int // 32 bytes
}

// DefaultCacheOpts returns a CacheOpts initialized with default values.
func DefaultCacheOpts() CacheOpts {
return CacheOpts{
HasBloomFilterSize: 512 << 10,
HasBloomFilterHashes: 7,
HasARCCacheSize: 64 << 10,
HasTwoQueueCacheSize: 64 << 10,
}
}

// CachedBlockstore returns a blockstore wrapped in an ARCCache and
// CachedBlockstore returns a blockstore wrapped in a TwoQueueCache and
// then in a bloom filter cache, if the options indicate it.
func CachedBlockstore(
ctx context.Context,
@@ -33,7 +33,7 @@ func CachedBlockstore(
cbs = bs

if opts.HasBloomFilterSize < 0 || opts.HasBloomFilterHashes < 0 ||
opts.HasARCCacheSize < 0 {
opts.HasTwoQueueCacheSize < 0 {
return nil, errors.New("all options for cache need to be greater than zero")
}

@@ -43,8 +43,8 @@

ctx = metrics.CtxSubScope(ctx, "bs.cache")

if opts.HasARCCacheSize > 0 {
cbs, err = newARCCachedBS(ctx, cbs, opts.HasARCCacheSize)
if opts.HasTwoQueueCacheSize > 0 {
cbs, err = newTwoQueueCachedBS(ctx, cbs, opts.HasTwoQueueCacheSize)
}
if opts.HasBloomFilterSize != 0 {
// *8 because of bytes to bits conversion
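For orientation, the renamed option drops into the same construction flow as before. Below is a minimal, hypothetical sketch of building a cached blockstore with `HasTwoQueueCacheSize`; the boxo/blockstore and go-datastore import paths, the `NewBlockstore` helper, and the in-memory datastore are assumptions drawn from the surrounding ecosystem rather than from this diff.

```go
package main

import (
	"context"
	"fmt"

	blockstore "github.com/ipfs/boxo/blockstore"
	blocks "github.com/ipfs/go-block-format"
	ds "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
)

func main() {
	ctx := context.Background()

	// Plain blockstore over an in-memory datastore (illustration only).
	base := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))

	// Default options now expose HasTwoQueueCacheSize instead of HasARCCacheSize.
	opts := blockstore.DefaultCacheOpts()
	opts.HasTwoQueueCacheSize = 32 << 10 // tune, or set to 0 to disable the 2Q layer

	cached, err := blockstore.CachedBlockstore(ctx, base, opts)
	if err != nil {
		panic(err)
	}

	blk := blocks.NewBlock([]byte("hello"))
	if err := cached.Put(ctx, blk); err != nil {
		panic(err)
	}
	has, _ := cached.Has(ctx, blk.Cid())
	fmt.Println("cached block present:", has)
}
```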
2 changes: 1 addition & 1 deletion blockstore/caching_test.go
@@ -7,7 +7,7 @@ import (

func TestCachingOptsLessThanZero(t *testing.T) {
opts := DefaultCacheOpts()
opts.HasARCCacheSize = -1
opts.HasTwoQueueCacheSize = -1

if _, err := CachedBlockstore(context.TODO(), nil, opts); err == nil {
t.Error("wrong ARC setting was not detected")
63 changes: 33 additions & 30 deletions blockstore/arc_cache.go → blockstore/twoqueue_cache.go
@@ -5,7 +5,7 @@ import (
"sort"
"sync"

lru "github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru/v2"
blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
@@ -20,15 +20,17 @@ type lock struct {
refcnt int
}

// arccache wraps a BlockStore with an Adaptive Replacement Cache (ARC) that
// tqcache wraps a BlockStore with a [TwoQueueCache] that
// does not store the actual blocks, just metadata about them: existence and
// size. This provides block access-time improvements, allowing
// to short-cut many searches without querying the underlying datastore.
type arccache struct {
//
// [TwoQueueCache]: https://pkg.go.dev/github.com/hashicorp/golang-lru/v2#TwoQueueCache
type tqcache struct {
lklk sync.Mutex
lks map[string]*lock

cache *lru.TwoQueueCache
cache *lru.TwoQueueCache[string, any]

blockstore Blockstore
viewer Viewer
@@ -37,24 +39,25 @@ type arccache struct {
total metrics.Counter
}

var _ Blockstore = (*arccache)(nil)
var _ Viewer = (*arccache)(nil)
var _ Blockstore = (*tqcache)(nil)
var _ Viewer = (*tqcache)(nil)

func newARCCachedBS(ctx context.Context, bs Blockstore, lruSize int) (*arccache, error) {
cache, err := lru.New2Q(lruSize)
func newTwoQueueCachedBS(ctx context.Context, bs Blockstore, lruSize int) (*tqcache, error) {
cache, err := lru.New2Q[string, any](lruSize)
if err != nil {
return nil, err
}
c := &arccache{cache: cache, blockstore: bs, lks: make(map[string]*lock)}
c.hits = metrics.NewCtx(ctx, "arc.hits_total", "Number of ARC cache hits").Counter()
c.total = metrics.NewCtx(ctx, "arc_total", "Total number of ARC cache requests").Counter()

c := &tqcache{cache: cache, blockstore: bs, lks: make(map[string]*lock)}
c.hits = metrics.NewCtx(ctx, "boxo_blockstore.cache_hits", "Number of blockstore cache hits").Counter()
c.total = metrics.NewCtx(ctx, "boxo_blockstore.cache_total", "Total number of blockstore cache requests").Counter()
if v, ok := bs.(Viewer); ok {
c.viewer = v
}
return c, nil
}
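As a side note, the constructor now targets the generic API introduced in golang-lru/v2, where key and value types are fixed at construction time instead of using interface{}. Here is a small stand-alone sketch of that API; the string/int types are purely illustrative.

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	// New2Q is generic in v2: keys and values are typed at construction time.
	cache, err := lru.New2Q[string, int](128)
	if err != nil {
		panic(err)
	}

	cache.Add("block-size", 42)
	if size, ok := cache.Get("block-size"); ok {
		fmt.Println("cached size:", size)
	}
	cache.Remove("block-size")
}
```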

func (b *arccache) lock(k string, write bool) {
func (b *tqcache) lock(k string, write bool) {
b.lklk.Lock()
lk, ok := b.lks[k]
if !ok {
@@ -70,7 +73,7 @@ func (b *arccache) lock(k string, write bool) {
}
}

func (b *arccache) unlock(key string, write bool) {
func (b *tqcache) unlock(key string, write bool) {
b.lklk.Lock()
lk := b.lks[key]
lk.refcnt--
@@ -89,7 +92,7 @@ func cacheKey(k cid.Cid) string {
return string(k.Hash())
}

func (b *arccache) DeleteBlock(ctx context.Context, k cid.Cid) error {
func (b *tqcache) DeleteBlock(ctx context.Context, k cid.Cid) error {
if !k.Defined() {
return nil
}
@@ -112,9 +115,9 @@ func (b *arccache) DeleteBlock(ctx context.Context, k cid.Cid) error {
return err
}

func (b *arccache) Has(ctx context.Context, k cid.Cid) (bool, error) {
func (b *tqcache) Has(ctx context.Context, k cid.Cid) (bool, error) {
if !k.Defined() {
logger.Error("undefined cid in arccache")
logger.Error("undefined cid in tqcache")
// Return cache invalid so the call to blockstore happens
// in case of invalid key and correct error is created.
return false, nil
@@ -137,7 +140,7 @@ func (b *arccache) Has(ctx context.Context, k cid.Cid) (bool, error) {
return has, nil
}

func (b *arccache) GetSize(ctx context.Context, k cid.Cid) (int, error) {
func (b *tqcache) GetSize(ctx context.Context, k cid.Cid) (int, error) {
if !k.Defined() {
return -1, ipld.ErrNotFound{Cid: k}
}
@@ -168,7 +171,7 @@ func (b *arccache) GetSize(ctx context.Context, k cid.Cid) (int, error) {
return blockSize, err
}

func (b *arccache) View(ctx context.Context, k cid.Cid, callback func([]byte) error) error {
func (b *tqcache) View(ctx context.Context, k cid.Cid, callback func([]byte) error) error {
// shortcircuit and fall back to Get if the underlying store
// doesn't support Viewer.
if b.viewer == nil {
@@ -212,7 +215,7 @@ func (b *arccache) View(ctx context.Context, k cid.Cid, callback func([]byte) error) error {
return cberr
}

func (b *arccache) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) {
func (b *tqcache) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) {
if !k.Defined() {
return nil, ipld.ErrNotFound{Cid: k}
}
@@ -235,7 +238,7 @@ func (b *arccache) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) {
return bl, err
}

func (b *arccache) Put(ctx context.Context, bl blocks.Block) error {
func (b *tqcache) Put(ctx context.Context, bl blocks.Block) error {
key := cacheKey(bl.Cid())

if has, _, ok := b.queryCache(key); ok && has {
@@ -310,7 +313,7 @@ func newKeyedBlocks(cap int) *keyedBlocks {
}
}

func (b *arccache) PutMany(ctx context.Context, bs []blocks.Block) error {
func (b *tqcache) PutMany(ctx context.Context, bs []blocks.Block) error {
good := newKeyedBlocks(len(bs))
for _, blk := range bs {
// call put on block if result is inconclusive or we are sure that
@@ -348,19 +351,19 @@ func (b *arccache) PutMany(ctx context.Context, bs []blocks.Block) error {
return nil
}

func (b *arccache) HashOnRead(enabled bool) {
func (b *tqcache) HashOnRead(enabled bool) {
b.blockstore.HashOnRead(enabled)
}

func (b *arccache) cacheHave(key string, have bool) {
func (b *tqcache) cacheHave(key string, have bool) {
b.cache.Add(key, cacheHave(have))
}

func (b *arccache) cacheSize(key string, blockSize int) {
func (b *tqcache) cacheSize(key string, blockSize int) {
b.cache.Add(key, cacheSize(blockSize))
}

func (b *arccache) cacheInvalidate(key string) {
func (b *tqcache) cacheInvalidate(key string) {
b.cache.Remove(key)
}

@@ -375,7 +378,7 @@ func (b *arccache) cacheInvalidate(key string) {
//
// When ok is true, exists carries the correct answer, and size carries the
// size, if known, or -1 if not.
func (b *arccache) queryCache(k string) (exists bool, size int, ok bool) {
func (b *tqcache) queryCache(k string) (exists bool, size int, ok bool) {
b.total.Inc()

h, ok := b.cache.Get(k)
@@ -391,18 +394,18 @@ func (b *arccache) queryCache(k string) (exists bool, size int, ok bool) {
return false, -1, false
}
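The middle of queryCache is collapsed in this view. As a reading aid, here is a plausible reconstruction of how the any-typed entries written by cacheHave and cacheSize might be told apart; it is a sketch consistent with the visible lines, not necessarily the exact committed body.

```go
// Hypothetical reconstruction for illustration; the collapsed body may differ.
func (b *tqcache) queryCache(k string) (exists bool, size int, ok bool) {
	b.total.Inc()

	h, ok := b.cache.Get(k)
	if ok {
		b.hits.Inc()
		switch h := h.(type) {
		case cacheHave:
			// Only existence is known; the size is not cached.
			return bool(h), -1, true
		case cacheSize:
			// A cached size implies the block exists.
			return true, int(h), true
		}
	}
	return false, -1, false
}
```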

func (b *arccache) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
func (b *tqcache) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
return b.blockstore.AllKeysChan(ctx)
}

func (b *arccache) GCLock(ctx context.Context) Unlocker {
func (b *tqcache) GCLock(ctx context.Context) Unlocker {
return b.blockstore.(GCBlockstore).GCLock(ctx)
}

func (b *arccache) PinLock(ctx context.Context) Unlocker {
func (b *tqcache) PinLock(ctx context.Context) Unlocker {
return b.blockstore.(GCBlockstore).PinLock(ctx)
}

func (b *arccache) GCRequested(ctx context.Context) bool {
func (b *tqcache) GCRequested(ctx context.Context) bool {
return b.blockstore.(GCBlockstore).GCRequested(ctx)
}