Skip to content

Commit

Permalink
internal/cache: move the bulk of allocations off the Go heap
Browse files Browse the repository at this point in the history
Use C malloc/free for the bulk of cache allocations. This required
elevating `cache.Value` to a public citizen of the cache package. A
distinction is made between manually managed memory and automatically
managed memory. Weak handles can only be made from values stored in
automatically managed memory. Note that weak handles are only used for
the index, filter, and range-del blocks, so the number of weak handles
is O(num-tables).

A finalizer is set on `*allocCache` and `*Cache` in order to ensure that
any outstanding manually allocated memory is released when these objects
are collected.

When `invariants` are enabled, finalizers are also set on `*Value` and
sstable iterators to ensure that we're not leaking manually managed
memory.

Fixes #11
  • Loading branch information
petermattis committed Feb 12, 2020
1 parent d91aa94 commit b880bab
Show file tree
Hide file tree
Showing 22 changed files with 719 additions and 175 deletions.
6 changes: 5 additions & 1 deletion db_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -689,6 +689,7 @@ func TestIterLeak(t *testing.T) {
t.Fatal(err)
}
} else {
defer iter.Close()
if err := d.Close(); err == nil {
t.Fatalf("expected failure, but found success")
} else if !strings.HasPrefix(err.Error(), "leaked iterators:") {
Expand All @@ -714,7 +715,10 @@ func TestMemTableReservation(t *testing.T) {
// Add a block to the cache. Note that the memtable size is larger than the
// cache size, so opening the DB should cause this block to be evicted.
tmpID := opts.Cache.NewID()
opts.Cache.Set(tmpID, 0, 0, []byte("hello world"))
helloWorld := []byte("hello world")
value := opts.Cache.AllocManual(len(helloWorld))
copy(value.Buf(), helloWorld)
opts.Cache.Set(tmpID, 0, 0, value).Release()

d, err := Open("", opts)
if err != nil {
Expand Down
29 changes: 25 additions & 4 deletions internal/cache/alloc.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,11 @@
package cache

import (
"runtime"
"sync"
"time"

"github.com/cockroachdb/pebble/internal/manual"
"golang.org/x/exp/rand"
)

Expand All @@ -30,8 +32,17 @@ var allocPool = sync.Pool{
},
}

// allocNew allocates a slice of size n. The use of sync.Pool provides a
// per-cpu cache of allocCache structures to allocate from.
// allocNew allocates a non-garbage collected slice of size n. Every call to
// allocNew() MUST be paired with a call to allocFree(). Failure to do so will
// result in a memory leak. The use of sync.Pool provides a per-cpu cache of
// allocCache structures to allocate from.
//
// TODO(peter): Is the allocCache still necessary for performance? Before the
// introduction of manual memory management, the allocCache dramatically
// reduced GC pressure by reducing allocation bandwidth. It no longer serves
// this purpose because manual.{New,Free} don't produce any GC pressure. Will
// need to run benchmark workloads to see if this can be removed which would
// allow the removal of the one required use of runtime.SetFinalizer.
func allocNew(n int) []byte {
a := allocPool.Get().(*allocCache)
b := a.alloc(n)
Expand Down Expand Up @@ -73,12 +84,20 @@ func newAllocCache() *allocCache {
bufs: make([][]byte, 0, allocCacheCountLimit),
}
c.rnd.Seed(uint64(time.Now().UnixNano()))
runtime.SetFinalizer(c, freeAllocCache)
return c
}

// freeAllocCache releases every buffer still held by an allocCache back to
// the manual allocator. It is installed as the finalizer for *allocCache (see
// newAllocCache) so that manually allocated memory is not leaked when the
// cache itself is garbage collected.
func freeAllocCache(obj interface{}) {
	c := obj.(*allocCache)
	for _, buf := range c.bufs {
		manual.Free(buf)
	}
}

func (c *allocCache) alloc(n int) []byte {
if n < allocCacheMinSize || n >= allocCacheMaxSize {
return make([]byte, n)
return manual.New(n)
}

class := sizeToClass(n)
Expand All @@ -92,12 +111,13 @@ func (c *allocCache) alloc(n int) []byte {
}
}

return make([]byte, n, classToSize(class))
return manual.New(classToSize(class))[:n]
}

func (c *allocCache) free(b []byte) {
n := cap(b)
if n < allocCacheMinSize || n >= allocCacheMaxSize {
manual.Free(b)
return
}
b = b[:n:n]
Expand All @@ -117,6 +137,7 @@ func (c *allocCache) free(b []byte) {
// are biased, but that is fine for the usage here.
j := (uint32(len(c.bufs)) * (uint32(c.rnd.Uint64()) & ((1 << 16) - 1))) >> 16
c.size -= cap(c.bufs[j])
manual.Free(c.bufs[j])
c.bufs[i], c.bufs[j] = nil, c.bufs[i]
c.bufs = c.bufs[:i]
}
Expand Down
8 changes: 5 additions & 3 deletions internal/cache/alloc_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,12 +7,14 @@ package cache
import (
"testing"
"unsafe"

"github.com/cockroachdb/pebble/internal/manual"
)

func TestAllocCache(t *testing.T) {
c := newAllocCache()
for i := 0; i < 64; i++ {
c.free(make([]byte, 1025))
c.free(manual.New(1025))
if c.size == 0 {
t.Fatalf("expected cache size to be non-zero")
}
Expand All @@ -34,7 +36,7 @@ func TestAllocCache(t *testing.T) {
func TestAllocCacheEvict(t *testing.T) {
c := newAllocCache()
for i := 0; i < allocCacheCountLimit; i++ {
c.free(make([]byte, 1024))
c.free(manual.New(1024))
}

bufs := make([][]byte, allocCacheCountLimit)
Expand All @@ -61,7 +63,7 @@ func BenchmarkAllocCache(b *testing.B) {
// Populate the cache with buffers of one size class.
c := newAllocCache()
for i := 0; i < allocCacheCountLimit; i++ {
c.free(make([]byte, 1024))
c.free(manual.New(1024))
}

// Benchmark allocating buffers of a different size class.
Expand Down
Loading

0 comments on commit b880bab

Please sign in to comment.