Skip to content

Commit

Permalink
storage: more accurate accounting in spill_key_index
Browse files Browse the repository at this point in the history
Previously, we ignored:
- the inline buffer in `bytes`, which affects the actual memory utilization
- the map value

Actual memory footprint still depends on how these sizes
get rounded up to allocator boundaries, but this is an improvement.
  • Loading branch information
jcsp committed Aug 5, 2022
1 parent c1b1e9c commit 87437ff
Show file tree
Hide file tree
Showing 2 changed files with 19 additions and 9 deletions.
19 changes: 10 additions & 9 deletions src/v/storage/spill_key_index.cc
Original file line number Diff line number Diff line change
Expand Up @@ -85,10 +85,10 @@ ss::future<> spill_key_index::index(

ss::future<> spill_key_index::add_key(compaction_key b, value_type v) {
auto f = ss::now();
auto const key_size = b.size();
auto const expected_size = idx_mem_usage() + _keys_mem_usage + key_size;
auto const entry_size = entry_mem_usage(b);
auto const expected_size = idx_mem_usage() + _keys_mem_usage + entry_size;

auto take_result = _resources.compaction_index_take_bytes(key_size);
auto take_result = _resources.compaction_index_take_bytes(entry_size);
if (_mem_units.count() == 0) {
_mem_units = std::move(take_result.units);
} else {
Expand All @@ -104,8 +104,8 @@ ss::future<> spill_key_index::add_key(compaction_key b, value_type v) {
(take_result.checkpoint_hint && expected_size > min_index_size)
|| expected_size >= _max_mem) {
f = ss::do_until(
[this, key_size, min_index_size] {
size_t total_mem = idx_mem_usage() + _keys_mem_usage + key_size;
[this, entry_size, min_index_size] {
size_t total_mem = idx_mem_usage() + _keys_mem_usage + entry_size;

// Instance-local capacity check
bool local_ok = total_mem < _max_mem;
Expand Down Expand Up @@ -136,9 +136,9 @@ ss::future<> spill_key_index::add_key(compaction_key b, value_type v) {
});
}

return f.then([this, b = std::move(b), v]() mutable {
return f.then([this, entry_size, b = std::move(b), v]() mutable {
// convert iobuf to key
_keys_mem_usage += b.size();
_keys_mem_usage += entry_size;

// No update to _mem_units here: we already took units at top
// of add_key before starting the write.
Expand Down Expand Up @@ -241,8 +241,9 @@ ss::future<> spill_key_index::drain_all_keys() {
},
[this] {
auto node = _midx.extract(_midx.begin());
_keys_mem_usage -= node.key().size();
_mem_units.return_units(node.key().size());
auto mem_usage = entry_mem_usage(node.key());
_keys_mem_usage -= mem_usage;
_mem_units.return_units(mem_usage);
return ss::do_with(
node.key(), node.mapped(), [this](const bytes& k, value_type o) {
return spill(compacted_index::entry_type::key, k, o);
Expand Down
9 changes: 9 additions & 0 deletions src/v/storage/spill_key_index.h
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,15 @@ class spill_key_index final : public compacted_index_writer::impl {
HashtableDebugAccess<underlying_t>;
return debug::AllocatedByteSize(_midx);
}

size_t entry_mem_usage(const compaction_key& k) const {
    // Approximate heap cost of one map entry: the key object and the
    // mapped value are allocated together in the node hash map, so both
    // always count.  The key is a basic_sstring with a small inline
    // buffer; keys longer than bytes_inline_size spill to an external
    // allocation of k.size() bytes, which must be counted on top.
    size_t usage = sizeof(k) + value_sz;
    if (k.size() > bytes_inline_size) {
        usage += k.size();
    }
    return usage;
}

ss::future<> drain_all_keys();
ss::future<> add_key(compaction_key, value_type);
ss::future<> spill(compacted_index::entry_type, bytes_view, value_type);
Expand Down

0 comments on commit 87437ff

Please sign in to comment.