Skip to content

Commit

Permalink
Merge pull request #41 from qingshui/paddlebox
Browse files Browse the repository at this point in the history
add gpu memory status info
  • Loading branch information
qingshui committed Jun 8, 2022
2 parents 0f9e550 + f140966 commit 7aee852
Show file tree
Hide file tree
Showing 4 changed files with 26 additions and 4 deletions.
10 changes: 7 additions & 3 deletions paddle/fluid/memory/allocation/auto_growth_best_fit_allocator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -43,12 +43,15 @@ AutoGrowthBestFitAllocator::AutoGrowthBestFitAllocator(
: underlying_allocator_(
std::make_shared<AlignedAllocator>(underlying_allocator, alignment)),
alignment_(alignment),
chunk_size_(std::max(AlignedSize(chunk_size, alignment), alignment)) {}
chunk_size_(std::max(AlignedSize(chunk_size, alignment), alignment)) {
VLOG(0) << "AutoGrowthBestFitAllocator init";
}

Allocation *AutoGrowthBestFitAllocator::AllocateImpl(size_t size) {
size = AlignedSize(size, alignment_);

std::lock_guard<std::mutex> guard(mtx_);
mem_bytes_.used += size;
auto iter = free_blocks_.lower_bound(std::make_pair(size, nullptr));
BlockIt block_it;
if (iter != free_blocks_.end()) {
Expand Down Expand Up @@ -86,7 +89,7 @@ Allocation *AutoGrowthBestFitAllocator::AllocateImpl(size_t size) {
realloc_size = chunk->allocation_->size();
uint8_t *p = reinterpret_cast<uint8_t *>(chunk->allocation_->ptr());
auto &blocks = chunk->blocks_;

mem_bytes_.total += realloc_size;
size_t remaining_size = realloc_size - size;
if (remaining_size > 0) {
blocks.emplace_back(p, remaining_size, true, chunk);
Expand All @@ -104,7 +107,7 @@ void AutoGrowthBestFitAllocator::FreeImpl(Allocation *allocation) {
std::lock_guard<std::mutex> guard(mtx_);
auto block_it = static_cast<BlockAllocation *>(allocation)->block_it_;
auto &blocks = block_it->chunk_->blocks_;

mem_bytes_.used -= allocation->size();
block_it->is_free_ = true;

if (block_it != blocks.begin()) {
Expand Down Expand Up @@ -152,6 +155,7 @@ uint64_t AutoGrowthBestFitAllocator::FreeIdleChunks() {
++chunk_it;
}
}
mem_bytes_.total -= bytes;
return bytes;
}

Expand Down
12 changes: 12 additions & 0 deletions paddle/fluid/memory/allocation/auto_growth_best_fit_allocator.h
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,12 @@ class AutoGrowthBestFitAllocator : public Allocator {
size_t chunk_size = 0);

bool IsAllocThreadSafe() const override { return true; }
// return real used, total is in alloc
size_t GetTotalMemInfo(size_t *total, size_t *available) {
*total = mem_bytes_.total;
*available = mem_bytes_.total - mem_bytes_.used;
return mem_bytes_.used;
}

protected:
Allocation *AllocateImpl(size_t size) override;
Expand Down Expand Up @@ -87,6 +93,12 @@ class AutoGrowthBestFitAllocator : public Allocator {
size_t chunk_size_;

mutable std::mutex mtx_;

// Byte counters for this allocator's memory accounting. Updated under
// mtx_ in AllocateImpl (used += / total +=) and FreeImpl (used -=);
// FreeIdleChunks decrements total when idle chunks are released.
struct MemBytes {
size_t used = 0;  // bytes currently handed out to callers
size_t total = 0;  // bytes obtained from the underlying allocator
};
// Single accounting instance exposed via GetTotalMemInfo().
MemBytes mem_bytes_;
};

} // namespace allocation
Expand Down
4 changes: 4 additions & 0 deletions paddle/fluid/memory/allocation/retry_allocator.h
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,10 @@ class RetryAllocator : public Allocator {
}

bool IsAllocThreadSafe() const override { return true; }
// Memory usage query: RetryAllocator keeps no accounting of its own, so
// delegate straight to the wrapped allocator. Fills *total (bytes reserved)
// and *available (reserved but unused) and returns the bytes in use.
size_t GetTotalMemInfo(size_t *total, size_t *available) {
  const size_t used_bytes =
      underlying_allocator_->GetTotalMemInfo(total, available);
  return used_bytes;
}

protected:
void FreeImpl(Allocation *allocation) override;
Expand Down
4 changes: 3 additions & 1 deletion paddle/fluid/operators/pull_box_sparse_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,9 @@ static void PullBoxSparseFunctor(const framework::ExecutionContext &ctx) {
int batch_size = -1;
if (slot_idx != -1) {
if (slot_idx > static_cast<int>(slot_size)) {
batch_size = 1;
const auto *slot = inputs[0];
batch_size =
slot->lod().size() ? slot->lod()[0].size() - 1 : slot->dims()[0];
} else {
batch_size = inputs[slot_idx]->dims()[0];
}
Expand Down

0 comments on commit 7aee852

Please sign in to comment.