add unit tests, test=develop (PaddlePaddle#36910)
Shixiaowei02 committed Nov 2, 2021
1 parent b9defb4 commit e5aa145
Showing 12 changed files with 610 additions and 1 deletion.
2 changes: 1 addition & 1 deletion paddle/pten/core/tensor_meta.h
@@ -54,7 +54,7 @@ struct DenseTensorMeta {
/// marked with `const` are expected to remain unchanged.
const bool is_scalar{false};
DDim dims;
- const DataType type{DataType::FLOAT32};
+ const DataType type{DataType::UNDEFINED};
const DataLayout layout{DataLayout::NCHW};
LoD lod;
};
3 changes: 3 additions & 0 deletions paddle/pten/tests/CMakeLists.txt
@@ -1,3 +1,6 @@
add_subdirectory(core)
add_subdirectory(utils)

cc_test(pten_backend_test SRCS backend_test.cc DEPS gtest)
cc_test(pten_data_layout_test SRCS data_layout_test.cc DEPS gtest)
cc_test(pten_data_type_test SRCS data_type_test.cc DEPS gtest)
3 changes: 3 additions & 0 deletions paddle/pten/tests/core/CMakeLists.txt
@@ -0,0 +1,3 @@
cc_test(test_allocator SRCS test_allocator.cc DEPS tensor_base)
cc_test(test_storage SRCS test_storage.cc DEPS tensor_base)
cc_test(test_dense_tensor SRCS test_dense_tensor.cc DEPS dense_tensor)
89 changes: 89 additions & 0 deletions paddle/pten/tests/core/allocator.h
@@ -0,0 +1,89 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <memory>

#include "paddle/pten/core/allocator.h"

namespace pten {
namespace tests {

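// A RawAllocator that uses global operator new/delete and always reports a host (CPU) place.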
class HostAllocatorSample : public pten::RawAllocator {
public:
using Place = paddle::platform::Place;
void* Allocate(size_t bytes_size) override {
return ::operator new(bytes_size);
}
void Deallocate(void* ptr, size_t bytes_size) override {
return ::operator delete(ptr);
}
const Place& place() const override { return place_; }

private:
Place place_{paddle::platform::CPUPlace()};
};

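// An Allocator that obtains memory from global operator new and hands ownership to the returned Allocation via the Delete callback.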
class FancyAllocator : public pten::Allocator {
public:
static void Delete(void* data) { ::operator delete(data); }

Allocation Allocate(size_t bytes_size) override {
void* data = ::operator new(bytes_size);
return Allocation(data, data, &Delete, paddle::platform::CPUPlace());
}
};

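// A minimal std::allocator-compatible wrapper that routes allocate/deallocate through a shared pten::RawAllocator, so standard containers can exercise it in the tests below.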
template <typename T>
struct CustomAllocator {
using value_type = T;
using Allocator = pten::RawAllocator;

explicit CustomAllocator(const std::shared_ptr<Allocator>& a) noexcept
: alloc_(a) {}

CustomAllocator(const CustomAllocator&) noexcept = default;
T* allocate(std::size_t n) {
return static_cast<T*>(alloc_->Allocate(n * sizeof(T)));
}
void deallocate(T* p, std::size_t n) {
return alloc_->Deallocate(p, sizeof(T) * n);
}

template <typename R, typename U>
friend bool operator==(const CustomAllocator<R>&,
const CustomAllocator<U>&) noexcept;
template <typename R, typename U>
friend bool operator!=(const CustomAllocator<R>&,
const CustomAllocator<U>&) noexcept;

private:
std::shared_ptr<Allocator> alloc_;
};

template <typename T, typename U>
inline bool operator==(const CustomAllocator<T>& lhs,
const CustomAllocator<U>& rhs) noexcept {
return lhs.alloc_ == rhs.alloc_;
}

template <typename T, typename U>
inline bool operator!=(const CustomAllocator<T>& lhs,
const CustomAllocator<U>& rhs) noexcept {
return lhs.alloc_ != rhs.alloc_;
}

} // namespace tests
} // namespace pten
47 changes: 47 additions & 0 deletions paddle/pten/tests/core/random.h
@@ -0,0 +1,47 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <random>
#include <type_traits>

namespace pten {
namespace tests {

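// Produces uniformly distributed random values of an arithmetic type T, selecting an integer or floating-point distribution at compile time.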
template <typename T,
typename =
typename std::enable_if<std::is_arithmetic<T>::value>::type>
class RandomGenerator {
using distribution_type =
typename std::conditional<std::is_integral<T>::value,
std::uniform_int_distribution<T>,
std::uniform_real_distribution<T>>::type;

std::default_random_engine engine;
distribution_type distribution;

public:
auto operator()() -> decltype(distribution(engine)) {
return distribution(engine);
}
};

template <typename Container, typename T = typename Container::value_type>
auto make_generator(Container const&) -> decltype(RandomGenerator<T>()) {
return RandomGenerator<T>();
}

} // namespace tests
} // namespace pten
91 changes: 91 additions & 0 deletions paddle/pten/tests/core/test_allocator.cc
@@ -0,0 +1,91 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <algorithm>
#include <vector>

#include "gtest/gtest.h"
#include "paddle/fluid/framework/generator.h"
#include "paddle/pten/tests/core/allocator.h"
#include "paddle/pten/tests/core/random.h"
#include "paddle/pten/tests/core/timer.h"

namespace pten {
namespace tests {

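// Fills a plain std::vector with random values, copies it into a vector backed by CustomAllocator, and checks that the contents match.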
template <typename T>
bool host_allocator_test(size_t vector_size) {
std::vector<T> src(vector_size);
std::generate(src.begin(), src.end(), make_generator(src));
std::vector<T, CustomAllocator<T>> dst(
src.begin(),
src.end(),
CustomAllocator<T>(std::make_shared<HostAllocatorSample>()));
return std::equal(src.begin(), src.end(), dst.begin());
}

TEST(raw_allocator, host) {
CHECK(host_allocator_test<float>(1000));
CHECK(host_allocator_test<int32_t>(1000));
CHECK(host_allocator_test<int64_t>(1000));
}

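// RAII helper that allocates a raw buffer from a RawAllocator and deallocates it in the destructor.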
class StorageRawAlloc {
public:
StorageRawAlloc(const std::shared_ptr<RawAllocator>& a, size_t size)
: size_(size), alloc_(a) {
data_ = alloc_->Allocate(size);
}
~StorageRawAlloc() { alloc_->Deallocate(data_, size_); }

private:
void* data_;
size_t size_;
std::shared_ptr<RawAllocator> alloc_;
};

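// RAII helper that keeps the Allocator alive alongside the Allocation it produced; cleanup is handled by the Allocation's deleter.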
class StorageFancyAlloc {
public:
StorageFancyAlloc(const std::shared_ptr<Allocator>& a, size_t size)
: alloc_(a), allocation_(a->Allocate(size)) {}

private:
std::shared_ptr<Allocator> alloc_;
Allocation allocation_;
};

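// Rough host-side timing comparison between the RawAllocator-based and Allocation-based storage helpers.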
TEST(benchmark, allocator) {
std::shared_ptr<RawAllocator> raw_allocator(new HostAllocatorSample);
std::shared_ptr<Allocator> fancy_allocator(new FancyAllocator);
const size_t cycles = 100;
Timer timer;
double t1{}, t2{};
for (size_t i = 0; i < cycles; ++i) {
timer.tic();
for (size_t j = 0; j < cycles; ++j) {
StorageRawAlloc(raw_allocator, j * 100);
}
t1 += timer.toc();
timer.tic();
for (size_t j = 0; j < cycles; ++j) {
StorageFancyAlloc(fancy_allocator, j * 100);
}
t2 += timer.toc();
}
std::cout << "The cost of raw alloc is " << t1 << "ms.\n";
std::cout << "The cost of fancy alloc with place is " << t2 << "ms.\n";
}

} // namespace tests
} // namespace pten
127 changes: 127 additions & 0 deletions paddle/pten/tests/core/test_dense_tensor.cc
@@ -0,0 +1,127 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "gtest/gtest.h"

#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/tests/core/allocator.h"

namespace pten {
namespace tests {

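// Covers default, partial, full, copy, and move construction of DenseTensorMeta and its valid() predicate.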
TEST(dense_tensor, meta) {
const DDim dims({1, 2});
const DataType dtype{DataType::INT8};
const DataLayout layout{DataLayout::NHWC};
// TODO(Shixiaowei02): need to check the lod is valid.
const std::vector<std::vector<size_t>> lod{};

DenseTensorMeta meta_0;
CHECK(!meta_0.valid());

DenseTensorMeta meta_1(dtype, dims);
CHECK(meta_1.type == dtype);
CHECK(meta_1.dims == dims);
CHECK(meta_1.valid());

DenseTensorMeta meta_2(dtype, dims, layout);
CHECK(meta_2.type == dtype);
CHECK(meta_2.dims == dims);
CHECK(meta_2.layout == layout);
CHECK(meta_2.valid());

DenseTensorMeta meta_3(dtype, dims, layout, lod);
CHECK(meta_3.type == dtype);
CHECK(meta_3.dims == dims);
CHECK(meta_3.layout == layout);
CHECK(meta_3.lod == lod);
CHECK(meta_3.valid());

DenseTensorMeta meta_4(meta_3);
CHECK(meta_4.type == dtype);
CHECK(meta_4.dims == dims);
CHECK(meta_4.layout == layout);
CHECK(meta_4.lod == lod);
CHECK(meta_4.valid());

DenseTensorMeta meta_5(std::move(meta_4));
CHECK(meta_5.type == dtype);
CHECK(meta_5.dims == dims);
CHECK(meta_5.layout == layout);
CHECK(meta_5.lod == lod);
CHECK(meta_5.valid());
}

TEST(dense_tensor, def_ctor) {
DenseTensor tensor_0;
CHECK(!tensor_0.valid());
}

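// Constructs DenseTensor from an allocator plus metadata, and from an existing TensorStorage, then validates the observable state.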
TEST(dense_tensor, ctor) {
const DDim dims({1, 2});
const DataType dtype{DataType::INT8};
const DataLayout layout{DataLayout::NHWC};
const std::vector<std::vector<size_t>> lod{};
DenseTensorMeta meta(dtype, dims, layout, lod);

auto alloc = std::make_shared<FancyAllocator>();

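// Checks numel, dims, dtype, layout, place, initialization, and self-sharing against the expected metadata.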
auto check_dense_tensor = [](const DenseTensor& t,
const DenseTensorMeta& m) -> bool {
bool r{true};
r = r && (t.numel() == product(m.dims));
r = r && (t.dims() == m.dims);
r = r && (t.data_type() == m.type);
r = r && (t.layout() == m.layout);
r = r && (t.place() == paddle::platform::CPUPlace());
r = r && t.initialized();
r = r && t.IsSharedWith(t);
return r;
};

DenseTensor tensor_0(alloc, meta);
CHECK(check_dense_tensor(tensor_0, meta));

DenseTensor tensor_1(alloc, DenseTensorMeta(meta));
CHECK(check_dense_tensor(tensor_1, meta));

DenseTensor tensor_2(make_intrusive<TensorStorage>(alloc), meta);
CHECK(tensor_2.data<int8_t>() == nullptr);
CHECK_NOTNULL(tensor_2.mutable_data<int8_t>());
CHECK(check_dense_tensor(tensor_2, meta));
}

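// memory_size() should reflect the new dims only after mutable_data() reallocates; release() hands back the underlying storage.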
TEST(dense_tensor, resize) {
const DDim dims({1, 2});
const DataType dtype{DataType::INT8};
const DataLayout layout{DataLayout::NHWC};
const std::vector<std::vector<size_t>> lod{};
DenseTensorMeta meta(dtype, dims, layout, lod);

auto alloc = std::make_shared<FancyAllocator>();
DenseTensor tensor_0(alloc, meta);

CHECK_EQ(tensor_0.memory_size(), 2u);
tensor_0.check_memory_size();
tensor_0.Resize({1, 2, 3});
CHECK_EQ(tensor_0.memory_size(), 2u);
tensor_0.mutable_data<int8_t>();
CHECK_EQ(tensor_0.memory_size(), 6u);

auto storage = tensor_0.release();
CHECK_EQ(storage->size(), 6u);
}

} // namespace tests
} // namespace pten