From f1c9661ce07b329a1aa5bbb5f56c1e6f117b9ebe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=9F=B3=E6=99=93=E4=BC=9F?= <39303645+Shixiaowei02@users.noreply.github.com> Date: Wed, 20 Oct 2021 22:49:54 +0800 Subject: [PATCH] add a candidate dense tensor class, test=develop (#28) --- paddle/pten/common/data_type.h | 7 +- paddle/pten/core/CMakeLists.txt | 4 + paddle/pten/core/allocator.h | 14 +- paddle/pten/core/candidate/CMakeLists.txt | 1 + paddle/pten/core/candidate/dense_tensor.cc | 145 ++++++++++++++ paddle/pten/core/candidate/dense_tensor.h | 188 ++++++++++++++++++ paddle/pten/core/storage.h | 12 +- paddle/pten/core/tensor_base.h | 8 +- paddle/pten/core/utils/CMakeLists.txt | 0 paddle/pten/hapi/lib/CMakeLists.txt | 2 + paddle/pten/hapi/lib/utils/CMakeLists.txt | 3 + paddle/pten/hapi/lib/utils/allocator.cc | 23 +++ paddle/pten/hapi/lib/utils/allocator.h | 47 +++++ paddle/pten/hapi/lib/utils/storage.cc | 39 ++++ paddle/pten/hapi/lib/utils/storage.h | 91 +++++++++ paddle/pten/hapi/lib/utils/tensor_utils.cc | 19 ++ paddle/pten/hapi/lib/utils/tensor_utils.h | 80 ++++++++ .../pten/hapi/lib/utils/tests/CMakeLists.txt | 2 + .../pten/hapi/lib/utils/tests/test_storage.cc | 65 ++++++ .../hapi/lib/utils/tests/test_tensor_utils.cc | 103 ++++++++++ 20 files changed, 838 insertions(+), 15 deletions(-) create mode 100644 paddle/pten/core/candidate/CMakeLists.txt create mode 100644 paddle/pten/core/candidate/dense_tensor.cc create mode 100644 paddle/pten/core/candidate/dense_tensor.h delete mode 100644 paddle/pten/core/utils/CMakeLists.txt create mode 100644 paddle/pten/hapi/lib/utils/CMakeLists.txt create mode 100644 paddle/pten/hapi/lib/utils/allocator.cc create mode 100644 paddle/pten/hapi/lib/utils/allocator.h create mode 100644 paddle/pten/hapi/lib/utils/storage.cc create mode 100644 paddle/pten/hapi/lib/utils/storage.h create mode 100644 paddle/pten/hapi/lib/utils/tensor_utils.cc create mode 100644 paddle/pten/hapi/lib/utils/tensor_utils.h create mode 100644 
paddle/pten/hapi/lib/utils/tests/CMakeLists.txt create mode 100644 paddle/pten/hapi/lib/utils/tests/test_storage.cc create mode 100644 paddle/pten/hapi/lib/utils/tests/test_tensor_utils.cc diff --git a/paddle/pten/common/data_type.h b/paddle/pten/common/data_type.h index 2c0bd96429aa6..f9c6d032f71ed 100644 --- a/paddle/pten/common/data_type.h +++ b/paddle/pten/common/data_type.h @@ -75,8 +75,8 @@ inline size_t SizeOf(DataType data_type) { PADDLE_THROW(platform::errors::Unimplemented( "Data type %d is not supported by tensor.", static_cast(data_type))); - return 0; } + return 0; } #define PT_FOR_EACH_DATA_TYPE(_) \ @@ -84,8 +84,11 @@ inline size_t SizeOf(DataType data_type) { _(int8_t, DataType::INT8) \ _(uint8_t, DataType::UINT8) \ _(int16_t, DataType::INT16) \ - _(int, DataType::INT32) \ + _(uint16_t, DataType::UINT16) \ + _(int32_t, DataType::INT32) \ + _(uint32_t, DataType::UINT32) \ _(int64_t, DataType::INT64) \ + _(uint64_t, DataType::UINT64) \ _(bfloat16, DataType::BFLOAT16) \ _(float16, DataType::FLOAT16) \ _(float, DataType::FLOAT32) \ diff --git a/paddle/pten/core/CMakeLists.txt b/paddle/pten/core/CMakeLists.txt index 448f7123c38b9..ca562332bb79f 100644 --- a/paddle/pten/core/CMakeLists.txt +++ b/paddle/pten/core/CMakeLists.txt @@ -1,3 +1,5 @@ +add_subdirectory(candidate) + IF(WITH_MKLDNN) set(MKLDNN_CTX_DEPS mkldnn) ELSE() @@ -15,3 +17,5 @@ cc_library(dense_tensor SRCS dense_tensor.cc DEPS enforce data_type ddim allocat cc_library(kernel_factory SRCS kernel_factory.cc DEPS enforce) cc_library(kernel_context SRCS kernel_context.cc DEPS enforce device_context) + +cc_library(tensor_base SRCS tensor_base.cc allocator.cc storage.cc DEPS enforce) diff --git a/paddle/pten/core/allocator.h b/paddle/pten/core/allocator.h index b96e695a4f8cf..c16c4ffaa6a37 100644 --- a/paddle/pten/core/allocator.h +++ b/paddle/pten/core/allocator.h @@ -23,6 +23,8 @@ namespace pten { /// deallocation and construction/destruction of objects. 
class RawAllocator { public: + using Place = paddle::platform::Place; + /// \brief Default destructor. virtual ~RawAllocator() = default; @@ -43,7 +45,7 @@ class RawAllocator { /// \brief Get the place value of the allocator and the allocation. /// \return The place value of the allocator and the allocation. - virtual const paddle::platform::Place& place() const = 0; + virtual const Place& place() const = 0; }; /// \brief Fancy pointer with context. The use of this data type @@ -52,24 +54,24 @@ class RawAllocator { /// support being inherited. class Allocation final { public: + using Place = paddle::platform::Place; using DeleterFnPtr = void (*)(void*); Allocation() = default; Allocation(Allocation&&) = default; Allocation& operator=(Allocation&&) = default; - Allocation(void* data, const paddle::platform::Place& place) - : data_(data), place_(place) {} + Allocation(void* data, const Place& place) : data_(data), place_(place) {} Allocation(void* data, void* ctx, DeleterFnPtr ctx_deleter, - const paddle::platform::Place& place) + const Place& place) : data_(data), ctx_(ctx, ctx_deleter), place_(place) {} void* operator->() const noexcept { return data_; } operator bool() const noexcept { return data_ || ctx_.Get(); } - const paddle::platform::Place& place() const noexcept { return place_; } + const Place& place() const noexcept { return place_; } void Clear() noexcept { data_ = nullptr; @@ -132,7 +134,7 @@ class Allocation final { Context ctx_; // TODO(Shixiaowei02): Enum needs to be used instead to reduce // the construction overhead by more than 50%. 
- paddle::platform::Place place_; + Place place_; }; inline void swap(Allocation::Context& a, Allocation::Context& b) noexcept { diff --git a/paddle/pten/core/candidate/CMakeLists.txt b/paddle/pten/core/candidate/CMakeLists.txt new file mode 100644 index 0000000000000..dd670abdba1c1 --- /dev/null +++ b/paddle/pten/core/candidate/CMakeLists.txt @@ -0,0 +1 @@ +cc_library(pten_dense_tensor SRCS dense_tensor.cc DEPS tensor_base) diff --git a/paddle/pten/core/candidate/dense_tensor.cc b/paddle/pten/core/candidate/dense_tensor.cc new file mode 100644 index 0000000000000..325edd1ba077f --- /dev/null +++ b/paddle/pten/core/candidate/dense_tensor.cc @@ -0,0 +1,145 @@ +/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/pten/core/candidate/dense_tensor.h" + +namespace pten { +namespace candidate { + +DenseTensorMeta::DenseTensorMeta(DataType type, const DDim& dims) + : dims(dims), type(type) {} +DenseTensorMeta::DenseTensorMeta(DataType type, + const DDim& dims, + DataLayout layout) + : dims(dims), type(type), layout(layout) {} +DenseTensorMeta::DenseTensorMeta(DataType type, + const DDim& dims, + DataLayout layout, + const std::vector>& lod) + : dims(dims), type(type), layout(layout), lod(lod) {} + +bool DenseTensorMeta::valid() const noexcept { + bool valid{true}; + valid = valid && (type != DataType::UNDEFINED); + valid = valid && (layout != DataLayout::UNDEFINED); + valid = valid && (is_scalar || product(dims)); + return valid; +} + +DenseTensor::DenseTensor(const std::shared_ptr& a, + const DenseTensorMeta& meta) + : meta_(meta), + storage_( + make_intrusive(a, SizeOf(data_type()) * numel())) {} + +DenseTensor::DenseTensor(const std::shared_ptr& a, + DenseTensorMeta&& meta) + : meta_(std::move(meta)), + storage_( + make_intrusive(a, SizeOf(data_type()) * numel())) {} + +DenseTensor::DenseTensor(intrusive_ptr storage, + const DenseTensorMeta& meta) + : meta_(meta), storage_(std::move(storage)) {} + +DenseTensor::DenseTensor(intrusive_ptr storage, DenseTensorMeta&& meta) + : meta_(std::move(meta)), storage_(std::move(storage)) {} + +int64_t DenseTensor::numel() const { + if (meta_.is_scalar) { + return 1; + } + return product(meta_.dims); +} + +bool DenseTensor::SharesStorageWith(const DenseTensor& b) const { + return storage_.get() == b.storage_.get() && storage_.get() != nullptr; +} + +template +T* DenseTensor::mutable_data(size_t request_bytes) { + PADDLE_ENFORCE( + valid(), + paddle::platform::errors::PreconditionNotMet( + "The meta data must be valid when call the mutable data function.")); + PADDLE_ENFORCE_NOT_NULL( + storage_, + paddle::platform::errors::PreconditionNotMet( + "The storage must be valid when call the mutable data function.")); + 
PADDLE_ENFORCE( + (data_type() == paddle::experimental::CppTypeToDataType::Type()), + paddle::platform::errors::PreconditionNotMet( + "The type of data we are trying to retrieve does not match the " + "type of data currently contained in the container.")); + size_t bytes = numel() * SizeOf(data_type()); + if (request_bytes) { + PADDLE_ENFORCE_GE(request_bytes, + bytes, + paddle::platform::errors::InvalidArgument( + "The reserved size %d should be enough to meet the " + "volume required by metadata %d.", + request_bytes, + bytes)); + bytes = request_bytes; + } + if (storage_->size() < bytes) { + storage_->Realloc(bytes); + } + return static_cast(storage_->data()); +} + +template +const T* DenseTensor::data() const { + PADDLE_ENFORCE_NOT_NULL( + storage_, + paddle::platform::errors::PreconditionNotMet( + "The storage must be valid when call the mutable data function.")); + PADDLE_ENFORCE( + (data_type() == paddle::experimental::CppTypeToDataType::Type()), + paddle::platform::errors::PreconditionNotMet( + "The type of data we are trying to retrieve does not match the " + "type of data currently contained in the container.")); + return static_cast(storage_->data()); +} + +void DenseTensor::check_memory_size() const { + size_t bytes = numel() * SizeOf(data_type()); + PADDLE_ENFORCE_GE(memory_size(), + bytes, + paddle::platform::errors::InvalidArgument( + "The memory size %d should be enough to meet the " + "volume required by metadata %d.", + memory_size(), + bytes)); +} + +#define DATA_MEMBER_FUNC_INSTANTIATION(dtype) \ + template dtype* DenseTensor::mutable_data(size_t request_bytes); \ + template const dtype* DenseTensor::data() const; + +DATA_MEMBER_FUNC_INSTANTIATION(int8_t); +DATA_MEMBER_FUNC_INSTANTIATION(uint8_t); +DATA_MEMBER_FUNC_INSTANTIATION(int16_t); +DATA_MEMBER_FUNC_INSTANTIATION(uint16_t); +DATA_MEMBER_FUNC_INSTANTIATION(int32_t); +DATA_MEMBER_FUNC_INSTANTIATION(uint32_t); +DATA_MEMBER_FUNC_INSTANTIATION(int64_t); 
+DATA_MEMBER_FUNC_INSTANTIATION(uint64_t); +DATA_MEMBER_FUNC_INSTANTIATION(float); +DATA_MEMBER_FUNC_INSTANTIATION(double); + +#undef DATA_MEMBER_FUNC_INSTANTIATION + +} // namespace candidate +} // namespace pten diff --git a/paddle/pten/core/candidate/dense_tensor.h b/paddle/pten/core/candidate/dense_tensor.h new file mode 100644 index 0000000000000..21a093439529f --- /dev/null +++ b/paddle/pten/core/candidate/dense_tensor.h @@ -0,0 +1,188 @@ +/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/fluid/framework/ddim.h" +#include "paddle/fluid/platform/place.h" +#include "paddle/pten/common/data_type.h" +#include "paddle/pten/core/allocator.h" +#include "paddle/pten/core/storage.h" +#include "paddle/pten/core/tensor_base.h" + +namespace pten { +namespace candidate { + +using DDim = paddle::framework::DDim; + +/// \brief The meta data of dense tensor. Take the structure type +/// and use all default operations. +/// +struct DenseTensorMeta { + using DataType = paddle::experimental::DataType; + using DataLayout = paddle::experimental::DataLayout; + + DenseTensorMeta() = default; + DenseTensorMeta(DataType type, const DDim& dims); + DenseTensorMeta(DataType type, const DDim& dims, DataLayout layout); + DenseTensorMeta(DataType type, + const DDim& dims, + DataLayout layout, + const std::vector>& lod); + + /// \brief Test whether the metadata is valid. Does not throw exceptions. 
+ /// \return Whether the metadata is valid. + bool valid() const noexcept; + + /// During the entire life cycle of a DenseTensor, the following attributes + /// marked with `const` are expected to remain unchanged. + const bool is_scalar{false}; + DDim dims; + const DataType type{DataType::FLOAT32}; + const DataLayout layout{DataLayout::NCHW}; + std::vector> lod; +}; + +/// \brief The Dense tensor store values in a contiguous sequential block +/// of memory where all values are represented. Tensors or multi-dimensional +/// arrays are used in math operators. +/// During the entire life cycle of a DenseTensor, its device type and key +/// metadata are set unchanged. +class DenseTensor : public TensorBase, + public TypeInfoTraits { + public: + /// \brief Construct a dense tensor and allocate space. + /// \param a The allocator used to allocate space. + /// \param meta The meta data of dense tensor. + DenseTensor(const std::shared_ptr& a, const DenseTensorMeta& meta); + + /// \brief Construct a dense tensor and allocate space. + /// \param a The allocator used to allocate space. + /// \param meta The meta data of dense tensor. + DenseTensor(const std::shared_ptr& a, DenseTensorMeta&& meta); + + /// \brief Use existing storage space to create dense tensor. This interface + /// can be used to deliberately create an uninitialized dense tensor. + /// \param storage The existing storage. + /// \param meta The meta data of dense tensor. + DenseTensor(intrusive_ptr storage, const DenseTensorMeta& meta); + + /// \brief Use existing storage space to create dense tensor. This interface + /// can be used to deliberately create an uninitialized dense tensor. + /// \param storage The existing storage. + /// \param meta The meta data of dense tensor. + DenseTensor(intrusive_ptr storage, DenseTensorMeta&& meta); + + /// \brief Because dense tensor is a kind of container, we give a default + /// constructor to use for stl container. 
But the dense tensor created with + /// the default constructor is not practical. + DenseTensor() = default; + + /// \brief Because dense tensor is a resource handle, we provide a default + /// move constructor to support move semantics. + DenseTensor(DenseTensor&& other) = default; + + /// \brief We do not recommend deep copy of dense tensor because of its + /// efficiency and complexity across devices. The operation is disabled here. + DenseTensor(const DenseTensor& other) = delete; + + /// \brief Destroy the tensor object and release exclusive resources. + virtual ~DenseTensor() = default; + + public: + /// \brief Returns the name of the class for type traits. + /// \return The name of the class. + static const char* name() { return "DenseTensor"; } + + /// \brief Returns the number of elements contained in tensor. + /// \return The number of elements contained in tensor. + int64_t numel() const; + + /// \brief Returns the dims of the tensor. + /// \return The dims of the tensor. + const DDim& dims() const noexcept { return meta_.dims; } + + /// \brief Returns the lod of the tensor. + /// \return The lod of the tensor. + const std::vector>& lod() const noexcept { + return meta_.lod; + } + + /// \brief Returns the data type of the tensor. + /// \return The data type of the tensor. + DataType data_type() const noexcept { return meta_.type; } + + /// \brief Returns the data layout of the tensor. + /// \return The data layout of the tensor. + DataLayout layout() const noexcept { return meta_.layout; } + + /// \brief Returns the data place of the tensor. + /// \return The data place of the tensor. + const Place& place() const { return storage_->place(); } + + /// \brief Test whether the metadata is valid. + /// \return Whether the metadata is valid. + bool valid() const noexcept { return meta_.valid(); } + + /// \brief Test whether the storage is allocated. + /// return Whether the storage is allocated. 
+ bool initialized() const { return storage_->data(); } + + /// \brief Check if storage is shared with other objects. + /// \return Whether the storage is shared with other objects. + bool SharesStorageWith(const DenseTensor& b) const; + + /// \brief Change the dims information in the metadata, and the corresponding + /// memory allocation will occur when the `mutable_data` is called. + /// \param dims The new dims of the dense tensor. + void Resize(const DDim& dims) noexcept { meta_.dims = dims; } + + /// \brief Returns the actual storage size occupied by tensor, may be larger + /// than its shape dims. + /// \return The actual storage size occupied by tensor. + size_t memory_size() const { return storage_->size(); } + + /// \brief Check that the storage area is large enough to hold the data of the + /// metadata size, and throw an exception if the conditions are not met. + void check_memory_size() const; + + /// \brief Release the storage area for other purposes. Because of the + /// destruction of encapsulation, we do not support two dense tensors directly + /// sharing the same intrusive pointer. + /// \return The rvalue of instrusize pointer releated to the released storage. + intrusive_ptr release() { return std::move(storage_); } + + /// \brief Get the mutable data pointer value of type T. + /// Memory allocation may occur when calling this interface: + /// 1. When the storage size is not enough to meet the current shape of the + /// data. + /// 2. When more request_bytes parameters are used to reserve the data + /// storage. + /// param request_bytes The bytes to reserve the data storage. + /// \return The mutable data pointer value of type T. + template + T* mutable_data(size_t request_bytes = 0); + + /// \brief Get the const data pointer value of type T. + /// \return The const data pointer value of type T. 
+ template + const T* data() const; + + private: + DenseTensorMeta meta_; + intrusive_ptr storage_; +}; + +} // namespace candidate +} // namespace pten diff --git a/paddle/pten/core/storage.h b/paddle/pten/core/storage.h index b1c6de7fff8f6..430572e253d6e 100644 --- a/paddle/pten/core/storage.h +++ b/paddle/pten/core/storage.h @@ -19,6 +19,7 @@ limitations under the License. */ #include "boost/intrusive_ptr.hpp" #include "paddle/pten/core/utils/intrusive_ptr.h" #include "paddle/pten/core/utils/intrusive_ref_counter.h" +#include "paddle/pten/core/utils/type_info.h" #include "paddle/fluid/platform/place.h" #include "paddle/pten/core/allocator.h" @@ -30,6 +31,7 @@ namespace pten { /// all default copy operations to ensure the integrity of the package. class Storage : public intrusive_ref_counter { public: + using Place = paddle::platform::Place; Storage() = default; Storage(const Storage&) = delete; @@ -43,7 +45,7 @@ class Storage : public intrusive_ref_counter { void* data() const noexcept { return data_.operator->(); } virtual size_t size() const = 0; - virtual const paddle::platform::Place& place() const = 0; + virtual const Place& place() const = 0; virtual bool OwnsMemory() const = 0; virtual void Realloc(size_t n) = 0; @@ -53,18 +55,20 @@ class Storage : public intrusive_ref_counter { class TensorStorage : public Storage { public: + using Place = paddle::platform::Place; + explicit TensorStorage(const std::shared_ptr& a) : alloc_(a) {} TensorStorage(const std::shared_ptr& a, size_t size) : Storage(Allocate(a, size)), alloc_(a), size_(size) {} ~TensorStorage() = default; + static const char* name() { return "TensorStorage"; } + void Realloc(size_t size) override; size_t size() const noexcept override { return size_; } - const paddle::platform::Place& place() const override { - return data_.place(); - } + const Place& place() const override { return data_.place(); } bool OwnsMemory() const noexcept override { return true; } const std::shared_ptr& allocator() 
const noexcept { return alloc_; diff --git a/paddle/pten/core/tensor_base.h b/paddle/pten/core/tensor_base.h index 58d6975d96900..74cc082646fe2 100644 --- a/paddle/pten/core/tensor_base.h +++ b/paddle/pten/core/tensor_base.h @@ -28,6 +28,8 @@ class TensorBase { public: using DataType = paddle::experimental::DataType; using DataLayout = paddle::experimental::DataLayout; + using DDim = paddle::framework::DDim; + using Place = paddle::platform::Place; virtual ~TensorBase() = default; @@ -37,7 +39,7 @@ class TensorBase { /// \brief Returns the dims of the tensor. /// \return The dims of the tensor. - virtual const paddle::framework::DDim& dims() const = 0; + virtual const DDim& dims() const = 0; /// \brief Returns the data type of the tensor. /// \return The data type of the tensor. @@ -49,7 +51,7 @@ class TensorBase { /// \brief Returns the data place of the tensor. /// \return The data place of the tensor. - virtual const paddle::platform::Place& place() const = 0; + virtual const Place& place() const = 0; /// \brief Test whether the metadata is valid. /// \return Whether the metadata is valid. @@ -59,7 +61,7 @@ class TensorBase { /// return Whether the storage is allocated. virtual bool initialized() const = 0; - virtual pten::Backend backend() const = 0; + virtual paddle::experimental::Backend backend() const { return {}; } /// \brief Return the type information of the derived class to support /// safely downcast in non-rtti environment. 
diff --git a/paddle/pten/core/utils/CMakeLists.txt b/paddle/pten/core/utils/CMakeLists.txt deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/paddle/pten/hapi/lib/CMakeLists.txt b/paddle/pten/hapi/lib/CMakeLists.txt index 54cabb7e69baa..a4726b3d426f6 100644 --- a/paddle/pten/hapi/lib/CMakeLists.txt +++ b/paddle/pten/hapi/lib/CMakeLists.txt @@ -1,3 +1,5 @@ +add_subdirectory(utils) + cc_library(math_api SRCS math.cc DEPS pten) cc_library(linalg_api SRCS linalg.cc DEPS pten) cc_library(creation_api SRCS creation.cc DEPS pten) diff --git a/paddle/pten/hapi/lib/utils/CMakeLists.txt b/paddle/pten/hapi/lib/utils/CMakeLists.txt new file mode 100644 index 0000000000000..4ab33a10dcdc4 --- /dev/null +++ b/paddle/pten/hapi/lib/utils/CMakeLists.txt @@ -0,0 +1,3 @@ +add_subdirectory(tests) + +cc_library(pten_hapi_utils SRCS allocator.cc storage tensor_utils DEPS tensor_base pten_dense_tensor pten_utils) diff --git a/paddle/pten/hapi/lib/utils/allocator.cc b/paddle/pten/hapi/lib/utils/allocator.cc new file mode 100644 index 0000000000000..0c364c97e4d1c --- /dev/null +++ b/paddle/pten/hapi/lib/utils/allocator.cc @@ -0,0 +1,23 @@ +/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/pten/hapi/lib/utils/allocator.h" + +namespace paddle { +namespace experimental { + +memory::Allocator::AllocationDeleter DefaultAllocator::deleter_; + +} // namespace experimental +} // namespace paddle diff --git a/paddle/pten/hapi/lib/utils/allocator.h b/paddle/pten/hapi/lib/utils/allocator.h new file mode 100644 index 0000000000000..8a8569c73edae --- /dev/null +++ b/paddle/pten/hapi/lib/utils/allocator.h @@ -0,0 +1,47 @@ +/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include "paddle/fluid/memory/allocation/allocator.h" +#include "paddle/fluid/memory/malloc.h" +#include "paddle/pten/core/allocator.h" +#include "paddle/pten/core/storage.h" + +namespace paddle { +namespace experimental { + +class DefaultAllocator : public pten::Allocator { + public: + using Allocation = pten::Allocation; + explicit DefaultAllocator(const paddle::platform::Place& place) + : place_(place) {} + + static void Delete(void* data) { + deleter_(static_cast(data)); + } + + Allocation Allocate(size_t bytes_size) override { + paddle::memory::AllocationPtr a = memory::Alloc(place_, bytes_size); + void* ptr = a->ptr(); + return Allocation(ptr, a.release(), &Delete, place_); + } + + private: + paddle::platform::Place place_; + static paddle::memory::Allocator::AllocationDeleter deleter_; +}; + +} // namespace experimental +} // namespace paddle diff --git a/paddle/pten/hapi/lib/utils/storage.cc b/paddle/pten/hapi/lib/utils/storage.cc new file mode 100644 index 0000000000000..0682b25c6e0dd --- /dev/null +++ b/paddle/pten/hapi/lib/utils/storage.cc @@ -0,0 +1,39 @@ +/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/pten/hapi/lib/utils/storage.h" + +namespace paddle { +namespace experimental { + +ExternalStorage::ExternalStorage(void* ptr, + size_t size, + const paddle::platform::Place& place) + : pten::Storage(pten::Allocation(ptr, place)), size_(size) {} + +ExternalStorage::ExternalStorage(const pten::intrusive_ptr& root, + size_t delta, + size_t size) + : Storage(pten::Allocation(static_cast(root->data()) + delta, + root->place())), + size_(size) { + PADDLE_ENFORCE_LE(static_cast(delta + size), + root->size(), + paddle::platform::errors::InvalidArgument( + "The size of the external storage does " + "not meet the metadata requirements.")); +} + +} // namespace experimental +} // namespace paddle diff --git a/paddle/pten/hapi/lib/utils/storage.h b/paddle/pten/hapi/lib/utils/storage.h new file mode 100644 index 0000000000000..996e98416336b --- /dev/null +++ b/paddle/pten/hapi/lib/utils/storage.h @@ -0,0 +1,91 @@ +/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include "paddle/fluid/memory/malloc.h" +#include "paddle/pten/core/storage.h" + +namespace paddle { +namespace experimental { + +class ExternalStorage : public pten::Storage { + public: + ExternalStorage(void* ptr, size_t size, const paddle::platform::Place& place); + ExternalStorage(const pten::intrusive_ptr& root, + size_t delta, + size_t size); + + static const char* name() { return "ExternalStorage"; } + + void Realloc(size_t n) override { + PADDLE_THROW(paddle::platform::errors::Unavailable( + "The external shared storage cannot be reallocated.")); + } + + size_t size() const noexcept override { return size_; } + const paddle::platform::Place& place() const override { + return data_.place(); + } + bool OwnsMemory() const noexcept override { return false; } + + private: + const int64_t size_{0}; +}; + +class SharedStorage : public pten::Storage { + public: + explicit SharedStorage( + const std::shared_ptr& allocation) + : allocation_(allocation) { + CHECK(allocation); + data_ = pten::Allocation(allocation->ptr(), allocation->place()); + size_ = allocation->size(); + } + + static const char* name() { return "SharedStorage"; } + + void Realloc(size_t n) override { + PADDLE_THROW(paddle::platform::errors::Unavailable( + "The external shared storage cannot be reallocated.")); + } + + size_t size() const noexcept override { return size_; } + const paddle::platform::Place& place() const override { + return data_.place(); + } + bool OwnsMemory() const noexcept override { return false; } + + const std::shared_ptr& GetAllocation() { + return allocation_; + } + + private: + int64_t size_{0}; + std::shared_ptr allocation_; +}; + +class TensorStorage : public paddle::memory::allocation::Allocation { + public: + explicit TensorStorage(pten::intrusive_ptr storage) + : paddle::memory::allocation::Allocation( + storage->data(), storage->size(), storage->place()), + storage_(std::move(storage)) {} + + private: + pten::intrusive_ptr storage_; +}; + +} // 
namespace experimental +} // namespace paddle diff --git a/paddle/pten/hapi/lib/utils/tensor_utils.cc b/paddle/pten/hapi/lib/utils/tensor_utils.cc new file mode 100644 index 0000000000000..be7feebe8c206 --- /dev/null +++ b/paddle/pten/hapi/lib/utils/tensor_utils.cc @@ -0,0 +1,19 @@ +/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/pten/hapi/lib/utils/tensor_utils.h" + +namespace paddle { +namespace experimental {} // namespace experimental +} // namespace paddle diff --git a/paddle/pten/hapi/lib/utils/tensor_utils.h b/paddle/pten/hapi/lib/utils/tensor_utils.h new file mode 100644 index 0000000000000..9c726260139e3 --- /dev/null +++ b/paddle/pten/hapi/lib/utils/tensor_utils.h @@ -0,0 +1,80 @@ +/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include + +#include "paddle/fluid/framework/lod_tensor.h" + +#include "paddle/pten/core/candidate/dense_tensor.h" +#include "paddle/pten/core/convert_utils.h" +#include "paddle/pten/hapi/lib/utils/allocator.h" +#include "paddle/pten/hapi/lib/utils/storage.h" + +namespace paddle { +namespace experimental { + +using namespace pten::candidate; // NOLINT + +template +void SetLoD(DstLoD* dst, const SrcLoD& src) { + dst->reserve(src.size()); + dst->clear(); + for (auto&& v : src) { + dst->emplace_back(v); + } +} + +std::shared_ptr MakeSharedDenseTensor( + const paddle::framework::Tensor& src) { + DenseTensorMeta meta{pten::TransToPtDataType(src.type()), + src.dims(), + pten::TransToPtDataLayout(src.layout())}; + auto shared_storage = pten::make_intrusive(src.Holder()); + return std::make_shared(std::move(shared_storage), + std::move(meta)); +} + +std::shared_ptr MakeSharedDenseTensor( + const paddle::framework::LoDTensor& src) { + DenseTensorMeta meta{pten::TransToPtDataType(src.type()), + src.dims(), + pten::TransToPtDataLayout(src.layout())}; + SetLoD(&meta.lod, src.lod()); + auto shared_storage = pten::make_intrusive(src.Holder()); + return std::make_shared(std::move(shared_storage), + std::move(meta)); +} + +void MovesStorage(DenseTensor* src, paddle::framework::Tensor* dst) { + CHECK(src); + CHECK(dst); + dst->Resize(src->dims()); + auto storage = src->release(); + CHECK(storage->OwnsMemory()); + std::shared_ptr holder( + new TensorStorage(std::move(storage))); + dst->ResetHolderWithType(holder, pten::TransToProtoVarType(src->data_type())); +} + +void MovesStorage(DenseTensor* src, paddle::framework::LoDTensor* dst) { + CHECK(src); + CHECK(dst); + SetLoD(dst->mutable_lod(), src->lod()); + MovesStorage(src, static_cast(dst)); +} + +} // namespace experimental +} // namespace paddle diff --git a/paddle/pten/hapi/lib/utils/tests/CMakeLists.txt b/paddle/pten/hapi/lib/utils/tests/CMakeLists.txt new file mode 100644 index 
# Unit tests for the pten <-> fluid interop utilities (storage adapters and
# tensor conversion helpers).
cc_test(test_framework_storage SRCS test_storage.cc DEPS pten_hapi_utils)
cc_test(test_framework_tensor_utils SRCS test_tensor_utils.cc DEPS pten_hapi_utils)
*/ + +#include + +#include "gtest/gtest.h" + +#include "paddle/pten/hapi/lib/utils/allocator.h" +#include "paddle/pten/hapi/lib/utils/storage.h" + +namespace paddle { +namespace experimental { +namespace tests { + +TEST(host_storage, external_stroage) { + const size_t size{100}; + const auto a = + std::make_shared(paddle::platform::CPUPlace()); + pten::intrusive_ptr in_storage = + pten::make_intrusive(a, size); + char* data = static_cast(in_storage->data()); + for (size_t i = 0; i < size; ++i) { + data[i] = i; + } + const size_t delta{1}; + const size_t n{10}; + auto ex_storage = pten::make_intrusive(in_storage, delta, n); + CHECK_EQ(ex_storage->size(), n); + CHECK(paddle::platform::is_cpu_place(ex_storage->place())); + CHECK(!ex_storage->OwnsMemory()); + for (size_t i = delta; i < delta + n; ++i) { + CHECK_EQ(data[i], static_cast(i)); + } +} + +TEST(host_storage, external_vector) { + std::vector data(100); + for (size_t i = 0; i < data.size(); ++i) { + data[i] = i; + } + const size_t delta{1}; + const size_t n{10}; + auto ex_storage = pten::make_intrusive( + data.data(), n, paddle::platform::CPUPlace()); + CHECK_EQ(ex_storage->size(), n); + CHECK(paddle::platform::is_cpu_place(ex_storage->place())); + CHECK(!ex_storage->OwnsMemory()); + for (size_t i = delta; i < delta + n; ++i) { + CHECK_EQ(data[i], static_cast(i)); + } +} +} // namespace tests +} // namespace experimental +} // namespace paddle diff --git a/paddle/pten/hapi/lib/utils/tests/test_tensor_utils.cc b/paddle/pten/hapi/lib/utils/tests/test_tensor_utils.cc new file mode 100644 index 0000000000000..64ef1972d8d5a --- /dev/null +++ b/paddle/pten/hapi/lib/utils/tests/test_tensor_utils.cc @@ -0,0 +1,103 @@ +/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "gtest/gtest.h" + +#include "paddle/pten/hapi/lib/utils/tensor_utils.h" + +namespace paddle { +namespace experimental { +namespace tests { + +using DDim = paddle::framework::DDim; +using DataType = paddle::experimental::DataType; +using DataLayout = paddle::experimental::DataLayout; + +using DenseTensor = pten::candidate::DenseTensor; +using DenseTensorMeta = pten::candidate::DenseTensorMeta; + +TEST(tensor_utils, dense_tensor_to_lod_tensor) { + const DDim dims({2, 1}); + const DataType dtype{DataType::FLOAT32}; + const DataLayout layout{DataLayout::NCHW}; + const std::vector> lod{{0, 2}}; + DenseTensorMeta meta(dtype, dims, layout, lod); + + auto alloc = std::make_shared(platform::CPUPlace()); + + DenseTensor dense_tensor(alloc, meta); + float* data = dense_tensor.mutable_data(); + data[0] = 1.0f; + data[1] = 2.1f; + + framework::LoDTensor lod_tensor; + MovesStorage(&dense_tensor, &lod_tensor); + + CHECK(dense_tensor.lod().size() == lod_tensor.lod().size()); + CHECK(dense_tensor.lod()[0] == + static_cast>((lod_tensor.lod()[0]))); + CHECK(dense_tensor.data_type() == pten::TransToPtDataType(lod_tensor.type())); + CHECK(dense_tensor.layout() == + pten::TransToPtDataLayout(lod_tensor.layout())); + CHECK(platform::is_cpu_place(lod_tensor.place())); + + CHECK(lod_tensor.data()[0] == 1.0f); + CHECK(lod_tensor.data()[1] == 2.1f); + + auto dense_tensor_1 = MakeSharedDenseTensor(lod_tensor); + CHECK(dense_tensor_1->dims() == dims); + CHECK(dense_tensor_1->data_type() == dtype); + CHECK(dense_tensor_1->layout() == layout); + CHECK(dense_tensor_1->lod().size() == 
lod.size()); + CHECK(dense_tensor_1->lod()[0] == lod[0]); + const float* data_1 = dense_tensor_1->data(); + CHECK(data_1[0] == 1.0f); + CHECK(data_1[1] == 2.1f); +} + +TEST(tensor_utils, dense_tensor_to_tensor) { + const DDim dims({2, 1}); + const DataType dtype{DataType::FLOAT32}; + const DataLayout layout{DataLayout::NCHW}; + DenseTensorMeta meta(dtype, dims, layout); + + auto alloc = std::make_shared(platform::CPUPlace()); + + DenseTensor dense_tensor(alloc, meta); + float* data = dense_tensor.mutable_data(); + data[0] = 1.0f; + data[1] = 2.1f; + + framework::Tensor tensor; + MovesStorage(&dense_tensor, &tensor); + + CHECK(dense_tensor.data_type() == pten::TransToPtDataType(tensor.type())); + CHECK(dense_tensor.layout() == pten::TransToPtDataLayout(tensor.layout())); + CHECK(platform::is_cpu_place(tensor.place())); + + CHECK(tensor.data()[0] == 1.0f); + CHECK(tensor.data()[1] == 2.1f); + + auto dense_tensor_1 = MakeSharedDenseTensor(tensor); + CHECK(dense_tensor_1->dims() == dims); + CHECK(dense_tensor_1->data_type() == dtype); + CHECK(dense_tensor_1->layout() == layout); + const float* data_1 = dense_tensor_1->data(); + CHECK(data_1[0] == 1.0f); + CHECK(data_1[1] == 2.1f); +} + +} // namespace tests +} // namespace experimental +} // namespace paddle