diff --git a/paddle/pten/core/CMakeLists.txt b/paddle/pten/core/CMakeLists.txt index d89b3c9fefb59..7c8ace2bc7ef4 100644 --- a/paddle/pten/core/CMakeLists.txt +++ b/paddle/pten/core/CMakeLists.txt @@ -16,6 +16,9 @@ cc_library(lod_utils SRCS lod_utils.cc DEPS enforce mixed_vector) cc_library(dense_tensor SRCS dense_tensor.cc DEPS convert_utils tensor_meta tensor_base) cc_library(pten_device_context SRCS device_context.cc DEPS tensor_base ) + +cc_library(meta_tensor SRCS meta_tensor.cc DEPS tensor_base tensor_meta dense_tensor) + cc_test(unroll_array_ops_test SRCS unroll_array_ops_test.cc) cc_library(ddim SRCS ddim.cc DEPS eigen3 boost enforce) cc_test(ddim_test SRCS ddim_test.cc DEPS ddim) diff --git a/paddle/pten/core/kernel_registry.h b/paddle/pten/core/kernel_registry.h index 194ab52d25688..e1160ea6b7d5d 100644 --- a/paddle/pten/core/kernel_registry.h +++ b/paddle/pten/core/kernel_registry.h @@ -24,6 +24,7 @@ #include "paddle/pten/core/kernel_def.h" #include "paddle/pten/core/kernel_factory.h" #include "paddle/pten/core/kernel_utils.h" +#include "paddle/pten/core/macros.h" #include "paddle/fluid/platform/enforce.h" @@ -158,33 +159,6 @@ struct KernelRegistrar { } }; -#define PT_STATIC_ASSERT_GLOBAL_NAMESPACE(uniq_name, msg) \ - _PT_STATIC_ASSERT_GLOBAL_NAMESPACE(uniq_name, msg) - -#define _PT_STATIC_ASSERT_GLOBAL_NAMESPACE(uniq_name, msg) \ - struct __test_global_namespace_##uniq_name##__ {}; \ - static_assert(std::is_same<::__test_global_namespace_##uniq_name##__, \ - __test_global_namespace_##uniq_name##__>::value, \ - msg) - -#ifdef __COUNTER__ -#define PT_ID __COUNTER__ -#else -#define PT_ID __LINE__ -#endif - -#if defined(_WIN32) -#define UNUSED -#define __builtin_expect(EXP, C) (EXP) -#else -#define UNUSED __attribute__((unused)) -#endif - -#define PT_CONCATENATE(arg1, arg2) PT_CONCATENATE1(arg1, arg2) -#define PT_CONCATENATE1(arg1, arg2) PT_CONCATENATE2(arg1, arg2) -#define PT_CONCATENATE2(arg1, arg2) arg1##arg2 -#define PT_EXPAND(x) x - /** * Reference: * @@ -834,6 +808,9 @@ struct KernelRegistrar { * to avoid being removed by linker */ #define PT_DECLARE_KERNEL(kernel_name, backend, layout) \ + PT_STATIC_ASSERT_GLOBAL_NAMESPACE( \ + pt_declare_tp_kernel_ns_check_##kernel_name##_##backend##_##layout, \ + "PT_DECLARE_KERNEL must be called in global namespace."); \ extern int TouchKernelSymbolFor_##kernel_name##_##backend##_##layout(); \ UNUSED static int \ __declare_kernel_symbol_for_##kernel_name##_##backend##_##layout = \ diff --git a/paddle/pten/core/macros.h b/paddle/pten/core/macros.h new file mode 100644 index 0000000000000..fec67b1a3dc25 --- /dev/null +++ b/paddle/pten/core/macros.h @@ -0,0 +1,56 @@ +/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +namespace pten { + +// Disable the copy and assignment operator for a class. 
+#ifndef DISABLE_COPY_AND_ASSIGN +#define DISABLE_COPY_AND_ASSIGN(classname) \ + private: \ + classname(const classname&) = delete; \ + classname(classname&&) = delete; \ + classname& operator=(const classname&) = delete; \ + classname& operator=(classname&&) = delete +#endif + +#define PT_STATIC_ASSERT_GLOBAL_NAMESPACE(uniq_name, msg) \ + _PT_STATIC_ASSERT_GLOBAL_NAMESPACE(uniq_name, msg) + +#define _PT_STATIC_ASSERT_GLOBAL_NAMESPACE(uniq_name, msg) \ + struct __test_global_namespace_##uniq_name##__ {}; \ + static_assert(std::is_same<::__test_global_namespace_##uniq_name##__, \ + __test_global_namespace_##uniq_name##__>::value, \ + msg) + +#ifdef __COUNTER__ +#define PT_ID __COUNTER__ +#else +#define PT_ID __LINE__ +#endif + +#if defined(_WIN32) +#define UNUSED +#define __builtin_expect(EXP, C) (EXP) +#else +#define UNUSED __attribute__((unused)) +#endif + +#define PT_CONCATENATE(arg1, arg2) PT_CONCATENATE1(arg1, arg2) +#define PT_CONCATENATE1(arg1, arg2) PT_CONCATENATE2(arg1, arg2) +#define PT_CONCATENATE2(arg1, arg2) arg1##arg2 +#define PT_EXPAND(x) x + +} // namespace pten diff --git a/paddle/pten/core/meta_tensor.cc b/paddle/pten/core/meta_tensor.cc new file mode 100644 index 0000000000000..f52d771b73bb9 --- /dev/null +++ b/paddle/pten/core/meta_tensor.cc @@ -0,0 +1,86 @@ +/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/
+
+#include "paddle/pten/core/meta_tensor.h"
+
+#include "paddle/pten/core/compat_utils.h"
+#include "paddle/pten/core/dense_tensor.h"
+
+#include "paddle/fluid/platform/enforce.h"
+
+namespace pten {
+
+int64_t MetaTensor::numel() const { return tensor_->numel(); }
+
+DDim MetaTensor::dims() const { return tensor_->dims(); }
+
+DataType MetaTensor::dtype() const { return tensor_->dtype(); }
+
+DataLayout MetaTensor::layout() const { return tensor_->layout(); }
+
+void MetaTensor::set_dims(const DDim& dims) {
+  if (pten::DenseTensor::classof(tensor_)) {
+    CompatibleDenseTensorUtils::GetMutableMeta(
+        static_cast<DenseTensor*>(tensor_))
+        ->dims = dims;
+  } else {
+    PADDLE_THROW(paddle::platform::errors::Unimplemented(
+        "Unsupported setting dims for `%s`.", tensor_->type_info().name()));
+  }
+}
+
+void MetaTensor::set_dtype(DataType dtype) {
+  if (pten::DenseTensor::classof(tensor_)) {
+    CompatibleDenseTensorUtils::GetMutableMeta(
+        static_cast<DenseTensor*>(tensor_))
+        ->dtype = dtype;
+  } else {
+    PADDLE_THROW(paddle::platform::errors::Unimplemented(
+        "Unsupported setting dtype for `%s`.", tensor_->type_info().name()));
+  }
+}
+
+void MetaTensor::set_layout(DataLayout layout) {
+  if (pten::DenseTensor::classof(tensor_)) {
+    CompatibleDenseTensorUtils::GetMutableMeta(
+        static_cast<DenseTensor*>(tensor_))
+        ->layout = layout;
+  } else {
+    PADDLE_THROW(paddle::platform::errors::Unimplemented(
+        "Unsupported setting layout for `%s`.", tensor_->type_info().name()));
+  }
+}
+
+void MetaTensor::share_lod(const MetaTensor& meta_tensor) {
+  if (pten::DenseTensor::classof(tensor_)) {
+    CompatibleDenseTensorUtils::GetMutableMeta(
+        static_cast<DenseTensor*>(tensor_))
+        ->lod = meta_tensor.lod();
+  } else {
+    PADDLE_THROW(paddle::platform::errors::Unimplemented(
+        "Unsupported share lod inplace for `%s`.",
+        tensor_->type_info().name()));
+  }
+}
+
+const LoD& MetaTensor::lod() const {
+  if (pten::DenseTensor::classof(tensor_)) {
+    return static_cast<DenseTensor*>(tensor_)->lod();
+  } else {
+    PADDLE_THROW(paddle::platform::errors::Unimplemented(
+        "Unsupported getting lod of `%s`.", tensor_->type_info().name()));
+  }
+}
+
+} // namespace pten
diff --git a/paddle/pten/core/meta_tensor.h b/paddle/pten/core/meta_tensor.h
new file mode 100644
index 0000000000000..4273aa6f85b4e
--- /dev/null
+++ b/paddle/pten/core/meta_tensor.h
@@ -0,0 +1,54 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include "paddle/pten/common/data_type.h"
+#include "paddle/pten/common/layout.h"
+#include "paddle/pten/core/macros.h"
+#include "paddle/pten/core/tensor_base.h"
+#include "paddle/pten/core/tensor_meta.h"
+
+// See Note [ Why still include the fluid headers?
] +#include "paddle/fluid/framework/ddim.h" + +namespace pten { + +class MetaTensor { + public: + explicit MetaTensor(TensorBase* tensor) : tensor_(tensor) {} + + MetaTensor() = default; + MetaTensor(const MetaTensor&) = default; + MetaTensor(MetaTensor&&) = default; + MetaTensor& operator=(const MetaTensor&) = delete; + MetaTensor& operator=(MetaTensor&&) = delete; + + virtual ~MetaTensor() = default; + + virtual int64_t numel() const; + virtual DDim dims() const; + virtual DataType dtype() const; + virtual DataLayout layout() const; + virtual void set_dims(const DDim& dims); + virtual void set_dtype(DataType dtype); + virtual void set_layout(DataLayout layout); + virtual void share_lod(const MetaTensor& meta_tensor); + + private: + const LoD& lod() const; + TensorBase* tensor_; +}; + +} // namespace pten
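
Usage sketch: a minimal illustration of how the relocated macros.h helpers and the new MetaTensor view could be used together; it is not part of the patch above. The names ExampleMetaHolder, PassThroughInferMeta, and Example are hypothetical, the DenseTensor objects are assumed to be created elsewhere, and <type_traits> is included explicitly because PT_STATIC_ASSERT_GLOBAL_NAMESPACE relies on std::is_same.

#include <type_traits>  // std::is_same, used by PT_STATIC_ASSERT_GLOBAL_NAMESPACE

#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/core/macros.h"
#include "paddle/pten/core/meta_tensor.h"

// The relocated macros target registration-style call sites: a global-namespace
// check plus a uniquely named, unused symbol (hypothetical usage).
PT_STATIC_ASSERT_GLOBAL_NAMESPACE(
    example_ns_check, "this macro must be used in the global namespace");
UNUSED static int PT_CONCATENATE(example_symbol_, PT_ID) = 0;

namespace pten {

// DISABLE_COPY_AND_ASSIGN deletes the copy/move constructors and assignments.
class ExampleMetaHolder {
 public:
  ExampleMetaHolder() = default;
  DISABLE_COPY_AND_ASSIGN(ExampleMetaHolder);
};

// A hypothetical shape-inference helper: it only manipulates metadata
// (dims/dtype/layout/lod) through the MetaTensor interface and never
// touches the tensor's storage.
void PassThroughInferMeta(const MetaTensor& x, MetaTensor* out) {
  out->set_dims(x.dims());
  out->set_dtype(x.dtype());
  out->set_layout(x.layout());
  out->share_lod(x);
}

// MetaTensor is a non-owning view over any TensorBase subclass, so the
// caller keeps ownership of the DenseTensor objects.
void Example(DenseTensor* in, DenseTensor* out) {
  MetaTensor meta_in(in);
  MetaTensor meta_out(out);
  PassThroughInferMeta(meta_in, &meta_out);
}

}  // namespace pten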