Skip to content

Commit

Permalink
Add Intermediate API layer
Browse files Browse the repository at this point in the history
  • Loading branch information
YuanRisheng committed Oct 22, 2021
1 parent 76a588e commit 7c41b15
Show file tree
Hide file tree
Showing 40 changed files with 526 additions and 203 deletions.
1 change: 0 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@ paddle/fluid/API_DEV.spec
paddle/fluid/API_PR.spec
paddle/fluid/op_use_default_grad_maker_DEV.spec
paddle/fluid/op_use_default_grad_maker_PR.spec
tools/__pycache__/static_mode_white_list.cpython-37.pyc

*.DS_Store
*.vs
Expand Down
26 changes: 5 additions & 21 deletions paddle/fluid/framework/operator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ limitations under the License. */
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/pten/common/scalar.h"

namespace paddle {
namespace framework {
Expand Down Expand Up @@ -1080,20 +1081,6 @@ void OperatorWithKernel::RuntimeInferShape(const Scope& scope,
this->InferShape(&infer_shape_ctx);
}

// Builds a short human-readable summary of a RuntimeContext, listing only
// the names of its input and output argument slots (variable values are
// intentionally not printed).
static std::string RuntimeContextDebugString(const RuntimeContext& ctx) {
  std::stringstream ss;
  // Appends every slot name of a name->variables map followed by ", ".
  // The trailing separator is kept to preserve the historical format.
  auto append_names = [&ss](const auto& var_map) {
    for (const auto& var_pair : var_map) {
      ss << var_pair.first << ", ";
    }
  };
  ss << "RuntimeContext(Inputs: ";
  append_names(ctx.inputs);
  ss << "Outputs: ";
  append_names(ctx.outputs);
  ss << ")";
  return ss.str();
}

void OperatorWithKernel::RunImpl(const Scope& scope,
const platform::Place& place) const {
// To reduce the elapsed time of HasAttr, we use bool variable to record the
Expand Down Expand Up @@ -1144,7 +1131,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
// and RCOM backend, the XPU, NPU and MKLDNN will be supported in the second
// phase
if (FLAGS_run_pt_kernel &&
pten::KernelFactory::Instance().ContainsKernel(type_.c_str())) {
pten::KernelFactory::Instance().HasCompatiblePtenKernel(type_)) {
if (pt_kernel_signature_.get() == nullptr || pt_kernel_.get() == nullptr) {
ChoosePtenKernel(exe_ctx);
}
Expand Down Expand Up @@ -1651,10 +1638,9 @@ void OperatorWithKernel::ParseInputDataType(
if (t != nullptr) {
PADDLE_ENFORCE_EQ(
t->IsInitialized(), true,
platform::errors::InvalidArgument(
"The Tensor in the %s Op's Input Variable %s(%s) is "
"not initialized.",
Type(), name, Inputs().at(name).at(i)));
platform::errors::InvalidArgument("The %s Op's Input Variable `%s` "
"contains uninitialized Tensor.",
Type(), name));
proto::VarType::Type tmp = t->type();
PADDLE_ENFORCE(tmp == *data_type || *data_type == default_data_type,
platform::errors::InvalidArgument(
Expand Down Expand Up @@ -1789,8 +1775,6 @@ KernelSignature OperatorWithKernel::GetExpectedPtenKernelArgs(

pten::KernelContext OperatorWithKernel::BuildPtenKernelContext(
const RuntimeContext& ctx, const platform::DeviceContext& dev_ctx) const {
VLOG(1) << RuntimeContextDebugString(ctx);

// TODO(chenweihang): now only work for very simple case,
// many cases need to be deal with later:
// 1. the input and output are not tensor
Expand Down
11 changes: 5 additions & 6 deletions paddle/fluid/framework/operator_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -439,9 +439,8 @@ TEST(IndicateVarDataTypeTest, lodtensor) {
std::string ex_msg = err.what();
EXPECT_TRUE(
ex_msg.find(
"The Tensor in the indicate_lod_tensor_data_type_test Op's "
"Input Variable LoDTensor(lodtensor_1) is not initialized") !=
std::string::npos);
"The indicate_lod_tensor_data_type_test Op's Input Variable "
"`LoDTensor` contains uninitialized Tensor.") != std::string::npos);
}
ASSERT_TRUE(caught);
}
Expand All @@ -466,9 +465,9 @@ TEST(IndicateVarDataTypeTest, selectedrows) {
caught = true;
std::string ex_msg = err.what();
EXPECT_TRUE(
ex_msg.find("The Tensor in the indicate_selected_rows_data_type_test "
"Op's Input Variable SelectedRows(selected_rows_1) is not "
"initialized") != std::string::npos);
ex_msg.find("The indicate_selected_rows_data_type_test Op's "
"Input Variable `SelectedRows` contains uninitialized "
"Tensor.") != std::string::npos);
}
ASSERT_TRUE(caught);
}
Expand Down
3 changes: 2 additions & 1 deletion paddle/fluid/imperative/prepared_operator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
#include "paddle/fluid/framework/details/nan_inf_utils.h"
#include "paddle/fluid/framework/pten_utils.h"
#include "paddle/fluid/imperative/infer_shape_context.h"
#include "paddle/pten/common/scalar.h"
#include "paddle/utils/small_vector.h"
#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/xpu/xpu_op_list.h"
Expand Down Expand Up @@ -153,7 +154,7 @@ PreparedOp PrepareImpl(const NameVarMap<VarType>& ins,
VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

if (FLAGS_run_pt_kernel &&
pten::KernelFactory::Instance().ContainsKernel(op.Type().c_str())) {
pten::KernelFactory::Instance().HasCompatiblePtenKernel(op.Type())) {
auto pt_kernel_signature = op.GetExpectedPtenKernelArgs(dygraph_exe_ctx);

VLOG(1) << framework::KernelSignatureToString(pt_kernel_signature);
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/operators/mean_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
* Currently, only the first two cases are adapted.
*
* The principle here is that the implementation in the kernel must reuse the
* corresponding functions in the Tensor compute library and cannot maintain
* corresponding functions in the Tensor Operation library and cannot maintain
* two copies of the code.
*/
template <typename DeviceContext, typename T>
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/op_function_generator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -557,7 +557,7 @@ GenerateOpFunctions() {
// since only OperatorWithKernel can run in dygraph mode.
// if the pten lib contains op kernel, we still generate ops method
if (!all_kernels.count(op_type) &&
!pten::KernelFactory::Instance().ContainsKernel(op_type.c_str())) {
!pten::KernelFactory::Instance().HasCompatiblePtenKernel(op_type)) {
continue;
}

Expand Down
1 change: 0 additions & 1 deletion paddle/pten/api/include/core.h
Original file line number Diff line number Diff line change
Expand Up @@ -19,5 +19,4 @@ limitations under the License. */
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/core/kernel_context.h"
#include "paddle/pten/core/kernel_factory.h"
#include "paddle/pten/core/scalar.h"
#include "paddle/pten/core/tensor_meta.h"
15 changes: 15 additions & 0 deletions paddle/pten/api/include/creation.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,5 +14,20 @@

#pragma once

#include "paddle/pten/api/include/infershape.h"
#include "paddle/pten/kernels/cpu/creation.h"
#include "paddle/pten/kernels/cuda/creation.h"

namespace pten {

// Intermediate API: allocates an output tensor with the same metadata as
// `x`, fills it with `val` via the device kernel overload, and returns the
// filled tensor by value.
template <typename T, typename ContextT>
DenseTensor FillAnyLike(const ContextT& dev_ctx,
                        const DenseTensor& x,
                        const Scalar& val) {
  // The output reuses the input's metadata unchanged (same dims/dtype).
  const auto result_meta = UnchangedInferShape(x.meta());
  DenseTensor result(result_meta, TensorStatus());
  FillAnyLike<T>(dev_ctx, x, val, &result);
  return result;
}

} // namespace pten
15 changes: 15 additions & 0 deletions paddle/pten/api/include/linalg.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,5 +15,20 @@
#pragma once

// See Note: [ How do we organize the kernel directory ]
#include "paddle/pten/api/include/infershape.h"
#include "paddle/pten/kernels/cpu/linalg.h"
#include "paddle/pten/kernels/cuda/linalg.h"

namespace pten {

// Intermediate API: computes the dot product of `x` and `y` with the
// device kernel overload and returns the result as a new DenseTensor.
template <typename T, typename ContextT>
DenseTensor Dot(const ContextT& dev_ctx,
                const DenseTensor& x,
                const DenseTensor& y) {
  // Output shape/dtype follow the dot-product inference rule.
  const auto result_meta = DotInferShape(x.meta(), y.meta());
  DenseTensor result(result_meta, TensorStatus());
  Dot<T>(dev_ctx, x, y, &result);
  return result;
}

} // namespace pten
16 changes: 16 additions & 0 deletions paddle/pten/api/include/manipulation.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,5 +15,21 @@
#pragma once

// See Note: [ How do we organize the kernel directory ]
#include "paddle/pten/api/include/infershape.h"
#include "paddle/pten/kernels/cpu/manipulation.h"
#include "paddle/pten/kernels/cuda/manipulation.h"

namespace pten {

// Intermediate API: flattens the dimensions of `x` in the inclusive axis
// range [start_axis, stop_axis] and returns the reshaped result as a new
// DenseTensor.
template <typename T, typename ContextT>
DenseTensor Flatten(const ContextT& dev_ctx,
                    const DenseTensor& x,
                    int start_axis,
                    int stop_axis) {
  // Output metadata is derived from the flatten shape-inference rule.
  const auto result_meta = FlattenInferShape(x.meta(), start_axis, stop_axis);
  DenseTensor result(result_meta, TensorStatus());
  Flatten<T>(dev_ctx, x, start_axis, stop_axis, &result);
  return result;
}

} // namespace pten
44 changes: 44 additions & 0 deletions paddle/pten/api/include/math.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,5 +15,49 @@ limitations under the License. */
#pragma once

// See Note: [ How do we organize the kernel directory ]
#include "paddle/pten/api/include/infershape.h"
#include "paddle/pten/kernels/cpu/math.h"
#include "paddle/pten/kernels/cuda/math.h"

namespace pten {

// Intermediate API: applies the element-wise sign kernel to `x` and
// returns the result as a new DenseTensor with unchanged metadata.
template <typename T, typename ContextT>
DenseTensor Sign(const ContextT& dev_ctx, const DenseTensor& x) {
  const auto result_meta = UnchangedInferShape(x.meta());
  DenseTensor result(result_meta, TensorStatus());
  Sign<T>(dev_ctx, x, &result);
  return result;
}

// Intermediate API: reduces `x` with the mean kernel and returns the
// result as a new DenseTensor whose metadata follows the reduction rule.
template <typename T, typename ContextT>
DenseTensor Mean(const ContextT& dev_ctx, const DenseTensor& x) {
  const auto result_meta = ReductionInferShape(x.meta());
  DenseTensor result(result_meta, TensorStatus());
  Mean<T>(dev_ctx, x, &result);
  return result;
}

// Intermediate API: computes scale(x) with host-side scalar `scale` and
// `bias`, i.e. the device kernel's scale transform, and returns the result
// as a new DenseTensor. `bias_after_scale` selects whether the bias is
// applied after the multiplication.
template <typename T, typename ContextT>
DenseTensor Scale(const ContextT& dev_ctx,
                  const DenseTensor& x,
                  float scale,
                  float bias,
                  bool bias_after_scale) {
  // Element-wise transform: output metadata mirrors the input's.
  const auto result_meta = UnchangedInferShape(x.meta());
  DenseTensor result(result_meta, TensorStatus());
  Scale<T>(dev_ctx, x, scale, bias, bias_after_scale, &result);
  return result;
}

// Intermediate API overload: like the float-scale version, but the scale
// factor is supplied as a DenseTensor and dispatched to the ScaleHost
// kernel. Returns the scaled result as a new DenseTensor.
template <typename T, typename ContextT>
DenseTensor Scale(const ContextT& dev_ctx,
                  const DenseTensor& x,
                  const DenseTensor& scale,
                  float bias,
                  bool bias_after_scale) {
  // Element-wise transform: output metadata mirrors the input's.
  const auto result_meta = UnchangedInferShape(x.meta());
  DenseTensor result(result_meta, TensorStatus());
  ScaleHost<T>(dev_ctx, x, scale, bias, bias_after_scale, &result);
  return result;
}
} // namespace pten
9 changes: 6 additions & 3 deletions paddle/pten/common/backend.h
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,8 @@ limitations under the License. */

#include <ostream>

#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace experimental {

Expand All @@ -28,8 +30,8 @@ namespace experimental {
* but in order to make the boundary of the kernel clearer and the function
* more specific, we need to distinguish the calculation method.
*
* Such as the kernel for CUDA device, it can be a native CUDA kernel,
* or a kernel implemented by CUDNN library.
* Such as the kernel for CPU device, it can be a native CPU kernel,
* or a kernel implemented by MKLDNN library.
*
* Note(chenweihang): HIP is not needed now, we can added it if needed
* in the future
Expand Down Expand Up @@ -78,7 +80,8 @@ inline std::ostream& operator<<(std::ostream& os, Backend backend) {
os << "CUDNN";
break;
default:
throw std::runtime_error("Invalid Backend type.");
PADDLE_THROW(platform::errors::InvalidArgument(
"Invalid enum backend type `%d`.", static_cast<int>(backend)));
}
return os;
}
Expand Down
5 changes: 2 additions & 3 deletions paddle/pten/common/data_type.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,6 @@ limitations under the License. */
#include "paddle/fluid/platform/bfloat16.h"
#include "paddle/fluid/platform/complex.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/errors.h"
#include "paddle/fluid/platform/float16.h"

namespace paddle {
Expand Down Expand Up @@ -164,8 +163,8 @@ inline std::ostream& operator<<(std::ostream& os, DataType dtype) {
os << "complex128";
break;
default:
// TODO(chenweihang): change to enforce later
throw std::runtime_error("Invalid DataType type.");
PADDLE_THROW(platform::errors::InvalidArgument(
"Invalid enum data type `%d`.", static_cast<int>(dtype)));
}
return os;
}
Expand Down
10 changes: 6 additions & 4 deletions paddle/pten/common/layout.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@ limitations under the License. */

#pragma once

#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace experimental {

Expand All @@ -26,8 +28,8 @@ enum class DataLayout {
NUM_DATA_LAYOUTS,
};

inline std::ostream& operator<<(std::ostream& os, DataLayout dtype) {
switch (dtype) {
inline std::ostream& operator<<(std::ostream& os, DataLayout layout) {
switch (layout) {
case DataLayout::UNDEFINED:
os << "Undefined";
break;
Expand All @@ -44,8 +46,8 @@ inline std::ostream& operator<<(std::ostream& os, DataLayout dtype) {
os << "MKLDNN";
break;
default:
// TODO(chenweihang): change to enforce later
throw std::runtime_error("Invalid DataLayout type.");
PADDLE_THROW(platform::errors::InvalidArgument(
"Invalid enum data layout type `%d`.", static_cast<int>(layout)));
}
return os;
}
Expand Down
17 changes: 14 additions & 3 deletions paddle/pten/core/scalar.h → paddle/pten/common/scalar.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,12 @@ limitations under the License. */

#pragma once

namespace pten {
#include <cstdint>

#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace experimental {

class Scalar {
public:
Expand Down Expand Up @@ -43,7 +48,8 @@ class Scalar {
case Tag::HAS_B:
return static_cast<T>(data_.b);
default:
throw std::runtime_error("Invalid Scalar type.");
PADDLE_THROW(platform::errors::InvalidArgument(
"Invalid enum scalar type tag `%d`.", static_cast<int>(tag)));
}
}

Expand All @@ -60,4 +66,9 @@ class Scalar {
} data_;
};

} // namespace pten
} // namespace experimental
} // namespace paddle

namespace pten {
using Scalar = paddle::experimental::Scalar;
}
4 changes: 2 additions & 2 deletions paddle/pten/core/dense_tensor.h
Original file line number Diff line number Diff line change
Expand Up @@ -54,8 +54,8 @@ class DenseTensor : public TensorBase {

// DenseTensor(const DenseTensor&) = delete;
// DenseTensor& operator=(const DenseTensor&) = delete;
DenseTensor(DenseTensor&&) = delete;
DenseTensor& operator=(DenseTensor&&) = delete;
DenseTensor(DenseTensor&&) = default;
DenseTensor& operator=(DenseTensor&&) = default;

/**
* If we still malloc memory by mutable_data,
Expand Down
Loading

1 comment on commit 7c41b15

@paddle-bot-old
Copy link

@paddle-bot-old paddle-bot-old bot commented on 7c41b15 Oct 22, 2021

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🕵️ CI failures summary

🔍 PR: #36649 Commit ID: 7c41b15 contains failed CI.

🔹 Failed: PR-CI-APPROVAL

approve_failed
2021-10-22 21:06:53 正在保存至: “bk.txt”
2021-10-22 21:06:53 0K 100% 5.52M=0s
2021-10-22 21:06:53 2021-10-22 21:06:53 (5.52 MB/s) - 已保存 “bk.txt” [5/5])
2021-10-22 21:07:00 ****************
2021-10-22 21:07:00 0. You must have one RD (lanxianghit (Recommend), phlrain or luotao1) approval for changing the FLAGS, which manages the environment variables.
2021-10-22 21:07:00 1. You must have Dianhai approval for change 20+ files or add than 1000+ lines of content.
2021-10-22 21:07:00 2. You must have one RD (XiaoguangHu01,chenwhql,zhiqiu,Xreki,luotao1) approval for paddle/fluid/framework/operator.h, which manages the underlying code for fluid.
2021-10-22 21:07:00 3. You must have one RD (zhiqiu (Recommend) , phlrain) approval for the changes of paddle/fluid/pybind/op_function_generator.cc, which manages the logic of automatic generating op functions for dygraph.
2021-10-22 21:07:00 4. You must have one RD (XiaoguangHu01,chenwhql,zhiqiu,Xreki,luotao1) approval for the usage of const_cast.
2021-10-22 21:07:00 5. You must have one RD (Avin0323(Recommend) or zhouwei25 or wanghuancoder or luotao1) approval for modifying unity_build_rule.cmake, which manages the rules of Unity Build.
2021-10-22 21:07:00 There are 6 approved errors.
2021-10-22 21:07:00 ****************
2021-10-22 21:07:00 + EXCODE=6
2021-10-22 21:07:00 + echo 'EXCODE: 6'
2021-10-22 21:07:00 EXCODE: 6
2021-10-22 21:07:00 + echo 'ipipe_log_param_EXCODE: 6'
2021-10-22 21:07:00 ipipe_log_param_EXCODE: 6
2021-10-22 21:07:00 + exit 6

🔹 Failed: PR-CI-OP-benchmark

Unknown Failed
2021-10-23 01:10:21 + echo '[tools/test_ci_op_benchmark.sh:271] [ERROR] Missing test script of "mean"(paddle/fluid/operators/mean_op.cu) in benchmark.'
2021-10-23 01:10:21 [tools/test_ci_op_benchmark.sh:271] [ERROR] Missing test script of "mean"(paddle/fluid/operators/mean_op.cu) in benchmark.
2021-10-23 01:10:21 + for op_name in '${!CHANGE_OP_MAP[@]}'
2021-10-23 01:10:21 + '[' -z '' ']'
2021-10-23 01:10:21 + exit_code=8
2021-10-23 01:10:21 + LOG '[ERROR] Missing test script of "fill_any_like"(paddle/fluid/operators/fill_any_like_op.cu) in benchmark.'
2021-10-23 01:10:21 + echo '[tools/test_ci_op_benchmark.sh:271] [ERROR] Missing test script of "fill_any_like"(paddle/fluid/operators/fill_any_like_op.cu) in benchmark.'
2021-10-23 01:10:21 [tools/test_ci_op_benchmark.sh:271] [ERROR] Missing test script of "fill_any_like"(paddle/fluid/operators/fill_any_like_op.cu) in benchmark.
2021-10-23 01:10:21 + for op_name in '${!CHANGE_OP_MAP[@]}'
2021-10-23 01:10:21 + '[' -z matmul,matmul,matmul.json,True ']'
2021-10-23 01:10:21 + '[' 8 -ne 0 ']'
2021-10-23 01:10:21 + LOG '[INFO] See https://github.com/PaddlePaddle/Paddle/wiki/PR-CI-OP-benchmark-Manual for details.'
2021-10-23 01:10:21 + echo '[tools/test_ci_op_benchmark.sh:275] [INFO] See https://github.com/PaddlePaddle/Paddle/wiki/PR-CI-OP-benchmark-Manual for details.'
2021-10-23 01:10:21 [tools/test_ci_op_benchmark.sh:275] [INFO] See https://github.com/PaddlePaddle/Paddle/wiki/PR-CI-OP-benchmark-Manual for details.
2021-10-23 01:10:21 + LOG '[INFO] Or you can apply for one RD (Avin0323(Recommend), Xreki, luotao1) approval to pass this PR.'
2021-10-23 01:10:21 + echo '[tools/test_ci_op_benchmark.sh:276] [INFO] Or you can apply for one RD (Avin0323(Recommend), Xreki, luotao1) approval to pass this PR.'
2021-10-23 01:10:21 [tools/test_ci_op_benchmark.sh:276] [INFO] Or you can apply for one RD (Avin0323(Recommend), Xreki, luotao1) approval to pass this PR.
2021-10-23 01:10:21 + exit 8
2021-10-23 01:10:21 {build code state=8}

🔹 Failed: PR-CI-Coverage

coverage_failed
2021-10-23 02:43:04 + CURL_OPTS='-s --connect-timeout 600 --retry 10 --retry-delay 10'
2021-10-23 02:43:04 ++ uuid
2021-10-23 02:43:04 ++ curl -s --connect-timeout 600 --retry 10 --retry-delay 10 -u paddle:915eedab953b6f51151f50eb https://xly.bce.baidu.com/ipipe/ipipe-report/uuid
2021-10-23 02:43:04 + UPLOAD_FILE=/tmp/upload-e205b39b-fb4f-437a-91f8-7727c8803f6d.tar.gz
2021-10-23 02:43:04 + echo Archiving
2021-10-23 02:43:04 Archiving
2021-10-23 02:43:04 + tar -czvf /tmp/upload-e205b39b-fb4f-437a-91f8-7727c8803f6d.tar.gz .
2021-10-23 02:43:04 ./
2021-10-23 02:43:04 + du -hs /tmp/upload-e205b39b-fb4f-437a-91f8-7727c8803f6d.tar.gz
2021-10-23 02:43:04 4.0K /tmp/upload-e205b39b-fb4f-437a-91f8-7727c8803f6d.tar.gz
2021-10-23 02:43:04 + curl -s --connect-timeout 600 --retry 10 --retry-delay 10 -u paddle:915eedab953b6f51151f50eb -F buildId=8557262 -F path=python-coverage -F file=@/tmp/upload-e205b39b-fb4f-437a-91f8-7727c8803f6d.tar.gz https://xly.bce.baidu.com/ipipe/ipipe-report/upload
2021-10-23 02:43:05 + rm -f /tmp/upload-e205b39b-fb4f-437a-91f8-7727c8803f6d.tar.gz
2021-10-23 02:43:05 report uploaded
2021-10-23 02:43:05 9
2021-10-23 02:43:05 ipipe_log_param_EXCODE: 9
2021-10-23 02:43:05 Sorry, coverage check failed.
2021-10-23 02:43:05 + exit 9
2021-10-23 02:43:05 {build code state=9}
2021-10-23 02:43:15 kill agent BUILD_CODE_FAIL

🔹 Failed: PR-CI-iScan-C

Unknown Failed
Unknown Failed

🔹 Failed: PR-CI-iScan-Python

Unknown Failed
Unknown Failed

Please sign in to comment.