Merge branch 'op2func_refactor' into dev/op2func_refactor_
chenwhql committed Oct 19, 2021
2 parents 864e602 + 1dd0145 commit 4ec1c2c
Showing 28 changed files with 512 additions and 618 deletions.
318 changes: 92 additions & 226 deletions paddle/fluid/framework/operator.cc

Large diffs are not rendered by default.

36 changes: 20 additions & 16 deletions paddle/fluid/framework/operator.h
@@ -116,8 +116,6 @@ inline std::string GradOriginalVarName(const std::string& grad_var_name) {
const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var);
Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var);

- OpKernelType TransPtKernelKeyToOpKernelType(const pt::KernelKey& kernel_key);
-
class ExecutionContext;
class OperatorBase;

@@ -534,13 +532,15 @@ class OperatorWithKernel : public OperatorBase {
}

/* member functions for adapting to tcmpt lib */
- // TODO(chenweihang): Temporarily as a class method
- virtual pt::KernelKey ConstructPtKernelKey(
-     const VariableValueMap& inputs, const AttributeMap& attrs,
-     const platform::Place& ctx_place) const;
-
- virtual pt::KernelContext ConstructPtKernelContext(
-     const RuntimeContext& ctx, const platform::DeviceContext& dev_ctx) const;
+ /** In the new tensor compute library, kernels adopt a clearer and more
+  * streamlined design. In some cases the kernel's arguments do not match
+  * the input and output arguments registered in the original OpMaker, so
+  * we use a map to record the arguments the kernel requires. When a kernel
+  * is selected during Op execution, the original Op's arguments are
+  * matched against the signature returned by GetExpectedPtKernelArgs.
+  */
+ virtual KernelSignature GetExpectedPtKernelArgs(
+     const ExecutionContext& ctx) const;

private:
void RunImpl(const Scope& scope, const platform::Place& place) const final;
@@ -563,8 +563,9 @@ class OperatorWithKernel : public OperatorBase {
const std::vector<std::string>& inplace_vars,
const Scope& exec_scope) const;

- void ChooseKernel(const RuntimeContext& ctx, const Scope& scope,
-                   const platform::Place& place) const;
+ OpKernelType InnerGetExpectedKernelType(const ExecutionContext& ctx) const;
+
+ void ChooseKernel(const ExecutionContext& ctx) const;

void HandleComplexGradToRealGrad(const Scope& scope,
RuntimeContext* ctx) const;
@@ -582,8 +583,10 @@ class OperatorWithKernel : public OperatorBase {
const std::string& name) const;

/* member functions for adapting to tcmpt lib */
- void ChoosePtKernel(const RuntimeContext& ctx,
-                     const platform::DeviceContext& dev_ctx) const;
+ void ChoosePtKernel(const ExecutionContext& ctx) const;
+
+ pt::KernelContext BuildPtKernelContext(
+     const RuntimeContext& ctx, const platform::DeviceContext& dev_ctx) const;

protected:
mutable std::unique_ptr<OpKernelType> kernel_type_;
@@ -595,10 +598,11 @@ class OperatorWithKernel : public OperatorBase {
mutable bool all_kernels_must_compute_runtime_shape_ = false;
mutable std::mutex cache_update_mutex_;
mutable bool enable_cache_transfer_scope_ = false;
- // TODO(chenweihang): Similar duplicate members are used for new tcmpt lib,
- // maybe we have better impl methods
+ // NOTE(chenweihang): These duplicated members adapt the op to the new
+ // tcmpt kernels; if a better design emerges in the future, we may
+ // polish the implementation here
mutable bool run_pt_kernel_ = false;
- mutable std::unique_ptr<pt::KernelKey> pt_kernel_key_;
+ mutable std::unique_ptr<KernelSignature> pt_kernel_signature_;
mutable std::unique_ptr<pt::Kernel> pt_kernel_;
};

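
The signature scheme above is easiest to see concretely. Below is a minimal sketch, an editor's illustration rather than code from this commit, of how an operator could override GetExpectedPtKernelArgs; the op name "scale", the ScaleOp class, and the argument names are assumptions for the example:

KernelSignature ScaleOp::GetExpectedPtKernelArgs(
    const ExecutionContext& ctx) const {
  // A KernelSignature is pair(kernel_name, tuple(inputs, attrs, outputs)).
  return std::make_pair(
      std::string("scale"),
      std::make_tuple(paddle::SmallVector<std::string>({"X"}),
                      paddle::SmallVector<std::string>({"scale"}),
                      paddle::SmallVector<std::string>({"Out"})));
}
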
117 changes: 115 additions & 2 deletions paddle/fluid/framework/tcmpt_utils.cc
@@ -12,11 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <sstream>

#include "paddle/fluid/framework/tcmpt_utils.h"

#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/string/string_helper.h"

namespace paddle {
namespace framework {
@@ -62,7 +65,7 @@ std::shared_ptr<pt::DenseTensor> MakeTensorImpl<pt::DenseTensor>(
proto::VarType::Type type) {
return MakeTensorImpl<pt::DenseTensor, LoDTensor>(
tensor, pt::TransToPtBackend(place), pt::TransToPtDataType(type),
-     pt::TransToPtLayout(tensor.layout()));
+     pt::TransToPtDataLayout(tensor.layout()));
}

template <>
@@ -71,7 +74,7 @@ std::shared_ptr<pt::DenseTensor> MakeTensorImpl<pt::DenseTensor>(
proto::VarType::Type type) {
return MakeTensorImpl<pt::DenseTensor, Tensor>(
tensor, pt::TransToPtBackend(place), pt::TransToPtDataType(type),
-     pt::TransToPtLayout(tensor.layout()));
+     pt::TransToPtDataLayout(tensor.layout()));
}

std::shared_ptr<tcmpt::TensorBase> InputVariableToPtTensor(
@@ -150,5 +153,115 @@ std::shared_ptr<tcmpt::TensorBase> OutputVariableToPtTensor(
return nullptr;
}

OpKernelType TransPtKernelKeyToOpKernelType(const pt::KernelKey& kernel_key) {
proto::VarType::Type data_type = pt::TransToProtoVarType(kernel_key.dtype());
platform::Place place = pt::TransToFluidPlace(kernel_key.backend());
DataLayout data_layout = pt::TransToFluidDataLayout(kernel_key.layout());
LibraryType library_type = LibraryType::kPlain;
if (kernel_key.backend() == pt::Backend::kMKLDNN) {
library_type = LibraryType::kMKLDNN;
} else if (kernel_key.backend() == pt::Backend::kCUDNN) {
library_type = LibraryType::kCUDNN;
} else {
// do nothing
}
// TODO(chenweihang): the customized_type_value is lost
return OpKernelType(data_type, place, data_layout, library_type);
}

pt::KernelKey TransOpKernelTypeToPtKernelKey(const OpKernelType& kernel_type) {
pt::Backend backend = pt::TransToPtBackend(kernel_type.place_);
if (kernel_type.library_type_ == LibraryType::kMKLDNN) {
backend = pt::Backend::kMKLDNN;
} else if (kernel_type.library_type_ == LibraryType::kCUDNN) {
backend = pt::Backend::kCUDNN;
} else {
// do nothing
}
pt::DataLayout layout = pt::TransToPtDataLayout(kernel_type.data_layout_);
pt::DataType dtype = pt::TransToPtDataType(kernel_type.data_type_);
return pt::KernelKey(backend, layout, dtype);
}
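
// NOTE (editor's illustration, not part of this commit): the two translators
// above are intended to be inverses of each other, apart from the
// customized_type_value loss noted in the TODO. Assuming OpKernelType's
// operator== compares type, place, layout and library, a round trip for an
// ordinary kernel type can be checked like this:
static bool KernelKeyRoundTrips(const OpKernelType& fluid_type) {
  pt::KernelKey pt_key = TransOpKernelTypeToPtKernelKey(fluid_type);
  return TransPtKernelKeyToOpKernelType(pt_key) == fluid_type;
}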

KernelSignatureMap& KernelSignatureMap::Instance() {
static KernelSignatureMap g_kernel_signature_map;
return g_kernel_signature_map;
}

const paddle::SmallVector<std::string>&
KernelArgsNameMakerByOpProto::GetInputArgsNames() {
for (int i = 0; i < op_proto_->inputs_size(); ++i) {
auto& in = op_proto_->inputs()[i];
auto& in_name = in.name();
if ((in.has_extra() && in.extra()) || (in.has_quant() && in.quant())) {
VLOG(1) << "Parse PtKernel input: skip extra & quant input - " << in_name;
continue;
}
// If an op contains a dispensable input, its GetExpectedPtKernelArgs
// method should be overridden to handle it
if (in.has_dispensable() && in.dispensable()) {
VLOG(1) << "Parse PtKernel input: skip dispensable input - " << in_name;
continue;
}
VLOG(1) << "Parse PtKernel input: " << in_name;
input_names_.emplace_back(in_name);
}
return input_names_;
}

const paddle::SmallVector<std::string>&
KernelArgsNameMakerByOpProto::GetOutputArgsNames() {
for (int i = 0; i < op_proto_->outputs_size(); ++i) {
auto& out = op_proto_->outputs()[i];
auto& out_name = out.name();
// TODO(chenweihang): some outputs also need to be skipped in certain cases
VLOG(1) << "Parse PtKernel output: " << out_name;
output_names_.emplace_back(out_name);
}
return output_names_;
}

const paddle::SmallVector<std::string>&
KernelArgsNameMakerByOpProto::GetAttrsArgsNames() {
for (int i = 0; i < op_proto_->attrs_size(); ++i) {
auto& attr = op_proto_->attrs()[i];
auto& attr_name = attr.name();
if (attr_name == "use_mkldnn" || attr_name == "op_role" ||
attr_name == "op_role_var" || attr_name == "op_namescope" ||
attr_name == "op_callstack" || attr_name == "op_device") {
VLOG(1) << "Parse PtKernel attribute: skip needless attr - " << attr_name;
continue;
}
if ((attr.has_extra() && attr.extra()) ||
(attr.has_quant() && attr.quant())) {
VLOG(1) << "Parse PtKernel attribute: skip extra & quant attr - "
<< attr_name;
continue;
}
VLOG(1) << "Parse PtKernel attribute: " << attr_name;
attr_names_.emplace_back(attr_name);
}

return attr_names_;
}

KernelSignature KernelArgsNameMakerByOpProto::GetKernelSignature() {
return std::make_pair(
op_proto_->type(),
std::make_tuple(GetInputArgsNames(), GetAttrsArgsNames(),
GetOutputArgsNames()));
}

std::string KernelSignatureToString(const KernelSignature& signature) {
std::stringstream os;
os << "Kernel Signature - name: " << signature.first << "; inputs: "
<< string::join_strings(std::get<0>(signature.second), ", ")
<< "; attributes: "
<< string::join_strings(std::get<1>(signature.second), ", ")
<< "; outputs: "
<< string::join_strings(std::get<2>(signature.second), ", ");
return os.str();
}

} // namespace framework
} // namespace paddle
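
Taken together, the helpers above derive a kernel signature from an op's registered proto and cache it for reuse. The sketch below shows one plausible wiring; the OpInfoMap lookup and the function itself are the editor's assumptions, not code from this commit:

void DemoParseKernelArgs(const std::string& op_type) {
  // Fetch the registered proto for this op type (assumed to exist).
  framework::proto::OpProto* op_proto =
      OpInfoMap::Instance().Get(op_type).proto_;
  KernelArgsNameMakerByOpProto maker(op_proto);
  KernelSignature signature = maker.GetKernelSignature();
  // Cache the parsed signature so later executions can skip re-parsing.
  if (!KernelSignatureMap::Instance().Has(op_type)) {
    KernelSignatureMap::Instance().Insert(op_type, signature);
  }
  // Prints e.g. "Kernel Signature - name: scale; inputs: X;
  // attributes: scale; outputs: Out".
  VLOG(1) << KernelSignatureToString(signature);
}
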
82 changes: 81 additions & 1 deletion paddle/fluid/framework/tcmpt_utils.h
@@ -14,14 +14,25 @@ limitations under the License. */

#pragma once

#include <string>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/framework/op_kernel_type.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/imperative/type_defs.h"
#include "paddle/fluid/platform/macros.h"
#include "paddle/fluid/platform/place.h"

#include "paddle/tcmpt/api/include/core.h"
#include "paddle/utils/flat_hash_map.h"
#include "paddle/utils/small_vector.h"

namespace paddle {
namespace framework {

/* tensor translate */

template <typename PtTensorImplT, typename VariableT>
std::shared_ptr<PtTensorImplT> MakeTensorImpl(const VariableT& tensor,
pt::Backend backend,
@@ -49,5 +60,74 @@ std::shared_ptr<tcmpt::TensorBase> InputVariableToPtTensor(
std::shared_ptr<tcmpt::TensorBase> OutputVariableToPtTensor(
framework::Variable* variable, const pt::TensorArgDef& arg_def);

/* Kernel Key translate */

OpKernelType TransPtKernelKeyToOpKernelType(const pt::KernelKey& kernel_key);
pt::KernelKey TransOpKernelTypeToPtKernelKey(const OpKernelType& kernel_type);

/* Kernel Args parse */

// TODO(chenweihang): we can generate this map from proto info at compile time
class KernelSignatureMap {
public:
static KernelSignatureMap& Instance();

bool Has(const std::string& op_type) const {
return map_.find(op_type) != map_.end();
}

void Insert(const std::string& op_type, const KernelSignature& signature) {
if (!Has(op_type)) {
map_.insert({op_type, signature});
}
}

const KernelSignature* GetNullable(const std::string& op_type) const {
auto it = map_.find(op_type);
if (it == map_.end()) {
return nullptr;
} else {
return &it->second;
}
}

private:
KernelSignatureMap() = default;
paddle::flat_hash_map<std::string, KernelSignature> map_;

DISABLE_COPY_AND_ASSIGN(KernelSignatureMap);
};

class KernelArgsNameMaker {
public:
virtual ~KernelArgsNameMaker() {}
virtual const paddle::SmallVector<std::string>& GetInputArgsNames() = 0;
virtual const paddle::SmallVector<std::string>& GetOutputArgsNames() = 0;
virtual const paddle::SmallVector<std::string>& GetAttrsArgsNames() = 0;
};

class KernelArgsNameMakerByOpProto : public KernelArgsNameMaker {
public:
explicit KernelArgsNameMakerByOpProto(framework::proto::OpProto* op_proto)
: op_proto_(op_proto) {}

~KernelArgsNameMakerByOpProto() {}

const paddle::SmallVector<std::string>& GetInputArgsNames() override;
const paddle::SmallVector<std::string>& GetOutputArgsNames() override;
const paddle::SmallVector<std::string>& GetAttrsArgsNames() override;

KernelSignature GetKernelSignature();

private:
framework::proto::OpProto* op_proto_;

paddle::SmallVector<std::string> input_names_;
paddle::SmallVector<std::string> output_names_;
paddle::SmallVector<std::string> attr_names_;
};

std::string KernelSignatureToString(const KernelSignature& signature);

} // namespace framework
} // namespace paddle
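
The TODO on KernelSignatureMap hints at generating the map ahead of time instead of filling it lazily. One direction that could take, offered purely as an editor's sketch under the assumption that OpInfoMap exposes all registered protos, is a one-shot pre-population pass at startup:

void PrePopulateKernelSignatures() {
  for (const auto& pair : OpInfoMap::Instance().map()) {
    // Ops without a proto (e.g. some legacy registrations) are skipped.
    if (pair.second.proto_ != nullptr) {
      KernelArgsNameMakerByOpProto maker(pair.second.proto_);
      KernelSignatureMap::Instance().Insert(pair.first,
                                            maker.GetKernelSignature());
    }
  }
}
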
10 changes: 8 additions & 2 deletions paddle/fluid/framework/tcmpt_utils_test.cc
@@ -49,13 +49,19 @@ TEST(TcmptUtils, VarToPtTensor) {
auto* data =
value->mutable_data<int>(make_ddim({1, 1}), paddle::platform::CPUPlace());
data[0] = 123;
- auto tensor_def = pt::TensorArgDef(pt::Backend::kCUDA, pt::DataLayout::kNCHW,
+ pt::Backend expect_backend = pt::Backend::kCPU;
+
+ #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
+ expect_backend = pt::Backend::kCUDA;
+ #endif
+ auto tensor_def = pt::TensorArgDef(expect_backend, pt::DataLayout::kNCHW,
pt::DataType::kINT32);
// 2. test API
auto tensor_x = InputVariableToPtTensor(v, tensor_def);
// 3. check result
- ASSERT_EQ(tensor_x->backend(), pt::Backend::kCUDA);
+ ASSERT_EQ(tensor_x->backend(), expect_backend);
ASSERT_EQ(tensor_x->data_type(), pt::DataType::kINT32);
+
}

} // namespace framework
10 changes: 10 additions & 0 deletions paddle/fluid/framework/type_defs.h
@@ -17,11 +17,13 @@ limitations under the License. */
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "paddle/fluid/imperative/type_defs.h"
#include "paddle/fluid/platform/variant.h"
#include "paddle/utils/small_vector.h"

namespace paddle {
namespace framework {
@@ -82,5 +84,13 @@ using InferShapeFN = std::function<void(InferShapeContext*)>;
using InplacePair = std::unordered_map<std::string, std::string>;
using InferInplaceOpFN = std::function<InplacePair(bool /*use_cuda*/)>;

// tuple(input_names, attr_names, output_names)
using KernelArgsTuple = std::tuple<paddle::SmallVector<std::string>,
paddle::SmallVector<std::string>,
paddle::SmallVector<std::string>>;
// TODO(yuanrisheng): implement an implicit overloaded signature that uses
// KernelArgsTuple directly
using KernelSignature = std::pair<std::string, KernelArgsTuple>;

} // namespace framework
} // namespace paddle
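
A short free-standing illustration (editor's sketch, not from the commit) of building a KernelSignature and unpacking it in the tuple order documented above:

KernelSignature sig = std::make_pair(
    std::string("sign"),
    std::make_tuple(paddle::SmallVector<std::string>({"X"}),
                    paddle::SmallVector<std::string>(),  // no attributes
                    paddle::SmallVector<std::string>({"Out"})));
const auto& input_names = std::get<0>(sig.second);   // {"X"}
const auto& attr_names = std::get<1>(sig.second);    // empty
const auto& output_names = std::get<2>(sig.second);  // {"Out"}
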

1 comment on commit 4ec1c2c

@paddle-bot-old (bot) commented on 4ec1c2c, Oct 19, 2021

🕵️ CI failures summary

🔍 PR: #17 Commit ID: 4ec1c2c contains failed CI.

🔹 Failed: PR-CI-OP-benchmark

Unknown Failed
2021-10-19 11:56:38 + echo '[tools/test_ci_op_benchmark.sh:271] [ERROR] Missing test script of "mean"(paddle/fluid/operators/mean_op.cu) in benchmark.'
2021-10-19 11:56:38 [tools/test_ci_op_benchmark.sh:271] [ERROR] Missing test script of "mean"(paddle/fluid/operators/mean_op.cu) in benchmark.
2021-10-19 11:56:38 + for op_name in '${!CHANGE_OP_MAP[@]}'
2021-10-19 11:56:38 + '[' -z '' ']'
2021-10-19 11:56:38 + exit_code=8
2021-10-19 11:56:38 + LOG '[ERROR] Missing test script of "fill_any_like"(paddle/fluid/operators/fill_any_like_op.cu) in benchmark.'
2021-10-19 11:56:38 + echo '[tools/test_ci_op_benchmark.sh:271] [ERROR] Missing test script of "fill_any_like"(paddle/fluid/operators/fill_any_like_op.cu) in benchmark.'
2021-10-19 11:56:38 [tools/test_ci_op_benchmark.sh:271] [ERROR] Missing test script of "fill_any_like"(paddle/fluid/operators/fill_any_like_op.cu) in benchmark.
2021-10-19 11:56:38 + for op_name in '${!CHANGE_OP_MAP[@]}'
2021-10-19 11:56:38 + '[' -z matmul,matmul,matmul.json,True ']'
2021-10-19 11:56:38 + '[' 8 -ne 0 ']'
2021-10-19 11:56:38 + LOG '[INFO] See https://github.com/PaddlePaddle/Paddle/wiki/PR-CI-OP-benchmark-Manual for details.'
2021-10-19 11:56:38 + echo '[tools/test_ci_op_benchmark.sh:275] [INFO] See https://github.com/PaddlePaddle/Paddle/wiki/PR-CI-OP-benchmark-Manual for details.'
2021-10-19 11:56:38 [tools/test_ci_op_benchmark.sh:275] [INFO] See https://github.com/PaddlePaddle/Paddle/wiki/PR-CI-OP-benchmark-Manual for details.
2021-10-19 11:56:38 + LOG '[INFO] Or you can apply for one RD (Avin0323(Recommend), Xreki, luotao1) approval to pass this PR.'
2021-10-19 11:56:38 + echo '[tools/test_ci_op_benchmark.sh:276] [INFO] Or you can apply for one RD (Avin0323(Recommend), Xreki, luotao1) approval to pass this PR.'
2021-10-19 11:56:38 [tools/test_ci_op_benchmark.sh:276] [INFO] Or you can apply for one RD (Avin0323(Recommend), Xreki, luotao1) approval to pass this PR.
2021-10-19 11:56:38 + exit 8
2021-10-19 11:56:38 {build code state=8}

🔹 Failed: PR-CI-CPU-Py2

code_style_failed
2021-10-19 12:05:38     with_avx: ON
2021-10-19 12:05:38 with_gpu: ON
2021-10-19 12:05:38 with_mkl: ON
2021-10-19 12:05:38 with_mkldnn: ON
2021-10-19 12:05:38 with_python: ON'
2021-10-19 12:05:38 + set +x
2021-10-19 12:05:38 ========================================
2021-10-19 12:05:38 summary problems:
2021-10-19 12:05:38 There is 1 error: Code format error.
2021-10-19 12:05:38 ========================================
2021-10-19 12:05:38 Code format error Please fix it according to the diff information:
2021-10-19 12:05:38 code format error
2021-10-19 12:05:38 diff --git a/paddle/fluid/framework/tcmpt_utils_test.cc b/paddle/fluid/framework/tcmpt_utils_test.cc
2021-10-19 12:05:38 index 6d850c96c0..200bd5429c 100644
2021-10-19 12:05:38 --- a/paddle/fluid/framework/tcmpt_utils_test.cc
2021-10-19 12:05:38 +++ b/paddle/fluid/framework/tcmpt_utils_test.cc
2021-10-19 12:05:38 @@ -61,7 +61,6 @@ TEST(TcmptUtils, VarToPtTensor) {
2021-10-19 12:05:38 // 3. check result
2021-10-19 12:05:38 ASSERT_EQ(tensor_x->backend(), expect_backend);
2021-10-19 12:05:38 ASSERT_EQ(tensor_x->data_type(), pt::DataType::kINT32);
2021-10-19 12:05:38 -
