matmul+activation fuse pass #43519

Merged: 38 commits, Jul 12, 2022

Commits (38)
46dec52
add method for post ops
Silv3S Jun 4, 2022
14cf236
format code
Silv3S Jun 4, 2022
c370d2c
gpd
Silv3S Jun 6, 2022
6e830ee
Merge branch 'develop' into matmul_activation
Silv3S Jun 9, 2022
acd609e
format style
Silv3S Jun 9, 2022
8889c29
add matmul+act test
Silv3S Jun 9, 2022
d055527
implement matmul+activation
Silv3S Jun 14, 2022
a518f94
whitespaces
Silv3S Jun 14, 2022
d65f39a
Merge branch 'PaddlePaddle:develop' into matmul_activation
Silv3S Jun 14, 2022
18eef24
code style
Silv3S Jun 14, 2022
6df8606
Merge branch 'matmul_activation' of https://github.com/Silv3S/Paddle …
Silv3S Jun 14, 2022
695a570
python code format
Silv3S Jun 14, 2022
d7ac698
Increase UT timeout
Silv3S Jun 14, 2022
e0de057
code format
Silv3S Jun 14, 2022
422c41c
Merge branch 'PaddlePaddle:develop' into matmul_activation
Silv3S Jun 23, 2022
4450745
Merge branch 'develop' into matmul_activation
Silv3S Jun 28, 2022
008bf1b
update style
Silv3S Jun 28, 2022
48827ec
generalize activation fuse passes
Silv3S Jun 29, 2022
d03da6c
Merge branch 'operator_activation_fuse_pass' into matmul_activation
Silv3S Jun 29, 2022
691a77c
change order
Silv3S Jun 29, 2022
9a84366
Unify activation GPD
Silv3S Jun 29, 2022
06d0f9d
Merge branch 'matmul_activation' of https://github.com/Silv3S/Paddle …
Silv3S Jun 29, 2022
e6ac214
Revert changes with op_act
Silv3S Jun 30, 2022
0b9a000
remove softmax mkldnn attrs
Silv3S Jun 30, 2022
cb4bccf
set common name for act attributes
Silv3S Jun 30, 2022
bab7fba
whitespace
Silv3S Jun 30, 2022
2cfd74c
Merge branch 'PaddlePaddle:develop' into matmul_activation
Silv3S Jun 30, 2022
3643924
append postops by helper function
Silv3S Jul 1, 2022
e29898d
ut style
Silv3S Jul 1, 2022
f9c6b0c
revert changes related to quantization
Silv3S Jul 3, 2022
942ba41
Reduce redundancy
Silv3S Jul 4, 2022
725161e
reduce number of parameters
Silv3S Jul 5, 2022
1baa195
Merge branch 'PaddlePaddle:develop' into matmul_activation
Silv3S Jul 5, 2022
873cd94
Merge branch 'PaddlePaddle:develop' into matmul_activation
Silv3S Jul 5, 2022
ba17cd4
trigger CI
Silv3S Jul 5, 2022
991431d
validate attribute
Silv3S Jul 6, 2022
a8368b9
Merge branch 'develop' into matmul_activation
Silv3S Jul 11, 2022
7e176a1
trim unit test
Silv3S Jul 11, 2022
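
Once merged, the pass is registered in CpuPassStrategy::EnableMKLDNN() (see the paddle_pass_builder.cc diff below), so any matmul whose output feeds a supported activation is fused automatically during oneDNN inference. A minimal usage sketch, assuming the standard paddle_infer C++ API; the model path is hypothetical:

#include "paddle_inference_api.h"

int main() {
  paddle_infer::Config config("./my_model");  // hypothetical model dir
  config.EnableMKLDNN();  // CPU pass list now includes
                          // matmul_activation_mkldnn_fuse_pass
  auto predictor = paddle_infer::CreatePredictor(config);
  // ... set inputs, then predictor->Run(); fused matmuls execute the
  // activation as a oneDNN post-op, saving one memory round-trip.
  return 0;
}
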
1 change: 1 addition & 0 deletions paddle/fluid/framework/ir/CMakeLists.txt
@@ -212,6 +212,7 @@ if(WITH_MKLDNN)
pass_library(shuffle_channel_mkldnn_detect_pass inference DIR mkldnn)
pass_library(fc_act_mkldnn_fuse_pass inference DIR mkldnn)
pass_library(elt_act_mkldnn_fuse_pass inference DIR mkldnn)
+pass_library(matmul_activation_mkldnn_fuse_pass inference DIR mkldnn)
pass_library(cpu_quantize_placement_pass base DIR mkldnn)
pass_library(cpu_quantize_pass inference DIR mkldnn)
pass_library(cpu_quantize_squash_pass inference DIR mkldnn)
1 change: 0 additions & 1 deletion paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass.cc
@@ -26,7 +26,6 @@ using string::PrettyLogDetail;

void ConvActivationMkldnnFusePass::ApplyImpl(Graph* graph) const {
auto act_types = paddle::platform::GetSupportedActivations();

std::vector<std::string> conv_types = {"conv2d"};

for (const auto& conv_type : conv_types)
281 changes: 281 additions & 0 deletions paddle/fluid/framework/ir/mkldnn/matmul_activation_mkldnn_fuse_pass.cc
@@ -0,0 +1,281 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/ir/mkldnn/matmul_activation_mkldnn_fuse_pass.h"

#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/platform/mkldnn_reuse.h"
#include "paddle/fluid/string/pretty_log.h"

namespace paddle {
namespace framework {
namespace ir {

using string::PrettyLogDetail;

void MatmulActivationMkldnnFusePass::ApplyImpl(Graph* graph) const {
auto act_types = paddle::platform::GetSupportedActivations();
std::vector<std::string> matmul_types = {"matmul"};

for (const auto& matmul_type : matmul_types)
for (auto& act_type : act_types) {
FuseMatmulAct(graph, matmul_type, act_type);
}
}

void MatmulActivationMkldnnFusePass::FuseMatmulAct(
Graph* graph, const std::string& matmul_type, std::string& act_type) const {
PADDLE_ENFORCE_NOT_NULL(
graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
FusePassBase::Init(matmul_type + "_" + act_type + "_mkldnn_fuse_pass", graph);

GraphPatternDetector gpd;
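// OperatorActivation is the shared two-op pattern: a matmul_type node
// whose output tensor is consumed by an act_type node.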
patterns::OperatorActivation matmul_act_pattern(
gpd.mutable_pattern(), "matmul_activation_mkldnn_fuse");
matmul_act_pattern(matmul_type, act_type);

int found_matmul_activation_count = 0;
auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
Graph* g) {
VLOG(4) << "handle " + matmul_type + "+" + act_type + " fuse";

if (!IsCompat(subgraph, g)) {
LOG(WARNING) << "matmul_activation_mkldnn_fuse_pass op compat failed.";
return;
}

GET_IR_NODE_FROM_SUBGRAPH(matmul, preceding_op, matmul_act_pattern);
GET_IR_NODE_FROM_SUBGRAPH(matmul_out, preceding_op_out, matmul_act_pattern);
GET_IR_NODE_FROM_SUBGRAPH(activation, activation, matmul_act_pattern);
GET_IR_NODE_FROM_SUBGRAPH(
activation_out, activation_out, matmul_act_pattern);

OpDesc* matmul_op = matmul->Op();
OpDesc* act_op = activation->Op();

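// Copy the activation's own attributes (e.g. clip's min/max) onto the
// matmul op, renamed to the fused-attribute names from GetAttributeMap.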
auto attr_map = paddle::platform::GetAttributeMap(act_type);
for (const auto& attrs : attr_map) {
if (act_op->HasAttr(attrs.first)) {
matmul_op->SetAttr(attrs.second, act_op->GetAttr(attrs.first));
}
}

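// oneDNN has separate eltwise kernels for the two gelu variants, so
// generic "gelu" is remapped to gelu_tanh or gelu_erf via "approximate".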
if (act_type == "gelu" && activation->Op()->HasAttr("approximate")) {
act_type = BOOST_GET_CONST(bool, activation->Op()->GetAttr("approximate"))
? "gelu_tanh"
: "gelu_erf";
}
matmul_op->SetAttr("fuse_activation", act_type);
matmul_op->SetOutput("Out", {activation_out->Name()});

IR_NODE_LINK_TO(matmul, activation_out);
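// matmul now writes directly to activation_out, so the standalone
// activation op and the intermediate matmul_out tensor are dead.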
GraphSafeRemoveNodes(graph, {activation, matmul_out});
found_matmul_activation_count++;
};

gpd(graph, handler);
AddStatis(found_matmul_activation_count);
if (!Has("disable_logs") || !Get<bool>("disable_logs")) {
PrettyLogDetail("--- fused %d matmul with %s activation",
found_matmul_activation_count,
act_type);
}
Review comment from a Contributor on lines +89 to +93:

It would be nice to move this if inside PrettyLogDetail but I won't enforce it.
}

MatmulActivationMkldnnFusePass::MatmulActivationMkldnnFusePass() {
AddOpCompat(OpCompat("matmul"))
.AddInput("X")
.IsTensor()
.End()
.AddInput("Y")
.IsTensor()
.End()
.AddOutput("Out")
.IsTensor()
.End()
.AddAttr("alpha")
.IsType<float>()
.End()
.AddAttr("transpose_X")
.IsType<bool>()
.End()
.AddAttr("transpose_Y")
.IsType<bool>()
.End();

AddOpCompat(OpCompat("abs"))
.AddInput("X")
.IsTensor()
.End()
.AddOutput("Out")
.IsTensor()
.End();

AddOpCompat(OpCompat("clip"))
.AddInput("X")
.IsTensor()
.End()
.AddOutput("Out")
.IsTensor()
.End()
.AddAttr("min")
.End()
.AddAttr("max")
.End();

AddOpCompat(OpCompat("gelu"))
.AddInput("X")
.IsTensor()
.End()
.AddOutput("Out")
.IsTensor()
.End()
.AddAttr("approximate")
.IsType<bool>()
.End();

AddOpCompat(OpCompat("hard_sigmoid"))
.AddInput("X")
.IsTensor()
.End()
.AddOutput("Out")
.IsTensor()
.End()
.AddAttr("slope")
.IsOptional()
.IsType<float>()
.End()
.AddAttr("offset")
.IsOptional()
.IsType<float>()
.End();

AddOpCompat(OpCompat("hard_swish"))
.AddInput("X")
.IsTensor()
.End()
.AddOutput("Out")
.IsTensor()
.End()
.AddAttr("threshold")
.IsOptional()
.IsType<float>()
.End()
.AddAttr("scale")
.IsOptional()
.IsType<float>()
.End()
.AddAttr("offset")
.IsOptional()
.IsType<float>()
.End();

AddOpCompat(OpCompat("leaky_relu"))
.AddInput("X")
.IsTensor()
.End()
.AddOutput("Out")
.IsTensor()
.End()
.AddAttr("alpha")
.IsType<float>()
.End();

AddOpCompat(OpCompat("mish"))
.AddInput("X")
.IsTensor()
.End()
.AddOutput("Out")
.IsTensor()
.End();

AddOpCompat(OpCompat("relu"))
.AddInput("X")
.IsTensor()
.End()
.AddOutput("Out")
.IsTensor()
.End();

AddOpCompat(OpCompat("relu6"))
.AddInput("X")
.IsTensor()
.End()
.AddOutput("Out")
.IsTensor()
.End()
.AddAttr("threshold")
.IsType<float>()
.End();

AddOpCompat(OpCompat("sigmoid"))
.AddInput("X")
.IsTensor()
.End()
.AddOutput("Out")
.IsTensor()
.End();

AddOpCompat(OpCompat("sqrt"))
.AddInput("X")
.IsTensor()
.End()
.AddOutput("Out")
.IsTensor()
.End();

AddOpCompat(OpCompat("swish"))
.AddInput("X")
.IsTensor()
.End()
.AddOutput("Out")
.IsTensor()
.End()
.AddAttr("beta")
.IsType<float>()
.End();

AddOpCompat(OpCompat("tanh"))
.AddInput("X")
.IsTensor()
.End()
.AddOutput("Out")
.IsTensor()
.End();
}

} // namespace ir
} // namespace framework
} // namespace paddle

REGISTER_PASS(matmul_activation_mkldnn_fuse_pass,
paddle::framework::ir::MatmulActivationMkldnnFusePass);

REGISTER_PASS_CAPABILITY(matmul_activation_mkldnn_fuse_pass)
.AddCombination(
paddle::framework::compatible::OpVersionComparatorCombination()
.LE("matmul", 1)
.EQ("abs", 0)
.LE("clip", 1)
.EQ("gelu", 0)
.EQ("hard_sigmoid", 0)
.LE("hard_swish", 0)
.LE("leaky_relu", 1)
.LE("mish", 1)
.EQ("relu", 0)
.EQ("relu6", 0)
.EQ("sigmoid", 0)
.EQ("sqrt", 0)
.EQ("swish", 0)
.EQ("tanh", 0));
41 changes: 41 additions & 0 deletions paddle/fluid/framework/ir/mkldnn/matmul_activation_mkldnn_fuse_pass.h
@@ -0,0 +1,41 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <string>

#include "paddle/fluid/framework/ir/fuse_pass_base.h"
#include "paddle/fluid/framework/ir/graph.h"

namespace paddle {
namespace framework {
namespace ir {

class MatmulActivationMkldnnFusePass : public FusePassBase {
public:
MatmulActivationMkldnnFusePass();
virtual ~MatmulActivationMkldnnFusePass() {}

protected:
void ApplyImpl(Graph *graph) const override;

void FuseMatmulAct(Graph *graph,
const std::string &matmul_type,
std::string &act_type) const;
};

} // namespace ir
} // namespace framework
} // namespace paddle
@@ -50,9 +50,9 @@ void MainTest(const std::string& activation_type) {
const auto* op = node->Op();
ASSERT_TRUE(op->HasAttr("use_mkldnn"));
EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn")));
-ASSERT_TRUE(op->HasAttr("fuse_activation_type"));
+ASSERT_TRUE(op->HasAttr("fuse_activation"));
-auto activation_type =
-    BOOST_GET_CONST(std::string, op->GetAttr("fuse_activation_type"));
+auto fused_activation =
+    BOOST_GET_CONST(std::string, op->GetAttr("fuse_activation"));
-EXPECT_EQ(activation_type.compare(activation_type), 0);
+EXPECT_EQ(fused_activation.compare(activation_type), 0);
}
}
1 change: 1 addition & 0 deletions paddle/fluid/inference/api/paddle_pass_builder.cc
@@ -302,6 +302,7 @@ void CpuPassStrategy::EnableMKLDNN() {
"softplus_activation_mkldnn_fuse_pass", //
"shuffle_channel_mkldnn_detect_pass", //
"elt_act_mkldnn_fuse_pass", //
+"matmul_activation_mkldnn_fuse_pass", //
// TODO(intel): Please fix the bug on windows.
// https://github.com/PaddlePaddle/Paddle/issues/29710
// "mkldnn_inplace_pass", // This pass should be activated after
3 changes: 3 additions & 0 deletions paddle/fluid/operators/mkldnn/matmul_mkldnn_op.cc
@@ -17,6 +17,7 @@ limitations under the License. */
#include <tuple>

#include "paddle/fluid/framework/convert_utils.h"
+#include "paddle/fluid/platform/mkldnn_reuse.h"

using dnnl::memory;
using dnnl::primitive;
@@ -453,6 +454,8 @@ class MatMulMKLDNNHandler
matmul_attrs.set_output_scales(0, {scale_out});
}

+paddle::platform::AppendActivation(ctx, post_operations);

matmul_attrs.set_post_ops(post_operations);
return matmul_attrs;
}
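
For context, paddle::platform::AppendActivation (from mkldnn_reuse.h) is what turns the fuse_activation attribute into an actual oneDNN post-op. A simplified, hypothetical sketch of the idea, assuming the oneDNN 2.x append_eltwise(scale, algorithm, alpha, beta) signature; the real helper covers all supported activations and reads fuse_alpha/fuse_beta from the op context:

#include <string>
#include <unordered_map>

#include "dnnl.hpp"

void AppendActivationSketch(const std::string& fuse_activation,
                            float fuse_alpha, float fuse_beta,
                            dnnl::post_ops* post_operations) {
  // Map the fused activation name onto a oneDNN eltwise algorithm.
  static const std::unordered_map<std::string, dnnl::algorithm> algo_map = {
      {"relu", dnnl::algorithm::eltwise_relu},
      {"tanh", dnnl::algorithm::eltwise_tanh},
      {"sigmoid", dnnl::algorithm::eltwise_logistic},
      {"swish", dnnl::algorithm::eltwise_swish},
      {"gelu_tanh", dnnl::algorithm::eltwise_gelu_tanh},
      {"gelu_erf", dnnl::algorithm::eltwise_gelu_erf}};
  auto it = algo_map.find(fuse_activation);
  if (it != algo_map.end()) {
    // alpha/beta carry per-activation parameters (leaky_relu's slope,
    // clip's bounds, ...) copied onto the op earlier by the fuse pass.
    post_operations->append_eltwise(1.0f /*scale*/, it->second, fuse_alpha,
                                    fuse_beta);
  }
}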