Skip to content

Commit

Permalink
rebuild matmul pass: trt and gpu_cpu
Browse files Browse the repository at this point in the history
  • Loading branch information
Wangzheee committed Feb 8, 2022
1 parent 38da4e9 commit 29c4a74
Show file tree
Hide file tree
Showing 8 changed files with 15 additions and 14 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -410,16 +410,16 @@ def _optimize_fp32_graph(self, graph):
graph = self._apply_pass(graph, 'multi_gru_fuse_pass')
graph = self._apply_pass(graph, 'multi_gru_seq_fuse_pass')
graph = self._apply_pass(graph, 'seq_concat_fc_fuse_pass')
graph = self._apply_pass(graph, 'squeeze2_matmul_fuse_pass')
graph = self._apply_pass(graph, 'reshape2_matmul_fuse_pass')
graph = self._apply_pass(graph, 'flatten2_matmul_fuse_pass')
graph = self._apply_pass(graph, 'gpu_cpu_squeeze2_matmul_fuse_pass')
graph = self._apply_pass(graph, 'gpu_cpu_reshape2_matmul_fuse_pass')
graph = self._apply_pass(graph, 'gpu_cpu_flatten2_matmul_fuse_pass')
graph = self._apply_pass(graph, 'matmul_v2_scale_fuse_pass')
graph = self._apply_pass(graph, 'squared_mat_sub_fuse_pass')
graph = self._apply_pass(graph, 'is_test_pass')
graph = self._apply_pass(graph, 'map_matmul_v2_to_mul_pass')
graph = self._apply_pass(graph, 'map_matmul_v2_to_matmul_pass')
graph = self._apply_pass(graph, 'gpu_cpu_map_matmul_v2_to_mul_pass')
graph = self._apply_pass(graph, 'gpu_cpu_map_matmul_v2_to_matmul_pass')
graph = self._apply_pass(graph, 'matmul_scale_fuse_pass')
graph = self._apply_pass(graph, 'map_matmul_to_mul_pass')
graph = self._apply_pass(graph, 'gpu_cpu_map_matmul_to_mul_pass')
graph = self._apply_pass(graph, 'repeated_fc_relu_fuse_pass')
graph = self._apply_pass(graph, 'mkldnn_placement_pass',
['mkldnn_enabled_op_types'], [set()])
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -174,7 +174,7 @@ def test(self):
quant=False,
max_examples=50,
max_duration=1000,
passes=["flatten2_matmul_fuse_pass"])
passes=["gpu_cpu_flatten2_matmul_fuse_pass"])


if __name__ == "__main__":
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ def test(self):
self.run_and_statis(
quant=False,
max_examples=100,
passes=["map_matmul_to_mul_pass"],
passes=["gpu_cpu_map_matmul_to_mul_pass"],
max_duration=180)


Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,7 @@ def test(self):
self.run_and_statis(
quant=False,
max_examples=100,
passes=["map_matmul_v2_to_matmul_pass"])
passes=["gpu_cpu_map_matmul_v2_to_matmul_pass"])


if __name__ == "__main__":
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -110,8 +110,9 @@ def sample_program_config(self, draw):

def test(self):
self.run_and_statis(
quant=False, max_examples=100,
passes=["map_matmul_v2_to_mul_pass"])
quant=False,
max_examples=100,
passes=["gpu_cpu_map_matmul_v2_to_mul_pass"])


if __name__ == "__main__":
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,7 @@ def generate_input(type):
return program_config

def sample_predictor_configs(self, program_config):
# map_matmul_v2_to_matmul_pass will affect the type of final fused op
# gpu_cpu_map_matmul_v2_to_matmul_pass will affect the type of final fused op
fused_op = "matmul_v2"
input1_dim1 = program_config.inputs["input_data1"].shape[0]
input2_dim1 = program_config.inputs["input_data2"].shape[0]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -172,7 +172,7 @@ def test(self):
quant=False,
max_examples=50,
max_duration=1000,
passes=["reshape2_matmul_fuse_pass"])
passes=["gpu_cpu_reshape2_matmul_fuse_pass"])


if __name__ == "__main__":
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -180,7 +180,7 @@ def test(self):
quant=False,
max_examples=50,
max_duration=1000,
passes=["squeeze2_matmul_fuse_pass"])
passes=["gpu_cpu_squeeze2_matmul_fuse_pass"])


if __name__ == "__main__":
Expand Down

1 comment on commit 29c4a74

@paddle-bot-old
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Congratulations! Your pull request passed all required CI checks. You can ask the reviewer(s) to approve and merge. 🎉

Please sign in to comment.