fix conv2d convert test (#35627)
* support nnadapter and ascend310

* modify code

* add anchor_generator convert test

* add gelu convert test

* add conv2d convert test

* modify anchor_operator convert test

* modify conv2d test

* modify con2d convert test

* modify conv2d convert test

* modify conv2d convert test

* modify conv2d test

* fix WITH_PYTHON compile error

* modify test file

* modify test file

* modify test file

* modify test file

* modify test file

* modify test file

* modify test file

* modify test file

Co-authored-by: xiaoxiaohehe001 <hiteezsf@163.com>
Co-authored-by: jiweibo <jiweibo@baidu.com>
3 people committed Sep 22, 2021
1 parent be4d002 commit 1238115
Showing 4 changed files with 342 additions and 108 deletions.
23 changes: 21 additions & 2 deletions paddle/fluid/inference/tensorrt/convert/conv2d_op.cc
@@ -86,7 +86,20 @@ void ConvertConv2d(TensorRTEngine* engine, const framework::proto::OpDesc& op,
   nvinfer1::DimsHW nv_ksize(filter_h, filter_w);
   nvinfer1::DimsHW nv_dilations(dilations[0], dilations[1]);
   nvinfer1::DimsHW nv_strides(strides[0], strides[1]);
-  nvinfer1::DimsHW nv_paddings(paddings[0], paddings[1]);
+  nvinfer1::DimsHW nv_paddings;
+  nvinfer1::Dims nv_pre_paddings;
+  nvinfer1::Dims nv_post_paddings;
+  if (paddings.size() == 2) {
+    nv_paddings.d[0] = paddings[0];
+    nv_paddings.d[1] = paddings[1];
+  } else {
+    nv_pre_paddings.nbDims = 2;
+    nv_post_paddings.nbDims = 2;
+    nv_pre_paddings.d[0] = paddings[0];
+    nv_pre_paddings.d[1] = paddings[2];
+    nv_post_paddings.d[0] = paddings[1];
+    nv_post_paddings.d[1] = paddings[3];
+  }

   TensorRTEngine::Weight weight{nvinfer1::DataType::kFLOAT,
                                 static_cast<void*>(weight_data),
@@ -116,7 +129,13 @@ void ConvertConv2d(TensorRTEngine* engine, const framework::proto::OpDesc& op,
       layer, platform::errors::Fatal("TensorRT create conv2d/conv2d_transpose"
                                      " layer failed."));
   layer->setStride(nv_strides);
-  layer->setPadding(nv_paddings);
+  if (paddings.size() == 2) {
+    layer->setPadding(nv_paddings);
+  } else {
+    layer->setPrePadding(nv_pre_paddings);
+    layer->setPostPadding(nv_post_paddings);
+  }
+
   layer->setNbGroups(groups);
   if (padding_algorithm == "SAME") {
     layer->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);
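Note: to make the converter change above easier to follow, here is a minimal standalone Python sketch (a hypothetical helper, not Paddle or TensorRT API) of how the new code splits the paddings attribute, assuming Paddle's [top, bottom, left, right] order for 4-element paddings:

def split_paddings(paddings):
    # 2-element [pad_h, pad_w]: symmetric padding, as before.
    if len(paddings) == 2:
        return {"padding": (paddings[0], paddings[1])}
    # 4-element [top, bottom, left, right]: asymmetric padding, mirroring
    # the nv_pre_paddings / nv_post_paddings assignments in the diff above.
    return {
        "pre_padding": (paddings[0], paddings[2]),   # (top, left)
        "post_padding": (paddings[1], paddings[3]),  # (bottom, right)
    }

assert split_paddings([0, 3]) == {"padding": (0, 3)}
assert split_paddings([1, 2, 3, 4]) == {
    "pre_padding": (1, 3), "post_padding": (2, 4)
}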
18 changes: 8 additions & 10 deletions paddle/fluid/inference/tensorrt/op_teller.cc
@@ -149,13 +149,6 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
     return false;

   for (auto& teller : tellers_) {
-    if (op_type == "depthwise_conv2d") {
-      std::vector<int> paddings =
-          BOOST_GET_CONST(std::vector<int>, desc.GetAttr("paddings"));
-
-      if (paddings.size() > 2) return false;
-    }
-
     if (op_type == "relu" || op_type == "relu6" || op_type == "tanh" ||
         op_type == "sigmoid") {
       auto* block = desc.Block();
@@ -208,9 +201,6 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
       std::vector<int> paddings =
           BOOST_GET_CONST(std::vector<int>, desc.GetAttr("paddings"));

-      // conv2d and conv2d_transpose need padding check
-      if (paddings.size() > 2 && op_type != "conv2d_fusion") return false;
-
       if (desc.Input("Input").size() != 1) {
         VLOG(3) << "TRT Conv2d expect 1 input, but got "
                 << desc.Input("Input").size() << " input.";
@@ -223,6 +213,14 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
         return false;
       }

+      if (desc.HasAttr("padding_algorithm")) {
+        auto padding_algorithm =
+            BOOST_GET_CONST(std::string, desc.GetAttr("padding_algorithm"));
+        if (padding_algorithm == "SAME" || padding_algorithm == "VALID") {
+          return false;
+        }
+      }
+
       if (desc.HasAttr("enable_int8")) {
         if (op_type == "conv2d" || op_type == "conv2d_fusion") {
           if (!desc.HasAttr("Input_scale")) {
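Note: restated as plain Python (a hypothetical standalone predicate, not the OpTeller API), the net effect of this teller change is roughly:

def conv2d_convertible(attrs):
    # The blanket rejection of 4-element paddings (for depthwise_conv2d and
    # conv2d/conv2d_transpose) is gone, since the converter now handles
    # asymmetric paddings. Instead, ops with a SAME/VALID padding_algorithm
    # are rejected up front and left to run outside TensorRT.
    if attrs.get("padding_algorithm") in ("SAME", "VALID"):
        return False
    return True

assert conv2d_convertible({"paddings": [1, 2, 3, 4]})
assert not conv2d_convertible({"padding_algorithm": "SAME"})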
@@ -22,121 +22,143 @@

 class TrtConvertConv2dTest(TrtLayerAutoScanTest):
     def is_program_valid(self, program_config: ProgramConfig) -> bool:
-        # TODO: This is just the example to remove the wrong attrs.
         inputs = program_config.inputs
         weights = program_config.weights
         attrs = [
             program_config.ops[i].attrs
             for i in range(len(program_config.ops))
         ]

-        # groups restriction.
         if inputs['input_data'].shape[1] != weights['conv2d_weight'].shape[
                 1] * attrs[0]['groups']:
             return False

-        # others restriction, todo.
-
         return True

     def sample_program_configs(self):
-        def generate_input1(attrs: List[Dict[str, Any]]):
-            # TODO: This is just the example to illustrate the releation between axis and input.
-            # for each attr, can generate different datas
+        self.trt_param.workspace_size = 1073741824
+
+        def generate_input1(batch, attrs: List[Dict[str, Any]]):
             if attrs[0]['groups'] == 1:
-                return np.ones([2, 3, 64, 64]).astype(np.float32)
+                return np.ones([batch, 3, 64, 64]).astype(np.float32)
+            elif attrs[0]['groups'] == 2:
+                return np.ones([batch, 6, 64, 64]).astype(np.float32)
             else:
-                return np.ones([1, 3, 64, 64]).astype(np.float32)
+                return np.ones([batch, 9, 64, 64]).astype(np.float32)

         def generate_weight1(attrs: List[Dict[str, Any]]):
             return np.random.random([24, 3, 3, 3]).astype(np.float32)

-        # for strides in [[1, 1], [2, 2], [1, 2], [2, 3]]:
-        #     for paddings in [[0, 3], [3, 1], [1, 1, 1, 1]]:
-        #         for groups in [1, 2]:
-        #             for padding_algotithm in ['EXPLICIT', 'SAME', 'VALID']:
-        #                 for dilations in [[1, 1], [1, 2]]:
-        #                     for data_format in ['NCHW']:
-        for strides in [[1, 1], [2, 2]]:
-            for paddings in [[0, 3], [3, 1]]:
-                for groups in [1]:
-                    for padding_algotithm in ['EXPLICIT']:
-                        for dilations in [[1, 1]]:
-                            for data_format in ['NCHW']:
-
-                                dics = [{
-                                    "data_fromat": data_format,
-                                    "dilations": dilations,
-                                    "padding_algorithm": padding_algotithm,
-                                    "groups": groups,
-                                    "paddings": paddings,
-                                    "strides": strides,
-                                    "data_format": data_format
-                                }, {}]
-
-                                ops_config = [{
-                                    "op_type": "conv2d",
-                                    "op_inputs": {
-                                        "Input": ["input_data"],
-                                        "Filter": ["conv2d_weight"]
-                                    },
-                                    "op_outputs": {
-                                        "Output": ["conv_output_data"]
-                                    },
-                                    "op_attrs": dics[0]
-                                }, {
-                                    "op_type": "relu",
-                                    "op_inputs": {
-                                        "X": ["conv_output_data"]
-                                    },
-                                    "op_outputs": {
-                                        "Out": ["relu_output_data"]
-                                    },
-                                    "op_attrs": dics[1]
-                                }]
-                                ops = self.generate_op_config(ops_config)
-
-                                program_config = ProgramConfig(
-                                    ops=ops,
-                                    weights={
-                                        "conv2d_weight": TensorConfig(
-                                            data_gen=partial(generate_weight1,
-                                                             dics))
-                                    },
-                                    inputs={
-                                        "input_data": TensorConfig(
-                                            data_gen=partial(generate_input1,
-                                                             dics))
-                                    },
-                                    outputs=["relu_output_data"])
-
-                                yield program_config
+        for batch in [1, 2, 4]:
+            for strides in [[1, 1], [2, 2], [1, 2]]:
+                for paddings in [[0, 3], [1, 2, 3, 4]]:
+                    for groups in [1, 2, 3]:
+                        for padding_algorithm in ['EXPLICIT', 'SAME', 'VALID']:
+                            for dilations in [[1, 1], [2, 2], [1, 2]]:
+                                for data_format in ['NCHW']:
+
+                                    dics = [{
+                                        "data_fromat": data_format,
+                                        "dilations": dilations,
+                                        "padding_algorithm": padding_algorithm,
+                                        "groups": groups,
+                                        "paddings": paddings,
+                                        "strides": strides,
+                                        "data_format": data_format
+                                    }, {}]
+
+                                    if padding_algorithm == 'EXPLICIT':
+                                        ops_config = [{
+                                            "op_type": "conv2d",
+                                            "op_inputs": {
+                                                "Input": ["input_data"],
+                                                "Filter": ["conv2d_weight"]
+                                            },
+                                            "op_outputs": {
+                                                "Output": ["conv_output_data"]
+                                            },
+                                            "op_attrs": dics[0]
+                                        }, {
+                                            "op_type": "relu",
+                                            "op_inputs": {
+                                                "X": ["conv_output_data"]
+                                            },
+                                            "op_outputs": {
+                                                "Out": ["output_data"]
+                                            },
+                                            "op_attrs": dics[1]
+                                        }]
+                                    else:
+                                        ops_config = [{
+                                            "op_type": "conv2d",
+                                            "op_inputs": {
+                                                "Input": ["input_data"],
+                                                "Filter": ["conv2d_weight"]
+                                            },
+                                            "op_outputs": {
+                                                "Output": ["output_data"]
+                                            },
+                                            "op_attrs": dics[0]
+                                        }]
+                                    ops = self.generate_op_config(ops_config)
+
+                                    program_config = ProgramConfig(
+                                        ops=ops,
+                                        weights={
+                                            "conv2d_weight":
+                                            TensorConfig(data_gen=partial(
+                                                generate_weight1, dics))
+                                        },
+                                        inputs={
+                                            "input_data":
+                                            TensorConfig(data_gen=partial(
+                                                generate_input1, batch, dics))
+                                        },
+                                        outputs=["output_data"])
+
+                                    yield program_config

     def sample_predictor_configs(
             self, program_config) -> (paddle_infer.Config, List[int], float):
         def generate_dynamic_shape(attrs):
-            if len(attrs[0]['paddings']) == 4:
+            if attrs[0]['groups'] == 1:
                 self.dynamic_shape.min_input_shape = {
                     "input_data": [1, 3, 32, 32],
-                    '': []
+                    "output_data": [1, 24, 32, 32]
                 }
                 self.dynamic_shape.max_input_shape = {
                     "input_data": [4, 3, 64, 64],
-                    '': []
+                    "output_data": [4, 24, 64, 64]
                 }
                 self.dynamic_shape.opt_input_shape = {
                     "input_data": [1, 3, 64, 64],
-                    '': []
+                    "output_data": [1, 24, 64, 64]
                 }
+            elif attrs[0]['groups'] == 2:
+                self.dynamic_shape.min_input_shape = {
+                    "input_data": [1, 6, 32, 32],
+                    "output_data": [1, 24, 32, 32]
+                }
+                self.dynamic_shape.max_input_shape = {
+                    "input_data": [4, 6, 64, 64],
+                    "output_data": [4, 24, 64, 64]
+                }
+                self.dynamic_shape.opt_input_shape = {
+                    "input_data": [1, 6, 64, 64],
+                    "output_data": [1, 24, 64, 64]
+                }
             else:
                 self.dynamic_shape.min_input_shape = {
-                    "input_data": [1, 3, 32, 32]
+                    "input_data": [1, 9, 32, 32],
+                    "output_data": [1, 24, 32, 32]
                 }
                 self.dynamic_shape.max_input_shape = {
-                    "input_data": [4, 3, 64, 64]
+                    "input_data": [4, 9, 64, 64],
+                    "output_data": [4, 24, 64, 64]
                 }
                 self.dynamic_shape.opt_input_shape = {
-                    "input_data": [1, 3, 64, 64]
+                    "input_data": [1, 9, 64, 64],
+                    "output_data": [1, 24, 64, 64]
                 }

         def clear_dynamic_shape():
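Note: a quick aside on the shapes used in generate_input1 and generate_dynamic_shape above, since they are easy to misread. For grouped conv2d, the input channel count must equal filter.shape[1] * groups, while the output channel count is filter.shape[0] regardless of groups. A small illustrative check with the fixed [24, 3, 3, 3] weight:

filter_shape = [24, 3, 3, 3]  # [out_channels, in_channels // groups, kh, kw]
for groups in [1, 2, 3]:
    in_channels = filter_shape[1] * groups   # 3, 6, 9 -> "input_data" channels
    out_channels = filter_shape[0]           # always 24 -> "output_data" channels
    print(groups, in_channels, out_channels)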
@@ -145,11 +167,7 @@ def clear_dynamic_shape():
             self.dynamic_shape.opt_input_shape = {}

         def generate_trt_nodes_num(attrs, dynamic_shape):
-            # TODO: This is just the example, need to be fixed.
-            if len(attrs[0]['paddings']) == 4:
-                return 1, 2
-            else:
-                return 1, 2
+            return 1, 2

         attrs = [
             program_config.ops[i].attrs
@@ -169,6 +187,7 @@ def generate_trt_nodes_num(attrs, dynamic_shape):
                 attrs, False), (1e-5, 1e-5)

         # for dynamic_shape
+
         generate_dynamic_shape(attrs)
         self.trt_param.precision = paddle_infer.PrecisionType.Float32
         yield self.create_inference_config(), generate_trt_nodes_num(attrs,
@@ -181,29 +200,18 @@ def generate_trt_nodes_num(attrs, dynamic_shape):
                 attrs, True), (1e-5, 1e-5)

     def add_skip_trt_case(self):
-        # TODO(wilber): This is just the example to illustrate the skip usage.
         def teller1(program_config, predictor_config):
-            if len(program_config.ops[0].attrs['paddings']) == 4:
+            if program_config.ops[0].attrs[
+                    'padding_algorithm'] == "SAME" or program_config.ops[
+                        0].attrs['padding_algorithm'] == "VALID":
                 return True
             return False

         self.add_skip_case(
             teller1, SkipReasons.TRT_NOT_IMPLEMENTED,
-            "NOT Implemented: we need to add support in the future ....TODO, just for the example"
+            "When padding_algorithm is 'SAME' or 'VALID', Trt dose not support. In this case, trt build error is caused by scale op."
         )

-        def teller2(program_config, predictor_config):
-            if (
-                    program_config.ops[0].attrs['dilations'][0] == 1 and
-                    program_config.ops[0].attrs['dilations'][0] == 2
-            ) or program_config.ops[0].attrs['padding_algorithm'] != 'EXPLICIT':
-                return True
-            return False
-
-        self.add_skip_case(teller2, SkipReasons.TRT_NOT_SUPPORT,
-                           "TODO, just for the example")
+        pass

     def test(self):
         self.add_skip_trt_case()
         self.run_test()
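Note: for a sense of coverage, a small standalone sketch (hypothetical, outside the TrtLayerAutoScanTest harness) of how much of the sampled grid teller1 skips at the TensorRT comparison stage:

from itertools import product

total = skipped = 0
for batch, strides, paddings, groups, algo, dilations in product(
        [1, 2, 4],                      # batch
        [[1, 1], [2, 2], [1, 2]],       # strides
        [[0, 3], [1, 2, 3, 4]],         # paddings
        [1, 2, 3],                      # groups
        ['EXPLICIT', 'SAME', 'VALID'],  # padding_algorithm
        [[1, 1], [2, 2], [1, 2]]):      # dilations
    total += 1
    if algo in ('SAME', 'VALID'):       # teller1 -> TRT_NOT_IMPLEMENTED skip
        skipped += 1
print(skipped, total)  # 324 of 486 combinations are skipped for TRT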
