Merge pull request apache#18 from heliqi/paddle_frontend
Paddle frontend add unary function
jiangjiajun committed Sep 2, 2021
2 parents fabed17 + d45eac0 commit 5ae3b5b
Showing 2 changed files with 57 additions and 64 deletions.
48 changes: 22 additions & 26 deletions python/tvm/relay/frontend/paddlepaddle.py
@@ -31,6 +31,7 @@
 from .. import op as _op
 from .common import (
     fold_constant,
+    get_relay_op,
     infer_shape,
     infer_type,
     infer_value,
@@ -75,19 +76,14 @@ def _infer_value(x, params):
     return x


-def convert_activation(g, op, block):
+def convert_unary_op(g, op, block):
     """Operator converter for all the unary operators."""

-    op_map = {
-        "exp": _op.exp,
-        "relu": _op.nn.relu,
-        "tanh": _op.tanh,
-        "sqrt": _op.sqrt,
-        "erf": _op.erf,
-        "abs": _op.abs,
-        "sigmoid": _op.sigmoid,
-    }
-    act_func = op_map[op.type]
+    op_map = {}
+    if op.type in op_map:
+        act_func = op_map[op.type]
+    else:
+        act_func = get_relay_op(op.type)
     out = act_func(g.get_node(op.input("X")[0]))
     g.add_node(op.output("Out")[0], out)
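The rewrite above stops hard-coding one Relay callable per activation: anything not pinned in op_map falls back to get_relay_op, which resolves the Paddle op name against Relay's operator namespaces. A minimal sketch of that resolution strategy (resolve_unary is an illustrative stand-in, not the actual helper in tvm.relay.frontend.common):

    # Illustrative only: resolve an op name the way a get_relay_op-style
    # helper might, by probing Relay's operator namespaces in order.
    import tvm.relay.op as _op

    def resolve_unary(op_name):
        for module in (_op, _op.nn):
            func = getattr(module, op_name, None)
            if func is not None:
                return func
        raise KeyError("no Relay operator named %r" % op_name)

    assert resolve_unary("tanh") is _op.tanh      # top-level namespace
    assert resolve_unary("relu") is _op.nn.relu   # falls through to nn

One consequence of the fallback: any Paddle op whose name matches a Relay operator now converts for free, at the cost of a less explicit allowlist.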
@@ -138,8 +134,8 @@ def convert_batch_norm(g, op, block):
 def convert_bmm(g, op, block):
     """Operator converter for bmm."""

-    x = g.get_node(op.input('X')[0])
-    y = g.get_node(op.input('Y')[0])
+    x = g.get_node(op.input("X")[0])
+    y = g.get_node(op.input("Y")[0])
     x_shape = infer_shape(x)
     y_shape = infer_shape(y)
     if x_shape[0] == 1 and y_shape[0] == 1:
@@ -148,23 +144,23 @@ def convert_bmm(g, op, block):
         y = _op.transpose(y, [1, 0])
         out = _op.nn.dense(x, y)
         out = _op.expand_dims(out, axis=0)
-        g.add_node(op.output('Out')[0], out)
+        g.add_node(op.output("Out")[0], out)
     else:
         y = _op.transpose(y, [0, 2, 1])
         out = _op.nn.batch_matmul(x, y)
-        g.add_node(op.output('Out')[0], out)
+        g.add_node(op.output("Out")[0], out)


 def convert_interpolate2d(g, op, x):
     """Operator converter for interpolate 2D (dims == 4)."""

     def get_interpolate_mode(op):
         """convert 'interp_method' attr of paddle to tvm"""

         interp_method = op.attr("interp_method")
         align_corners = op.attr("align_corners")
         align_mode = op.attr("align_mode")

         rounding_method = ""
         if interp_method == "nearest":
             interp_method = "nearest_neighbor"
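On the bmm path above, the extra transpose exists because Relay's nn.batch_matmul multiplies against the second operand transposed (out[i] = x[i] @ y[i].T under its default convention), while Paddle's bmm is a plain batched matmul. A small numpy check of that reasoning (a sketch; relay_batch_matmul below only models the convention):

    import numpy as np

    def relay_batch_matmul(a, b):
        # Model of Relay's default convention: out[i] = a[i] @ b[i].T
        return np.matmul(a, b.transpose(0, 2, 1))

    x = np.random.rand(4, 2, 3)
    y = np.random.rand(4, 3, 5)

    # convert_bmm feeds in transpose(y, [0, 2, 1]), so the implicit .T cancels
    out = relay_batch_matmul(x, y.transpose(0, 2, 1))
    np.testing.assert_allclose(out, np.matmul(x, y))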
@@ -325,8 +321,7 @@ def convert_conv2d_transpose(g, op, block):
     paddings = op.attr("paddings")
     padding_algorithm = op.attr("padding_algorithm")
     strides = op.attr("strides")
-    output_padding = op.attr("output_padding") if op.attr(
-        "output_padding") else [0, 0]
+    output_padding = op.attr("output_padding") if op.attr("output_padding") else [0, 0]

     kernel = g.get_node(op.input("Filter")[0])
     input_x = g.get_node(op.input("Input")[0])
@@ -930,9 +925,9 @@ def convert_squeeze(g, op, block):
 def convert_transpose(g, op, block):
     """Operator converter for transpose."""

-    perm = op.attr('axis')
-    out = _op.transpose(g.get_node(op.input('X')[0]), axes=perm)
-    g.add_node(op.output('Out')[0], out)
+    perm = op.attr("axis")
+    out = _op.transpose(g.get_node(op.input("X")[0]), axes=perm)
+    g.add_node(op.output("Out")[0], out)


 def convert_unsqueeze(g, op, block):
@@ -946,6 +941,7 @@ def convert_unsqueeze(g, op, block):


 _convert_map = {
+    "abs": convert_unary_op,
     "arg_max": convert_arg_max,
     "assign": convert_assign,
     "batch_norm": convert_batch_norm,
@@ -964,7 +960,7 @@ def convert_unsqueeze(g, op, block):
     "elementwise_mul": convert_elementwise_op,
     "elementwise_sub": convert_elementwise_op,
     "equal": convert_equal,
-    "exp": convert_activation,
+    "exp": convert_unary_op,
     "feed": convert_feed,
     "fill_any_like": convert_fill_any_like,
     "fill_constant": convert_fill_constant,
@@ -982,15 +978,15 @@ def convert_unsqueeze(g, op, block):
     "pad1d": convert_padding,
     "pad2d": convert_padding,
     "pad3d": convert_padding,
-    "relu": convert_activation,
+    "relu": convert_unary_op,
     "reshape2": convert_reshape,
     "scale": convert_scale,
     "shape": convert_shape,
-    "sigmoid": convert_activation,
+    "sigmoid": convert_unary_op,
     "slice": convert_slice,
     "softmax": convert_softmax,
     "squeeze2": convert_squeeze,
-    "tanh": convert_activation,
+    "tanh": convert_unary_op,
     "transpose2": convert_transpose,
     "unsqueeze2": convert_unsqueeze,
 }
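With these entries in place, every unary op shares one table-driven path. The driver that consumes _convert_map sits outside this diff; a hypothetical sketch of the dispatch it presumably performs:

    # Hypothetical driver loop (not part of this diff): look each Paddle op
    # up in the table and hand it to its converter.
    def convert_block(g, block, convert_map):
        for op in block.ops:
            if op.type == "fetch":
                continue
            if op.type not in convert_map:
                raise NotImplementedError("no converter for op %s" % op.type)
            convert_map[op.type](g, op, block)  # e.g. convert_unary_op(g, op, block)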
73 changes: 35 additions & 38 deletions tests/python/frontend/paddlepaddle/test_forward.py
@@ -130,6 +130,32 @@ def verify_model(func, input_data, rtol=1e-5, atol=1e-5, input_shape=None):
     tvm.testing.assert_allclose(baseline_output, compiled_output, rtol=rtol, atol=atol)


+@tvm.testing.uses_gpu
+def test_forward_unary_op():
+    class UnaryOp(nn.Layer):
+        def __init__(self, op_name):
+            super(UnaryOp, self).__init__()
+            for candidate in (paddle, paddle.nn.functional):
+                self.func = getattr(candidate, op_name, None)
+                if self.func:
+                    break
+
+        @paddle.jit.to_static
+        def forward(self, inputs):
+            return self.func(inputs)
+
+    input_data = paddle.rand([1, 2, 5, 5], dtype="float32")
+    op_list = [
+        "abs",
+        "exp",
+        "relu",
+        "sigmoid",
+        "tanh",
+    ]
+    for op_name in op_list:
+        verify_model(UnaryOp(op_name), input_data)
+
+
 @tvm.testing.uses_gpu
 def test_forward_add_subtract():
     input_shape = [10]
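In the new test, UnaryOp resolves each name via getattr against paddle first and paddle.nn.functional second, because the five ops under test are split across those namespaces. A quick illustration of the lookup order it relies on (assuming the usual Paddle 2.x layout, where e.g. "exp" is top-level but "relu" lives in paddle.nn.functional):

    import paddle

    # Same fallback order as UnaryOp.__init__: top-level paddle, then
    # paddle.nn.functional.
    for name in ("exp", "relu", "sigmoid"):
        func = getattr(paddle, name, None) or getattr(paddle.nn.functional, name, None)
        assert func is not None, name

Note that if a name resolved in neither module, self.func would stay None and forward would fail only when called; all five names in op_list resolve.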
@@ -368,7 +394,13 @@ class Conv2DTranspose2(nn.Layer):
     def __init__(self):
         super(Conv2DTranspose2, self).__init__()
         self.conv_transpose = nn.Conv2DTranspose(
-            3, 5, 3, stride=2, padding=[[0,0],[0,0],[1,2],[3,4]], output_padding=1, bias_attr=True
+            3,
+            5,
+            3,
+            stride=2,
+            padding=[[0, 0], [0, 0], [1, 2], [3, 4]],
+            output_padding=1,
+            bias_attr=True,
         )

     @paddle.jit.to_static
@@ -379,7 +411,7 @@ class Conv2DTranspose3(nn.Layer):
     def __init__(self):
         super(Conv2DTranspose3, self).__init__()
         self.conv_transpose = nn.Conv2DTranspose(
-            3, 5, 3, stride=3, padding='VALID', output_padding=2, bias_attr=True
+            3, 5, 3, stride=3, padding="VALID", output_padding=2, bias_attr=True
         )

     @paddle.jit.to_static
@@ -683,17 +715,6 @@ def pad4(inputs):
     verify_model(pad4, input_data=input_data)


-@tvm.testing.uses_gpu
-def test_forward_relu():
-    @paddle.jit.to_static
-    def relu(inputs):
-        return nn.functional.relu(inputs)
-
-    input_shape = [10, 10]
-    input_data = paddle.rand(input_shape, dtype="float32")
-    verify_model(relu, input_data=input_data)
-
-
 @tvm.testing.uses_gpu
 def test_forward_reshape():
     @paddle.jit.to_static
@@ -800,28 +821,6 @@ def squeeze3(inputs):
     verify_model(squeeze3, input_data=input_data)


-@tvm.testing.uses_gpu
-def test_forward_tanh():
-    @paddle.jit.to_static
-    def tanh(inputs):
-        return paddle.tanh(inputs)
-
-    input_shape = [1, 3, 10, 10]
-    input_data = paddle.rand(input_shape, dtype="float32")
-    verify_model(tanh, input_data=input_data)
-
-
-@tvm.testing.uses_gpu
-def test_forward_sigmoid():
-    @paddle.jit.to_static
-    def sigmoid(inputs):
-        return nn.functional.sigmoid(inputs)
-
-    input_shape = [10, 10]
-    input_data = paddle.rand(input_shape, dtype="float32")
-    verify_model(sigmoid, input_data=input_data)
-
-
 if __name__ == "__main__":
     test_forward_add_subtract()
     test_forward_argmax()
@@ -845,11 +844,9 @@ def sigmoid(inputs):
     test_forward_matmul()
     test_forward_pool2d()
     test_forward_pad()
-    test_forward_relu()
     test_forward_reshape()
     test_forward_scale()
     test_forward_slice()
     test_forward_squeeze2()
-    test_forward_tanh()
     test_forward_conv_transpose()
-    test_forward_sigmoid()
+    test_forward_unary_op()