Skip to content

Commit

Permalink
[Yaml] add yaml for gather op and elementwise_mod op. (#41348)
Browse files Browse the repository at this point in the history
* gather op

* add mod
  • Loading branch information
2742195759 committed Apr 3, 2022
1 parent fd591ec commit 3152f3f
Show file tree
Hide file tree
Showing 7 changed files with 88 additions and 39 deletions.
5 changes: 4 additions & 1 deletion python/paddle/fluid/tests/unittests/test_activation_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -2326,7 +2326,7 @@ class TestPow(TestActivation):
def setUp(self):
self.op_type = "pow"
self.python_api = paddle.pow
self.check_eager = False
self.check_eager = True
self.init_dtype()

np.random.seed(1024)
Expand All @@ -2337,6 +2337,9 @@ def setUp(self):
self.attrs = {'factor': 3.0}
self.outputs = {'Out': out}

def test_check_output(self):
self.check_output(check_eager=self.check_eager)

def test_check_grad(self):
if self.dtype == np.float16:
return
Expand Down
11 changes: 9 additions & 2 deletions python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ def init_kernel_type(self):

def setUp(self):
self.op_type = "elementwise_mod"
self.python_api = paddle.remainder
self.axis = -1
self.init_dtype()
self.init_input_output()
Expand All @@ -43,7 +44,10 @@ def setUp(self):
self.outputs = {'Out': self.out}

def test_check_output(self):
self.check_output()
if self.attrs['axis'] == -1:
self.check_output(check_eager=True)
else:
self.check_output(check_eager=False)

def init_input_output(self):
self.x = np.random.uniform(0, 10000, [10, 10]).astype(self.dtype)
Expand Down Expand Up @@ -76,7 +80,10 @@ def init_input_output(self):
self.out = np.fmod(self.y + np.fmod(self.x, self.y), self.y)

def test_check_output(self):
self.check_output()
if self.attrs['axis'] == -1:
self.check_output(check_eager=True)
else:
self.check_output(check_eager=False)


class TestElementwiseModOpDouble(TestElementwiseModOpFloat):
Expand Down
12 changes: 6 additions & 6 deletions python/paddle/fluid/tests/unittests/test_gather_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,10 +43,10 @@ def setUp(self):
self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}

def test_check_output(self):
self.check_output(check_eager=False)
self.check_output(check_eager=True)

def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=False)
self.check_grad(['X'], 'Out', check_eager=True)

def config(self):
"""
Expand Down Expand Up @@ -136,10 +136,10 @@ def setUp(self):
self.outputs = {'Out': out}

def test_check_output(self):
self.check_output(check_eager=False)
self.check_output(check_eager=True)

def test_check_grad(self):
self.check_grad(['X'], 'Out', numeric_grad_delta=0.5, check_eager=False)
self.check_grad(['X'], 'Out', numeric_grad_delta=0.5, check_eager=True)

def config(self):
"""
Expand All @@ -165,10 +165,10 @@ def setUp(self):
self.outputs = {'Out': out}

def test_check_output(self):
self.check_output(check_eager=False)
self.check_output(check_eager=True)

def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=False)
self.check_grad(['X'], 'Out', check_eager=True)

def config(self):
"""
Expand Down
6 changes: 3 additions & 3 deletions python/paddle/tensor/manipulation.py
Original file line number Diff line number Diff line change
Expand Up @@ -1391,9 +1391,9 @@ def gather(x, index, axis=None, name=None):
if axis is None:
axis = 0

#if in_dygraph_mode():
#return _C_ops.final_state_gather(x, index, axis)
if _non_static_mode():
if in_dygraph_mode():
return _C_ops.final_state_gather(x, index, axis)
if _in_legacy_dygraph():
axis = axis.item() if isinstance(axis, paddle.Tensor) else axis
return _C_ops.gather(x, index, None, "axis", axis, "overwrite", False)

Expand Down
50 changes: 24 additions & 26 deletions python/paddle/tensor/math.py
Original file line number Diff line number Diff line change
Expand Up @@ -150,48 +150,46 @@ def pow(x, y, name=None):
"""
# in dynamic graph mode
#if in_dygraph_mode():
#if isinstance(y, (int, float)):
#return _C_ops.final_state_pow(x, y)
#elif isinstance(y, (paddle.Tensor, Variable)):
#return _elementwise_op_in_dygraph(
#x, y, axis=-1, act=None, op_name='elementwise_pow')
#else:
#raise TypeError('y must be scalar or tensor type, but received: %s '% (y.dtype))

#if _in_legacy_dygraph():
if _non_static_mode():
if in_dygraph_mode():
if isinstance(y, (int, float)):
return _C_ops.pow(x, 'factor', y)
return _C_ops.final_state_pow(x, y)
elif isinstance(y, (paddle.Tensor, Variable)):
return _elementwise_op_in_dygraph(
x, y, axis=-1, act=None, op_name='elementwise_pow')
else:
raise TypeError('y must be scalar or tensor type, but received: %s '% (y.dtype))
# in static graph mode
else:
if _in_legacy_dygraph():
if isinstance(y, (int, float)):
helper = LayerHelper('pow', **locals())
inputs = {'X': x}
attrs = {'factor': y}
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out
return _C_ops.pow(x, 'factor', y)
elif isinstance(y, (paddle.Tensor, Variable)):
# TODO A potential speed improvement is supporting different types in C++ and removing the cast ops here
helper = LayerHelper('elementwise_pow', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
return _elementwise_op_in_dygraph(
x, y, axis=-1, act=None, op_name='elementwise_pow')
else:
raise TypeError('y must be scalar or tensor type, but received: %s '% (type(y)))
raise TypeError('y must be scalar or tensor type, but received: %s '% (y.dtype))
# in static graph mode
if isinstance(y, (int, float)):
helper = LayerHelper('pow', **locals())
inputs = {'X': x}
attrs = {'factor': y}
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out
elif isinstance(y, (paddle.Tensor, Variable)):
# TODO A potential speed improvement is supporting different types in C++ and removing the cast ops here
helper = LayerHelper('elementwise_pow', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
else:
raise TypeError('y must be scalar or tensor type, but received: %s '% (type(y)))


OP_NAMEMAPPING = {
'elementwise_max': 'final_state_maximum',
'elementwise_min': 'final_state_minimum',
'elementwise_pow': 'final_state_elementwise_pow',
'elementwise_floordiv': 'final_state_floor_divide',
'elementwise_mod': 'final_state_modulo',
}

@dygraph_only
Expand Down
20 changes: 20 additions & 0 deletions python/paddle/utils/code_gen/api.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -632,6 +632,16 @@
data_type : dtype > x
backend : place > x

- api : gather
args : (Tensor x, Tensor index, Scalar axis=0)
output : Tensor(out)
infer_meta :
func : GatherInferMeta
kernel :
func : gather
data_type: x
backward : gather_grad

- api : gather_nd
args : (Tensor x, Tensor index)
output : Tensor
Expand Down Expand Up @@ -1220,6 +1230,16 @@
func : pool3d
backward : pool3d_grad

- api : pow
args : (Tensor x, Scalar s)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : pow
backward : pow_grad

- api : prelu
args : (Tensor x, Tensor alpha, str data_format, str mode)
output : Tensor(out)
Expand Down
23 changes: 22 additions & 1 deletion python/paddle/utils/code_gen/backward.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -178,7 +178,7 @@
output : Tensor(x_grad), Tensor(filter_grad)
infer_meta :
func : ConvTransposeGradInferMeta
kernel :
kernel :
func : conv2d_transpose_grad

- backward_api : conv3d_transpose_grad
Expand Down Expand Up @@ -389,6 +389,17 @@
kernel :
func : frobenius_norm_grad

- backward_api : gather_grad
forward : gather(Tensor x, Tensor index, Scalar axis=0) -> Tensor(out)
args : (Tensor x, Tensor index, Tensor out_grad, Scalar axis=0, bool overwrite=false)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
data_type: x
func : gather_grad

- backward_api : gather_nd_grad
forward : gather_nd (Tensor x, Tensor index) -> Tensor(out)
args : (Tensor x, Tensor index, Tensor out_grad)
Expand Down Expand Up @@ -803,6 +814,16 @@
kernel :
func : pool3d_grad

- backward_api : pow_grad
forward : pow(Tensor x, Scalar s) -> Tensor(out)
args : (Tensor x, Tensor out_grad, Scalar s=-1)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : pow_grad

- backward_api : prelu_grad
forward : prelu(Tensor x, Tensor alpha, str data_format, str mode) -> Tensor(out)
args : (Tensor x, Tensor alpha, Tensor out_grad, str data_format, str mode)
Expand Down

0 comments on commit 3152f3f

Please sign in to comment.