【Hackathon No.8】add api hypot & hypot_ (#57295)
llyyxx0413 committed Oct 18, 2023
1 parent 3d765cc commit d392526
Showing 5 changed files with 218 additions and 0 deletions.
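
To orient the review, here is a minimal usage sketch of the two new APIs, based on the docstrings added in this commit (the exact tensor repr may vary by build; values follow out = sqrt(x^2 + y^2)):

    import paddle

    x = paddle.to_tensor([3.0, 6.0], dtype='float32')
    y = paddle.to_tensor([4.0, 8.0], dtype='float32')

    # Out-of-place: returns sqrt(x**2 + y**2) element-wise -> [5., 10.]
    out = paddle.hypot(x, y)

    # In-place: writes the result into x and returns it.
    paddle.hypot_(x, y)
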
4 changes: 4 additions & 0 deletions python/paddle/__init__.py
@@ -407,6 +407,8 @@
i1e,
polygamma,
polygamma_,
hypot,
hypot_,
)

from .tensor.random import (
@@ -904,4 +906,6 @@
'i1e',
'polygamma',
'polygamma_',
'hypot',
'hypot_',
]
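
The __init__.py changes above simply re-export the new functions at the package top level and register them in __all__; a quick check, assuming a build that includes this commit:

    import paddle

    # Both symbols should resolve at the top-level namespace ...
    print(callable(paddle.hypot), callable(paddle.hypot_))        # True True
    # ... and be listed in the public API surface.
    print('hypot' in paddle.__all__, 'hypot_' in paddle.__all__)  # True True
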
4 changes: 4 additions & 0 deletions python/paddle/tensor/__init__.py
@@ -329,6 +329,8 @@
from .math import polygamma_ # noqa: F401
from .math import renorm # noqa: F401
from .math import renorm_ # noqa: F401
from .math import hypot # noqa: F401
from .math import hypot_ # noqa: F401

from .random import multinomial # noqa: F401
from .random import standard_normal # noqa: F401
@@ -464,6 +466,8 @@
'sum',
'nan_to_num',
'nan_to_num_',
'hypot',
'hypot_',
'nansum',
'nanmean',
'count_nonzero',
53 changes: 53 additions & 0 deletions python/paddle/tensor/math.py
@@ -6932,3 +6932,56 @@ def ldexp_(x, y, name=None):
    y = paddle.cast(y, dtype=out_dtype)
    two = paddle.to_tensor(2, dtype=out_dtype)
    return paddle.multiply_(x, paddle.pow(two, y))


def hypot(x, y, name=None):
    """
    Calculate the length of the hypotenuse of a right-angled triangle. The equation is:

    .. math::
        out = \\sqrt{x^2 + y^2}

    Args:
        x (Tensor): The input Tensor, the data type is float32, float64, int32 or int64.
        y (Tensor): The input Tensor, the data type is float32, float64, int32 or int64.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        out (Tensor): An N-D Tensor. If x and y have different but broadcastable shapes, the output takes the broadcast shape of x and y; if they have the same shape, the output has that shape as well. The data type is float32 or float64.

    Examples:

        .. code-block:: python

            >>> import paddle
            >>> x = paddle.to_tensor([3], dtype='float32')
            >>> y = paddle.to_tensor([4], dtype='float32')
            >>> res = paddle.hypot(x, y)
            >>> print(res)
            Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
            [5.])
    """
    if not isinstance(x, (paddle.Tensor, Variable)):
        raise TypeError(f"x must be tensor type, but got {type(x)}")
    if not isinstance(y, (paddle.Tensor, Variable)):
        raise TypeError(f"y must be tensor type, but got {type(y)}")

    out = (paddle.pow(x, 2) + paddle.pow(y, 2)).sqrt()
    return out


@inplace_apis_in_dygraph_only
def hypot_(x, y, name=None):
    r"""
    Inplace version of ``hypot`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_hypot`.
    """
    if not isinstance(x, (paddle.Tensor, Variable)):
        raise TypeError(f"x must be tensor type, but got {type(x)}")
    if not isinstance(y, (paddle.Tensor, Variable)):
        raise TypeError(f"y must be tensor type, but got {type(y)}")

    out = x.pow_(2).add_(y.pow(2)).sqrt_()
    return out
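
Two behaviours of the implementation above are worth spelling out with a small hedged sketch (shapes chosen only for illustration): paddle.hypot broadcasts its inputs like other elementwise ops, and paddle.hypot_ chains three in-place kernels (pow_, add_, sqrt_), so the result ends up stored in x itself:

    import paddle

    a = paddle.rand([10, 10])
    b = paddle.rand([10, 1])

    # Broadcasting: (10, 10) with (10, 1) -> (10, 10), as described in the Returns section.
    print(paddle.hypot(a, b).shape)   # [10, 10]

    # In-place: the result is written back into `a`, and `a` is what gets returned.
    before = a.clone()
    out = paddle.hypot_(a, b)
    print(paddle.allclose(out, a))                          # `a` holds the result
    print(paddle.allclose(a, (before**2 + b**2).sqrt()))    # matches the formula

This three-step in-place chain is also why the inplace_version checks in test_inplace.py below expect the counter to advance by 3 per hypot_ call.
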
104 changes: 104 additions & 0 deletions test/legacy_test/test_hypot.py
@@ -0,0 +1,104 @@
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

import paddle
from paddle import base
from paddle.base import core

paddle.enable_static()


class TestHypotAPI(unittest.TestCase):
    def setUp(self):
        self.x_shape = [10, 10]
        self.y_shape = [10, 1]
        self.x_np = np.random.uniform(-10, 10, self.x_shape).astype(np.float32)
        self.y_np = np.random.uniform(-10, 10, self.y_shape).astype(np.float32)

    def test_static_graph(self):
        paddle.enable_static()
        startup_program = base.Program()
        train_program = base.Program()
        with base.program_guard(startup_program, train_program):
            x = paddle.static.data(
                name='input1', dtype='float32', shape=self.x_shape
            )
            y = paddle.static.data(
                name='input2', dtype='float32', shape=self.y_shape
            )
            out = paddle.hypot(x, y)

            place = (
                base.CUDAPlace(0)
                if core.is_compiled_with_cuda()
                else base.CPUPlace()
            )
            exe = base.Executor(place)
            res = exe.run(
                base.default_main_program(),
                feed={'input1': self.x_np, 'input2': self.y_np},
                fetch_list=[out],
            )
            np_out = np.hypot(self.x_np, self.y_np)
            np.testing.assert_allclose(res[0], np_out, atol=1e-5, rtol=1e-5)
            paddle.disable_static()

    def test_dygraph(self):
        paddle.disable_static()
        x = paddle.to_tensor(self.x_np)
        y = paddle.to_tensor(self.y_np)
        result = paddle.hypot(x, y)
        np.testing.assert_allclose(
            np.hypot(self.x_np, self.y_np), result.numpy(), rtol=1e-05
        )

        paddle.enable_static()

    def test_error(self):
        x = paddle.to_tensor(self.x_np)
        y = 3.8
        self.assertRaises(TypeError, paddle.hypot, x, y)
        self.assertRaises(TypeError, paddle.hypot, y, x)


class TestHypotAPIBroadCast(TestHypotAPI):
    def setUp(self):
        self.x_np = np.arange(6).astype(np.float32)
        self.y_np = np.array([20]).astype(np.float32)
        self.x_shape = [6]
        self.y_shape = [1]


class TestHypotAPI3(TestHypotAPI):
    def setUp(self):
        self.x_shape = []
        self.y_shape = []
        self.x_np = np.random.uniform(-10, 10, self.x_shape).astype(np.float32)
        self.y_np = np.random.uniform(-10, 10, self.y_shape).astype(np.float32)


class TestHypotAPI4(TestHypotAPI):
    def setUp(self):
        self.x_shape = [1]
        self.y_shape = [1]
        self.x_np = np.random.uniform(-10, 10, self.x_shape).astype(np.float32)
        self.y_np = np.random.uniform(-10, 10, self.y_shape).astype(np.float32)


if __name__ == "__main__":
    unittest.main()
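
The new tests validate paddle.hypot against NumPy's reference implementation; as a rough sanity check, the broadcast case used in TestHypotAPIBroadCast evaluates to (values rounded):

    import numpy as np

    x = np.arange(6).astype(np.float32)    # [0., 1., 2., 3., 4., 5.]
    y = np.array([20]).astype(np.float32)
    print(np.hypot(x, y))
    # approximately [20.     20.025  20.1    20.224  20.396  20.616]
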
53 changes: 53 additions & 0 deletions test/legacy_test/test_inplace.py
@@ -834,6 +834,59 @@ def test_error(self):
        self.assertRaises(ValueError, paddle.gcd_, x, y)


class TestDygraphInplaceHypot(TestDygraphInplace):
    def init_data(self):
        self.input_var_numpy = np.random.randint(2, size=200)
        self.input_var_numpy = self.input_var_numpy.reshape([10, 20])
        self.dtype = "float32"
        self.y = paddle.randn(shape=[10, 20], dtype="float32")

    def inplace_api_processing(self, var):
        return paddle.hypot_(var, self.y)

    def non_inplace_api_processing(self, var):
        return paddle.hypot(var, self.y)

    def test_errors(self):
        x = 3.0
        self.assertRaises(TypeError, paddle.hypot_, x, self.y)
        self.assertRaises(TypeError, paddle.hypot_, self.y, x)

    def test_forward_version(self):
        with paddle.base.dygraph.guard():
            var = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
            self.assertEqual(var.inplace_version, 0)

            # hypot_ chains three in-place kernels (pow_, add_, sqrt_),
            # so each call advances the tensor's inplace_version by 3.
            inplace_var = self.inplace_api_processing(var)
            self.assertEqual(var.inplace_version, 3)

            inplace_var[0] = 2.0
            self.assertEqual(var.inplace_version, 4)

            inplace_var = self.inplace_api_processing(inplace_var)
            self.assertEqual(var.inplace_version, 7)

    def test_backward_error(self):
        # It raises an error because the inplace operator will result
        # in incorrect gradient computation.
        with paddle.base.dygraph.guard():
            var_a = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
            var_a.stop_gradient = False

            var_b = var_a**2
            # Here, the gradient computation will use the value of var_b
            var_c = var_b**2
            self.inplace_api_processing(var_b)
            var_c = paddle.cast(var_c, "float32")

            loss = paddle.nn.functional.relu(var_c)
            with self.assertRaisesRegex(
                RuntimeError,
                f"received tensor_version:{3} != wrapper_version_snapshot:{0}",
            ):
                loss.backward()


class TestDygraphInplaceNanToNum(TestDygraphInplace):
    def init_data(self):
        self.input_var_numpy = np.array(