Commit: [Zero-Dim] fix cases to Support paddle.all/any/min/max/prod/logsumexp/amax/amin output 0D
zhwesky2010 committed Apr 13, 2023
1 parent 558152a commit c4860f6
Showing 17 changed files with 57 additions and 57 deletions.
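Every hunk below applies one of two variants of the same fix: once reductions such as paddle.max, paddle.all, or paddle.linalg.cond produce zero-dimensional (0-D) tensors, the result can no longer be indexed with [0], so scalar extraction switches to .item(), and the NumPy reference values in the tests become 0-D scalars instead of 1-element lists. A minimal sketch of the behavior, not part of the commit, assuming a Paddle build where these reductions already output 0-D:

import numpy as np
import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0])
m = paddle.max(x)                  # 0-D tensor, shape []

# A 0-D tensor has no axis to index, so m[0] raises;
# .item() extracts the Python scalar instead.
print(m.item())                    # 3.0

# The reference value follows the same convention on the NumPy
# side: a 0-D result rather than np.array([3.0]).
res = np.max(x.numpy())
print(np.allclose(res, m.item()))  # True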
6 changes: 3 additions & 3 deletions framework/api/distribution/apibase.py
@@ -501,7 +501,7 @@ def compute_grad(self, res, data=None, **kwargs):
shape = v.numpy().shape
for i in range(len(v.numpy().flatten())):
g = self._get_sigle_grad(v, i, k)
- grad.append(g[0])
+ grad.append(g.item())
self.kwargs[k] = v
numeric_grad[k] = np.array(grad).reshape(shape)
elif isinstance(v, (list, tuple)) and isinstance(v[0], paddle.Tensor):
@@ -512,7 +512,7 @@ def compute_grad(self, res, data=None, **kwargs):
shape = v[n].shape
for i in range(len(v[n].flatten())):
g = self._get_sigle_grad(v[n], i, k, n)
- grad.append(g[0])
+ grad.append(g.item())
self.kwargs[k][n] = v[n]
tmp.append(np.array(grad).reshape(shape))
numeric_grad[k] = tmp
@@ -532,7 +532,7 @@ def compute_grad(self, res, data=None, **kwargs):
self.data.stop_gradient = False
loss_delta = self._numeric_grad()
g = (loss_delta - loss) / self.gap
- grad.append(g[0])
+ grad.append(g.item())
# recover v to self.kwargs
self.data = data
numeric_grad["data"] = np.array(grad).reshape(shape)
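For context, the compute_grad helpers touched in the apibase.py files estimate gradients by forward finite differences. A condensed sketch of the pattern visible in the hunks, boiled down to plain NumPy (loss_fn, gap, and the function shape are assumptions standing in for the class machinery, not the actual apibase implementation):

import numpy as np

def numeric_grad(loss_fn, x, gap=1e-4):
    # Forward-difference estimate of d(loss)/dx, one element at a time.
    # loss_fn is assumed to return a 0-D NumPy/Paddle value, which is
    # why g.item() rather than g[0] extracts the scalar.
    loss = loss_fn(x)
    grad = []
    flat = x.flatten()
    for i in range(flat.size):
        x_delta = flat.copy()
        x_delta[i] += gap                              # perturb one element
        loss_delta = loss_fn(x_delta.reshape(x.shape))
        g = (loss_delta - loss) / gap                  # finite-difference slope
        grad.append(g.item())                          # 0-D safe extraction
    return np.array(grad).reshape(x.shape)

print(numeric_grad(lambda a: np.sum(a ** 2), np.ones(3)))  # ~[2. 2. 2.]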
4 changes: 2 additions & 2 deletions framework/api/fft/apibase.py
@@ -502,7 +502,7 @@ def compute_grad(self, res, data=None, **kwargs):
loss_delta = self._numeric_grad()
g = (loss_delta - loss) / self.gap
# print("-----> {}".format(g))
- grad.append(g[0])
+ grad.append(g.item())
# recover v to self.kwargs
self.kwargs[k] = v
numeric_grad[k] = np.array(grad).reshape(shape)
@@ -521,7 +521,7 @@ def compute_grad(self, res, data=None, **kwargs):
self.data.stop_gradient = False
loss_delta = self._numeric_grad()
g = (loss_delta - loss) / self.gap
- grad.append(g[0])
+ grad.append(g.item())
# recover v to self.kwargs
self.data = data
numeric_grad["data"] = np.array(grad).reshape(shape)
6 changes: 3 additions & 3 deletions framework/api/incubate/apibase.py
@@ -501,7 +501,7 @@ def compute_grad(self, res, data=None, **kwargs):
shape = v.numpy().shape
for i in range(len(v.numpy().flatten())):
g = self._get_sigle_grad(v, i, k)
- grad.append(g[0])
+ grad.append(g.item())
self.kwargs[k] = v
numeric_grad[k] = np.array(grad).reshape(shape)
elif isinstance(v, (list, tuple)) and isinstance(v[0], paddle.Tensor):
@@ -512,7 +512,7 @@ def compute_grad(self, res, data=None, **kwargs):
shape = v[n].shape
for i in range(len(v[n].flatten())):
g = self._get_sigle_grad(v[n], i, k, n)
- grad.append(g[0])
+ grad.append(g.item())
self.kwargs[k][n] = v[n]
tmp.append(np.array(grad).reshape(shape))
numeric_grad[k] = tmp
@@ -532,7 +532,7 @@ def compute_grad(self, res, data=None, **kwargs):
self.data.stop_gradient = False
loss_delta = self._numeric_grad()
g = (loss_delta - loss) / self.gap
- grad.append(g[0])
+ grad.append(g.item())
# recover v to self.kwargs
self.data = data
numeric_grad["data"] = np.array(grad).reshape(shape)
4 changes: 2 additions & 2 deletions framework/api/linalg/apibase.py
@@ -498,7 +498,7 @@ def compute_grad(self, res, data=None, **kwargs):
loss_delta = self._numeric_grad()
g = (loss_delta - loss) / self.gap
# print("-----> {}".format(g))
- grad.append(g[0])
+ grad.append(g.item())
# recover v to self.kwargs
self.kwargs[k] = v
numeric_grad[k] = np.array(grad).reshape(shape)
@@ -517,7 +517,7 @@ def compute_grad(self, res, data=None, **kwargs):
self.data.stop_gradient = False
loss_delta = self._numeric_grad()
g = (loss_delta - loss) / self.gap
- grad.append(g[0])
+ grad.append(g.item())
# recover v to self.kwargs
self.data = data
numeric_grad["data"] = np.array(grad).reshape(shape)
22 changes: 11 additions & 11 deletions framework/api/linalg/test_cond.py
@@ -37,7 +37,7 @@ def test_cond_base():
base
"""
x = np.array([[1.0, 0, -1], [0, 1, 0], [1, 0, 1]])
- res = np.array([np.linalg.cond(x)])
+ res = np.linalg.cond(x)
obj.base(res=res, x=x)


@@ -48,7 +48,7 @@ def test_cond0():
"""
x = randtool("float", -2, 4, [3, 4])
# x = np.random.rand(3, 4)
- res = np.array([np.linalg.cond(x)])
+ res = np.linalg.cond(x)
# print(res)
obj.run(res=res, x=x)

@@ -59,7 +59,7 @@ def test_cond1():
m!=n, p=-2
"""
x = randtool("float", -2, 4, [6, 4])
- res = np.array([np.linalg.cond(x, p=-2)])
+ res = np.linalg.cond(x, p=-2)
obj.run(res=res, x=x, p=-2)


@@ -70,7 +70,7 @@ def test_cond2():
x: multiple dimension
"""
x = randtool("float", -20, 40, [6, 2, 4, 3, 4])
- res = np.array(np.linalg.cond(x))
+ res = np.linalg.cond(x)
obj.run(res=res, x=x)


@@ -80,7 +80,7 @@ def test_cond3():
x: n*n; p=2
"""
x = randtool("float", -20, 40, [4, 4])
- res = np.array([np.linalg.cond(x)])
+ res = np.linalg.cond(x)
obj.run(res=res, x=x)


@@ -90,7 +90,7 @@ def test_cond4():
x: n*n; p=-2
"""
x = randtool("float", -2, 40, [4, 4])
- res = np.array([np.linalg.cond(x, p=-2)])
+ res = np.linalg.cond(x, p=-2)
obj.run(res=res, x=x, p=-2)


@@ -100,7 +100,7 @@ def test_cond5():
x: n*n; p=-2
"""
x = randtool("float", -2, 40, [4, 4])
- res = np.array([np.linalg.cond(x, p=-2)])
+ res = np.linalg.cond(x, p=-2)
obj.run(res=res, x=x, p=-2)


@@ -130,7 +130,7 @@ def test_cond8():
x: n*n; p=1
"""
x = randtool("float", -2, 40, [4, 4])
- res = np.array([np.linalg.cond(x, p=1)])
+ res = np.linalg.cond(x, p=1)
obj.run(res=res, x=x, p=1)


@@ -140,7 +140,7 @@ def test_cond9():
x: n*n; p=-1
"""
x = randtool("float", -4, 4, [4, 2, 4, 4])
- res = np.array(np.linalg.cond(x, p=-1))
+ res = np.linalg.cond(x, p=-1)
obj.run(res=res, x=x, p=-1)


@@ -150,7 +150,7 @@ def test_cond10():
x: n*n; p=inf
"""
x = randtool("float", -4, 4, [4, 2, 4, 4])
- res = np.array(np.linalg.cond(x, p=np.inf))
+ res = np.linalg.cond(x, p=np.inf)
obj.run(res=res, x=x, p=np.inf)


@@ -160,5 +160,5 @@ def test_cond11():
x: n*n; p=-inf
"""
x = randtool("float", -4, 4, [4, 2, 4, 4])
- res = np.array(np.linalg.cond(x, p=-np.inf))
+ res = np.linalg.cond(x, p=-np.inf)
obj.run(res=res, x=x, p=-np.inf)
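A note on the test_cond changes above: np.linalg.cond returns a scalar for a single matrix and an array carrying the batch dimensions for stacked input, so both the 0-D and the batched cases can now compare against it directly, without the old np.array([...]) wrapping. Plain NumPy illustration:

import numpy as np

a = np.array([[1.0, 0.0], [0.0, 2.0]])
print(np.linalg.cond(a))             # 2.0, a scalar, matching a 0-D Paddle result

batch = np.stack([a, 3.0 * a])
print(np.linalg.cond(batch).shape)   # (2,), batch dimensions preserved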
2 changes: 1 addition & 1 deletion framework/api/linalg/test_multi_dot.py
@@ -98,7 +98,7 @@ def numerical_grad(**kwargs):
kwargs[k] = tmp
loss_delta = cal_loss(**kwargs)
g = (loss_delta - loss) / gap
- grad.append(g[0])
+ grad.append(g.item())
# recover v to self.kwargs
kwargs[k] = v
numeric_grad[k] = np.array(grad).reshape(shape)
8 changes: 4 additions & 4 deletions framework/api/linalg/test_norm.py
@@ -91,7 +91,7 @@ def test_norm_base():
base
"""
x = randtool("float", -10, 10, [2, 3, 4])
- res = [28.43878906]
+ res = np.array(28.43878906)
obj.base(res=res, x=x)


@@ -102,7 +102,7 @@ def test_norm():
"""
np.random.seed(33)
x = randtool("float", -10, 10, [3, 3, 3])
- res = [31.5736]
+ res = np.array(31.5736)
obj.run(res=res, x=x, axis=None)


@@ -222,7 +222,7 @@ def test_norm10():
x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
pord = "fro"
axis = None
- res = [16.8819]
+ res = np.array(16.8819)
obj.run(res=res, x=x, axis=axis, p=pord)


@@ -234,7 +234,7 @@ def test_norm11():
x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9]).reshape(3, 3)
pord = "fro"
axis = None
- res = [16.8819]
+ res = np.array(16.8819)
obj.run(res=res, x=x, axis=axis, p=pord)


4 changes: 2 additions & 2 deletions framework/api/loss/apibase.py
@@ -502,7 +502,7 @@ def compute_grad(self, res, data=None, **kwargs):
loss_delta = self._numeric_grad()
g = (loss_delta - loss) / self.gap
# print("-----> {}".format(g))
- grad.append(g[0])
+ grad.append(g.item())
# recover v to self.kwargs
self.kwargs[k] = v
numeric_grad[k] = np.array(grad).reshape(shape)
@@ -521,7 +521,7 @@ def compute_grad(self, res, data=None, **kwargs):
self.data.stop_gradient = False
loss_delta = self._numeric_grad()
g = (loss_delta - loss) / self.gap
- grad.append(g[0])
+ grad.append(g.item())
# recover v to self.kwargs
self.data = data
numeric_grad["data"] = np.array(grad).reshape(shape)
4 changes: 2 additions & 2 deletions framework/api/nn/apibase.py
@@ -502,7 +502,7 @@ def compute_grad(self, res, data=None, **kwargs):
loss_delta = self._numeric_grad()
g = (loss_delta - loss) / self.gap
# print("-----> {}".format(g))
- grad.append(g[0])
+ grad.append(g.item())
# recover v to self.kwargs
self.kwargs[k] = v
numeric_grad[k] = np.array(grad).reshape(shape)
@@ -521,7 +521,7 @@ def compute_grad(self, res, data=None, **kwargs):
self.data.stop_gradient = False
loss_delta = self._numeric_grad()
g = (loss_delta - loss) / self.gap
- grad.append(g[0])
+ grad.append(g.item())
# recover v to self.kwargs
self.data = data
numeric_grad["data"] = np.array(grad).reshape(shape)
4 changes: 2 additions & 2 deletions framework/api/paddlebase/apibase.py
@@ -502,7 +502,7 @@ def compute_grad(self, res, data=None, **kwargs):
loss_delta = self._numeric_grad()
g = (loss_delta - loss) / self.gap
# print("-----> {}".format(g))
- grad.append(g[0])
+ grad.append(g.item())
# recover v to self.kwargs
self.kwargs[k] = v
numeric_grad[k] = np.array(grad).reshape(shape)
@@ -521,7 +521,7 @@ def compute_grad(self, res, data=None, **kwargs):
self.data.stop_gradient = False
loss_delta = self._numeric_grad()
g = (loss_delta - loss) / self.gap
- grad.append(g[0])
+ grad.append(g.item())
# recover v to self.kwargs
self.data = data
numeric_grad["data"] = np.array(grad).reshape(shape)
8 changes: 4 additions & 4 deletions framework/api/paddlebase/test_all.py
@@ -37,7 +37,7 @@ def test_all0():
default
"""
x = np.random.randint(-4, 4, (10,))
- res = np.array([np.all(x)])
+ res = np.all(x)
obj.base(res=res, x=x)


@@ -47,7 +47,7 @@ def test_all1():
x: 2d-tensor
"""
x = np.random.randint(-4, 4, (10, 10))
- res = np.array([np.all(x)])
+ res = np.all(x)
obj.base(res=res, x=x)


@@ -57,7 +57,7 @@ def test_all2():
x: 3d-tensor
"""
x = np.random.randint(-4, 4, (3, 4, 2))
- res = np.array([np.all(x)])
+ res = np.all(x)
obj.base(res=res, x=x)


@@ -67,7 +67,7 @@ def test_all3():
x: 4d-tensor
"""
x = np.random.randint(-4, 4, (2, 4, 4, 2))
- res = np.array([np.all(x)])
+ res = np.all(x)
obj.base(res=res, x=x)


8 changes: 4 additions & 4 deletions framework/api/paddlebase/test_any.py
@@ -37,7 +37,7 @@ def test_any0():
default
"""
x = np.random.randint(-4, 4, (10,))
- res = np.array([np.any(x)])
+ res = np.any(x)
obj.base(res=res, x=x)


@@ -47,7 +47,7 @@ def test_any1():
x: 2d-tensor
"""
x = np.random.randint(-4, 4, (10, 10))
- res = np.array([np.any(x)])
+ res = np.any(x)
obj.base(res=res, x=x)


@@ -57,7 +57,7 @@ def test_any2():
x: 3d-tensor
"""
x = np.random.randint(-4, 4, (3, 4, 2))
- res = np.array([np.any(x)])
+ res = np.any(x)
obj.base(res=res, x=x)


@@ -67,7 +67,7 @@ def test_any3():
x: 4d-tensor
"""
x = np.random.randint(-4, 4, (2, 4, 4, 2))
- res = np.array([np.any(x)])
+ res = np.any(x)
obj.base(res=res, x=x)


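The test_all.py and test_any.py updates rely on the same NumPy behavior: with no axis argument, np.all and np.any reduce to a 0-D boolean scalar (np.bool_), which now lines up with Paddle's 0-D output. For instance:

import numpy as np

x = np.random.randint(-4, 4, (3, 4, 2))
res = np.all(x)
print(type(res), res.shape)  # <class 'numpy.bool_'> ()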
6 changes: 3 additions & 3 deletions framework/api/paddlebase/test_max.py
@@ -37,7 +37,7 @@ def test_max_base():
max_base
"""
x_data = np.arange(6).reshape(2, 3).astype(np.float32)
- res = np.array([5])
+ res = np.max(x_data)
obj.base(res=res, x=x_data)


@@ -47,7 +47,7 @@ def test_max_2D_tensor():
max_2D_tensor
"""
x_data = np.arange(6).reshape(2, 3).astype(np.float32)
- res = np.array([5])
+ res = np.max(x_data)
obj.run(res=res, x=x_data)


@@ -112,5 +112,5 @@ def test_max_1():
special input
"""
x_data = np.array([[-1.00595951, -0.20009832], [-0.35623679, -0.95880121]])
- res = np.array([-0.20009832])
+ res = np.max(x_data)
obj.run(res=res, x=x_data, axis=[-2, 1], keepdim=False)
