add adagrad and rmsprop yaml (#44631)

Caozhou1995 committed Jul 27, 2022
1 parent 16506d8 commit 4b7fe61

Showing 3 changed files with 39 additions and 3 deletions.
22 changes: 22 additions & 0 deletions paddle/phi/api/yaml/legacy_api.yaml
@@ -48,6 +48,17 @@
  kernel :
    func : adadelta

- api : adagrad_
  args : (Tensor param, Tensor grad, Tensor moment, Tensor learning_rate, float epsilon)
  output : Tensor(param_out), Tensor(moment_out)
  infer_meta :
    func : AdagradInferMeta
  kernel :
    func : adagrad {dense, dense, dense, dense -> dense, dense}
           adagrad_dense_param_sparse_grad {dense, selected_rows, dense, dense -> dense, dense}
    data_type : param
  inplace : (param -> param_out), (moment -> moment_out)

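For context, the dense adagrad kernel registered above implements the standard Adagrad rule: accumulate squared gradients into the moment accumulator and scale the step by the inverse square root of that accumulator. A minimal NumPy sketch of that rule (illustrative only, not the Paddle kernel itself; names mirror the yaml arguments):

import numpy as np

def adagrad_update(param, grad, moment, learning_rate, epsilon):
    # Accumulate squared gradients, then take a scaled gradient step.
    moment_out = moment + grad * grad
    param_out = param - learning_rate * grad / (np.sqrt(moment_out) + epsilon)
    return param_out, moment_out
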
- api : adam_
  args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor moment1, Tensor moment2, Tensor beta1_pow, Tensor beta2_pow, Tensor master_param, Tensor skip_update, Scalar beta1, Scalar beta2, Scalar epsilon, bool lazy_mode, int64_t min_row_size_to_use_multithread, bool multi_precision, bool use_global_beta_pow)
  output : Tensor(param_out), Tensor(moment1_out), Tensor(moment2_out), Tensor(beta1_pow_out), Tensor(beta2_pow_out), Tensor(master_param_outs)
@@ -1851,6 +1862,17 @@
    func : reverse_array
  backward : reverse_array_grad

- api : rmsprop_
  args : (Tensor param, Tensor mean_square, Tensor grad, Tensor moment, Tensor learning_rate, Tensor mean_grad, float epsilon, float decay, float momentum, bool centered)
  output : Tensor(param_out), Tensor(moment_out), Tensor(mean_square_out), Tensor(mean_grad_out)
  infer_meta :
    func : RmspropInferMeta
  kernel :
    func : rmsprop {dense, dense, dense, dense, dense, dense -> dense, dense, dense, dense}
           rmsprop_dense_param_sparse_grad {dense, dense, selected_rows, dense, dense, dense -> dense, dense, dense, dense}
  optional : mean_grad
  inplace : (param -> param_out), (moment -> moment_out), (mean_square -> mean_square_out), (mean_grad -> mean_grad_out)

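Likewise, the rmsprop kernel pair registered above backs the (optionally centered) RMSProp update. A minimal NumPy sketch of one common formulation of that rule (illustrative only, not the Paddle kernel itself; names and output order mirror the yaml entry):

import numpy as np

def rmsprop_update(param, mean_square, grad, moment, learning_rate,
                   mean_grad, epsilon, decay, momentum, centered):
    # Exponential moving average of squared gradients.
    mean_square_out = decay * mean_square + (1.0 - decay) * grad * grad
    mean_grad_out = mean_grad
    if centered:
        # The centered variant also tracks the mean gradient and divides by
        # the estimated variance instead of the raw second moment.
        mean_grad_out = decay * mean_grad + (1.0 - decay) * grad
        denom = mean_square_out - mean_grad_out * mean_grad_out + epsilon
    else:
        denom = mean_square_out + epsilon
    moment_out = momentum * moment + learning_rate * grad / np.sqrt(denom)
    param_out = param - moment_out
    return param_out, moment_out, mean_square_out, mean_grad_out
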
- api : roi_align
  args : (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, float spatial_scale, int sampling_ratio, bool aligned)
  output : Tensor
19 changes: 17 additions & 2 deletions python/paddle/fluid/optimizer.py
@@ -2279,11 +2279,18 @@ def _append_optimize_op(self, block, param_and_grad):

        moment_acc = self._get_accumulator(self._moment_acc_str,
                                           param_and_grad[0])
        if framework._non_static_mode():
        if in_dygraph_mode():
            _C_ops.final_state_adagrad_(param_and_grad[0], param_and_grad[1],
                                        moment_acc,
                                        self._create_param_lr(param_and_grad),
                                        self._epsilon)
            return None
        elif _in_legacy_dygraph():
            _C_ops.adagrad(param_and_grad[0], param_and_grad[1], moment_acc,
                           self._create_param_lr(param_and_grad),
                           param_and_grad[0], moment_acc, "epsilon",
                           self._epsilon)
            return None
        else:
            # Create the adagrad optimizer op
            adagrad_op = block.append_op(
@@ -3374,14 +3381,22 @@ def _append_optimize_op(self, block, param_and_grad):
                                                param_and_grad[0])
        mean_grad_acc = self._get_accumulator(self._mean_grad_acc_str,
                                              param_and_grad[0])
        if framework._non_static_mode():
        if in_dygraph_mode():
            _C_ops.final_state_rmsprop_(param_and_grad[0], mean_square_acc,
                                        param_and_grad[1], momentum_acc,
                                        self._create_param_lr(param_and_grad),
                                        mean_grad_acc, self._epsilon, self._rho,
                                        self._momentum, self._centered)
            return None
        elif _in_legacy_dygraph():
            _C_ops.rmsprop(param_and_grad[0], mean_square_acc,
                           self._create_param_lr(param_and_grad),
                           param_and_grad[1], momentum_acc, param_and_grad[0],
                           momentum_acc, mean_square_acc, mean_grad_acc,
                           "epsilon", self._epsilon, "decay", self._rho,
                           "momentum", self._momentum, "centered",
                           self._centered)
            return None
        else:
            rmsprop_op = block.append_op(
                type=self.type,
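Both optimizer hooks above follow the same three-way dispatch Paddle used during the eager-mode migration: call the new final-state op in eager dygraph mode, fall back to the legacy _C_ops call in legacy dygraph mode, and append a static-graph operator otherwise. A schematic, runnable sketch of that control flow (the mode checks are stubbed here; in the real code they come from paddle.fluid.framework):

# Schematic of the dispatch in _append_optimize_op; stubs stand in for
# paddle.fluid.framework.in_dygraph_mode / _in_legacy_dygraph.

def in_dygraph_mode():
    return True   # stub: pretend we are in eager dygraph mode

def _in_legacy_dygraph():
    return False  # stub: legacy dygraph trace mode

def append_optimize_op(block, param_and_grad):
    if in_dygraph_mode():
        # Eager mode: call the new in-place final-state op generated from
        # the yaml entry (e.g. _C_ops.final_state_adagrad_).
        return "final-state op"
    elif _in_legacy_dygraph():
        # Legacy dygraph: call the old C op with "attr name", value pairs.
        return "legacy _C_ops call"
    else:
        # Static graph: append the operator to the program block.
        return "block.append_op(...)"

print(append_optimize_op(block=None, param_and_grad=(None, None)))
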
1 change: 0 additions & 1 deletion python/paddle/fluid/tests/unittests/test_adagrad_op.py
@@ -29,7 +29,6 @@ class TestAdagradOp1(OpTest):

    def setUp(self):
        self.op_type = "adagrad"

        param = np.random.random((123, 321)).astype("float32")
        grad = np.random.random((123, 321)).astype("float32")
        moment = np.zeros((123, 321)).astype("float32")
