diff --git a/paddle/fluid/operators/controlflow/conditional_block_op.h b/paddle/fluid/operators/controlflow/conditional_block_op.h
index d85eca8f5cb3a..f2407e9a3f05a 100644
--- a/paddle/fluid/operators/controlflow/conditional_block_op.h
+++ b/paddle/fluid/operators/controlflow/conditional_block_op.h
@@ -119,11 +119,6 @@ class ConditionalBlockOpProtoMaker : public framework::OpProtoAndCheckerMaker {
              "The conditional variable (Cond) is used as scalar "
              "condition.")
         .SetDefault(false);
-    AddAttr<std::vector<std::string>>(ConditionalOp::kSkipEagerDeletionVars,
-                                      "Vars that would not be deleted when "
-                                      "garbage collection strategy enables")
-        .SetDefault(std::vector<std::string>())
-        .AsExtra();
     AddComment(R"DOC(Conditional block operator
 
 If `is_scalar_condition` is True, the conditional variable (Cond) is a scalar,
diff --git a/paddle/fluid/operators/controlflow/while_op.cc b/paddle/fluid/operators/controlflow/while_op.cc
index 4e0344b3b9391..10fa24b1bd4f5 100644
--- a/paddle/fluid/operators/controlflow/while_op.cc
+++ b/paddle/fluid/operators/controlflow/while_op.cc
@@ -221,11 +221,6 @@ class WhileOpMaker : public framework::OpProtoAndCheckerMaker {
              "(bool, default false) Set to true for inference only, false "
              "for training. Some layers may run faster when this is true.")
         .SetDefault(false);
-    AddAttr<std::vector<std::string>>(kSkipEagerDeletionVars,
-                                      "Vars that would skip eager deletion."
-                                      "Users should not set this manually.")
-        .SetDefault(std::vector<std::string>())
-        .AsExtra();
     AddComment(R"DOC(
 )DOC");
   }
diff --git a/paddle/fluid/operators/fake_quantize_op.cc b/paddle/fluid/operators/fake_quantize_op.cc
index cb8263714a5e4..bf1f0103f768b 100644
--- a/paddle/fluid/operators/fake_quantize_op.cc
+++ b/paddle/fluid/operators/fake_quantize_op.cc
@@ -432,24 +432,6 @@ class FakeQuantOrWithDequantAbsMaxOpMaker
                               "the received is %d",
                               bit_length));
         });
-    AddAttr<int>(
-        "round_type",
-        "(int, default 1) The round type of fp32 to int."
-        "0: rounding to nearest ties to even. Eg: round(1.5)=2, round(2.5)=2"
-        "1: rounding to nearest ties away from zero. Eg: round(1.5)=2, "
-        "round(2.5)=3")
-        .SetDefault(1)
-        .AddCustomChecker([](const int &round_type) {
-          PADDLE_ENFORCE_EQ(
-              round_type == 0 || round_type == 1,
-              true,
-              platform::errors::InvalidArgument(
-                  "'round_type' should be 0 or 1, 0 rounding to "
-                  "nearest ties to even and 1 is rounding to nearest "
-                  "ties away from zero.but the received is %d",
-                  round_type));
-        })
-        .AsExtra();
     AddComment(R"DOC(
 This is a Base Op which supports FakeQuantAbsMaxOpMaker and FakeQuantDequantAbsMaxOpMaker.
 FakeQuantAbsMaxOp operator is used in the dynamic quantization.
@@ -529,24 +511,6 @@ class FakeChannelWiseQuantizeAbsMaxOpMaker
                               "the received is %d",
                               bit_length));
         });
-    AddAttr<int>(
-        "round_type",
-        "(int, default 1) The round type of fp32 to int."
-        "0: rounding to nearest ties to even. Eg: round(1.5)=2, round(2.5)=2"
-        "1: rounding to nearest ties away from zero. Eg: round(1.5)=2, "
-        "round(2.5)=3")
-        .SetDefault(1)
-        .AddCustomChecker([](const int &round_type) {
-          PADDLE_ENFORCE_EQ(
-              round_type == 0 || round_type == 1,
-              true,
-              platform::errors::InvalidArgument(
-                  "'round_type' should be 0 or 1, 0 rounding to "
-                  "nearest ties to even and 1 is rounding to nearest "
-                  "ties away from zero.but the received is %d",
-                  round_type));
-        })
-        .AsExtra();
     AddAttr<bool>("is_test",
                   "(bool, default false) Set to true for inference only, false "
                   "for training. Some layers may run faster when this is true.")
@@ -628,24 +592,6 @@ class FakeChannelWiseQuantizeDequantizeAbsMaxOpMaker
                               "the received is %d",
                               bit_length));
         });
-    AddAttr<int>(
-        "round_type",
-        "(int, default 1) The round type of fp32 to int."
-        "0: rounding to nearest ties to even. Eg: round(1.5)=2, round(2.5)=2"
-        "1: rounding to nearest ties away from zero. Eg: round(1.5)=2, "
-        "round(2.5)=3")
-        .SetDefault(1)
-        .AddCustomChecker([](const int &round_type) {
-          PADDLE_ENFORCE_EQ(
-              round_type == 0 || round_type == 1,
-              true,
-              platform::errors::InvalidArgument(
-                  "'round_type' should be 0 or 1, 0 rounding to "
-                  "nearest ties to even and 1 is rounding to nearest "
-                  "ties away from zero.but the received is %d",
-                  round_type));
-        })
-        .AsExtra();
     AddComment(R"DOC(
 The scale of FakeChannelWiseQuantize operator is a vector.
 In detail, each channel of the input X has a scale value.
@@ -715,24 +661,6 @@ class FakeQuantizeRangeAbsMaxOpMaker
                               "the received is %d",
                               bit_length));
         });
-    AddAttr<int>(
-        "round_type",
-        "(int, default 1) The round type of fp32 to int."
-        "0: rounding to nearest ties to even. Eg: round(1.5)=2, round(2.5)=2"
-        "1: rounding to nearest ties away from zero. Eg: round(1.5)=2, "
-        "round(2.5)=3")
-        .SetDefault(1)
-        .AddCustomChecker([](const int &round_type) {
-          PADDLE_ENFORCE_EQ(
-              round_type == 0 || round_type == 1,
-              true,
-              platform::errors::InvalidArgument(
-                  "'round_type' should be 0 or 1, 0 rounding to "
-                  "nearest ties to even and 1 is rounding to nearest "
-                  "ties away from zero.but the received is %d",
-                  round_type));
-        })
-        .AsExtra();
     AddAttr<bool>("is_test",
                   "(bool, default false) Set to true for inference only, false "
                   "for training. Some layers may run faster when this is true.")
@@ -815,24 +743,6 @@ class FakeQuantOrWithDequantMovingAverageAbsMaxOpMaker
                               "the received is %d",
                               bit_length));
         });
-    AddAttr<int>(
-        "round_type",
-        "(int, default 1) The round type of fp32 to int."
-        "0: rounding to nearest ties to even. Eg: round(1.5)=2, round(2.5)=2"
-        "1: rounding to nearest ties away from zero. Eg: round(1.5)=2, "
-        "round(2.5)=3")
-        .SetDefault(1)
-        .AddCustomChecker([](const int &round_type) {
-          PADDLE_ENFORCE_EQ(
-              round_type == 0 || round_type == 1,
-              true,
-              platform::errors::InvalidArgument(
-                  "'round_type' should be 0 or 1, 0 rounding to "
-                  "nearest ties to even and 1 is rounding to nearest "
-                  "ties away from zero.but the received is %d",
-                  round_type));
-        })
-        .AsExtra();
     AddAttr<bool>("is_test",
                   "(bool, default false) Set to true for inference only, false "
                   "for training. Some layers may run faster when this is true.")
diff --git a/paddle/fluid/operators/lookup_table_v2_op.cc b/paddle/fluid/operators/lookup_table_v2_op.cc
index 7baf76a1e1080..5f023fbad6a02 100644
--- a/paddle/fluid/operators/lookup_table_v2_op.cc
+++ b/paddle/fluid/operators/lookup_table_v2_op.cc
@@ -84,46 +84,12 @@ class LookupTableV2OpMaker : public framework::OpProtoAndCheckerMaker {
              "An input with type int64 "
              "contains the ids to be looked up in W.");
     AddOutput("Out", "The lookup results, which have the same type as W.");
-    AddAttr<bool>("is_sparse",
-                  "(boolean, default false) "
-                  "Sparse update.")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<bool>("is_distributed",
-                  "(boolean, default false) distributed lookup table.")
-        .SetDefault(false)
-        .AsExtra();
     AddAttr<int64_t>("padding_idx",
                      "(int64, default -1) "
                      "If the value is -1, it makes no effect to lookup. "
" "Otherwise the given value indicates padding the output " "with zeros whenever lookup encounters it in Ids.") .SetDefault(kNoPadding); - - // for parameter prefetch - AddAttr("remote_prefetch", "").SetDefault(false).AsExtra(); - AddAttr("trainer_id", "trainer id from 0 ~ worker_num.") - .SetDefault(0) - .AsExtra(); - AddAttr("slot", "slot of id").SetDefault(0).AsExtra(); - AddAttr>("height_sections", - "Height for each output SelectedRows.") - .SetDefault(std::vector({})) - .AsExtra(); - AddAttr>( - "epmap", - "(string vector, default 127.0.0.1:6164)" - "Server endpoints in the order of input variables for mapping") - .SetDefault({}) - .AsExtra(); - AddAttr>( - "table_names", - "(string vector, the split table names that will be fetched from " - "parameter server)" - "in the order of input variables for mapping") - .SetDefault({}) - .AsExtra(); - AddComment(R"DOC( Lookup Table V2 Operator. diff --git a/paddle/fluid/operators/nce_op.cc b/paddle/fluid/operators/nce_op.cc index c9c4d1a4c74f3..dd093729d1913 100644 --- a/paddle/fluid/operators/nce_op.cc +++ b/paddle/fluid/operators/nce_op.cc @@ -207,34 +207,6 @@ class NCEOpMaker : public framework::OpProtoAndCheckerMaker { // for parameter prefetch AddAttr("remote_prefetch", "").SetDefault(false); - AddAttr("trainer_id", "trainer id from 0 ~ worker_num.") - .SetDefault(0) - .AsExtra(); - AddAttr>("height_sections", - "Height for each output SelectedRows.") - .SetDefault(std::vector({})) - .AsExtra(); - AddAttr>( - "epmap", - "(string vector, default 127.0.0.1:6164)" - "Server endpoints in the order of input variables for mapping") - .SetDefault({}) - .AsExtra(); - AddAttr>( - "table_names", - "(string vector, the split table names that will be fetched from " - "parameter server)" - "in the order of input variables for mapping") - .SetDefault({}) - .AsExtra(); - - AddAttr>("custom_neg_classes", - "This attribute only be used in unitest. Classes " - "in this list wiil be used as negative classes " - "for every samples. Under normal conditions, " - "user should avoid setting this attribute.") - .SetDefault({}) - .AsExtra(); AddAttr("is_test", "(bool, default false) Set to true for inference " "only, false for training.") diff --git a/paddle/fluid/operators/pscore/distributed_push_sparse_op.cc b/paddle/fluid/operators/pscore/distributed_push_sparse_op.cc index 840e33939897f..a2bf63da10bd2 100644 --- a/paddle/fluid/operators/pscore/distributed_push_sparse_op.cc +++ b/paddle/fluid/operators/pscore/distributed_push_sparse_op.cc @@ -113,11 +113,6 @@ class DistributedPushSparseOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr("use_cvm_op", "(boolean, default false) Use cvm op or not.") .SetDefault(false); - AddAttr>("slots", - "[slot_id1, slot_id2] Slots array of Ids.") - .SetDefault({}) - .AsExtra(); - AddComment(R"DOC( Lookup Tablel Prefetch Operator. This operator is used to perform lookup on parameter W, diff --git a/paddle/fluid/operators/quantize_linear_op.cc b/paddle/fluid/operators/quantize_linear_op.cc index 7012da3aeda94..d4cd685575eec 100644 --- a/paddle/fluid/operators/quantize_linear_op.cc +++ b/paddle/fluid/operators/quantize_linear_op.cc @@ -134,10 +134,6 @@ class QuantizeLinearOpMaker : public framework::OpProtoAndCheckerMaker { AddOutput("OutScale", "(Tensor) Current scale") .AsDispensable() .AsExtra(); // only qat use - AddAttr("moving_rate", - "(float, default 0.9) moving rate.") // only qat use - .SetDefault(0.9) - .AsExtra(); AddAttr("quant_axis", "(int, default 0) The axis for quantization. 
" "For conv2d, depthwise_conv2d, conv2d_transpose " diff --git a/paddle/fluid/operators/sequence_ops/sequence_softmax_op.cc b/paddle/fluid/operators/sequence_ops/sequence_softmax_op.cc index 5417c20f3d419..5a6d2ab0820e2 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_softmax_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_softmax_op.cc @@ -73,14 +73,6 @@ class SequenceSoftmaxOpMaker : public framework::OpProtoAndCheckerMaker { "(bool, default false) Only used in cudnn kernel, need install cudnn") .SetDefault(false) .AsExtra(); - AddAttr( - "data_format", - "(string, default NCHW) Only used in " - "An optional string from: \"NHWC\", \"NCHW\". " - "Defaults to \"NHWC\". Specify the data format of the output data, " - "the input will be transformed automatically. ") - .SetDefault("AnyLayout") - .AsExtra(); AddComment(R"DOC( Sequence Softmax Operator. diff --git a/paddle/phi/api/yaml/generator/ops_extra_info_gen.py b/paddle/phi/api/yaml/generator/ops_extra_info_gen.py index 6f234e494f52d..b862d8bfe0a85 100644 --- a/paddle/phi/api/yaml/generator/ops_extra_info_gen.py +++ b/paddle/phi/api/yaml/generator/ops_extra_info_gen.py @@ -59,7 +59,7 @@ def map_code_template(attrs_str, attrs_checker_str): def parse_attr(attr_str): result = re.search( - r"(?P[a-z[\]]+)\s+(?P[a-zA-Z0-9_]+)\s*=\s*(?P\S+)", + r"(?P[a-zA-Z0-9_[\]]+)\s+(?P[a-zA-Z0-9_]+)\s*=\s*(?P\S+)", attr_str) return ATTR_TYPE_STRING_MAP[result.group('attr_type')], result.group( 'name'), result.group('default_val') diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml index 516e03662d542..357781318a13e 100644 --- a/paddle/phi/api/yaml/op_compat.yaml +++ b/paddle/phi/api/yaml/op_compat.yaml @@ -93,6 +93,11 @@ extra : attrs : [bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32"] +- op : conditional_block + backward : conditional_block_grad + extra : + attrs : ['str[] skip_eager_deletion_vars = {}'] + - op : conv2d backward : conv2d_grad extra : @@ -174,6 +179,10 @@ str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()] +- op : dequantize_linear + extra : + attrs : [float moving_rate = 0.9] + - op : diag (diag_v2) backward : diag_grad (diag_v2_grad) inputs : @@ -199,6 +208,10 @@ outputs : out : Out +- op : distributed_push_sparse + extra : + attrs : ['int[] slots = {}'] + - op : divide (elementwise_div) backward : divide_grad (elementwise_div) extra : @@ -232,6 +245,13 @@ extra : attrs : [bool use_mkldnn = false] +- op : embedding (lookup_table_v2) + backward : embedding_grad (lookup_table_v2_grad) + extra : + attrs : [bool is_sparse = false, bool is_distributed = false, bool remote_prefetch = false, + int trainer_id = 0, int slot = 0, 'int64_t[] height_sections = {}', 'str[] epmap = {}', + 'str[] table_names = {}'] + - op : erf inputs : x : X @@ -259,6 +279,34 @@ extra : attrs : [bool use_mkldnn = false, bool use_cudnn = false] +- op : fake_channel_wise_quantize_abs_max + extra : + attrs : [int round_type = 1] + +- op : fake_channel_wise_quantize_dequantize_abs_max + extra : + attrs : [int round_type = 1] + +- op : fake_quantize_abs_max + extra : + attrs : [int round_type = 1] + +- op : fake_quantize_dequantize_abs_max + extra : + attrs : [int round_type = 1] + +- op : fake_quantize_dequantize_moving_average_abs_max + extra : + attrs : [int round_type = 1] + +- op : fake_quantize_moving_average_abs_max + extra : + attrs : [int round_type = 1] + +- op : 
+  extra :
+    attrs : [int round_type = 1]
+
 - op : fft_c2c
   inputs: {x: X}
   outputs: {out: Out}
@@ -441,6 +489,12 @@
   outputs :
     out : Out
 
+- op : nce
+  backward : nce_grad
+  extra :
+    attrs : [int trainer_id = 0, 'int64_t[] height_sections = {}', 'str[] epmap = {}',
+             'str[] table_names = {}', 'int[] custom_neg_classes = {}']
+
 - op : nearest_interp (nearest_interp_v2)
   backward : nearest_interp_grad (nearest_interp_v2_grad)
   extra :
@@ -483,6 +537,10 @@
   extra :
     attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]
 
+- op : quantize_linear
+  extra :
+    attrs : [float moving_rate = 0.9]
+
 - op : reciprocal
   backward : reciprocal_grad
   extra :
@@ -574,6 +632,11 @@
   extra :
     attrs : [bool deterministic = false, str rng_name = "", bool force_cpu = false]
 
+- op : sequence_softmax
+  backward : sequence_softmax_grad
+  extra :
+    attrs : [str data_format = "AnyLayout"]
+
 - op : shape
   extra :
     attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]
@@ -650,11 +713,6 @@
   extra :
     attrs : [bool use_mkldnn = false]
 
-- op : stack
-  backward : stack_grad
-  extra :
-    attrs : [bool use_mkldnn = false]
-
 - op : subtract (elementwise_sub)
   backward : subtract_grad (elementwise_sub_grad)
   extra :
@@ -708,3 +766,8 @@
     x : X
   outputs :
     out : Out
+
+- op : while
+  backward : while_grad
+  extra :
+    attrs : ['str[] skip_eager_deletion_vars = {}']