From d53e567a242419e5e08db67a4def15cd05855cc4 Mon Sep 17 00:00:00 2001
From: ShenLiang <1422485404@qq.com>
Date: Fri, 10 Sep 2021 11:25:18 +0800
Subject: [PATCH] fix bug of recompute in hybridparallel (#35588)

---
 paddle/fluid/operators/flatten_op.cu.cc                      | 5 +++++
 .../paddle/distributed/fleet/meta_parallel/pp_utils/utils.py | 1 +
 python/paddle/fluid/contrib/mixed_precision/fp16_lists.py    | 1 +
 python/paddle/fluid/dygraph/amp/auto_cast.py                 | 1 +
 4 files changed, 8 insertions(+)

diff --git a/paddle/fluid/operators/flatten_op.cu.cc b/paddle/fluid/operators/flatten_op.cu.cc
index 223cfc6ac667d..e0987288abdd7 100644
--- a/paddle/fluid/operators/flatten_op.cu.cc
+++ b/paddle/fluid/operators/flatten_op.cu.cc
@@ -15,6 +15,7 @@ limitations under the License. */
 #include "paddle/fluid/operators/flatten_op.h"
 
 namespace ops = paddle::operators;
+namespace plat = paddle::platform;
 
 REGISTER_OP_CUDA_KERNEL(
     flatten, ops::FlattenKernel<paddle::platform::CUDADeviceContext, float>,
@@ -50,6 +51,8 @@ REGISTER_OP_CUDA_KERNEL(
     flatten_contiguous_range,
     ops::FlattenContiguousRangeKernel<paddle::platform::CUDADeviceContext,
                                       float>,
+    ops::FlattenContiguousRangeKernel<paddle::platform::CUDADeviceContext,
+                                      plat::float16>,
     ops::FlattenContiguousRangeKernel<paddle::platform::CUDADeviceContext,
                                       double>,
     ops::FlattenContiguousRangeKernel<paddle::platform::CUDADeviceContext,
@@ -64,6 +67,8 @@ REGISTER_OP_CUDA_KERNEL(
     flatten_contiguous_range_grad,
     ops::FlattenContiguousRangeGradKernel<paddle::platform::CUDADeviceContext,
                                           float>,
+    ops::FlattenContiguousRangeGradKernel<paddle::platform::CUDADeviceContext,
+                                          plat::float16>,
     ops::FlattenContiguousRangeGradKernel<paddle::platform::CUDADeviceContext,
                                           double>,
     ops::FlattenContiguousRangeGradKernel<paddle::platform::CUDADeviceContext,
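
Note (context, not part of the patch): the C++ hunks above register float16 CUDA kernels for
flatten_contiguous_range and its grad op, which appears to be what recompute under AMP needs
when it flattens half-precision activations in hybrid parallel training. A minimal sketch of
the call path this unblocks, assuming a GPU build of PaddlePaddle that contains this change
(paddle.flatten is the public API that dispatches to flatten_contiguous_range in dygraph):

    # Illustrative sketch only; before this change, flattening a float16 GPU
    # tensor had no registered CUDA kernel and raised a missing-kernel error.
    import paddle

    paddle.set_device("gpu")                          # requires a CUDA build
    x = paddle.randn([4, 8, 16]).astype("float16")    # float16 activation, as produced under AMP
    y = paddle.flatten(x, start_axis=1, stop_axis=2)  # dispatches to flatten_contiguous_range
    print(y.shape)                                    # [4, 128]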