From e64105f6d00c02012fbef74c5b737dff699666f7 Mon Sep 17 00:00:00 2001
From: Roc <30228238+sljlp@users.noreply.github.com>
Date: Tue, 31 Aug 2021 13:37:01 +0800
Subject: [PATCH] [hybrid npu] fix npu found_finite in hybrid (#35134) (#35291)

Co-authored-by: WangXi
---
 .../fleet/meta_optimizers/sharding_optimizer.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/sharding_optimizer.py
index 5c2f24054f835..ed16c2296f1e2 100755
--- a/python/paddle/distributed/fleet/meta_optimizers/sharding_optimizer.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/sharding_optimizer.py
@@ -369,8 +369,11 @@ def _adapt_amp_clip_without_sharding(self):
 
         # FIXME(wangxi): mp should prune duplicated param_grads when calc
         # amp inf_var & clip global_norm_var
-        FP16Utils.sync_amp_check_nan_inf(main_block,
-                                         [self.mp_ring_id, self.pp_ring_id])
+        rings = [self.mp_ring_id, self.pp_ring_id]
+        # FIXME(wangxi): some problem with NPU found_finite, need sync with DP
+        if core.is_compiled_with_npu():
+            rings += [self.dp_ring_id]
+        FP16Utils.sync_amp_check_nan_inf(main_block, rings)
 
         gradientclip_helper = GradientClipHelper(None)
         gradientclip_helper.sync_global_norm(
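
Note on the change: before this patch the AMP nan/inf check was only synchronized over the model-parallel and pipeline-parallel rings; the patch builds the ring list dynamically and, on NPU builds, also includes the data-parallel ring as a workaround for the found_finite issue mentioned in the FIXME. The following is an illustrative sketch only (not Paddle code); the helper name select_nan_inf_sync_rings is hypothetical and stands in for the patched body of _adapt_amp_clip_without_sharding:

    # Illustrative sketch: which communication rings the patched code would pass
    # to FP16Utils.sync_amp_check_nan_inf.
    def select_nan_inf_sync_rings(mp_ring_id, pp_ring_id, dp_ring_id,
                                  compiled_with_npu):
        """Return the ring ids over which AMP found_finite is synchronized."""
        rings = [mp_ring_id, pp_ring_id]   # MP and PP rings are always synced
        if compiled_with_npu:              # NPU builds additionally sync with DP
            rings.append(dp_ring_id)       # (workaround for NPU found_finite)
        return rings

    # Example: on an NPU build the DP ring (id 2 here) is included as well.
    assert select_nan_inf_sync_rings(0, 1, 2, compiled_with_npu=True) == [0, 1, 2]
    assert select_nan_inf_sync_rings(0, 1, 2, compiled_with_npu=False) == [0, 1]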