diff --git a/python/paddle/distributed/auto_parallel/parallelizer.py b/python/paddle/distributed/auto_parallel/parallelizer.py
index ccdcc36434e33..f8c0edf84d665 100644
--- a/python/paddle/distributed/auto_parallel/parallelizer.py
+++ b/python/paddle/distributed/auto_parallel/parallelizer.py
@@ -23,7 +23,6 @@
 import paddle
 from paddle.distributed.utils import get_logger
 from paddle.distributed.fleet import cloud_utils
-from paddle.distributed.fleet.launch_utils import run_with_coverage
 import paddle.fluid.core as core
 from .dist_context import DistributedContext
 from .dist_context import get_default_distributed_context
@@ -139,7 +138,6 @@ def parallelize(self,
             rank_mapping_args = " ".join(
                 ["--rank_mapping_path", rank_mapping_path])
             if os.environ.get("WITH_COVERAGE", "OFF") == "ON":
-                run_with_coverage(True)
                 coverage_args = ["-m", "coverage", "run", "--branch", "-p"]
             else:
                 coverage_args = []
diff --git a/python/paddle/distributed/fleet/launch_utils.py b/python/paddle/distributed/fleet/launch_utils.py
index 04aa3b8db19b2..1764e0b2cbf1a 100644
--- a/python/paddle/distributed/fleet/launch_utils.py
+++ b/python/paddle/distributed/fleet/launch_utils.py
@@ -542,7 +542,8 @@ def start_local_trainers(cluster,
         current_env.update(proc_env)
 
         coverage_args = []
-        if run_with_coverage():
+        if run_with_coverage() or os.environ.get("WITH_COVERAGE",
+                                                 "OFF") == "ON":
             coverage_args = ["-m", "coverage", "run", "--branch", "-p"]
         cmd = [sys.executable, "-u"] + coverage_args + [training_script
                                                         ] + training_script_args
diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/CMakeLists.txt b/python/paddle/fluid/tests/unittests/auto_parallel/CMakeLists.txt
index 219094d36fda6..58fc9ebb339d5 100644
--- a/python/paddle/fluid/tests/unittests/auto_parallel/CMakeLists.txt
+++ b/python/paddle/fluid/tests/unittests/auto_parallel/CMakeLists.txt
@@ -1,5 +1,5 @@
 # file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py")
 # string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
 if(WITH_DISTRIBUTE AND WITH_GPU AND WITH_NCCL)
-    list(APPEND DIST_TEST_OPS test_auto_parallel_launch)
+    list(APPEND DIST_TEST_OPS test_auto_parallel_relaunch)
 endif()
diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_auto_parallel_relaunch.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_auto_parallel_relaunch.py
index d4c982c68e469..321b262286218 100644
--- a/python/paddle/fluid/tests/unittests/auto_parallel/test_auto_parallel_relaunch.py
+++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_auto_parallel_relaunch.py
@@ -90,7 +90,6 @@ def test_relaunch(self):
             "auto_parallel_relaunch_model.py")
 
         if os.environ.get("WITH_COVERAGE", "OFF") == "ON":
-            run_with_coverage(True)
             coverage_args = ["-m", "coverage", "run", "--branch", "-p"]
         else:
             coverage_args = []
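
Note: the net effect of this patch is that the WITH_COVERAGE environment variable is now honored in one place, start_local_trainers, instead of each caller having to toggle run_with_coverage(True) before relaunching. Below is a minimal sketch of the resulting gating logic; it is an illustration only, and the helper name build_trainer_cmd is hypothetical, not part of Paddle's API (the sketch also omits the in-process run_with_coverage() flag to stay self-contained):

import os
import sys

def build_trainer_cmd(training_script, training_script_args):
    # Hypothetical stand-in for the command assembly in
    # start_local_trainers: coverage instrumentation is prepended
    # when WITH_COVERAGE=ON (the real code additionally checks the
    # run_with_coverage() module-level flag).
    coverage_args = []
    if os.environ.get("WITH_COVERAGE", "OFF") == "ON":
        coverage_args = ["-m", "coverage", "run", "--branch", "-p"]
    # Equivalent to: python -u [-m coverage run --branch -p] script args...
    return [sys.executable, "-u"] + coverage_args + \
        [training_script] + training_script_args

# With WITH_COVERAGE=ON in the environment, this prints something like:
# ['/usr/bin/python', '-u', '-m', 'coverage', 'run', '--branch', '-p',
#  'train.py', '--epochs', '1']
print(build_trainer_cmd("train.py", ["--epochs", "1"]))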