diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index bfa87667ed3cb..b7e811e4c64d6 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -1060,13 +1060,6 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
     gflags.push_back(flag);
   }
 
-// TODO(wilber): jetson tx2 may fail to run the model due to insufficient memory
-// under the native_best_fit strategy. Modify the default allocation strategy to
-// auto_growth. todo, find a more appropriate way to solve the problem.
-#ifdef WITH_NV_JETSON
-  gflags.push_back("--allocator_strategy=auto_growth");
-#endif
-
   // TODO(Shixiaowei02): Add a mandatory scheme to use the thread local
   // allocator when multi-stream is enabled.
   if (config.thread_local_stream_enabled()) {
diff --git a/paddle/fluid/platform/flags.cc b/paddle/fluid/platform/flags.cc
index e417b4fd8694b..4e47c130c7252 100644
--- a/paddle/fluid/platform/flags.cc
+++ b/paddle/fluid/platform/flags.cc
@@ -364,11 +364,7 @@ PADDLE_DEFINE_EXPORTED_double(
  * Example:
  * Note: For selecting allocator policy of PaddlePaddle.
  */
-#ifdef PADDLE_ON_INFERENCE
-static constexpr char kDefaultAllocatorStrategy[] = "naive_best_fit";
-#else
 static constexpr char kDefaultAllocatorStrategy[] = "auto_growth";
-#endif
 PADDLE_DEFINE_EXPORTED_string(
     allocator_strategy, kDefaultAllocatorStrategy,
     "The allocation strategy, enum in [naive_best_fit, auto_growth]. "
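
Note on the change: with the WITH_NV_JETSON special case and the PADDLE_ON_INFERENCE ifdef both removed, auto_growth becomes the single default for FLAGS_allocator_strategy, for inference builds as well as training builds. Below is a minimal sketch of how a deployment could still opt back into naive_best_fit after this change, assuming the exported gflag is overridden through the standard gflags API before the predictor is created (setting the FLAGS_allocator_strategy environment variable before launch should work the same way for exported Paddle flags); the model path and the exact include path for the inference header are hypothetical and depend on the install layout.

    // Sketch: explicitly select naive_best_fit now that it is no longer the
    // default for inference builds. Not part of this PR.
    #include <gflags/gflags.h>
    
    #include "paddle_inference_api.h"  // header from the Paddle inference SDK
    
    int main() {
      // Override the exported flag before the first allocation happens;
      // "allocator_strategy" is the gflag defined via
      // PADDLE_DEFINE_EXPORTED_string in paddle/fluid/platform/flags.cc.
      google::SetCommandLineOption("allocator_strategy", "naive_best_fit");
    
      paddle_infer::Config config;
      config.SetModel("./model_dir");  // hypothetical model directory
      auto predictor = paddle_infer::CreatePredictor(config);
      // ... prepare inputs and call predictor->Run() as usual ...
      return 0;
    }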