[XPU] default no autotune (PaddlePaddle#62636)
zhupengyang committed Mar 12, 2024
1 parent d527fb5 commit fde63d1
Showing 3 changed files with 8 additions and 2 deletions.
paddle/fluid/inference/api/analysis_config.cc (5 additions, 0 deletions)
@@ -180,6 +180,11 @@ void AnalysisConfig::EnableXpu(int l3_size,
bool transformer_encoder_adaptive_seqlen,
bool enable_multi_stream) {
#if defined(PADDLE_WITH_XPU) || defined(LITE_SUBGRAPH_WITH_XPU)
+ LOG_FIRST_N(WARNING, 1)
+ << "Parameters in EnableXpu/enable_xpu is deprecated since version "
+ "2.6.1, and will be removed in version 3.0! Please use "
+ "EnableXpu/enable_xpu without parameters, and use "
+ "SetXpuConfig/set_xpu_config to set options.";
use_xpu_ = true;
xpu_config_.l3_size = l3_size;
xpu_config_.conv_autotune_level = conv_autotune;
paddle/fluid/inference/api/paddle_analysis_config.h (2 additions, 1 deletion)
@@ -111,6 +111,7 @@ struct PD_INFER_DECL XpuConfig {
bool conv_autotune_file_writeback{false};

// Fc autotune level. The Optional values are 0-9. Default 0 means no
// autotune.
int fc_autotune_level{0};
// Base fc autotune info is read from fc_autotune_file.
std::string fc_autotune_file;
@@ -367,7 +368,7 @@ struct PD_INFER_DECL AnalysisConfig {
///
void EnableXpu(int l3_size = 0xfffc00,
bool l3_locked = false,
- bool conv_autotune = true,
+ bool conv_autotune = false,
const std::string& conv_autotune_file = "",
const std::string& transformer_encoder_precision = "int16",
bool transformer_encoder_adaptive_seqlen = false,
paddle/fluid/pybind/inference_api.cc (1 addition, 1 deletion)
@@ -800,7 +800,7 @@ void BindAnalysisConfig(py::module *m) {
&AnalysisConfig::EnableXpu,
py::arg("l3_size") = 16 * 1024 * 1024,
py::arg("l3_locked") = false,
py::arg("conv_autotune") = true,
py::arg("conv_autotune") = false,
py::arg("conv_autotune_file") = "",
py::arg("transformer_encoder_precision") = "int16",
py::arg("transformer_encoder_adaptive_seqlen") = false,
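
Note: with conv_autotune now defaulting to false, callers that still want conv autotune have to request it explicitly, as the new deprecation warning suggests, via EnableXpu/enable_xpu without parameters plus SetXpuConfig/set_xpu_config. Below is a minimal C++ sketch of that usage; EnableXpu, SetXpuConfig, XpuConfig, conv_autotune_level, and conv_autotune_file all appear in this diff or in the warning text, while the include path, the model file names, the level value 1, and the cache-file name are illustrative assumptions.

// Minimal sketch (not part of this commit): opting back in to conv autotune.
#include "paddle_inference_api.h"  // assumed include path for the C++ inference API

int main() {
  // Placeholder model files.
  paddle::AnalysisConfig config("model.pdmodel", "model.pdiparams");

  // Preferred form after 2.6.1: EnableXpu() without the deprecated parameters.
  config.EnableXpu();

  // Request autotune through XpuConfig; level 0 (the new default) disables it.
  paddle::XpuConfig xpu_config;
  xpu_config.conv_autotune_level = 1;                   // assumed non-zero level
  xpu_config.conv_autotune_file = "conv_autotune.data"; // assumed cache-file name
  config.SetXpuConfig(xpu_config);

  // Build the predictor with the updated config.
  auto predictor = paddle_infer::CreatePredictor(config);
  return predictor ? 0 : 1;
}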
