diff --git a/15-eco.Rmd b/15-eco.Rmd index 06b9fce7a..de5063717 100644 --- a/15-eco.Rmd +++ b/15-eco.Rmd @@ -505,22 +505,27 @@ autotuner_rf = mlr3tuning::auto_tuner( Calling the `train()`-method of the `AutoTuner`-object finally runs the hyperparameter\index{hyperparameter} tuning, and will find the optimal hyperparameter\index{hyperparameter} combination for the specified parameters. -```{r 15-eco-24, eval=TRUE, cache=TRUE, cache.lazy=FALSE} +```{r 15-eco-24, eval=FALSE, cache=TRUE, cache.lazy=FALSE} # hyperparameter tuning set.seed(08012024) -autotuner_rf$train(task, store_backends = TRUE) +autotuner_rf$train(task) ``` -```{r 15-eco-25, eval=TRUE, echo=FALSE} +```{r 15-eco-25, cache=TRUE, cache.lazy=FALSE, eval=FALSE, echo=FALSE} saveRDS(autotuner_rf, "extdata/15-tune.rds") ``` -```{r 15-eco-26, echo=FALSE} +```{r 15-eco-26, echo=FALSE, eval=FALSE} autotuner_rf = readRDS("extdata/15-tune.rds") ``` -```{r tuning-result, cache=TRUE, cache.lazy=FALSE} + + +```{r tuning-result, eval=FALSE} autotuner_rf$tuning_result +#> mtry sample.fraction min.node.size learner_param_vals x_domain regr.rmse +#> +#> 1: 4 0.878 7 0.368 ``` ### Predictive mapping @@ -528,7 +533,7 @@ autotuner_rf$tuning_result The tuned hyperparameters\index{hyperparameter} can now be used for the prediction. To do so, we only need to run the `predict` method of our fitted `AutoTuner` object. -```{r 15-eco-27, cache=TRUE, cache.lazy=FALSE, warning=FALSE} +```{r 15-eco-27, cache=TRUE, cache.lazy=FALSE, warning=FALSE, eval=FALSE} # predicting using the best hyperparameter combination autotuner_rf$predict(task) ```