How can I put several learners, including an AutoFSelector and an AutoTuner, into one list, benchmark them together, and compare their performance?
I also wonder how to rank the learners within each task when the benchmark contains multiple tasks.
library(mlr3verse)
library(mlr3proba)          # survival tasks, learners and measures
library(mlr3extralearners)  # provides lrn("surv.svm")
mod1 = AutoTuner$new(
  learner = lrn("surv.svm", type = "hybrid", diff.meth = "makediff3"),
  resampling = rsmp("holdout"),
  measure = msr("surv.cindex"),
  terminator = trm("evals", n_evals = 10),
  tuner = tnr("random_search"),
  # gamma.mu is a length-2 vector: tune its two entries and recombine them in a trafo (bounds are illustrative)
  search_space = ps(
    gamma = p_dbl(1e-3, 1e3, logscale = TRUE),
    mu = p_dbl(1e-3, 1e3, logscale = TRUE),
    .extra_trafo = function(x, param_set) {
      x$gamma.mu = c(x$gamma, x$mu)
      x$gamma = x$mu = NULL; x
    }))
# AutoFSelector: sequential backward selection around a preprocessing
# pipeline (imputation, scaling, one-hot encoding) ending in a Cox PH model
mod2 = AutoFSelector$new(
  learner = as_learner(
    po("imputemedian", affect_columns = selector_type("numeric")) %>>%
      po("imputemode", affect_columns = selector_type("factor")) %>>%
      po("scale") %>>%
      po("encode", method = "one-hot") %>>%
      lrn("surv.coxph")),
  resampling = rsmp("holdout"),
  measure = msr("surv.cindex"),
  terminator = trm("evals", n_evals = 100),
  fselector = fs("sequential", strategy = "sbs"))
# both automated learners go into one list for the benchmark design
lrns = list(mod1, mod2)
design = benchmark_grid(
  tasks = tsks(c("actg", "rats")),
  learners = lrns,
  resamplings = rsmp("holdout"))
bmr = benchmark(design, store_models = TRUE, store_backends = TRUE)
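
To compare the two automated learners once the benchmark has run, one option is to aggregate the scores per (task, learner) pair with $aggregate() and to plot them. A minimal sketch, assuming the benchmark above completed (the score column is named after the measure id, surv.cindex):

# aggregated C-index per (task, learner) combination
aggr = bmr$aggregate(msr("surv.cindex"))
aggr[, .(task_id, learner_id, surv.cindex)]

# visual comparison of the scores, facetted by task
autoplot(bmr, measure = msr("surv.cindex"))

Because the benchmark was run with store_models = TRUE, the inner optimization results can additionally be inspected with extract_inner_tuning_results(bmr) and extract_inner_fselect_results(bmr).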
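
To rank the learners within each task, the aggregated table can be ranked per task_id. A small sketch using data.table; since the C-index is maximised, rank 1 is the best learner on that task:

library(data.table)

# rank learners within each task (rank 1 = highest C-index on that task)
aggr[, rank := frank(-surv.cindex), by = task_id]
aggr[order(task_id, rank), .(task_id, learner_id, surv.cindex, rank)]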