Different Results using MLR and MLR3

I don't get similar results when I run the same data and models through mlr and mlr3, and mlr is at least 20-fold faster. Since I can't share my own data, I reproduced the difference in computation speed and results with the lung data from the survival package.

mlr finished in about 1 min with generally lower C-indices, while mlr3 took 21 min and produced much higher C-indices, despite using the same data, the same preprocessing, the same models, and setting the seed.

library(tidyverse)
library(tidymodels)
library(PKPDmisc)
library(mlr)
library(parallelMap)
library(survival)

# Data and Data Splitting  
data = as_tibble(lung) %>% 
       mutate(status = if_else(status==1, 0, 1),
              sex = factor(sex, levels = c(1:2), labels = c("male", "female")),
              ph.ecog = factor(ph.ecog))

# Introduce ~10% missing values in sex and ph.ecog to exercise imputation
na              <- sample(1:228, 228*0.1)
data$sex[na]    <- NA
data$ph.ecog[na]<- NA

set.seed(123)
split <- data  %>% initial_split(prop = 0.8, strata = status)    
train <- split %>% training()
test  <- split %>% testing()

# Task
task = makeSurvTask(id = "Survival", data = train, target = c("time", "status")) 

# Resample 
# For model assessment before external validation on test data
set.seed(123)
outer_cv  = makeResampleDesc("CV", iters = 4, stratify.cols = c("status")) %>% 
  makeResampleInstance(task) 

# For feature selection and parameter tuning
set.seed(123)
inner_cv  = makeResampleDesc("CV", iters = 4, stratify.cols = c("status"))

# Learners
cox1      = makeLearner(id = "COX1", "surv.coxph") %>%
            makeImputeWrapper(classes = list(factor = imputeMode(), numeric = imputeMedian()),
                              # Add dummy indicator columns flagging imputed factor values
                              dummy.classes = "factor") %>%
            makePreprocWrapperCaret(ppc.center = TRUE, ppc.scale = TRUE) %>%
            makeFeatSelWrapper(resampling = inner_cv, show.info = TRUE,
                               control = makeFeatSelControlSequential(method = "sfs"))
  
cox_lasso = makeLearner(id = "COX LASSO", "surv.glmnet") %>% 
            makeImputeWrapper(classes = list(factor = imputeMode(), numeric = imputeMedian()),
                              # Add dummy indicator columns flagging imputed factor values
                              dummy.classes = "factor") %>%
            # Normalize numeric features
            makePreprocWrapperCaret(ppc.center = TRUE, ppc.scale = TRUE) %>%
            makeTuneWrapper(resampling = inner_cv, show.info = TRUE, 
                            par.set    =  makeParamSet(makeNumericParam("lambda",lower = -3, upper = 0,
                                                                        trafo = function(x) 10^x)),
                            control    = makeTuneControlGrid(resolution = 10L))
  
cox_net   = makeLearner(id = "COX NET",  "surv.glmnet") %>% 
            makeImputeWrapper(classes = list(factor = imputeMode(), numeric = imputeMedian()),
                              # Add dummy indicator columns flagging imputed factor values
                              dummy.classes = "factor") %>%
            # Normalize numeric features
            makePreprocWrapperCaret(ppc.center = TRUE, ppc.scale = TRUE) %>%
            makeTuneWrapper(resampling = inner_cv, show.info = TRUE, 
                            par.set    =  makeParamSet(makeNumericParam("alpha", lower = 0,  upper = 1,
                                                                        trafo = function(x) round(x,2)),
                                                       makeNumericParam("lambda",lower = -3, upper = 1,
                                                                        trafo = function(x) 10^x)),
                            control    = makeTuneControlGrid(resolution = 10L))

# Benchmark 
# parallelStartSocket(4)
start_time <- Sys.time() 

set.seed(123)
mlr_bmr   = benchmark(learners    = list(cox1, cox_lasso, cox_net),
                      tasks       = task,
                      resamplings = outer_cv,
                      keep.extract= TRUE, 
                      models      = TRUE)

end_time <- Sys.time()
mlr_time = end_time - start_time
# parallelStop()

mlr_res <- getBMRPerformances(mlr_bmr, as.df = TRUE) %>% 
  select(Learner = learner.id, Task = task.id, Cindex  = cindex) %>% 
  mutate(Color_Package  = "mlr",
         Learner = word(str_replace(Learner, "\\.", " "), 1, -2))

##################################################################
library(mlr3verse)

# Task 
task2 = TaskSurv$new(id = "Survival2", backend = train, time =  "time", event   = "status")
task2$col_roles$stratum = c("status")

# Resample 
set.seed(123)
outer_cv2  = rsmp("cv", folds = 4)$instantiate(task2)  

# For feature selection and parameter tuning
set.seed(123)
inner_cv2  = rsmp("cv", folds = 4) 

# Learners
preproc    = po("imputemedian", affect_columns = selector_type("numeric")) %>>% 
             po("imputemode",   affect_columns = selector_type("factor"))  %>>% 
             po("scale")                                                   %>>%
             po("encode") 
  
cox2       = AutoFSelector$new(learner    = as_learner(preproc %>>% 
                                                       lrn("surv.coxph")), 
                               resampling = inner_cv2,
                               measure    = msr("surv.cindex"),
                               terminator = trm("none"), # need to increase later 
                               fselector  = fs("sequential", strategy = "sfs")) # sfs is the default
cox2$id = "COX1"

cox_lasso2 = AutoTuner$new(learner    = as_learner(preproc %>>%
                                                   lrn("surv.glmnet", 
                                                       lambda = to_tune(p_dbl(lower = -3, upper = 0, 
                                                                              trafo = function(x) 10^x)))),
                           resampling = inner_cv2,
                           measure    = msr("surv.cindex"),
                           terminator = trm("none"), 
                           tuner      = tnr("grid_search", resolution = 10)) 
cox_lasso2$id = "COX LASSO"

cox_net2   = AutoTuner$new(learner    = as_learner(preproc %>>%
                                                   lrn("surv.glmnet", 
                                                       alpha  = to_tune(p_dbl(lower = 0, upper = 1)),
                                                       lambda = to_tune(p_dbl(lower = -3, upper = 1, 
                                                                              trafo = function(x) 10^x)))),
                           resampling = inner_cv2,
                           measure    = msr("surv.cindex"),
                           terminator = trm("none"), 
                           tuner      = tnr("grid_search", resolution = 10)) 
cox_net2$id = "COX NET"

# Benchmark
design  =  benchmark_grid(tasks       = task2,
                          learners    = c(cox2, cox_lasso2, cox_net2),
                          resamplings = outer_cv2)  

# future::plan("multisession")
# Error: Output type of PipeOp select during training (Task) incompatible with input type of PipeOp surv.coxph (TaskSurv)
start_time <- Sys.time() 

set.seed(123)
mlr3_bmr = mlr3::benchmark(design)

end_time <- Sys.time()
mlr3_time = end_time - start_time

mlr3_res <- as.data.table(mlr3_bmr$score()) %>% 
  select(Task=task_id, Learner=learner_id, Cindex=surv.harrell_c) %>% 
  mutate(Color_Package  = "mlr3") 

mlr_res %>% 
  bind_rows(mlr3_res) %>% 
  ggplot(aes(Learner, Cindex, fill= Color_Package )) + 
  geom_boxplot(position=position_dodge(.8)) + 
  stat_summary(fun= mean, geom = "point", aes(group = Color_Package ),
               position=position_dodge(.8), size = 3) +
  labs(x="", y = " C-Index") + 
  theme_bw() + base_theme() + theme(legend.position = "top")
Ali Alhadab
  • You can speed up `mlr3tuning` by using a larger batch size. Use `tnr("grid_search", resolution = 10, batch_size = 100)`. I cannot comment on the performance differences. – be-marc Jan 19 '22 at 09:57
  • You also might want to use the latest `mlr3tuning` dev version from GitHub, which should be faster. Generally, `mlr3tuning` has more overhead than `mlr`, but this becomes negligible when you tune complex models and not just for one minute. – be-marc Jan 19 '22 at 10:03

0 Answers