
I am trying to use tuneParams() and resample(), both from the mlr package, to double-check my cross-validation RMSE.
However, I cannot get the two functions to yield the same result.

Tuning the parameters with the mlr package:

train <- cars

invisible(library(mlr))
invisible(library(mlrMBO))
invisible(library(doParallel))

set.seed(0)
# Learner
lrn <- makeLearner("regr.xgboost", par.vals = list(eta = 0.3, objective = "reg:linear"))
lrn <- makePreprocWrapperCaret(lrn, ppc.scale = TRUE, ppc.center = TRUE)

# Task
task <- makeRegrTask(data = train, target = "dist")

# Resampling strategy
cv_desc <- makeResampleDesc('CV', iters = 4)
cv_inst <- makeResampleInstance(cv_desc, task = task)

# Parameter set
ps <- makeParamSet(
  makeIntegerParam("nrounds", lower = 30, upper = 60),
  makeNumericParam("lambda", lower = 0, upper = 1),
  makeNumericParam("alpha", lower = 0, upper = 1)
)

# Control
mbo.ctrl <- makeMBOControl()
mbo.ctrl <- setMBOControlTermination(mbo.ctrl, iters = 50)
ctrl <- mlr:::makeTuneControlMBO(mbo.control = mbo.ctrl)

# Tune model:
cl <- makeCluster(detectCores(), type='PSOCK')
registerDoParallel(cl)
params_res <- tuneParams(lrn, task, cv_inst, par.set = ps, control = ctrl, 
  show.info = FALSE, measures = mlr::rmse)
registerDoSEQ()
print(params_res)

Attempt to reproduce the RMSE with the resample() function:

set.seed(0)
lrn <- makeLearner("regr.xgboost", par.vals = params_res$x)
lrn <- makePreprocWrapperCaret(lrn, ppc.scale = TRUE, ppc.center = TRUE)
r = resample(lrn, task, cv_inst, measures = mlr::rmse)
mean(r$measures.test$rmse)
John
    You're doing different things -- in the top piece of code, you're tuning parameters, in the bottom piece you're not. Why do you expect to get the same result? – Lars Kotthoff May 18 '19 at 18:00

1 Answer


The aggregation that mlr uses for each measure is described in the tutorial.

For RMSE, the test.rmse aggregation is used. That means the per-fold test performances are aggregated via RMSE (the square root of the mean of the squared fold values), not via the arithmetic mean.
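To make the difference concrete, here is a minimal sketch with made-up per-fold RMSE values (the numbers are purely illustrative, not taken from the run below):

# Hypothetical per-fold test RMSEs, for illustration only
fold_rmse <- c(17.0, 16.6, 21.5, 13.7)

# Arithmetic mean of the fold RMSEs (what mean(r$measures.test$rmse) computes)
mean(fold_rmse)

# test.rmse aggregation: square root of the mean of the squared fold values
sqrt(mean(fold_rmse^2))

The two values only coincide when all folds have identical RMSE, which is why taking mean() of the per-fold values does not reproduce the tuning result.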

train <- cars

#invisible(library(mlr))
invisible(library(mlrMBO))
#> Loading required package: mlr
#> Loading required package: ParamHelpers
#> Registered S3 methods overwritten by 'ggplot2':
#>   method         from 
#>   [.quosures     rlang
#>   c.quosures     rlang
#>   print.quosures rlang
#> Loading required package: smoof
#> Loading required package: BBmisc
#> 
#> Attaching package: 'BBmisc'
#> The following object is masked from 'package:base':
#> 
#>     isFALSE
#> Loading required package: checkmate
invisible(library(doParallel))
#> Loading required package: foreach
#> Loading required package: iterators
#> Loading required package: parallel

set.seed(0)
# Learner
lrn <- makeLearner("regr.xgboost", par.vals = list(eta = 0.3, objective = "reg:linear"))
#> Warning in makeParam(id = id, type = "numeric", learner.param = TRUE, lower = lower, : NA used as a default value for learner parameter missing.
#> ParamHelpers uses NA as a special value for dependent parameters.
lrn <- makePreprocWrapperCaret(lrn, ppc.scale = TRUE, ppc.center = TRUE)

# Task
task <- makeRegrTask(data = train, target = "dist")

# Resampling strategy
cv_desc <- makeResampleDesc('CV', iters = 4)
cv_inst <- makeResampleInstance(cv_desc, task = task)

# Parameter set
ps <- makeParamSet(
  makeIntegerParam("nrounds", lower = 30, upper = 60),
  makeNumericParam("lambda", lower = 0, upper = 1),
  makeNumericParam("alpha", lower = 0, upper = 1)
)

# Control
mbo.ctrl <- makeMBOControl()
mbo.ctrl <- setMBOControlTermination(mbo.ctrl, iters = 50)
ctrl <- mlr:::makeTuneControlMBO(mbo.control = mbo.ctrl)

# Tune model:
cl <- makeCluster(detectCores(), type='PSOCK')
registerDoParallel(cl)
params_res <- tuneParams(lrn, task, cv_inst, par.set = ps, control = ctrl, 
  show.info = FALSE, measures = mlr::rmse)
registerDoSEQ()
print(params_res)
#> Tune result:
#> Op. pars: nrounds=30; lambda=0.994; alpha=1
#> rmse.test.rmse=17.4208912

lrn <- makeLearner("regr.xgboost", par.vals = params_res$x)
#> Warning in makeParam(id = id, type = "numeric", learner.param = TRUE, lower = lower, : NA used as a default value for learner parameter missing.
#> ParamHelpers uses NA as a special value for dependent parameters.
lrn <- makePreprocWrapperCaret(lrn, ppc.scale = TRUE, ppc.center = TRUE)
r = resample(lrn, task, cv_inst, measures = mlr::rmse)
#> Resampling: cross-validation
#> Measures:             rmse
#> [Resample] iter 1:    17.0026234
#> [Resample] iter 2:    16.5500225
#> [Resample] iter 3:    21.5016809
#> [Resample] iter 4:    13.7344482
#> 
#> Aggregated Result: rmse.test.rmse=17.4208912
#> 

all.equal(as.numeric(r$aggr), as.numeric(params_res$y))
#> [1] TRUE

all.equal(sqrt(mean(r$measures.test$rmse^2)), as.numeric(params_res$y))
#> [1] TRUE

Created on 2019-05-18 by the reprex package (v0.2.1)
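If you would rather have tuneParams() and resample() report the arithmetic mean of the per-fold RMSEs, a sketch (assuming setAggregation() and the test.mean aggregation exported by mlr) is to swap the measure's aggregation before tuning:

# Sketch: report the arithmetic mean of the per-fold RMSEs instead of test.rmse
rmse_mean <- setAggregation(mlr::rmse, mlr::test.mean)
params_res <- tuneParams(lrn, task, cv_inst, par.set = ps, control = ctrl,
  show.info = FALSE, measures = rmse_mean)
r <- resample(lrn, task, cv_inst, measures = rmse_mean)
mean(r$measures.test$rmse)  # with test.mean, this matches r$aggr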

pat-s