notes on tune tokens #281
@@ -115,10 +115,24 @@ TuningInstanceSingleCrit = R6Class("TuningInstanceSingleCrit",
     #' This defines the resampled performance of a learner on a task, a
     #' feasibility region for the parameters the tuner is supposed to optimize,
     #' and a termination criterion.
-    initialize = function(task, learner, resampling, measure, search_space,
+    initialize = function(task, learner, resampling, measure, search_space = NULL,
       terminator, store_benchmark_result = TRUE, store_models = FALSE,
       check_values = FALSE) {
       measure = as_measure(measure)
+      # We might want to have the following in a function because it is used in MultiCrit as well
+      if (is.null(search_space)) {
+        # TODO: check if we can construct the search space from learner$param_set using tune tokens
+        tmp = learner$param_set$get_tune_pair() # returns fixed_values (all but tune tokens) and search_space (from tune tokens)
Collaborator: I don't think we should have a function that returns two things that could easily be obtained by two calls, since the two things that happen here are independent of each other.

Member (Author): Just to emphasize that you always want to do both? If you want the search_space, you should also always take the param_set with the remaining fixed param values.
+        # Question: If we have tune tokens in learner$param_set, the learner is practically "broken", so we have to clean it up in order to use it. Why not
+        # a) Don't use tune tokens at all (we can pass the info in get_tune_pair(tune_tokens = xxx))
+        # b) Use a second slot next to $param_vals, e.g. $param_vals_to_tune. (++)
+        # c) Only use param_set$get_values(), which can filter out TuneTokens; maybe param_set$values discards TuneTokens. On the other hand, if I put a lot of effort here into always treating TuneTokens and real values separately, I could just go with solution b).
+        learner$param_set$values = tmp$fixed_values # without tune tokens / this could happen in place
+        search_space = tmp$search_space
+        # we don't allow a mix of search_space and tune tokens
+      } else {
+        # TODO: check that no tune tokens exist in learner$param_set$values
+      }
       obj = ObjectiveTuning$new(task = task, learner = learner,
         resampling = resampling, measures = list(measure),
         store_benchmark_result = store_benchmark_result,
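The splitting step that the constructor relies on is not part of the patch itself. The sketch below is a minimal, self-contained illustration of what a `get_tune_pair()`-style helper could do; `tune_marker()` and `split_tune_values()` are hypothetical stand-ins for illustration, not paradox or mlr3 API. It also makes the review discussion concrete: the same two results could be returned as one pair or obtained through two independent calls.

```r
# Hypothetical stand-ins, for illustration only:
# `tune_marker()` plays the role of a tune token, `split_tune_values()`
# plays the role of the proposed get_tune_pair().
tune_marker = function(...) structure(list(...), class = "tune_marker")

split_tune_values = function(values) {
  is_marker = vapply(values, inherits, logical(1), what = "tune_marker")
  list(
    fixed_values = values[!is_marker], # would stay in learner$param_set$values
    tune_values  = values[is_marker]   # would be turned into the search_space
  )
}

# Values resembling the example script in this PR:
vals = list(
  type   = "C-classification",
  kernel = tune_marker(levels = c("polynomial", "radial")),
  degree = tune_marker(lower = 1, upper = 3)
)

pair = split_tune_values(vals)
names(pair$fixed_values) # "type"
names(pair$tune_values)  # "kernel" "degree"
```

Exposing the two components as separate accessors (the alternative raised in the review comment) would only need the `is_marker` filter applied twice, once negated.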
@@ -0,0 +1,32 @@
library(mlr3learners)

task = tsk("iris")
learner = lrn("classif.svm", type = "C-classification")
resampling = rsmp("holdout")
measure = msr("classif.ce")
terminator = trm("none")

learner$param_set$values$kernel = to_tune(c("polynomial", "radial"))
learner$param_set$values$degree = to_tune(1, 3)

# solution 1
foo = learner$param_set$get_tune_pair()
learner$param_set = foo$param_set # without tune tokens / this could happen in place
search_space = foo$search_space # what param_set$tune_ps() would give me

# solution 2
learner$convert_for_tuning() # drop the tune tokens from param_vals and convert them into the param_set
search_space = learner$param_set$
# is this bad here, if I param$vals

instance = TuningInstanceSingleCrit$new(
  task = task,
  learner = learner,
  resampling = resampling,
  measure = measure,
  search_space = search_space,
  terminator = terminator
)

tuner = tnr("grid_search", resolution = 1)
tuner$optimize(instance)
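For reference, the search space that either solution would have to derive from the two `to_tune()` calls above corresponds to the following explicit definition. This assumes paradox's `ps()`, `p_fct()`, and `p_int()` helpers are available; whether they already existed at the time of this PR is not confirmed here.

```r
library(paradox)

# Explicit equivalent of the search space implied by
#   kernel = to_tune(c("polynomial", "radial"))
#   degree = to_tune(1, 3)
search_space_manual = ps(
  kernel = p_fct(levels = c("polynomial", "radial")),
  degree = p_int(lower = 1, upper = 3)
)
```

After the tune tokens are stripped, `learner$param_set$values` should only contain the fixed value `type = "C-classification"`, regardless of which solution is chosen.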