Python hyperopt.hp.uniform() Examples
The following are 30 code examples of hyperopt.hp.uniform(), drawn from open-source projects. The project, source file, and license are noted above each example. You may also want to check out the other available functions and classes of the hyperopt.hp module.
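As a quick orientation before the examples: hp.uniform(label, low, high) declares a real-valued search dimension that hyperopt samples uniformly from [low, high]; the label must be unique within a search space and is the key under which fmin reports the best value. A minimal, self-contained sketch (the quadratic objective is illustrative, not taken from any of the projects below):

from hyperopt import fmin, hp, tpe, Trials

# Search space: one real-valued parameter, sampled uniformly from [-10, 10].
space = hp.uniform('x', -10, 10)

def objective(x):
    # fmin minimizes, so return the quantity to be made small.
    return (x - 3) ** 2

trials = Trials()
best = fmin(fn=objective, space=space, algo=tpe.suggest,
            max_evals=100, trials=trials)
print(best)  # e.g. {'x': 3.02...}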
Example #1
Source File: automl.py From kddcup2019-automl with MIT License
def hyperopt_lightgbm(X: pd.DataFrame, y: pd.Series, params: Dict, config: Config):
    X_train, X_val, y_train, y_val = data_split(X, y, test_size=0.5)
    train_data = lgb.Dataset(X_train, label=y_train)
    valid_data = lgb.Dataset(X_val, label=y_val)

    space = {
        "max_depth": hp.choice("max_depth", np.arange(2, 10, 1, dtype=int)),
        # num_leaves should be smaller than 2^(max_depth)
        "num_leaves": hp.choice("num_leaves", np.arange(4, 200, 4, dtype=int)),
        "feature_fraction": hp.quniform("feature_fraction", 0.2, 0.8, 0.1),
        # "bagging_fraction": hp.quniform("bagging_fraction", 0.2, 0.8, 0.1),
        # "bagging_freq": hp.choice("bagging_freq", np.linspace(0, 10, 2, dtype=int)),
        # "scale_pos_weight": hp.uniform('scale_pos_weight', 1.0, 10.0),
        # "colsample_by_tree": hp.uniform("colsample_bytree", 0.5, 1.0),
        "min_child_weight": hp.quniform('min_child_weight', 2, 50, 2),
        "reg_alpha": hp.uniform("reg_alpha", 2.0, 8.0),
        "reg_lambda": hp.uniform("reg_lambda", 2.0, 8.0),
        "learning_rate": hp.quniform("learning_rate", 0.05, 0.4, 0.01),
        # "learning_rate": hp.loguniform("learning_rate", np.log(0.04), np.log(0.5)),
        # "min_data_in_leaf": hp.choice('min_data_in_leaf', np.arange(200, 2000, 100, dtype=int)),
        # "is_unbalance": hp.choice("is_unbalance", [True])
    }

    def objective(hyperparams):
        model = lgb.train({**params, **hyperparams}, train_data, 300,
                          valid_data, early_stopping_rounds=45, verbose_eval=0)
        score = model.best_score["valid_0"][params["metric"]]
        # fmin minimizes the loss, so the score (e.g. AUC, where higher is
        # better) is negated.
        return {'loss': -score, 'status': STATUS_OK}

    trials = Trials()
    best = hyperopt.fmin(fn=objective, space=space, trials=trials,
                         algo=tpe.suggest, max_evals=150, verbose=1,
                         rstate=np.random.RandomState(1))

    hyperparams = space_eval(space, best)
    log(f"auc = {-trials.best_trial['result']['loss']:0.4f} {hyperparams}")
    return hyperparams
Example #2
Source File: hyperparams.py From pykg2vec with MIT License
def __init__(self):
    self.lmbda = [0.1, 0.2]
    self.feature_map_dropout = [0.1, 0.2, 0.5]
    self.input_dropout = [0.1, 0.2, 0.5]
    self.hidden_dropout = [0.1, 0.2, 0.5]
    self.use_bias = [True, False]
    self.label_smoothing = [0.1, 0.2, 0.5]
    self.lr_decay = [0.95, 0.9, 0.8]
    self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
    self.L1_flag = [True, False]
    self.hidden_size = [8, 16, 32, 64, 128, 256]
    self.batch_size = [128, 256, 512]
    self.epochs = [2, 5, 10]
    self.margin = [0.4, 1.0, 2.0]
    self.optimizer = ["adam", "sgd", 'rms']
    self.sampling = ["uniform", "bern"]
Example #3
Source File: hyperparams.py From pykg2vec with MIT License
def __init__(self):
    self.lmbda = [0.1, 0.2]
    self.feature_map_dropout = [0.1, 0.2, 0.5]
    self.input_dropout = [0.1, 0.2, 0.5]
    self.hidden_dropout = [0.1, 0.2, 0.5]
    self.use_bias = [True, False]
    self.label_smoothing = [0.1, 0.2, 0.5]
    self.lr_decay = [0.95, 0.9, 0.8]
    self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
    self.L1_flag = [True, False]
    self.hidden_size = [8, 16]
    self.batch_size = [256, 512]
    self.epochs = [2, 5, 10]
    self.margin = [0.4, 1.0, 2.0]
    self.optimizer = ["adam", "sgd", 'rms']
    self.sampling = ["uniform", "bern"]
Example #4
Source File: test_pyll_util.py From HPOlib with GNU General Public License v3.0
def test_operator_in(self):
    a_or_b = configuration_space.CategoricalHyperparameter("a_or_b", ["a", "b"])
    cond_a = configuration_space.UniformFloatHyperparameter(
        'cond_a', 0, 1, conditions=[['a_or_b == a']])
    cond_b = configuration_space.UniformFloatHyperparameter(
        'cond_b', 0, 3, q=0.1, conditions=[['a_or_b == b']])
    e = configuration_space.UniformFloatHyperparameter(
        "e", 0, 5, conditions=[['a_or_b in {a,b}']])
    conditional_space_operator_in = {"a_or_b": a_or_b,
                                     "cond_a": cond_a,
                                     "cond_b": cond_b,
                                     "e": e}
    cs = self.pyll_writer.write(conditional_space_operator_in)

    expected = StringIO.StringIO()
    expected.write('from hyperopt import hp\nimport hyperopt.pyll as pyll')
    expected.write('\n\n')
    expected.write('param_0 = hp.uniform("cond_a", 0.0, 1.0)\n')
    expected.write('param_1 = hp.quniform("cond_b", -0.0499, 3.05, 0.1)\n')
    expected.write('param_2 = hp.uniform("e", 0.0, 5.0)\n')
    expected.write('param_3 = hp.choice("a_or_b", [\n')
    expected.write(' {"a_or_b": "a", "cond_a": param_0, "e": param_2, },\n')
    expected.write(' {"a_or_b": "b", "cond_b": param_1, "e": param_2, },\n')
    expected.write(' ])\n\n')
    expected.write('space = {"a_or_b": param_3}\n')
    self.assertEqual(expected.getvalue(), cs)
Example #5
Source File: mixed.py From asreview with Apache License 2.0
def full_hyper_space(self):
    from hyperopt import hp

    space_1, choices_1 = self.query_model1.hyper_space()
    space_2, choices_2 = self.query_model2.hyper_space()
    parameter_space = {}
    hyper_choices = {}
    for key, value in space_1.items():
        new_key = "qry_" + self.strategy_1 + key[4:]
        parameter_space[new_key] = value
        hyper_choices[new_key] = choices_1[key]

    for key, value in space_2.items():
        new_key = "qry_" + self.strategy_2 + key[4:]
        parameter_space[new_key] = value
        hyper_choices[new_key] = choices_2[key]

    parameter_space["qry_mix_ratio"] = hp.uniform("qry_mix_ratio", 0, 1)
    return parameter_space, hyper_choices
Example #6
Source File: test_pyll_util.py From HPOlib with GNU General Public License v3.0
def test_write_uniform_int(self):
    a_int = configuration_space.UniformIntegerHyperparameter("a_int", 0, 1)
    expected = ('a_int', 'param_0 = pyll.scope.int(hp.quniform('
                '"a_int", -0.49999, 1.5, 1.0))')
    value = self.pyll_writer.write_hyperparameter(a_int, None)
    self.assertEqual(expected, value)

    # Test for the problem that if a parameter has a non-None Q and is on a
    # log scale, the Q must go into the hyperparameter name, not into the hp
    # object. If this is done the other way round, the log-value of the
    # hyperparameter is quantized.
    a_int = configuration_space.UniformIntegerHyperparameter(
        "a_int", 1, 1000, base=10)
    a_int.name = self.pyll_writer.convert_name(a_int)
    expected = ('LOG10_Q1_a_int', 'param_1 = hp.uniform('
                '"LOG10_Q1_a_int", -0.301021309861, 3.00021709297)')
    value = self.pyll_writer.write_hyperparameter(a_int, None)
    self.assertEqual(expected, value)
Example #7
Source File: main.py From pyprophet with BSD 3-Clause "New" or "Revised" License
def score(infile, outfile, classifier, xgb_autotune, apply_weights, xeval_fraction,
          xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter,
          ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method,
          pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone,
          lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank,
          ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap,
          ipf_min_transition_sn, tric_chromprob, threads, test, ss_score_filter):
    """
    Conduct semi-supervised learning and error-rate estimation for MS1, MS2
    and transition-level data.
    """
    if outfile is None:
        outfile = infile

    # Prepare XGBoost-specific parameters
    xgb_hyperparams = {'autotune': xgb_autotune, 'autotune_num_rounds': 10,
                       'num_boost_round': 100, 'early_stopping_rounds': 10,
                       'test_size': 0.33}

    xgb_params = {'eta': 0.3, 'gamma': 0, 'max_depth': 6, 'min_child_weight': 1,
                  'subsample': 1, 'colsample_bytree': 1, 'colsample_bylevel': 1,
                  'colsample_bynode': 1, 'lambda': 1, 'alpha': 0,
                  'scale_pos_weight': 1, 'silent': 1,
                  'objective': 'binary:logitraw', 'nthread': 1,
                  'eval_metric': 'auc'}

    # Fixed values (e.g. 'subsample') may be mixed with hp distributions in
    # the same dictionary; hyperopt only samples the hp nodes.
    xgb_params_space = {'eta': hp.uniform('eta', 0.0, 0.3),
                        'gamma': hp.uniform('gamma', 0.0, 0.5),
                        'max_depth': hp.quniform('max_depth', 2, 8, 1),
                        'min_child_weight': hp.quniform('min_child_weight', 1, 5, 1),
                        'subsample': 1, 'colsample_bytree': 1,
                        'colsample_bylevel': 1, 'colsample_bynode': 1,
                        'lambda': hp.uniform('lambda', 0.0, 1.0),
                        'alpha': hp.uniform('alpha', 0.0, 1.0),
                        'scale_pos_weight': 1.0, 'silent': 1,
                        'objective': 'binary:logitraw', 'nthread': 1,
                        'eval_metric': 'auc'}

    if not apply_weights:
        PyProphetLearner(infile, outfile, classifier, xgb_hyperparams, xgb_params,
                         xgb_params_space, xeval_fraction, xeval_num_iter,
                         ss_initial_fdr, ss_iteration_fdr, ss_num_iter,
                         ss_main_score, group_id, parametric, pfdr, pi0_lambda,
                         pi0_method, pi0_smooth_df, pi0_smooth_log_pi0,
                         lfdr_truncate, lfdr_monotone, lfdr_transformation,
                         lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank,
                         ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap,
                         ipf_min_transition_sn, tric_chromprob, threads, test,
                         ss_score_filter).run()
    else:
        PyProphetWeightApplier(infile, outfile, classifier, xgb_hyperparams,
                               xgb_params, xgb_params_space, xeval_fraction,
                               xeval_num_iter, ss_initial_fdr, ss_iteration_fdr,
                               ss_num_iter, ss_main_score, group_id, parametric,
                               pfdr, pi0_lambda, pi0_method, pi0_smooth_df,
                               pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone,
                               lfdr_transformation, lfdr_adj, lfdr_eps, level,
                               ipf_max_peakgroup_rank, ipf_max_peakgroup_pep,
                               ipf_max_transition_isotope_overlap,
                               ipf_min_transition_sn, tric_chromprob, threads,
                               test, apply_weights, ss_score_filter).run()

# IPF
Example #8
Source File: test_tune_restore.py From ray with Apache License 2.0
def set_basic_conf(self):
    space = {
        "x": hp.uniform("x", 0, 10),
        "y": hp.uniform("y", -10, 10),
        "z": hp.uniform("z", -10, 0)
    }

    def cost(space, reporter):
        loss = space["x"]**2 + space["y"]**2 + space["z"]**2
        reporter(loss=loss)

    search_alg = HyperOptSearch(
        space,
        metric="loss",
        mode="min",
        random_state_seed=5,
        n_initial_points=1,
        max_concurrent=1000  # Here to avoid breaking back-compat.
    )
    return search_alg, cost
Example #9
Source File: search2.py From PyTorchText with MIT License
def target(args):
    w1, w2, w3 = args
    r = a + b*w1 + c*w2 + d*w3
    result = r.topk(5, 1)[1]
    predict_label_and_marked_label_list = [[_1, _2] for _1, _2
                                           in zip(result, true_labels)]
    score, _, _, _ = get_score(predict_label_and_marked_label_list)
    print(args, score, _)
    # list_space = [hp.uniform('a', 0, 1), hp.uniform('b', 0, 1)]
    return -score
Example #10
Source File: test_pyll_util.py From HPOlib with GNU General Public License v3.0
def test_read_switch(self):
    # 0 switch
    # 1   hyperopt_param
    # 2     Literal{dist1}
    # 3     randint
    # 4       Literal{2}
    # 5   Literal{uniform}
    # 6   Literal{normal}
    dist = hp.choice('dist1', ['uniform', 'normal'])
    ret = self.pyll_reader.read_switch(dist)
    expected = configuration_space.CategoricalHyperparameter(
        'dist1', ['uniform', 'normal'])
    self.assertEqual(expected, ret)

    bigger_choice = hp.choice('choice', [
        {'choice': "zero", 'a': 0, 'b': hp.uniform('b', 0, 10)},
        {'choice': "other", 'a': 1, 'b': hp.uniform('b', 0, 10)}])
    ret = self.pyll_reader.read_switch(bigger_choice)
    expected = configuration_space.CategoricalHyperparameter(
        'choice', ['zero', 'other'])
    self.assertEqual(expected, ret)
    self.assertEqual(2, len(self.pyll_reader.constants))
    # Only the hyperparameter b is put into pyll_reader.hyperparameters
    self.assertEqual(1, len(self.pyll_reader.hyperparameters))

# TODO: duplicate these tests for Integer/care about integers + test if
# the warning about non-uniform parameters is actually printed
Example #11
Source File: test_pyll_util.py From HPOlib with GNU General Public License v3.0
def test_read_uniform(self):
    # 0 float
    # 1   hyperopt_param
    # 2     Literal{scale_mult1}
    # 3     uniform
    # 4       Literal{0.2}
    # 5       Literal{2}
    uniform = hp.uniform('scale_mult1', .2, 2).inputs()[0].inputs()[1]
    ret = self.pyll_reader.read_uniform(uniform, 'scale_mult1')
    expected = configuration_space.UniformFloatHyperparameter(
        'scale_mult1', 0.2, 2)
    self.assertEqual(expected, ret)
Example #12
Source File: test_pyll_util.py From HPOlib with GNU General Public License v3.0
def test_convert_configuration_space(self):
    a = configuration_space.UniformFloatHyperparameter("a", 0, 1)
    b = configuration_space.UniformFloatHyperparameter("b", 0, 3, q=0.1)
    expected = StringIO.StringIO()
    expected.write('from hyperopt import hp\nimport hyperopt.pyll as pyll')
    expected.write('\n\n')
    expected.write('param_0 = hp.uniform("a", 0.0, 1.0)\n')
    expected.write('param_1 = hp.quniform("b", -0.0499, 3.05, 0.1)\n\n')
    expected.write('space = {"a": param_0, "b": param_1}\n')
    simple_space = {"a": a, "b": b}
    cs = self.pyll_writer.write(simple_space)
    self.assertEqual(expected.getvalue(), cs)
Example #13
Source File: test_pyll_util.py From HPOlib with GNU General Public License v3.0
def test_convert_complex_space(self):
    cs = self.pyll_writer.write(config_space)
    expected = StringIO.StringIO()
    expected.write('from hyperopt import hp\nimport hyperopt.pyll as pyll')
    expected.write('\n\n')
    expected.write('param_0 = hp.uniform("LOG2_C", -5.0, 15.0)\n')
    expected.write('param_1 = hp.uniform("LOG2_gamma", -14.9999800563, 3.0)\n')
    expected.write('param_2 = hp.choice("kernel", [\n')
    expected.write(' {"kernel": "linear", },\n')
    expected.write(' {"kernel": "rbf", "LOG2_gamma": param_1, },\n')
    expected.write(' ])\n')
    expected.write('param_3 = hp.uniform("lr", 0.0001, 1.0)\n')
    expected.write('param_4 = pyll.scope.int(hp.quniform('
                   '"neurons", 15.50001, 1024.5, 16.0))\n')
    expected.write('param_5 = hp.choice("classifier", [\n')
    expected.write(' {"classifier": "nn", "lr": param_3, "neurons": param_4, },\n')
    expected.write(' {"classifier": "svm", "LOG2_C": param_0, "kernel": param_2, },\n')
    expected.write(' ])\n')
    expected.write('param_6 = hp.choice("preprocessing", [\n')
    expected.write(' {"preprocessing": "None", },\n')
    expected.write(' {"preprocessing": "pca", },\n')
    expected.write(' ])\n\n')
    expected.write('space = {"classifier": param_5, "preprocessing": param_6}\n')
    self.assertEqual(expected.getvalue(), cs)

    self.pyll_writer.reset_hyperparameter_countr()
    expected.seek(0)
    cs = self.pyll_writer.write(config_space_2)
    self.assertEqual(expected.getvalue().replace("gamma", "gamma_2"), cs)
Example #14
Source File: test_pyll_util.py From HPOlib with GNU General Public License v3.0
def test_write_uniform(self):
    a = configuration_space.UniformFloatHyperparameter("a", 0, 1)
    expected = ('a', 'param_0 = hp.uniform("a", 0.0, 1.0)')
    value = self.pyll_writer.write_hyperparameter(a, None)
    self.assertEqual(expected, value)

    # The hyperparameter name has to be converted separately because
    # otherwise the parameter values are converted at object construction
    # time
    a = configuration_space.UniformFloatHyperparameter("a", 1, 10, base=10)
    a.name = self.pyll_writer.convert_name(a)
    expected = ('LOG10_a', 'param_1 = hp.uniform("LOG10_a", 0.0, 1.0)')
    value = self.pyll_writer.write_hyperparameter(a, None)
    self.assertEqual(expected, value)

    nhid1 = configuration_space.UniformFloatHyperparameter(
        "nhid1", 16, 1024, q=16, base=np.e)
    expected = ('nhid1', 'param_2 = hp.qloguniform('
                '"nhid1", 2.0794540416, 6.93925394604, 16.0)')
    value = self.pyll_writer.write_hyperparameter(nhid1, None)
    self.assertEqual(expected, value)
Example #15
Source File: test_pyll_util.py From HPOlib with GNU General Public License v3.0
def test_write_quniform(self):
    b = configuration_space.UniformFloatHyperparameter("b", 0, 3, q=0.1)
    expected = ("b", 'param_0 = hp.quniform("b", -0.0499, 3.05, 0.1)')
    value = self.pyll_writer.write_hyperparameter(b, None)
    self.assertEqual(expected, value)

    b = configuration_space.UniformFloatHyperparameter(
        "b", 0.1, 3, q=0.1, base=10)
    b.name = self.pyll_writer.convert_name(b)
    expected = ('LOG10_Q0.100000_b', 'param_1 = hp.uniform('
                '"LOG10_Q0.100000_b", -1.30016227413, 0.484299839347)')
    value = self.pyll_writer.write_hyperparameter(b, None)
    self.assertEqual(expected, value)
Example #16
Source File: test_pyll_util.py From HPOlib with GNU General Public License v3.0
def test_write_normal_int(self):
    parameter = configuration_space.NormalIntegerHyperparameter("e", 0, 1)
    expected = ('e', 'param_0 = pyll.scope.int(hp.qnormal("e", 0.0, 1.0, 1.0))')
    value = self.pyll_writer.write_hyperparameter(parameter, None)
    self.assertEqual(expected, value)

    parameter = configuration_space.NormalIntegerHyperparameter(
        "e", 0, 1, base=10)
    parameter.name = self.pyll_writer.convert_name(parameter)
    # TODO: this is an example of non-uniform sampling
    expected = ('LOG10_Q1_e', 'param_1 = hp.normal("LOG10_Q1_e", 0.0, 1.0)')
    value = self.pyll_writer.write_hyperparameter(parameter, None)
    self.assertEqual(expected, value)
Example #17
Source File: hyperopt.py From BTB with MIT License
def _search_space_from_dict(dict_hyperparams):
    hyperparams = {}

    if not isinstance(dict_hyperparams, dict):
        raise TypeError('Hyperparams must be a dictionary.')

    for name, hyperparam in dict_hyperparams.items():
        hp_type = hyperparam['type']

        if hp_type == 'int':
            hp_range = hyperparam.get('range') or hyperparam.get('values')
            hp_min = min(hp_range) if hp_range else None
            hp_max = max(hp_range) if hp_range else None
            hp_instance = hp.uniformint(name, hp_min, hp_max)

        elif hp_type == 'float':
            hp_range = hyperparam.get('range') or hyperparam.get('values')
            hp_min = min(hp_range)
            hp_max = max(hp_range)
            hp_instance = hp.uniform(name, hp_min, hp_max)

        elif hp_type == 'bool':
            hp_instance = hp.choice(name, [True, False])

        elif hp_type == 'str':
            hp_choices = hyperparam.get('range') or hyperparam.get('values')
            hp_instance = hp.choice(name, hp_choices)

        hyperparams[name] = hp_instance

    return hyperparams
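For orientation, the converter above expects a dictionary keyed by hyperparameter name, where each entry carries a 'type' plus either a 'range' or a 'values' list; this follows directly from the branches in the code. A hedged usage sketch with a made-up input (the parameter names below are illustrative, not from BTB):

# Hypothetical input in the format the converter consumes.
dict_hyperparams = {
    'n_estimators': {'type': 'int', 'range': [10, 500]},
    'learning_rate': {'type': 'float', 'range': [0.01, 0.3]},
    'bootstrap': {'type': 'bool'},
    'criterion': {'type': 'str', 'values': ['gini', 'entropy']},
}

space = _search_space_from_dict(dict_hyperparams)
# space['n_estimators'] -> hp.uniformint, space['learning_rate'] -> hp.uniform,
# space['bootstrap'] and space['criterion'] -> hp.choice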
Example #18
Source File: triple.py From asreview with Apache License 2.0
def full_hyper_space(self):
    from hyperopt import hp

    parameter_space = {
        "bal_a": hp.lognormal("bal_a", 0, 1),
        "bal_alpha": hp.uniform("bal_alpha", 0, 2),
        "bal_b": hp.uniform("bal_b", 0, 1),
        # "bal_zero_beta": hp.uniform("bal_zero_beta", 0, 2),
        "bal_c": hp.uniform("bal_c", 0, 1),
        # "bal_zero_max_gamma": hp.uniform("bal_zero_max_gamma", 0.01, 2)
    }
    return parameter_space, {}
Example #19
Source File: double.py From asreview with Apache License 2.0
def full_hyper_space(self):
    from hyperopt import hp

    parameter_space = {
        "bal_a": hp.lognormal("bal_a", 0, 1),
        "bal_alpha": hp.uniform("bal_alpha", 0, 2),
        "bal_b": hp.uniform("bal_b", 0, 1),
        # "bal_beta": hp.uniform("bal_beta", 0, 2),
    }
    return parameter_space, {}
Example #20
Source File: lstm_pool.py From asreview with Apache License 2.0
def full_hyper_space(self):
    from hyperopt import hp

    hyper_choices = {}
    hyper_space = {
        "mdl_dropout": hp.uniform("mdl_dropout", 0, 0.9),
        "mdl_lstm_out_width": hp.quniform("mdl_lstm_out_width", 1, 50, 1),
        "mdl_dense_width": hp.quniform("mdl_dense_width", 1, 200, 1),
        "mdl_learn_rate_mult": hp.lognormal("mdl_learn_rate_mult", 0, 1)
    }
    return hyper_space, hyper_choices
Example #21
Source File: lstm_base.py From asreview with Apache License 2.0
def full_hyper_space(self):
    from hyperopt import hp

    hyper_choices = {}
    hyper_space = {
        "mdl_dropout": hp.uniform("mdl_dropout", 0, 0.9),
        "mdl_lstm_out_width": hp.quniform("mdl_lstm_out_width", 1, 50, 1),
        "mdl_dense_width": hp.quniform("mdl_dense_width", 1, 200, 1),
        "mdl_learn_rate_mult": hp.lognormal("mdl_learn_rate_mult", 0, 1)
    }
    return hyper_space, hyper_choices
Example #22
Source File: hyperparameter.py From hypermax with BSD 3-Clause "New" or "Revised" License
def getLog10Cardinality(self):
    if 'anyOf' in self.config or 'oneOf' in self.config:
        if 'anyOf' in self.config:
            data = self.config['anyOf']
        else:
            data = self.config['oneOf']

        log10_cardinality = Hyperparameter(data[0], self, self.root + ".0").getLog10Cardinality()
        for index, subParam in enumerate(data[1:]):
            # We used logarithm identities to create this reduction formula
            other_log10_cardinality = Hyperparameter(subParam, self, self.root + "." + str(index)).getLog10Cardinality()

            # Revert to linear at high and low values, for numerical stability.
            # Check here: https://www.desmos.com/calculator/efkbbftd18 to observe
            if (log10_cardinality - other_log10_cardinality) > 3:
                log10_cardinality = log10_cardinality + 1
            elif (other_log10_cardinality - log10_cardinality) > 3:
                log10_cardinality = other_log10_cardinality + 1
            else:
                log10_cardinality = other_log10_cardinality + math.log10(1 + math.pow(10, log10_cardinality - other_log10_cardinality))

        return log10_cardinality + math.log10(len(data))
    elif 'enum' in self.config:
        return math.log10(len(self.config['enum']))
    elif 'constant' in self.config:
        return math.log10(1)
    elif self.config['type'] == 'object':
        log10_cardinality = 0
        for index, subParam in enumerate(self.config['properties'].values()):
            subParameter = Hyperparameter(subParam, self, self.root + "." + str(index))
            log10_cardinality += subParameter.getLog10Cardinality()
        return log10_cardinality
    elif self.config['type'] == 'number':
        if 'rounding' in self.config:
            return math.log10(min(20, (self.config['max'] - self.config['min']) / self.config['rounding'] + 1))
        else:
            return math.log10(20)  # Default of 20 for fully uniform numbers.
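The loop above stays in log space throughout: in the common case it sums two cardinalities 10^a and 10^b via the identity log10(10^a + 10^b) = b + log10(1 + 10^(a - b)), and when the two values differ by more than three orders of magnitude it deliberately switches to the linear approximation described in the code's comments. A small standalone check of the identity (not from hypermax):

import math

a, b = 5.0, 7.0  # log10 cardinalities of two sub-spaces

direct = math.log10(10 ** a + 10 ** b)             # leaves log space; can overflow for large a, b
reduced = b + math.log10(1 + math.pow(10, a - b))  # only exponentiates the (small) difference

assert abs(direct - reduced) < 1e-12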
Example #23
Source File: svm_surrogate.py From BOAH with Apache License 2.0
def tpe_configspace(self):
    from hyperopt import hp

    space = {
        'x0': hp.uniform("x0", -10., 10.),
        'x1': hp.uniform("x1", -10., 10.),
    }
    return space
Example #24
Source File: bnn_worker.py From BOAH with Apache License 2.0
def tpe_configspace(self):
    from hyperopt import hp
    import numpy as np

    space = {
        'l_rate': hp.loguniform('l_rate', np.log(1e-6), np.log(1e-1)),
        'burn_in': hp.uniform('burn_in', 0, .8),
        'n_units_1': hp.qloguniform('n_units_1', np.log(16), np.log(512), 1),
        'n_units_2': hp.qloguniform('n_units_2', np.log(16), np.log(512), 1),
        'mdecay': hp.uniform('mdecay', 0, 1)
    }
    return space
Example #25
Source File: cartpole_worker.py From BOAH with Apache License 2.0
def tpe_configspace(self):
    import numpy as np
    from hyperopt import hp

    space = {
        'learning_rate': hp.loguniform('learning_rate', np.log(1e-7), np.log(1e-1)),
        'batch_size': hp.qloguniform('batch_size', np.log(8), np.log(256), 1),
        'n_units_1': hp.qloguniform('n_units_1', np.log(8), np.log(128), 1),
        'n_units_2': hp.qloguniform('n_units_2', np.log(8), np.log(128), 1),
        'discount': hp.uniform('discount', 0, 1),
        'likelihood_ratio_clipping': hp.uniform('likelihood_ratio_clipping', 0, 1),
        'entropy_regularization': hp.uniform('entropy_regularization', 0, 1)
    }
    return space
Example #26
Source File: paramnet_surrogates.py From BOAH with Apache License 2.0
def tpe_configspace(self):
    from hyperopt import hp

    space = {
        'x0': hp.uniform("x0", -6., -2.),
        'x1': hp.uniform("x1", 3., 8.),
        'x2': hp.uniform("x2", 4., 8.),
        'x3': hp.uniform("x3", -4., 0.),
        'x4': hp.uniform("x4", 1., 5.),
        'x5': hp.uniform("x5", 0., .5),
    }
    return space
Example #27
Source File: hyperparams.py From pykg2vec with MIT License
def __init__(self):
    self.search_space = {
        'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
        'L1_flag': hp.choice('L1_flag', [True, False]),
        'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(256), 1)),
        'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096), 1)),
        'margin': hp.uniform('margin', 0.0, 2.0),
        'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
        'bilinear': hp.choice('bilinear', [True, False]),
        'epochs': hp.choice('epochs', [10])  # always choose 10 training epochs
    }
Example #28
Source File: automl.py From KDDCup2019_admin with MIT License
def hyperopt_lightgbm(X_train: pd.DataFrame, y_train: pd.Series, params: Dict,
                      config: Config, max_evals=10):
    X_train, X_test, y_train, y_test = data_split_by_time(X_train, y_train,
                                                          test_size=0.2)
    X_train, X_val, y_train, y_val = data_split_by_time(X_train, y_train,
                                                        test_size=0.3)
    train_data = lgb.Dataset(X_train, label=y_train)
    valid_data = lgb.Dataset(X_val, label=y_val)

    space = {
        "learning_rate": hp.loguniform("learning_rate", np.log(0.01), np.log(0.5)),
        # "max_depth": hp.choice("max_depth", [-1, 2, 3, 4, 5, 6]),
        "max_depth": hp.choice("max_depth", [1, 2, 3, 4, 5, 6]),
        "num_leaves": hp.choice("num_leaves", np.linspace(10, 200, 50, dtype=int)),
        "feature_fraction": hp.quniform("feature_fraction", 0.5, 1.0, 0.1),
        "bagging_fraction": hp.quniform("bagging_fraction", 0.5, 1.0, 0.1),
        "bagging_freq": hp.choice("bagging_freq", np.linspace(0, 50, 10, dtype=int)),
        "reg_alpha": hp.uniform("reg_alpha", 0, 2),
        "reg_lambda": hp.uniform("reg_lambda", 0, 2),
        "min_child_weight": hp.uniform('min_child_weight', 0.5, 10),
    }

    def objective(hyperparams):
        if config.time_left() < 50:
            return {'status': STATUS_FAIL}
        else:
            model = lgb.train({**params, **hyperparams}, train_data, 100,
                              valid_data, early_stopping_rounds=10, verbose_eval=0)
            pred = model.predict(X_test)
            score = roc_auc_score(y_test, pred)
            # score = model.best_score["valid_0"][params["metric"]]
            # fmin minimizes, so the AUC (higher is better) is negated.
            return {'loss': -score, 'status': STATUS_OK}

    trials = Trials()
    best = hyperopt.fmin(fn=objective, space=space, trials=trials,
                         algo=tpe.suggest, max_evals=max_evals, verbose=1,
                         rstate=np.random.RandomState(1))

    hyperparams = space_eval(space, best)
    log(f"auc = {-trials.best_trial['result']['loss']:0.4f} {hyperparams}")
    return hyperparams
Example #29
Source File: hyperparams.py From pykg2vec with MIT License
def __init__(self):
    self.search_space = {
        'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
        'L1_flag': hp.choice('L1_flag', [True, False]),
        'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(256), 1)),
        'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096), 1)),
        'margin': hp.uniform('margin', 0.0, 10.0),
        'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
        'epochs': hp.choice('epochs', [500])  # always choose 500 training epochs
    }
Example #30
Source File: hyperparams.py From pykg2vec with MIT License
def __init__(self):
    self.search_space = {
        'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
        'L1_flag': hp.choice('L1_flag', [True, False]),
        'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(256), 1)),
        'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096), 1)),
        'margin': hp.uniform('margin', 0.0, 2.0),
        'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
        'epochs': hp.choice('epochs', [10])  # always choose 10 training epochs
    }