Python hyperopt.hp.qloguniform() Examples
The following are 26 code examples of hyperopt.hp.qloguniform().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module hyperopt.hp, or try the search function.
Example #1
Source File: hyperparams.py From pykg2vec with MIT License | 5 votes |
def __init__(self):
    """Define the TPE search space for this model's hyper-parameters."""
    self.search_space = {
        # Learning rate and regularization sampled log-uniformly.
        'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
        # Integer sizes: log-uniform draw, quantized to step 1, cast to int.
        'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(256), 1)),
        'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096), 1)),
        'lmbda': hp.loguniform('lmbda', np.log(0.00001), np.log(0.001)),
        'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
        # Always choose 10 training epochs.
        'epochs': hp.choice('epochs', [10]),
    }
Example #2
Source File: cartpole_worker.py From BOAH with Apache License 2.0 | 5 votes |
def tpe_configspace(self):
    """Return the hyperopt (TPE) search space for the cartpole worker."""
    import numpy as np
    from hyperopt import hp

    # Quantized log-uniform draws yield integer-valued layer/batch sizes.
    return {
        'learning_rate': hp.loguniform('learning_rate', np.log(1e-7), np.log(1e-1)),
        'batch_size': hp.qloguniform('batch_size', np.log(8), np.log(256), 1),
        'n_units_1': hp.qloguniform('n_units_1', np.log(8), np.log(128), 1),
        'n_units_2': hp.qloguniform('n_units_2', np.log(8), np.log(128), 1),
        'discount': hp.uniform('discount', 0, 1),
        'likelihood_ratio_clipping': hp.uniform('likelihood_ratio_clipping', 0, 1),
        'entropy_regularization': hp.uniform('entropy_regularization', 0, 1),
    }
Example #3
Source File: bnn_worker.py From BOAH with Apache License 2.0 | 5 votes |
def tpe_configspace(self):
    """Return the hyperopt (TPE) search space for the BNN worker."""
    from hyperopt import hp
    import numpy as np

    # Unit counts are quantized log-uniform; the rest are plain (log-)uniform.
    return {
        'l_rate': hp.loguniform('l_rate', np.log(1e-6), np.log(1e-1)),
        'burn_in': hp.uniform('burn_in', 0, .8),
        'n_units_1': hp.qloguniform('n_units_1', np.log(16), np.log(512), 1),
        'n_units_2': hp.qloguniform('n_units_2', np.log(16), np.log(512), 1),
        'mdecay': hp.uniform('mdecay', 0, 1),
    }
Example #4
Source File: test_pyll_util.py From HPOlib with GNU General Public License v3.0 | 5 votes |
def test_write_qloguniform_int(self):
    """Integer qloguniform params serialize with their q; the param counter advances."""
    d_int_1 = configuration_space.UniformIntegerHyperparameter(
        "d_int", 1, 3, q=1.0, base=np.e)
    expected = ("d_int",
                'param_0 = pyll.scope.int(hp.qloguniform('
                '"d_int", -0.69312718076, 1.2527629685, 1.0))')
    self.assertEqual(expected,
                     self.pyll_writer.write_hyperparameter(d_int_1, None))

    # Same parameter with q=2.0; writer counter moves to param_1.
    d_int_2 = configuration_space.UniformIntegerHyperparameter(
        "d_int", 1, 3, q=2.0, base=np.e)
    expected = ("d_int",
                'param_1 = pyll.scope.int(hp.qloguniform('
                '"d_int", -0.69312718076, 1.2527629685, 2.0))')
    self.assertEqual(expected,
                     self.pyll_writer.write_hyperparameter(d_int_2, None))
Example #5
Source File: test_pyll_util.py From HPOlib with GNU General Public License v3.0 | 5 votes |
def test_write_qloguniform(self):
    """Float qloguniform params serialize as hp.qloguniform with log-space bounds."""
    hyperparameter = configuration_space.UniformFloatHyperparameter(
        "d", 0.1, 3, q=0.1, base=np.e)
    result = self.pyll_writer.write_hyperparameter(hyperparameter, None)
    self.assertEqual(
        ("d",
         'param_0 = hp.qloguniform("d", -2.99373427089, '
         '1.11514159062, 0.1)'),
        result)
Example #6
Source File: test_pyll_util.py From HPOlib with GNU General Public License v3.0 | 5 votes |
def test_write_loguniform_int(self):
    """Integer loguniform params are emitted as int-wrapped qloguniform with q=1.0."""
    hyperparameter = configuration_space.UniformIntegerHyperparameter(
        "c_int", 1, 10, base=np.e)
    result = self.pyll_writer.write_hyperparameter(hyperparameter, None)
    self.assertEqual(
        ("c_int",
         'param_0 = pyll.scope.int(hp.qloguniform('
         '"c_int", -0.69312718076, 2.35137525716, 1.0))'),
        result)
Example #7
Source File: test_pyll_util.py From HPOlib with GNU General Public License v3.0 | 5 votes |
def test_write_uniform(self):
    """Uniform params serialize; log-base params are renamed and rescaled to [0, 1]."""
    a = configuration_space.UniformFloatHyperparameter("a", 0, 1)
    self.assertEqual(('a', 'param_0 = hp.uniform("a", 0.0, 1.0)'),
                     self.pyll_writer.write_hyperparameter(a, None))

    # The hyperparameter name has to be converted separately because
    # otherwise the parameter values are converted at object construction
    # time.
    a = configuration_space.UniformFloatHyperparameter("a", 1, 10, base=10)
    a.name = self.pyll_writer.convert_name(a)
    self.assertEqual(('LOG10_a', 'param_1 = hp.uniform("LOG10_a", 0.0, 1.0)'),
                     self.pyll_writer.write_hyperparameter(a, None))

    # Quantized natural-log param keeps its name and emits hp.qloguniform.
    nhid1 = configuration_space.UniformFloatHyperparameter(
        "nhid1", 16, 1024, q=16, base=np.e)
    self.assertEqual(('nhid1',
                      'param_2 = hp.qloguniform('
                      '"nhid1", 2.0794540416, 6.93925394604, 16.0)'),
                     self.pyll_writer.write_hyperparameter(nhid1, None))
Example #8
Source File: test_pyll_util.py From HPOlib with GNU General Public License v3.0 | 5 votes |
def test_read_qloguniform(self):
    """A hyperopt qloguniform node round-trips into a UniformFloatHyperparameter."""
    # pyll graph layout for reference:
    #   0 float
    #   1   hyperopt_param
    #   2     Literal{nhid1}
    #   3   qloguniform
    #   4     Literal{2.77258872224}
    #   5     Literal{6.9314718056}
    #   6     q =
    #   7     Literal{16}
    node = hp.qloguniform('nhid1', np.log(16), np.log(1024), q=16)
    qloguniform = node.inputs()[0].inputs()[1]
    ret = self.pyll_reader.read_qloguniform(qloguniform, 'nhid1')
    expected = configuration_space.UniformFloatHyperparameter(
        'nhid1', 16, 1024, q=16, base=np.e)
    self.assertEqual(expected, ret)
Example #9
Source File: hyperparams.py From pykg2vec with MIT License | 5 votes |
def __init__(self):
    """Set up the hyper-parameter search space (TPE/hyperopt format)."""
    self.search_space = {
        'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
        # scope.int(...) turns the quantized log-uniform draw into an int.
        'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(256), 1)),
        'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096), 1)),
        'lmbda': hp.loguniform('lmbda', np.log(0.00001), np.log(0.001)),
        'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
        'epochs': hp.choice('epochs', [10]),  # always choose 10 training epochs
    }
Example #10
Source File: hyperparams.py From pykg2vec with MIT License | 5 votes |
def __init__(self):
    """Set up the hyper-parameter search space (TPE/hyperopt format).

    Fix: the trailing comment previously claimed "always choose 10 training
    epochs" while the space actually fixes epochs at 500.
    """
    self.search_space = {
        'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
        'L1_flag': hp.choice('L1_flag', [True, False]),
        # scope.int(...) turns the quantized log-uniform draw into an int.
        'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(256), 1)),
        'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096), 1)),
        'margin': hp.uniform('margin', 0.0, 10.0),
        'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
        'epochs': hp.choice('epochs', [500]),  # always run 500 training epochs
    }
Example #11
Source File: hyperparams.py From pykg2vec with MIT License | 5 votes |
def __init__(self):
    """Build this model's hyperopt search space."""
    self.search_space = {
        # Rates/regularizers: log-uniform over several orders of magnitude.
        'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
        'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(256), 1)),
        'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096), 1)),
        'lmbda': hp.loguniform('lmbda', np.log(0.00001), np.log(0.001)),
        'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
        'epochs': hp.choice('epochs', [10]),  # always choose 10 training epochs
    }
Example #12
Source File: hyperparams.py From pykg2vec with MIT License | 5 votes |
def __init__(self):
    """Declare the tunable hyper-parameters as a hyperopt space."""
    self.search_space = {
        'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
        # Integer-valued sizes: quantized log-uniform, cast via scope.int.
        'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(256), 1)),
        'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096), 1)),
        'lmbda': hp.loguniform('lmbda', np.log(0.00001), np.log(0.001)),
        'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
        'epochs': hp.choice('epochs', [10]),  # always choose 10 training epochs
    }
Example #13
Source File: hyperparams.py From pykg2vec with MIT License | 5 votes |
def __init__(self):
    """Initialize the hyper-parameter search space for tuning."""
    self.search_space = {
        'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
        'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(256), 1)),
        'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096), 1)),
        'lmbda': hp.loguniform('lmbda', np.log(0.00001), np.log(0.001)),
        'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
        'epochs': hp.choice('epochs', [10]),  # always choose 10 training epochs
    }
Example #14
Source File: hyperparams.py From pykg2vec with MIT License | 5 votes |
def __init__(self):
    """Set up the hyperopt search space for a margin-based model."""
    self.search_space = {
        'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
        'L1_flag': hp.choice('L1_flag', [True, False]),
        'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(256), 1)),
        'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096), 1)),
        'margin': hp.uniform('margin', 0.0, 2.0),
        'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
        'epochs': hp.choice('epochs', [10]),  # always choose 10 training epochs
    }
Example #15
Source File: hyperparams.py From pykg2vec with MIT License | 5 votes |
def __init__(self):
    """Define the tunable hyper-parameters (margin-based variant)."""
    self.search_space = {
        'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
        'L1_flag': hp.choice('L1_flag', [True, False]),
        # scope.int(...) yields integer sizes from quantized log-uniform draws.
        'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(256), 1)),
        'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096), 1)),
        'margin': hp.uniform('margin', 0.0, 2.0),
        'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
        'epochs': hp.choice('epochs', [10]),  # always choose 10 training epochs
    }
Example #16
Source File: hyperparams.py From pykg2vec with MIT License | 5 votes |
def __init__(self):
    """Search space with separate entity and relation embedding sizes."""
    self.search_space = {
        'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
        'L1_flag': hp.choice('L1_flag', [True, False]),
        # Entity and relation embeddings are tuned independently.
        'ent_hidden_size': scope.int(hp.qloguniform('ent_hidden_size', np.log(8), np.log(256), 1)),
        'rel_hidden_size': scope.int(hp.qloguniform('rel_hidden_size', np.log(8), np.log(256), 1)),
        'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096), 1)),
        'margin': hp.uniform('margin', 0.0, 2.0),
        'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
        'epochs': hp.choice('epochs', [10]),  # always choose 10 training epochs
    }
Example #17
Source File: hyperparams.py From pykg2vec with MIT License | 5 votes |
def __init__(self):
    """Search space with small (8-64) entity/relation embedding sizes."""
    self.search_space = {
        'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
        'L1_flag': hp.choice('L1_flag', [True, False]),
        # Narrower size range than sibling models: 8 to 64 units.
        'ent_hidden_size': scope.int(hp.qloguniform('ent_hidden_size', np.log(8), np.log(64), 1)),
        'rel_hidden_size': scope.int(hp.qloguniform('rel_hidden_size', np.log(8), np.log(64), 1)),
        'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096), 1)),
        'margin': hp.uniform('margin', 0.0, 2.0),
        'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
        'epochs': hp.choice('epochs', [10]),  # always choose 10 training epochs
    }
Example #18
Source File: hyperparams.py From pykg2vec with MIT License | 5 votes |
def __init__(self):
    """Search space tuning entity and relation embeddings separately."""
    self.search_space = {
        'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
        'L1_flag': hp.choice('L1_flag', [True, False]),
        'ent_hidden_size': scope.int(hp.qloguniform('ent_hidden_size', np.log(8), np.log(256), 1)),
        'rel_hidden_size': scope.int(hp.qloguniform('rel_hidden_size', np.log(8), np.log(256), 1)),
        'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096), 1)),
        'margin': hp.uniform('margin', 0.0, 2.0),
        'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
        'epochs': hp.choice('epochs', [10]),  # always choose 10 training epochs
    }
Example #19
Source File: hyperparams.py From pykg2vec with MIT License | 5 votes |
def __init__(self):
    """Search space including the model's bilinear-term toggle."""
    self.search_space = {
        'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
        'L1_flag': hp.choice('L1_flag', [True, False]),
        'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(256), 1)),
        'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096), 1)),
        'margin': hp.uniform('margin', 0.0, 2.0),
        'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
        'bilinear': hp.choice('bilinear', [True, False]),
        'epochs': hp.choice('epochs', [10]),  # always choose 10 training epochs
    }
Example #20
Source File: hyperparams.py From pykg2vec with MIT License | 5 votes |
def __init__(self):
    """Search space with hidden size capped at 128 units."""
    self.search_space = {
        'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
        'L1_flag': hp.choice('L1_flag', [True, False]),
        'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(128), 1)),
        'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096), 1)),
        'margin': hp.uniform('margin', 0.0, 2.0),
        'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
        'epochs': hp.choice('epochs', [10]),  # always choose 10 training epochs
    }
Example #21
Source File: hyperparams.py From pykg2vec with MIT License | 5 votes |
def __init__(self):
    """Declare the margin-based model's hyperopt search space."""
    self.search_space = {
        'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
        'L1_flag': hp.choice('L1_flag', [True, False]),
        'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(256), 1)),
        'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096), 1)),
        'margin': hp.uniform('margin', 0.0, 2.0),
        'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
        'epochs': hp.choice('epochs', [10]),  # always choose 10 training epochs
    }
Example #22
Source File: hyperparams.py From pykg2vec with MIT License | 5 votes |
def __init__(self):
    """Configure the tunable hyper-parameters (margin-based variant)."""
    self.search_space = {
        'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
        'L1_flag': hp.choice('L1_flag', [True, False]),
        'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(256), 1)),
        'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096), 1)),
        'margin': hp.uniform('margin', 0.0, 2.0),
        'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
        'epochs': hp.choice('epochs', [10]),  # always choose 10 training epochs
    }
Example #23
Source File: hyperopt_optimizer.py From bayesmark with Apache License 2.0 | 4 votes |
def get_hyperopt_dimensions(api_config):
    """Help routine to setup hyperopt search space in constructor.

    Take api_config as argument so this can be static.
    """
    # Iteration order probably does not matter, but sort to stay
    # consistent with space.py.
    space = {}
    round_to_values = {}
    for param_name in sorted(api_config.keys()):
        param_config = api_config[param_name]
        param_type = param_config["type"]
        param_space = param_config.get("space", None)
        param_range = param_config.get("range", None)
        param_values = param_config.get("values", None)

        # When a whitelist of values is given for a numeric type, search the
        # enclosing range and snap samples to the nearest allowed value.
        values_only_type = param_type in ("cat", "ordinal")
        if (param_values is not None) and (not values_only_type):
            assert param_range is None
            param_values = np.unique(param_values)
            param_range = (param_values[0], param_values[-1])
            round_to_values[param_name] = interp1d(
                param_values, param_values, kind="nearest",
                fill_value="extrapolate")

        if param_type == "int":
            low, high = param_range
            if param_space in ("log", "logit"):
                space[param_name] = hp.qloguniform(
                    param_name, np.log(low), np.log(high), 1)
            else:
                space[param_name] = hp.quniform(param_name, low, high, 1)
        elif param_type == "bool":
            assert param_range is None
            assert param_values is None
            space[param_name] = hp.choice(param_name, (False, True))
        elif param_type in ("cat", "ordinal"):
            assert param_range is None
            space[param_name] = hp.choice(param_name, param_values)
        elif param_type == "real":
            low, high = param_range
            if param_space in ("log", "logit"):
                space[param_name] = hp.loguniform(
                    param_name, np.log(low), np.log(high))
            else:
                space[param_name] = hp.uniform(param_name, low, high)
        else:
            assert False, "type %s not handled in API" % param_type
    return space, round_to_values
Example #24
Source File: hyperopt_optimizer.py From bayesmark with Apache License 2.0 | 4 votes |
def get_hyperopt_dimensions(api_config):
    """Help routine to setup hyperopt search space in constructor.

    Take api_config as argument so this can be static.
    """
    space = {}
    round_to_values = {}
    # Sorted iteration keeps the ordering consistent with space.py.
    param_list = sorted(api_config.keys())
    for param_name in param_list:
        param_config = api_config[param_name]
        param_type = param_config["type"]
        param_space = param_config.get("space", None)
        param_range = param_config.get("range", None)
        param_values = param_config.get("values", None)

        values_only_type = param_type in ("cat", "ordinal")
        # Numeric whitelist: derive a covering range and record a
        # nearest-value rounding function for post-processing samples.
        if (param_values is not None) and (not values_only_type):
            assert param_range is None
            param_values = np.unique(param_values)
            param_range = (param_values[0], param_values[-1])
            round_to_values[param_name] = interp1d(
                param_values, param_values,
                kind="nearest", fill_value="extrapolate")

        if param_type == "int":
            low, high = param_range
            if param_space in ("log", "logit"):
                space[param_name] = hp.qloguniform(
                    param_name, np.log(low), np.log(high), 1)
            else:
                space[param_name] = hp.quniform(param_name, low, high, 1)
        elif param_type == "bool":
            assert param_range is None
            assert param_values is None
            space[param_name] = hp.choice(param_name, (False, True))
        elif param_type in ("cat", "ordinal"):
            assert param_range is None
            space[param_name] = hp.choice(param_name, param_values)
        elif param_type == "real":
            low, high = param_range
            if param_space in ("log", "logit"):
                space[param_name] = hp.loguniform(
                    param_name, np.log(low), np.log(high))
            else:
                space[param_name] = hp.uniform(param_name, low, high)
        else:
            assert False, "type %s not handled in API" % param_type
    return space, round_to_values
Example #25
Source File: lale_hyperopt.py From lale with Apache License 2.0 | 4 votes |
def visitSearchSpaceNumber(self, space:SearchSpaceNumber, path:str, counter=None):
    """Translate a numeric search-space node into a hyperopt expression."""
    label = self.mk_label(path, counter)

    # PGO-backed spaces sample an index into the pre-trained table.
    if space.pgo is not None:
        return scope.pgo_sample(
            space.pgo, hp.quniform(label, 0, len(space.pgo) - 1, 1))

    dist = space.distribution if space.distribution else "uniform"

    if space.maximum is None:
        raise SearchSpaceError(path, f"maximum not specified for a number with distribution {dist}")
    hi = space.getInclusiveMax()

    # The integer distribution needs only a maximum.
    if dist == "integer":
        if not space.discrete:
            raise SearchSpaceError(path, "integer distribution specified for a non discrete numeric type")
        return hp.randint(label, hi)

    if space.minimum is None:
        raise SearchSpaceError(path, f"minimum not specified for a number with distribution {dist}")
    lo = space.getInclusiveMin()

    if dist == "uniform":
        if space.discrete:
            return scope.int(hp.quniform(label, lo, hi, 1))
        return hp.uniform(label, lo, hi)

    if dist == "loguniform":
        # For log distributions, hyperopt requires the log of the min/max.
        if lo <= 0:
            raise SearchSpaceError(path, f"minimum of 0 specified with a {dist} distribution. This is not allowed; please set it (possibly using minimumForOptimizer) to be positive")
        if lo > 0:
            lo = math.log(lo)
        if hi > 0:
            hi = math.log(hi)
        if space.discrete:
            return scope.int(hp.qloguniform(label, lo, hi, 1))
        return hp.loguniform(label, lo, hi)

    raise SearchSpaceError(path, f"Unknown distribution type: {dist}")
Example #26
Source File: lale_hyperopt.py From lale with Apache License 2.0 | 4 votes |
def visitSearchSpaceNumber(self, space:SearchSpaceNumber, path:str, counter=None, useCounter=True):
    """Render a numeric search-space node as hyperopt source text.

    Fix: when ``space.maximum`` was missing, the original built a
    ``SearchSpaceError`` but never raised it, so execution fell through to
    ``getInclusiveMax()``. It is now raised, matching the sibling visitor
    that returns live hyperopt objects instead of source strings.
    """
    label = self.mk_label(path, counter, useCounter=useCounter)
    if space.pgo is not None:
        # Record the PGO table so the emitted source can reference it by name.
        self.pgo_dict[label] = space.pgo
        return f"scope.pgo_sample(pgo_{label}, hp.quniform('{label}', {0}, {len(space.pgo)-1}, 1))"

    dist = "uniform"
    if space.distribution:
        dist = space.distribution

    if space.maximum is None:
        # BUG FIX: previously the exception was constructed but not raised.
        raise SearchSpaceError(path, f"maximum not specified for a number with distribution {dist}")
    max = space.getInclusiveMax()

    # These distributions need only a maximum.
    if dist == "integer":
        if not space.discrete:
            raise SearchSpaceError(path, "integer distribution specified for a non discrete numeric type....")
        return f"hp.randint('{label}', {max})"

    if space.minimum is None:
        raise SearchSpaceError(path, f"minimum not specified for a number with distribution {dist}")
    min = space.getInclusiveMin()

    if dist == "uniform":
        if space.discrete:
            return f"hp.quniform('{label}', {min}, {max}, 1)"
        else:
            return f"hp.uniform('{label}', {min}, {max})"
    elif dist == "loguniform":
        # For log distributions, hyperopt requires that we provide the log
        # of the min/max.
        if min <= 0:
            raise SearchSpaceError(path, f"minimum of 0 specified with a {dist} distribution. This is not allowed; please set it (possibly using minimumForOptimizer) to be positive")
        if min > 0:
            min = math.log(min)
        if max > 0:
            max = math.log(max)
        if space.discrete:
            return f"hp.qloguniform('{label}', {min}, {max}, 1)"
        else:
            return f"hp.loguniform('{label}', {min}, {max})"
    else:
        raise SearchSpaceError(path, f"Unknown distribution type: {dist}")