Python lasagne.regularization.l2() Examples
The following are 30 code examples of lasagne.regularization.l2(). All of them are taken from the u24_lymphocyte project, released under the BSD 3-Clause "New" or "Revised" License; the source file is noted above each example.
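In every example below, l2 is not called on data directly; it is passed as the penalty argument to regularization.regularize_network_params(), which applies it to each network parameter tagged 'regularizable' and sums the results. The l2 penalty itself is just the sum of squared tensor elements. Here is a minimal, self-contained sketch of the same pattern; the toy layer sizes, the 1e-4 weight-decay coefficient, and the optimizer settings are illustrative assumptions, not values taken from the examples:

import lasagne
import theano
import theano.tensor as T
from lasagne.regularization import regularize_network_params, l2

# A toy regression network; the shapes are illustrative assumptions.
input_var = T.matrix('inputs')
target_var = T.matrix('targets')
l_in = lasagne.layers.InputLayer(shape=(None, 100), input_var=input_var)
l_out = lasagne.layers.DenseLayer(l_in, num_units=1,
                                  nonlinearity=lasagne.nonlinearities.linear)

# Data term plus weight decay: l2 sums the squared elements of every
# parameter tagged 'regularizable' (weights, but not biases, by default).
output = lasagne.layers.get_output(l_out)
loss = (lasagne.objectives.squared_error(output, target_var).mean()
        + 1e-4 * regularize_network_params(l_out, l2))

params = lasagne.layers.get_all_params(l_out, trainable=True)
train_fn = theano.function(
    [input_var, target_var], loss,
    updates=lasagne.updates.nesterov_momentum(loss, params,
                                              learning_rate=0.01, momentum=0.9))

For per-layer penalties with different coefficients, regularize_layer_params() can be used on individual layers instead.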
Example #1
Source File: conv_sup_regression_4ch_rot.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, smth_act_layer, network = network_layers;

    # Loss: mean squared error plus an L2 weight-decay penalty over every
    # network parameter tagged 'regularizable'.
    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    # Nesterov-momentum updates: one rule for all trainable parameters and a
    # slower one restricted to the stacked parameters.
    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00002, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    # Compiled Theano functions for validation (loss plus intermediate
    # activations), full training, and stack-only training.
    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #2
Source File: conv_sup_regression_4ch_ago.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, ago_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00002, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    smth_act = lasagne.layers.get_output(ago_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #3
Source File: conv_sup_regression_4ch_ago.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, ago_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00002, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    smth_act = lasagne.layers.get_output(ago_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #4
Source File: conv_sup_regression_4ch.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, smth_act_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00002, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #5
Source File: conv_sup_regression_baseline.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network, encode_layer, input_var, aug_var, target_var, stack_params, weight_decay):
    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.0001, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.0001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #6
Source File: conv_sup_regression_baseline.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network, encode_layer, input_var, aug_var, target_var, stack_params, weight_decay):
    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.0001, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.0001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #7
Source File: conv_sup_regression.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, smth_act_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00002, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #8
Source File: conv_sup_regression.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, smth_act_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00002, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #9
Source File: conv_sup_regression_4ch_ago.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, ago_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00002, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    smth_act = lasagne.layers.get_output(ago_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #10
Source File: conv_sup_regression_syn.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, smth_act_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00001, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic = True);
    val_fn = theano.function([input_var, target_var], [loss, encode, hidden, smth_act, output]);
    train_fn = theano.function([input_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #11
Source File: conv_sup_regression_baseline.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00002, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #12
Source File: conv_sup_regression_hseg.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, smth_act_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00002, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #13
Source File: conv_sup_regression_4ch_rot.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, smth_act_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00002, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #14
Source File: conv_sup_regression_4ch.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, smth_act_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00002, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #15
Source File: conv_sup_regression_4ch.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, smth_act_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00002, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #16
Source File: conv_sup_regression.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, smth_act_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00002, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #17
Source File: conv_sup_regression_hseg.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, smth_act_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00002, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #18
Source File: conv_sup_regression_4ch_he.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, he_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00002, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    smth_act = lasagne.layers.get_output(he_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #19
Source File: conv_sup_regression_syn.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, smth_act_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00002, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic = True);
    val_fn = theano.function([input_var, target_var], [loss, encode, hidden, smth_act, output]);
    train_fn = theano.function([input_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #20
Source File: conv_sup_regression_4ch_ago.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, ago_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00002, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    smth_act = lasagne.layers.get_output(ago_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #21
Source File: conv_sup_regression.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, smth_act_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00002, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #22
Source File: conv_sup_regression_4ch.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, smth_act_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00002, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #23
Source File: conv_sup_large_regression_syn.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, smth_act_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00001, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic = True);
    val_fn = theano.function([input_var, target_var], [loss, encode, hidden, smth_act, output]);
    train_fn = theano.function([input_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #24
Source File: conv_sup_regression_hseg_4ch_relu.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    # Unlike the earlier examples, the training loss here uses the stochastic
    # (deterministic = False) forward pass, so any noise layers stay active
    # during training, while `output` remains the deterministic prediction.
    pred_train = lasagne.layers.get_output(network, deterministic = False);
    loss = lasagne.objectives.squared_error(pred_train, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00002, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #25
Source File: conv_sup_regression_hseg_4ch.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, smth_act_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    pred_train = lasagne.layers.get_output(network, deterministic = False);
    loss = lasagne.objectives.squared_error(pred_train, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.000005, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.000005, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #26
Source File: conv_sup_regression_hseg_4ch_leaky.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    pred_train = lasagne.layers.get_output(network, deterministic = False);
    loss = lasagne.objectives.squared_error(pred_train, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00002, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #27
Source File: conv_sup_regression_hseg_4ch_he.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, he_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    pred_train = lasagne.layers.get_output(network, deterministic = False);
    loss = lasagne.objectives.squared_error(pred_train, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00002, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    smth_act = lasagne.layers.get_output(he_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #28
Source File: conv_sup_regression_hseg.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, smth_act_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00002, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #29
Source File: conv_sup_regression_baseline.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00002, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, output]);
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;
Example #30
Source File: conv_sup_regression_syn.py From u24_lymphocyte with BSD 3-Clause "New" or "Revised" License
def make_training_functions(network_layers, input_var, target_var, stack_params, weight_decay):
    encode_layer, hidden_layer, smth_act_layer, network = network_layers;

    output = lasagne.layers.get_output(network, deterministic = True);
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer = network, penalty = regularization.l2, tags={'regularizable' : True});

    params = layers.get_all_params(network, trainable = True);
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate = 0.00001, momentum = 0.95);
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate = 0.00001, momentum = 0.95);

    encode = lasagne.layers.get_output(encode_layer, deterministic = True);
    hidden = lasagne.layers.get_output(hidden_layer, deterministic = True);
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic = True);
    val_fn = theano.function([input_var, target_var], [loss, encode, hidden, smth_act, output]);
    train_fn = theano.function([input_var, target_var], loss, updates = updates);
    stack_train_fn = theano.function([input_var, target_var], loss, updates = stack_updates);

    return val_fn, train_fn, stack_train_fn;