Python lasagne.updates.adam() Examples

The following are 8 code examples of lasagne.updates.adam(). The original project and source file are noted above each example. You may also want to check out all other available functions and classes of the module lasagne.updates.
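
Independent of the projects below, a typical call passes a scalar loss expression together with the list of trainable parameters and hands the returned update dictionary to theano.function. A minimal sketch (the tiny softmax network here is a placeholder for illustration, not taken from any of the examples):

import theano
import theano.tensor as T
import lasagne
from lasagne.updates import adam

X = T.matrix('X')
y = T.ivector('y')

# Small stand-in network: 100 inputs -> 10 softmax outputs
l_in = lasagne.layers.InputLayer((None, 100), input_var=X)
l_out = lasagne.layers.DenseLayer(l_in, num_units=10,
                                  nonlinearity=lasagne.nonlinearities.softmax)

prediction = lasagne.layers.get_output(l_out)
loss = lasagne.objectives.categorical_crossentropy(prediction, y).mean()
params = lasagne.layers.get_all_params(l_out, trainable=True)

# adam() returns an OrderedDict mapping each shared parameter to its update expression
updates = adam(loss, params, learning_rate=1e-3, beta1=0.9, beta2=0.999)
train_fn = theano.function([X, y], loss, updates=updates)
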
Example #1
Source File: Deopen_classification.py    From Deopen with MIT License
def model_initial(X_train, y_train, max_iter=5):
    global params, val_acc
    params = []
    val_acc = np.zeros(max_iter)
    lr = theano.shared(np.float32(1e-4))
    for iteration in range(max_iter):
        print('Initializing weights (%d/%d) ...' % (iteration + 1, max_iter))
        network_init = create_network()
        net_init = NeuralNet(
                network_init,
                max_epochs=3,
                update=adam,
                update_learning_rate=lr,
                train_split=TrainSplit(eval_size=0.1),
                batch_iterator_train=BatchIterator(batch_size=32),
                batch_iterator_test=BatchIterator(batch_size=64),
                on_training_finished=[SaveTrainHistory(iteration=iteration)],
                verbose=0)
        net_init.initialize()
        net_init.fit(X_train, y_train)
        
#model training 
Example #2
Source File: Deopen_classification.py    From Deopen with MIT License
def model_train(X_train, y_train, learning_rate=1e-4, epochs=50):
    network = create_network()
    lr = theano.shared(np.float32(learning_rate))
    net = NeuralNet(
                network,
                max_epochs=epochs,
                update=adam,
                update_learning_rate=lr,
                train_split=TrainSplit(eval_size=0.1),
                batch_iterator_train=BatchIterator(batch_size=32),
                batch_iterator_test=BatchIterator(batch_size=64),
                #on_training_started=[LoadBestParam(iteration=val_acc.argmax())],
                on_epoch_finished=[EarlyStopping(patience=5)],
                verbose=1)
    print('Loading pre-training weights...')
    net.load_params_from(params[val_acc.argmax()])
    print('Continue to train...')
    net.fit(X_train, y_train)
    print('Model training finished.')
    return net
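
In the two Deopen functions above, model_initial is expected to run first: it fills the module-level params and val_acc that model_train later reads to restore the best-scoring initialization. A hedged usage sketch, assuming X_train and y_train are the feature and label arrays the project prepares elsewhere:

# Try several short random initializations, then continue training from the best one
model_initial(X_train, y_train, max_iter=5)    # populates the global params / val_acc
net = model_train(X_train, y_train, learning_rate=1e-4, epochs=50)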


#model testing 
Example #3
Source File: Deopen_regression.py    From Deopen with MIT License
def model_train(X_train, y_train, learning_rate=1e-4, epochs=50):
    network = create_network()
    lr = theano.shared(np.float32(learning_rate))
    net = NeuralNet(
                network,
                max_epochs=epochs,
                update=adam,
                update_learning_rate=lr,
                train_split=TrainSplit(eval_size=0.1),
                batch_iterator_train=BatchIterator(batch_size=32),
                batch_iterator_test=BatchIterator(batch_size=64),
                regression=True,
                objective_loss_function=squared_error,
                #on_training_started=[LoadBestParam(iteration=val_loss.argmin())],
                on_epoch_finished=[EarlyStopping(patience=5)],
                verbose=1)
    print('loading pre-training weights...')
    net.load_params_from(params[val_loss.argmin()])
    print('continue to train...')
    net.fit(X_train, y_train)
    print('training finished')
    return net

#model testing 
Example #4
Source File: lasagne_net.py    From BirdCLEF-Baseline with MIT License
def net_updates(net, loss, lr):    
                        
    # Get all trainable parameters (weights) of our net
    params = l.get_all_params(net, trainable=True)

    # We use the adam update, other options are available
    if cfg.OPTIMIZER == 'adam':
        param_updates = updates.adam(loss, params, learning_rate=lr, beta1=0.9)
    elif cfg.OPTIMIZER == 'nesterov':
        param_updates = updates.nesterov_momentum(loss, params, learning_rate=lr, momentum=0.9)
    elif cfg.OPTIMIZER == 'sgd':
        param_updates = updates.sgd(loss, params, learning_rate=lr)
    else:
        # Fail fast on an unsupported optimizer name
        raise ValueError('Unknown optimizer: %s' % cfg.OPTIMIZER)

    return param_updates

#################### TRAIN FUNCTION ##################### 
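
The original file's train function is not included above; below is a hedged sketch of the compile step that would consume the returned update dictionary (the targets, the cross-entropy objective and the input-variable lookup are illustrative assumptions, not the BirdCLEF-Baseline code itself):

import theano
import theano.tensor as T
from lasagne import layers as l            # matches the l.get_all_params(...) usage above
from lasagne.objectives import categorical_crossentropy

def compile_train_function(net, lr):
    # Symbolic targets and the network's current predictions
    targets = T.matrix('targets')
    prediction = l.get_output(net)

    # Mean categorical cross-entropy as a stand-in training objective
    loss = categorical_crossentropy(prediction, targets).mean()

    # Reuse the example's helper to pick the optimizer from cfg.OPTIMIZER
    param_updates = net_updates(net, loss, lr)

    # The input layer's input_var feeds the data into the graph
    input_var = l.get_all_layers(net)[0].input_var
    return theano.function([input_var, targets], loss, updates=param_updates)
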
Example #5
Source File: Deopen_regression.py    From Deopen with MIT License
def model_initial(X_train, y_train, max_iter=5):
    global params, val_loss
    params = []
    val_loss = np.zeros(max_iter)
    lr = theano.shared(np.float32(1e-4))
    for iteration in range(max_iter):
        print('initializing weights (%d/%d) ...' % (iteration + 1, max_iter))
        print(iteration)
        network_init = create_network()
        net_init = NeuralNet(
                network_init,
                max_epochs=3,
                update=adam,
                update_learning_rate=lr,
                train_split=TrainSplit(eval_size=0.1),
                batch_iterator_train=BatchIterator(batch_size=32),
                batch_iterator_test=BatchIterator(batch_size=64),
                regression=True,
                objective_loss_function=squared_error,
                on_training_finished=[SaveTrainHistory(iteration=iteration)],
                verbose=0)
        net_init.initialize()
        net_init.fit(X_train, y_train)


#model training 
Example #6
Source File: parameter_updates.py    From dcase_task2 with MIT License
def get_update_adam():
    """
    Compute update with momentum
    """

    def update(all_grads, all_params, learning_rate):
        """ Compute updates from gradients """
        return adam(all_grads, all_params, learning_rate)

    return update 
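
The returned update function expects precomputed gradients rather than a loss expression; lasagne.updates.adam() accepts either as its first argument. A hedged sketch on a toy least-squares problem (the problem and variable names are made up for illustration):

import numpy as np
import theano
import theano.tensor as T

# Toy problem: fit a single weight vector to targets by least squares
w = theano.shared(np.zeros(10), name='w')
X = T.dmatrix('X')
y = T.dvector('y')
loss = T.mean((T.dot(X, w) - y) ** 2)

update_fn = get_update_adam()
all_grads = T.grad(loss, [w])                  # explicit gradients, as the factory expects
train_fn = theano.function([X, y], loss,
                           updates=update_fn(all_grads, [w], 0.001))
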
Example #7
Source File: blend.py    From kaggle_diabetic with MIT License
def get_estimator(n_features, files, labels, eval_size=0.1):
    layers = [
        (InputLayer, {'shape': (None, n_features)}),
        (DenseLayer, {'num_units': N_HIDDEN_1, 'nonlinearity': rectify,
                      'W': init.Orthogonal('relu'),
                      'b': init.Constant(0.01)}),
        (FeaturePoolLayer, {'pool_size': 2}),
        (DenseLayer, {'num_units': N_HIDDEN_2, 'nonlinearity': rectify,
                      'W': init.Orthogonal('relu'),
                      'b': init.Constant(0.01)}),
        (FeaturePoolLayer, {'pool_size': 2}),
        (DenseLayer, {'num_units': 1, 'nonlinearity': None}),
    ]
    args = dict(
        update=adam,
        update_learning_rate=theano.shared(util.float32(START_LR)),
        batch_iterator_train=ResampleIterator(BATCH_SIZE),
        batch_iterator_test=BatchIterator(BATCH_SIZE),
        objective=nn.get_objective(l1=L1, l2=L2),
        eval_size=eval_size,
        custom_score=('kappa', util.kappa) if eval_size > 0.0 else None,
        on_epoch_finished=[
            nn.Schedule('update_learning_rate', SCHEDULE),
        ],
        regression=True,
        max_epochs=N_ITER,
        verbose=1,
    )
    net = BlendNet(layers, **args)
    net.set_split(files, labels)
    return net 
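
The learning rate is wrapped in theano.shared so that the nn.Schedule callback can lower it between epochs without recompiling anything; the underlying mechanism is simply set_value on that shared variable. A minimal illustration (the epoch-to-rate mapping below is hypothetical, not the project's SCHEDULE):

import numpy as np
import theano

lr = theano.shared(np.float32(1e-3))

# Hypothetical epoch -> learning rate mapping
LR_SCHEDULE = {0: 1e-3, 50: 1e-4, 80: 1e-5}

def maybe_update_lr(epoch):
    # Compiled functions that reference `lr` pick up the new value automatically
    if epoch in LR_SCHEDULE:
        lr.set_value(np.float32(LR_SCHEDULE[epoch]))
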
Example #8
Source File: updates.py    From Deep-SVDD with MIT License
def get_updates(nnet,
                train_obj,
                trainable_params,
                solver=None):

    implemented_solvers = ("sgd", "momentum", "nesterov", "adagrad", "rmsprop", "adadelta", "adam", "adamax")

    if solver not in implemented_solvers:
        nnet.sgd_solver = "adam"
    else:
        nnet.sgd_solver = solver

    if nnet.sgd_solver == "sgd":
        updates = l_updates.sgd(train_obj,
                                trainable_params,
                                learning_rate=Cfg.learning_rate)
    elif nnet.sgd_solver == "momentum":
        updates = l_updates.momentum(train_obj,
                                     trainable_params,
                                     learning_rate=Cfg.learning_rate,
                                     momentum=Cfg.momentum)
    elif nnet.sgd_solver == "nesterov":
        updates = l_updates.nesterov_momentum(train_obj,
                                              trainable_params,
                                              learning_rate=Cfg.learning_rate,
                                              momentum=Cfg.momentum)
    elif nnet.sgd_solver == "adagrad":
        updates = l_updates.adagrad(train_obj,
                                    trainable_params,
                                    learning_rate=Cfg.learning_rate)
    elif nnet.sgd_solver == "rmsprop":
        updates = l_updates.rmsprop(train_obj,
                                    trainable_params,
                                    learning_rate=Cfg.learning_rate,
                                    rho=Cfg.rho)
    elif nnet.sgd_solver == "adadelta":
        updates = l_updates.adadelta(train_obj,
                                     trainable_params,
                                     learning_rate=Cfg.learning_rate,
                                     rho=Cfg.rho)
    elif nnet.sgd_solver == "adam":
        updates = l_updates.adam(train_obj,
                                 trainable_params,
                                 learning_rate=Cfg.learning_rate)
    elif nnet.sgd_solver == "adamax":
        updates = l_updates.adamax(train_obj,
                                   trainable_params,
                                   learning_rate=Cfg.learning_rate)

    return updates
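
As in the other examples, the returned dictionary is meant to be handed to theano.function. A hedged wiring sketch; nnet, train_obj, trainable_params and inputs are stand-ins for objects the surrounding Deep-SVDD training code provides:

import theano

# `nnet`, `train_obj`, `trainable_params` and `inputs` come from the caller;
# get_updates() also records the chosen solver on nnet.sgd_solver.
updates = get_updates(nnet, train_obj, trainable_params, solver="adam")
train_fn = theano.function(inputs, train_obj, updates=updates)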