Python keras.callbacks.LearningRateScheduler() Examples

The following are code examples of keras.callbacks.LearningRateScheduler(), taken from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.callbacks, or try the search function.
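Before the project-specific examples, here is a minimal, self-contained sketch of how the callback is used (the toy data, model, and schedule values below are placeholders chosen for illustration, not taken from any of the listed projects). The schedule function receives the zero-based epoch index and returns the learning rate to apply for that epoch.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import LearningRateScheduler

# Toy data and model, used only to exercise the callback.
x = np.random.rand(100, 8)
y = np.random.randint(0, 2, size=(100, 1))

model = Sequential([Dense(16, input_dim=8, activation='relu'),
                    Dense(1, activation='sigmoid')])
model.compile(optimizer='sgd', loss='binary_crossentropy')

def schedule(epoch):
    # Halve the learning rate every 5 epochs, starting from 0.1.
    return 0.1 * (0.5 ** (epoch // 5))

model.fit(x, y, epochs=20, batch_size=16, verbose=0,
          callbacks=[LearningRateScheduler(schedule)])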
Example #1
Source File: test_callbacks.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_LearningRateScheduler():
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5)
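    # After 5 epochs the schedule was last called with epoch index 4, so lr should be 1. / (1. + 4) == 0.2.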
    assert (float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon() 
Example #2
Source File: training.py    From 3D-CNNs-for-Liver-Classification with Apache License 2.0
def get_callbacks(model_file, initial_learning_rate=0.0001, learning_rate_drop=0.5, learning_rate_epochs=None,
                  learning_rate_patience=50, logging_file="training.log", verbosity=1,
                  early_stopping_patience=None):
    callbacks = list()
    callbacks.append(ModelCheckpoint(model_file, monitor='val_acc', save_best_only=True, verbose=verbosity, save_weights_only=True))
    # callbacks.append(ModelCheckpoint(model_file, save_best_only=True, save_weights_only=True))
    callbacks.append(CSVLogger(logging_file, append=True))
    if learning_rate_epochs:
        callbacks.append(LearningRateScheduler(partial(step_decay, initial_lrate=initial_learning_rate,
                                                       drop=learning_rate_drop, epochs_drop=learning_rate_epochs)))
    else:
        callbacks.append(ReduceLROnPlateau(factor=learning_rate_drop, patience=learning_rate_patience,
                                           verbose=verbosity))
    if early_stopping_patience:
        callbacks.append(EarlyStopping(verbose=verbosity, patience=early_stopping_patience))
    return callbacks 
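The step_decay function wrapped with functools.partial above is defined elsewhere in that project. A typical step-decay schedule compatible with the keyword arguments used here might look like the following sketch (an assumed implementation for illustration, not the project's actual code):

import math
from functools import partial
from keras.callbacks import LearningRateScheduler

def step_decay(epoch, initial_lrate=0.0001, drop=0.5, epochs_drop=10):
    # Multiply the initial rate by `drop` once every `epochs_drop` epochs.
    return initial_lrate * math.pow(drop, math.floor(epoch / epochs_drop))

# Mirrors the call in get_callbacks() above:
scheduler = LearningRateScheduler(partial(step_decay, initial_lrate=0.0001,
                                          drop=0.5, epochs_drop=10))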
Example #3
Source File: main.py    From DnCNN-keras with MIT License
def train():
    
    data = load_train_data()
    data = data.reshape((data.shape[0],data.shape[1],data.shape[2],1))
    data = data.astype('float32')/255.0
    # model selection
    if args.pretrain:
        model = load_model(args.pretrain, compile=False)
    elif args.model == 'DnCNN':
        model = models.DnCNN()
    # compile the model
    model.compile(optimizer=Adam(), loss=['mse'])
    
    # use call back functions
    ckpt = ModelCheckpoint(save_dir+'/model_{epoch:02d}.h5', monitor='val_loss', 
                    verbose=0, period=args.save_every)
    csv_logger = CSVLogger(save_dir+'/log.csv', append=True, separator=',')
    lr = LearningRateScheduler(step_decay)
    # train 
    history = model.fit_generator(train_datagen(data, batch_size=args.batch_size),
                    steps_per_epoch=len(data)//args.batch_size, epochs=args.epoch, verbose=1, 
                    callbacks=[ckpt, csv_logger, lr])
    
    return model 
Example #4
Source File: cnn_mnist_softmax.py    From affinity-loss with MIT License
def train(inbalance_size):
    (X_train, y_train), (X_test, y_test) = inbalanced_mnist(inbalance_size)
    y_train = y_train[:, :10]
    y_test = y_test[:, :10]

    model = create_models()
    model.compile("adam", "categorical_crossentropy", ["acc"])

    scheduler = LearningRateScheduler(step_decay)
    f1 = F1Callback(model, X_test, y_test)

    history = model.fit(X_train, y_train, validation_data=(X_test, y_test), callbacks=[scheduler, f1],
                        batch_size=128, epochs=100, verbose=0).history

    max_acc = max(history["val_acc"])
    max_f1 = max(f1.f1_log)
    print(f"{inbalance_size} {max_acc:.04} {max_f1:.04}") 
Example #11
Source File: dev_runner.py    From parapred with MIT License
def full_run(dataset, out_weights="weights.h5"):
    cache_file = dataset.split("/")[-1] + ".p"
    dataset = open_dataset(dataset, dataset_cache=cache_file)
    cdrs, lbls, masks = dataset["cdrs"], dataset["lbls"], dataset["masks"]

    sample_weight = np.squeeze((lbls * 1.7 + 1) * masks)
    model = ab_seq_model(dataset["max_cdr_len"])

    rate_schedule = lambda e: 0.001 if e >= 10 else 0.01

    model.fit([cdrs, np.squeeze(masks)],
              lbls, batch_size=32, epochs=18,
              sample_weight=sample_weight,
              callbacks=[LearningRateScheduler(rate_schedule)])

    model.save_weights(out_weights) 
Example #12
Source File: snapshot.py    From keras-contrib with MIT License
def get_callbacks(self, model_prefix='Model'):
        """
        Creates a list of callbacks that can be used during training to create a
        snapshot ensemble of the model.

        Args:
            model_prefix: prefix for the filename of the weights.

        Returns: list of 3 callbacks [ModelCheckpoint, LearningRateScheduler,
                 SnapshotModelCheckpoint] which can be provided to the 'fit' function
        """
        if not os.path.exists('weights/'):
            os.makedirs('weights/')

        callback_list = [ModelCheckpoint('weights/%s-Best.h5' % model_prefix,
                                         monitor='val_acc',
                                         save_best_only=True, save_weights_only=True),
                         LearningRateScheduler(schedule=self._cosine_anneal_schedule),
                         SnapshotModelCheckpoint(self.T,
                                                 self.M,
                                                 fn_prefix='weights/%s' % model_prefix)]

        return callback_list 
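The _cosine_anneal_schedule referenced above restarts the learning rate at the start of each snapshot cycle and decays it along a cosine curve, as in the snapshot-ensembles approach. A standalone sketch of that schedule, using the T (total epochs) and M (number of snapshots) attributes seen above plus an assumed initial rate alpha_zero, could look like this (names and defaults are illustrative, not the library's exact code):

import math

def cosine_anneal_schedule(epoch, T=200, M=5, alpha_zero=0.1):
    # Length of one cycle; the rate restarts at alpha_zero when a new cycle begins.
    epochs_per_cycle = T // M
    cos_inner = math.pi * (epoch % epochs_per_cycle) / epochs_per_cycle
    # Decays from alpha_zero down to ~0 within each cycle.
    return float(alpha_zero / 2 * (math.cos(cos_inner) + 1))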
Example #13
Source File: snapshot.py    From textcaps with MIT License
def get_callbacks(self, log, model_prefix='Model'):
        """
        Creates a list of callbacks that can be used during training to create a
        snapshot ensemble of the model.
        Args:
            log: an extra callback (a CSVLogger in this project) appended to the list.
            model_prefix: prefix for the filename of the weights.
        Returns: list of 4 callbacks [ModelCheckpoint, LearningRateScheduler,
                 SnapshotModelCheckpoint, log] which can be provided to the 'fit' function
        """
        if not os.path.exists(self.save_dir+'/weights/'):
            os.makedirs(self.save_dir+'/weights/')

        callback_list = [callbacks.ModelCheckpoint(self.save_dir+"/weights/weights_{epoch:002d}.h5", monitor="val_capsnet_acc",
                                                    save_best_only=True, save_weights_only=False),
                         callbacks.LearningRateScheduler(schedule=self._cosine_anneal_schedule),
                         SnapshotModelCheckpoint(self.T, self.M, fn_prefix=self.save_dir+'/weights/%s' % model_prefix), log]

        return callback_list 
Example #14
Source File: cnn_mnist_affinity.py    From affinity-loss with MIT License
def train(inbalance_size):
    (X_train, y_train), (X_test, y_test) = inbalanced_mnist(inbalance_size)

    model = create_models()
    model.compile("adam", affinity_loss(0.75), [acc])

    scheduler = LearningRateScheduler(step_decay)
    f1 = F1Callback(model, X_test, y_test)

    history = model.fit(X_train, y_train, validation_data=(X_test, y_test), callbacks=[scheduler, f1],
                        batch_size=128, epochs=100, verbose=0).history

    max_acc = max(history["val_acc"])
    max_f1 = max(f1.f1_log)
    print(f"{inbalance_size} {max_acc:.04} {max_f1:.04}") 
Example #15
Source File: training.py    From EUSIPCO2017 with GNU Affero General Public License v3.0
def train(self):
        model = self.model_module.build_model(IRMAS_N_CLASSES)

        early_stopping = EarlyStopping(monitor='val_loss', patience=EARLY_STOPPING_EPOCH)
        save_clb = ModelCheckpoint(
            "{weights_basepath}/{model_path}/".format(
                weights_basepath=MODEL_WEIGHT_BASEPATH,
                model_path=self.model_module.BASE_NAME) +
            "epoch.{epoch:02d}-val_loss.{val_loss:.3f}-fbeta.{val_fbeta_score:.3f}"+"-{key}.hdf5".format(
                key=self.model_module.MODEL_KEY),
            monitor='val_loss',
            save_best_only=True)
        lrs = LearningRateScheduler(lambda epoch_n: self.init_lr / (2**(epoch_n//SGD_LR_REDUCE)))
        model.summary()
        model.compile(optimizer=self.optimizer,
                      loss='categorical_crossentropy',
                      metrics=['accuracy', fbeta_score])

        history = model.fit_generator(self._batch_generator(self.X_train, self.y_train),
                                      samples_per_epoch=self.model_module.SAMPLES_PER_EPOCH,
                                      nb_epoch=MAX_EPOCH_NUM,
                                      verbose=2,
                                      callbacks=[save_clb, early_stopping, lrs],
                                      validation_data=self._batch_generator(self.X_val, self.y_val),
                                      nb_val_samples=self.model_module.SAMPLES_PER_VALIDATION,
                                      class_weight=None,
                                      nb_worker=1)

        pickle.dump(history.history, open('{history_basepath}/{model_path}/history_{model_key}.pkl'.format(
            history_basepath=MODEL_HISTORY_BASEPATH,
            model_path=self.model_module.BASE_NAME,
            model_key=self.model_module.MODEL_KEY),
            'w')) 
Example #16
Source File: training.py    From 3DUnetCNN with MIT License
def get_callbacks(model_file, initial_learning_rate=0.0001, learning_rate_drop=0.5, learning_rate_epochs=None,
                  learning_rate_patience=50, logging_file="training.log", verbosity=1,
                  early_stopping_patience=None):
    callbacks = list()
    callbacks.append(ModelCheckpoint(model_file, save_best_only=True))
    callbacks.append(CSVLogger(logging_file, append=True))
    if learning_rate_epochs:
        callbacks.append(LearningRateScheduler(partial(step_decay, initial_lrate=initial_learning_rate,
                                                       drop=learning_rate_drop, epochs_drop=learning_rate_epochs)))
    else:
        callbacks.append(ReduceLROnPlateau(factor=learning_rate_drop, patience=learning_rate_patience,
                                           verbose=verbosity))
    if early_stopping_patience:
        callbacks.append(EarlyStopping(verbose=verbosity, patience=early_stopping_patience))
    return callbacks 
Example #17
Source File: train_model.py    From cv with MIT License
def train_model(weight = None, batch_size=256, epochs = 10):

    cg = caption_generator.CaptionGenerator()
    model = cg.create_model()

    if weight != None:
        model.load_weights(weight)

    counter = 0
    file_name = DATA_PATH + 'weights-checkpoint.h5'

    #define callbacks
    checkpoint = ModelCheckpoint(file_name, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
    tensor_board = TensorBoard(log_dir='./logs', write_graph=True)
    hist_lr = LR_hist()
    reduce_lr = LearningRateScheduler(step_decay) 
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.1, patience=16, verbose=1)
    callbacks_list = [checkpoint, tensor_board, hist_lr, reduce_lr, early_stopping]

    hist = model.fit_generator(cg.data_generator_train(batch_size=batch_size),
                               steps_per_epoch=cg.total_samples/batch_size,
                               epochs=epochs, verbose=2,
                               callbacks=callbacks_list,
                               validation_data=cg.data_generator_val(batch_size=batch_size),
                               validation_steps=cg.total_samples/(batch_size*13.0))

    model.save(DATA_PATH + 'final_model.h5', overwrite=True)
    model.save_weights(DATA_PATH + 'final_weights.h5',overwrite=True)

    hist_file = DATA_PATH + '/hist_model.dat'
    with open(hist_file, 'w') as f:
        pickle.dump(hist.history, f)

    print "training complete...\n"

    return model, hist, hist_lr 
Example #18
Source File: multiclass_experiment.py    From AnomalyDetectionTransformations with MIT License
def train_cifar10():
    (x_train, y_train), (x_test, y_test) = load_cifar10()

    idg = ImageDataGenerator(
        horizontal_flip=True,
        height_shift_range=4,
        width_shift_range=4,
        fill_mode='reflect'
    )

    idg.fit(x_train)

    n = 16
    k = 8
    mdl = create_wide_residual_network(x_train.shape[1:], 10, n, k)
    mdl.compile(SGDTorch(lr=.1, momentum=0.9, nesterov=True), 'categorical_crossentropy', ['acc'])

    lr_cb = LearningRateScheduler(lambda e: 0.1 * (0.2 ** (e >= 160 and 3 or e >= 120 and 2 or e >= 60 and 1 or 0)))

    batch_size = 128
    mdl.fit_generator(
        generator=idg.flow(x_train, to_categorical(y_train), batch_size=batch_size),
        epochs=200,
        validation_data=(idg.standardize(x_test), to_categorical(y_test)),
        callbacks=[lr_cb]
    )
    mdl.save_weights('cifar10_WRN_{}-{}.h5'.format(n, k)) 
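The lambda passed to LearningRateScheduler above relies on Python's and/or short-circuiting to pick an exponent: the rate stays at 0.1 until epoch 60, then drops to 0.02, 0.004, and 0.0008 after epochs 60, 120, and 160. An equivalent, more explicit schedule (a readability rewrite, not code from the project) would be:

from keras.callbacks import LearningRateScheduler

def wrn_schedule(epoch):
    # Piecewise-constant schedule: start at 0.1 and divide by 5 at epochs 60, 120, and 160.
    if epoch >= 160:
        return 0.1 * 0.2 ** 3   # 0.0008
    elif epoch >= 120:
        return 0.1 * 0.2 ** 2   # 0.004
    elif epoch >= 60:
        return 0.1 * 0.2 ** 1   # 0.02
    return 0.1

lr_cb = LearningRateScheduler(wrn_schedule)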
Example #19
Source File: keras_blstm_crf.py    From sequence-tagging-ner with Apache License 2.0
def gen_callbacks(self, callbacks_list):
        lrate = LearningRateScheduler(self.step_decay)
        callbacks_list.append(lrate)

        #loss_history = LossHistory(self.step_decay)
        #callbacks_list.append(loss_history)
        return callbacks_list 
Example #20
Source File: hccrn.py    From hwr-address with GNU General Public License v3.0
def model_train(model, dataset, batch_size, weights_path, history_path, nb_epoch=200, samples_per_epoch=1000000):
	checkpointer = ModelCheckpoint(filepath=weights_path, verbose=1, save_best_only=False, save_weights_only=True)
	#lrate = LearningRateScheduler(step_decay)
	lrate = ReduceLROnPlateau(monitor='loss', factor=0.3, patience=5, verbose=0, mode='auto', epsilon=0.0001, cooldown=0, min_lr=0.00001)
	#early_stop = EarlyStopping(monitor='loss', patience=5, verbose=0, mode='auto')
	history = model.fit_generator(input_data.generate_data(dataset, batch_size), samples_per_epoch, nb_epoch, callbacks=[checkpointer, lrate])
	with open(history_path, 'w') as f:
		f.write(str(history.history))
	return model 
Example #21
Source File: training.py    From keras-rtst with MIT License
def train(model, args, input_generator, callbacks=[], num_samples=10):
    lr_schedule = TextureNetLearningSchedule(
        args.batch_size, args.num_iterations_per_epoch,
        initial_lr=args.learn_rate, min_lr=args.min_learn_rate,
        cliff=20)
    callbacks.append(LearningRateScheduler(lr_schedule))
    try:
        model.fit_generator(
            generate_training_batches(args, input_generator),
            samples_per_epoch=args.num_iterations_per_epoch, nb_epoch=args.num_epochs,
            callbacks=callbacks
        )
    except KeyboardInterrupt:
        print('Stopping training...') 
Example #22
Source File: mnist_affinity_visualize.py    From affinity-loss with MIT License
def train(inbalance_size):
    (X_train, y_train), (X_test, y_test) = inbalanced_mnist(inbalance_size)

    model = create_models()
    model.compile("adam", affinity_loss(0.75), [acc])

    scheduler = LearningRateScheduler(step_decay)
    cb = EmbeddingCallback(model, X_train, X_test, y_train, y_test)

    history = model.fit(X_train, y_train, validation_data=(X_test, y_test), callbacks=[cb, scheduler],
                        batch_size=128, epochs=200, verbose=1).history

    with tarfile.open("mnist_inbalanced.tar", "w") as tar:
        tar.add("mnist_inbalanced") 
Example #23
Source File: enhancer.py    From ImageEnhancer with MIT License
def train_model(self):
        """ train the model """
        callbacks = []
        callbacks.append(TensorBoard(self.graph_path))
        callbacks.append(LearningRateScheduler(lambda e: self.learning_rate * 0.999 ** (e / 20)))
        callbacks.append(ModelCheckpoint(self.checkpoint_path + 'checkpoint.best.hdf5', save_best_only=True))
        if not self.best_cp:
            callbacks.append(ModelCheckpoint(self.checkpoint_path + 'checkpoint.{epoch:02d}-{val_loss:.2f}.hdf5'))
        callbacks.append(LambdaCallback(on_epoch_end=lambda epoch, logs: self.save_image('test.{e:02d}-{val_loss:.2f}'.format(e=epoch, **logs))))
        self.model.compile(Adam(lr=self.learning_rate), binary_crossentropy)
        self.model.fit(self.corrupted['train'], self.source['train'],
                       batch_size=self.batch_size,
                       epochs=self.epoch,
                       callbacks=callbacks,
                       validation_data=(self.corrupted['valid'], self.source['valid'])) 
Example #24
Source File: lang_model_sgd.py    From tying-wv-and-wc with MIT License
def get_lr_scheduler(self):
        def scheduler(epoch):
            epoch_interval = K.get_value(self.epoch_interval)
            if epoch != 0 and (epoch + 1) % epoch_interval == 0:
                lr = K.get_value(self.lr)
                decay = K.get_value(self.decay)
                K.set_value(self.lr, lr * decay)
                if self.verbose:
                    print(self.get_config())
            return K.get_value(self.lr)
    
        return LearningRateScheduler(scheduler) 
Example #25
Source File: callback.py    From DeepANPR with MIT License
def get_callbacks(args):
    scheduler = LearningRateScheduler(lr_scheduler)
    modelckpt = ModelCheckpoint(filepath=join(args.experiment_dir, 'best-model.h5'), monitor='val_loss', save_best_only=True, mode='auto')
    lr_tensorboard = LRTensorBoard(log_dir=join(args.experiment_dir, 'TensorBoard'))

    callbacks = [modelckpt, lr_tensorboard, scheduler]
    return callbacks 
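Both lr_scheduler and LRTensorBoard are helpers defined elsewhere in that project. A common way to implement a callback like LRTensorBoard (an assumed sketch, not the project's code) is to subclass TensorBoard and add the current learning rate to the logs so it is recorded alongside the other metrics:

from keras.callbacks import TensorBoard
import keras.backend as K

class LRTensorBoard(TensorBoard):
    """TensorBoard callback that also records the current learning rate."""

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        # Read the optimizer's learning rate variable and log it under 'lr'.
        logs['lr'] = float(K.get_value(self.model.optimizer.lr))
        super(LRTensorBoard, self).on_epoch_end(epoch, logs)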
Example #26
Source File: training.py    From Keras-Brats-Improved-Unet3d with MIT License
def get_callbacks(model_file, initial_learning_rate=0.0001, learning_rate_drop=0.5, learning_rate_epochs=None,
                  learning_rate_patience=50, logging_file="training.log", verbosity=1,
                  early_stopping_patience=None):
    callbacks = list()
    callbacks.append(ModelCheckpoint(model_file, save_best_only=True))
    callbacks.append(CSVLogger(logging_file, append=True))
    if learning_rate_epochs:
        callbacks.append(LearningRateScheduler(partial(step_decay, initial_lrate=initial_learning_rate,
                                                       drop=learning_rate_drop, epochs_drop=learning_rate_epochs)))
    else:
        callbacks.append(ReduceLROnPlateau(factor=learning_rate_drop, patience=learning_rate_patience,
                                           verbose=verbosity))
    if early_stopping_patience:
        callbacks.append(EarlyStopping(verbose=verbosity, patience=early_stopping_patience))
    return callbacks 
Example #27
Source File: textcaps_emnist_bal.py    From textcaps with MIT License
def train(model, data, args):
    """
    Training a CapsuleNet
    :param model: the CapsuleNet model
    :param data: a tuple containing training and testing data, like `((x_train, y_train), (x_test, y_test))`
    :param args: arguments
    :return: The trained model
    """
    (x_train, y_train), (x_test, y_test) = data

    log = callbacks.CSVLogger(args.save_dir + '/log.csv')
    checkpoint = callbacks.ModelCheckpoint(args.save_dir + '/weights-{epoch:02d}.h5', monitor='val_capsnet_acc',
                                           save_best_only=False, save_weights_only=True, verbose=1)
    lr_decay = callbacks.LearningRateScheduler(schedule=lambda epoch: args.lr * (args.lr_decay ** epoch))

    model.compile(optimizer=optimizers.Adam(lr=args.lr),
                  loss=[margin_loss, 'mse'],
                  loss_weights=[1., args.lam_recon],
                  metrics={'capsnet': 'accuracy'})

    def train_generator(x, y, batch_size, shift_fraction=0.):
        train_datagen = ImageDataGenerator(width_shift_range=shift_fraction,
                                           height_shift_range=shift_fraction)
        generator = train_datagen.flow(x, y, batch_size=batch_size)
        while 1:
            x_batch, y_batch = generator.next()
            yield ([x_batch, y_batch], [y_batch, x_batch])

    model.fit_generator(generator=train_generator(x_train, y_train, args.batch_size, args.shift_fraction),
                        steps_per_epoch=int(y_train.shape[0] / args.batch_size),
                        epochs=args.epochs,
                        shuffle = True,
                        validation_data=[[x_test, y_test], [y_test, x_test]],
                        callbacks=snapshot.get_callbacks(log,model_prefix=model_prefix))

    model.save_weights(args.save_dir + '/trained_model.h5')
    print('Trained model saved to \'%s/trained_model.h5\'' % args.save_dir)

    return model 
Example #28
Source File: step2_train_nodule_detector.py    From kaggle_ndsb2017 with MIT License
def train(model_name, fold_count, train_full_set=False, load_weights_path=None, ndsb3_holdout=0, manual_labels=True):
    batch_size = 16
    train_files, holdout_files = get_train_holdout_files(train_percentage=80, ndsb3_holdout=ndsb3_holdout, manual_labels=manual_labels, full_luna_set=train_full_set, fold_count=fold_count)

    # train_files = train_files[:100]
    # holdout_files = train_files[:10]
    train_gen = data_generator(batch_size, train_files, True)
    holdout_gen = data_generator(batch_size, holdout_files, False)
    for i in range(0, 10):
        tmp = next(holdout_gen)
        cube_img = tmp[0][0].reshape(CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1)
        cube_img = cube_img[:, :, :, 0]
        cube_img *= 255.
        cube_img += MEAN_PIXEL_VALUE
        # helpers.save_cube_img("c:/tmp/img_" + str(i) + ".png", cube_img, 4, 8)
        # print(tmp)

    learnrate_scheduler = LearningRateScheduler(step_decay)
    model = get_net(load_weight_path=load_weights_path)
    holdout_txt = "_h" + str(ndsb3_holdout) if manual_labels else ""
    if train_full_set:
        holdout_txt = "_fs" + holdout_txt
    checkpoint = ModelCheckpoint("workdir/model_" + model_name + "_" + holdout_txt + "_e" + "{epoch:02d}-{val_loss:.4f}.hd5", monitor='val_loss', verbose=1, save_best_only=not train_full_set, save_weights_only=False, mode='auto', period=1)
    checkpoint_fixed_name = ModelCheckpoint("workdir/model_" + model_name + "_" + holdout_txt + "_best.hd5", monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
    model.fit_generator(train_gen, len(train_files) / 1, 12, validation_data=holdout_gen, nb_val_samples=len(holdout_files) / 1, callbacks=[checkpoint, checkpoint_fixed_name, learnrate_scheduler])
    model.save("workdir/model_" + model_name + "_" + holdout_txt + "_end.hd5") 
Example #29
Source File: train.py    From toxic_comments with MIT License
def define_callbacks(early_stopping_delta, early_stopping_epochs, use_lr_strategy=True, initial_lr=0.005, lr_drop_koef=0.66, epochs_to_drop=5, model_checkpoint_dir=None):
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=early_stopping_delta, patience=early_stopping_epochs, verbose=1)
    callbacks_list = [early_stopping]
    if model_checkpoint_dir is not None:
        model_checkpoint = ModelCheckpoint(os.path.join(model_checkpoint_dir,'weights.{epoch:02d}-{val_loss:.2f}.h5'), monitor='val_loss', save_best_only=True, verbose=0)
        callbacks_list.append(model_checkpoint)
    if use_lr_strategy:
        epochs_to_drop = float(epochs_to_drop)
        loss_history = LossHistory(initial_lr, lr_drop_koef, epochs_to_drop)
        lrate = LearningRateScheduler(lambda epoch: step_decay(initial_lr, lr_drop_koef, epochs_to_drop, epoch))
        callbacks_list.append(loss_history)
        callbacks_list.append(lrate)
    return callbacks_list 
Example #30
Source File: multiclass_experiment.py    From AnomalyDetectionTransformations with MIT License
def train_cifar10_transformations():
    (x_train, y_train), _ = load_cifar10()

    transformer = Transformer(8, 8)

    def data_gen(x, y, batch_size):
        while True:
            ind_permutation = np.random.permutation(len(x))
            for b_start_ind in range(0, len(x), batch_size):
                batch_inds = ind_permutation[b_start_ind:b_start_ind + batch_size]
                x_batch = x[batch_inds]
                y_batch = y[batch_inds].flatten()

                if K.image_data_format() == 'channels_first':
                    x_batch = np.transpose(x_batch, (0, 2, 3, 1))

                y_t_batch = np.random.randint(0, transformer.n_transforms, size=len(x_batch))

                x_batch = transformer.transform_batch(x_batch, y_t_batch)

                if K.image_data_format() == 'channels_first':
                    x_batch = np.transpose(x_batch, (0, 3, 1, 2))

                yield (x_batch, [to_categorical(y_batch, num_classes=10), to_categorical(y_t_batch, num_classes=transformer.n_transforms)])

    n = 16
    k = 8
    base_mdl = create_wide_residual_network(x_train.shape[1:], 10, n, k)

    transformations_cls_out = Activation('softmax')(dense(transformer.n_transforms)(base_mdl.get_layer(index=-3).output))

    mdl = Model(base_mdl.input, [base_mdl.output, transformations_cls_out])

    mdl.compile(SGDTorch(lr=.1, momentum=0.9, nesterov=True), 'categorical_crossentropy', ['acc'])

    lr_cb = LearningRateScheduler(lambda e: 0.1 * (0.2 ** (e >= 160 and 3 or e >= 120 and 2 or e >= 60 and 1 or 0)))

    batch_size = 128
    mdl.fit_generator(
        generator=data_gen(x_train, y_train, batch_size=batch_size),
        steps_per_epoch=len(x_train) // batch_size,
        epochs=200,
        callbacks=[lr_cb]
    )
    mdl.save_weights('cifar10_WRN_doublehead-transformations_{}-{}.h5'.format(n, k))