Python keras.callbacks.ModelCheckpoint() Examples

The following are 29 code examples of keras.callbacks.ModelCheckpoint(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.callbacks.
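Before the project examples, here is a minimal self-contained sketch of the callback's typical use; the toy model and data are placeholders, not taken from any example below:

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import ModelCheckpoint

# toy data and model, just to give the callback something to monitor
x = np.random.rand(100, 8)
y = np.random.randint(0, 2, size=(100, 1))
model = Sequential([Dense(4, activation='relu', input_shape=(8,)),
                    Dense(1, activation='sigmoid')])
model.compile(optimizer='adam', loss='binary_crossentropy')

# keep only the best weights (by validation loss) seen so far; the format
# fields in the filename are filled in at the end of every epoch
checkpoint = ModelCheckpoint('weights.{epoch:02d}-{val_loss:.2f}.hdf5',
                             monitor='val_loss',
                             save_best_only=True,
                             mode='min',
                             verbose=1)
model.fit(x, y, epochs=5, validation_split=0.2, callbacks=[checkpoint])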
Example #1
Source File: train.py    From DeepLearning-OCR with Apache License 2.0
def train(model, batch_size, nb_epoch, save_dir, train_data, val_data, char_set):
	X_train, y_train = train_data[0], train_data[1]
	sample_weight = get_sample_weight(y_train, char_set)
	print('X_train shape:', X_train.shape)
	print(X_train.shape[0], 'train samples')
	if not os.path.exists(save_dir):
		os.mkdir(save_dir)

	start_time = time.time()
	save_path = save_dir + 'weights.{epoch:02d}-{val_loss:.2f}.hdf5'
	check_pointer = ModelCheckpoint(save_path, 
		save_best_only=True)
	history = model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
		validation_data=val_data,  # validation_split is ignored when validation_data is supplied, so it is dropped here
		callbacks=[check_pointer],
		sample_weight=sample_weight
		)

	plot_loss_figure(history, save_dir + str(datetime.now()).split('.')[0].split()[1]+'.jpg')
	print('Training time (h):', (time.time() - start_time) / 3600)
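The save_path template above relies on ordinary Python string formatting: ModelCheckpoint substitutes the epoch counter and any logged metric (such as val_loss) into the placeholders at the end of each epoch. A quick illustration with made-up values:

template = 'weights.{epoch:02d}-{val_loss:.2f}.hdf5'
# with epoch 3 and a validation loss of 0.4217, the checkpoint file becomes:
print(template.format(epoch=3, val_loss=0.4217))  # weights.03-0.42.hdf5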
Example #2
Source File: train_audio.py    From AudioEmotion with MIT License
def train():
    model = create_model()
    model.compile(optimizer='adam',
                  loss=losses.sparse_categorical_crossentropy,
                  metrics=['accuracy'])
    checkpointer = callbacks.ModelCheckpoint(filepath="../Output/checkpoint.hdf5", verbose=1, save_best_only=True)
    x_train, x_test, y_train, y_test = load_audio_data()
    model.fit(x_train,
              y_train,
              epochs=1000,
              batch_size=1000,
              validation_split=0.2,
              callbacks=[checkpointer])
    results = model.evaluate(x_test, y_test)
    print('test_results: ', results)

    model.save(MODEL_FILE_PATH) 
Example #3
Source File: callbacks.py    From keras-bert-ner with MIT License
def NerCallbacks(id_to_tag, best_fit_params=None, mask_tag=None, log_path=None):
    """模型训练过程中的回调函数
    """
    callbacks = [Accuracy(id_to_tag, mask_tag, log_path)]
    if best_fit_params is not None:
        early_stopping = EarlyStopping(
            monitor="val_crf_accuracy",
            patience=best_fit_params.get("early_stop_patience"))
        reduce_lr_on_plateau = ReduceLROnPlateau(
            monitor="val_crf_accuracy", verbose=1, mode="max",
            factor=best_fit_params.get("reduce_lr_factor"),
            patience=best_fit_params.get("reduce_lr_patience"))
        model_check_point = ModelCheckpoint(
            best_fit_params.get("save_path"),
            monitor="val_crf_accuracy", verbose=2, mode="max", save_best_only=True)
        callbacks.extend([early_stopping, reduce_lr_on_plateau, model_check_point])
    return callbacks 
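Every value the function pulls from best_fit_params goes through .get(), so a hypothetical parameter dict covering all four keys might look like this (the values are illustrative only):

best_fit_params = {
    "early_stop_patience": 5,    # epochs without val_crf_accuracy improvement before stopping
    "reduce_lr_factor": 0.5,     # multiply the learning rate by this factor on a plateau
    "reduce_lr_patience": 2,     # epochs on a plateau before reducing the learning rate
    "save_path": "ner_best.h5",  # where ModelCheckpoint writes the best weights
}
callbacks = NerCallbacks(id_to_tag, best_fit_params=best_fit_params)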
Example #4
Source File: train.py    From PJ_NLP with Apache License 2.0
def train():
    # load data
    train_dataset = Dataset(training=True)
    dev_dataset = Dataset(training=False)

    # model
    MODEL = name_model[model_name]
    model = MODEL(train_dataset.vocab_size, conf.n_classes, train_dataset.emb_mat)

    # callback
    my_callback = MyCallback()
    f1 = F1(dev_dataset.gen_batch_data(), dev_dataset.steps_per_epoch)
    checkpointer = ModelCheckpoint('data/{}.hdf5'.format(model_name), save_best_only=True)
    early_stop = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')

    # train
    model.compile(optimizer=keras.optimizers.Adam(),
                  loss=keras.losses.categorical_crossentropy, metrics=['acc'])
    model.fit_generator(train_dataset.gen_batch_data(),
                        steps_per_epoch=train_dataset.steps_per_epoch,
                        verbose=0,
                        epochs=conf.epochs, callbacks=[my_callback, checkpointer, early_stop, f1])
    keras.models.save_model(model, conf.model_path.format(model_name)) 
Example #5
Source File: cnn_model_train.py    From Sign-Language-Interpreter-using-Deep-Learning with MIT License
def cnn_model():
	num_of_classes = get_num_of_classes()
	model = Sequential()
	model.add(Conv2D(16, (2,2), input_shape=(image_x, image_y, 1), activation='relu'))
	model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
	model.add(Conv2D(32, (3,3), activation='relu'))
	model.add(MaxPooling2D(pool_size=(3, 3), strides=(3, 3), padding='same'))
	model.add(Conv2D(64, (5,5), activation='relu'))
	model.add(MaxPooling2D(pool_size=(5, 5), strides=(5, 5), padding='same'))
	model.add(Flatten())
	model.add(Dense(128, activation='relu'))
	model.add(Dropout(0.2))
	model.add(Dense(num_of_classes, activation='softmax'))
	sgd = optimizers.SGD(lr=1e-2)
	model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
	filepath="cnn_model_keras2.h5"
	checkpoint1 = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
	callbacks_list = [checkpoint1]
	#from keras.utils import plot_model
	#plot_model(model, to_file='model.png', show_shapes=True)
	return model, callbacks_list 
Example #6
Source File: train.py    From MalConv-keras with MIT License
def train(model, max_len=200000, batch_size=64, verbose=True, epochs=100, save_path='../saved/', save_best=True):
    
    # callbacks
    ear = EarlyStopping(monitor='val_acc', patience=5)
    mcp = ModelCheckpoint(join(save_path, 'malconv.h5'), 
                          monitor="val_acc", 
                          save_best_only=save_best, 
                          save_weights_only=False)
    
    history = model.fit_generator(
        utils.data_generator(x_train, y_train, max_len, batch_size, shuffle=True),
        steps_per_epoch=len(x_train)//batch_size + 1,
        epochs=epochs, 
        verbose=verbose, 
        callbacks=[ear, mcp],
        validation_data=utils.data_generator(x_test, y_test, max_len, batch_size),
        validation_steps=len(x_test)//batch_size + 1)
    return history 
Example #7
Source File: train_model.py    From caption_generator with MIT License
def train_model(weight=None, batch_size=32, epochs=10):

    cg = caption_generator.CaptionGenerator()
    model = cg.create_model()

    if weight is not None:
        model.load_weights(weight)

    file_name = 'weights-improvement-{epoch:02d}.hdf5'
    checkpoint = ModelCheckpoint(file_name, monitor='loss', verbose=1, save_best_only=True, mode='min')
    callbacks_list = [checkpoint]
    model.fit_generator(cg.data_generator(batch_size=batch_size), steps_per_epoch=cg.total_samples // batch_size, epochs=epochs, verbose=2, callbacks=callbacks_list)
    try:
        model.save('Models/WholeModel.h5', overwrite=True)
        model.save_weights('Models/Weights.h5', overwrite=True)
    except Exception as e:
        print("Error in saving model:", e)
    print("Training complete...\n")
Example #8
Source File: finetuning.py    From DeepMoji with MIT License
def finetuning_callbacks(checkpoint_path, patience, verbose):
    """ Callbacks for model training.

    # Arguments:
        checkpoint_path: Where weight checkpoints should be saved.
        patience: Number of epochs with no improvement after which
            training will be stopped.
        verbose: Verbosity level; the callbacks print progress
            messages when verbose >= 2.

    # Returns:
        Array with training callbacks that can be passed straight into
        model.fit() or similar.
    """
    cb_verbose = (verbose >= 2)
    checkpointer = ModelCheckpoint(monitor='val_loss', filepath=checkpoint_path,
                                   save_best_only=True, verbose=cb_verbose)
    earlystop = EarlyStopping(monitor='val_loss', patience=patience,
                              verbose=cb_verbose)
    return [checkpointer, earlystop] 
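Since the docstring promises a list that can be passed straight into model.fit(), a hypothetical call site (the model, data, and checkpoint path are assumptions, not part of the project) could be:

callbacks = finetuning_callbacks('checkpoints/deepmoji.hdf5', patience=5, verbose=2)
model.fit(X_train, y_train, validation_split=0.1, epochs=50, callbacks=callbacks)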
Example #9
Source File: model.py    From ancient-Chinese-poem-generator with MIT License
def train(X,Y,file,load_path):
	# define model
	model = Sequential()
	model.add(LSTM(n_mmu, input_shape=(X.shape[1], X.shape[2]), return_sequences=True))
	model.add(Dropout(dropout))
	model.add(LSTM(n_mmu, return_sequences=(n_layer == 3)))  # keep the sequence output only when a third LSTM follows
	model.add(Dropout(dropout))
	if n_layer == 3:
		model.add(LSTM(n_mmu))
		model.add(Dropout(dropout))
	model.add(Dense(Y.shape[1], activation='softmax'))
	model.compile(loss='categorical_crossentropy', optimizer='adam')

	model.save(file + "/model-{}-{}.h5".format(n_mmu,dropout))  # persist the (still untrained) architecture and initial weights
	# define the checkpoint
	filepath=file + "/weights-improvement-{epoch:02d}-{loss:.4f}.hdf5"
	checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
	callbacks_list = [checkpoint]
	# loading
	if load_path != "":
		model.load_weights(load_path)
	# training
	model.fit(X, Y, epochs=epoch, batch_size=batch, callbacks=callbacks_list, validation_split=0.1)
Example #10
Source File: snapshot.py    From textcaps with MIT License
def get_callbacks(self, log, model_prefix='Model'):
        """
        Creates a list of callbacks that can be used during training to create a
        snapshot ensemble of the model.
        Args:
            model_prefix: prefix for the filename of the weights.
        Returns: list of callbacks [ModelCheckpoint, LearningRateScheduler,
                 SnapshotModelCheckpoint] plus the provided log callback, which can be passed to the 'fit' function
        """
        if not os.path.exists(self.save_dir+'/weights/'):
            os.makedirs(self.save_dir+'/weights/')

        callback_list = [callbacks.ModelCheckpoint(self.save_dir+"/weights/weights_{epoch:02d}.h5", monitor="val_capsnet_acc",
                                                    save_best_only=True, save_weights_only=False),
                         callbacks.LearningRateScheduler(schedule=self._cosine_anneal_schedule),
                         SnapshotModelCheckpoint(self.T, self.M, fn_prefix=self.save_dir+'/weights/%s' % model_prefix), log]

        return callback_list 
Example #11
Source File: baseline.py    From MELD with GNU General Public License v3.0
def train_model(self):

		checkpoint = ModelCheckpoint(self.PATH, monitor='val_loss', verbose=1, save_best_only=True, mode='auto')

		if self.modality == "audio":
			model = self.get_audio_model()
			model.compile(optimizer='adadelta', loss='categorical_crossentropy', sample_weight_mode='temporal')
		elif self.modality == "text":
			model = self.get_text_model()
			model.compile(optimizer='adadelta', loss='categorical_crossentropy', sample_weight_mode='temporal')
		elif self.modality == "bimodal":
			model = self.get_bimodal_model()
			model.compile(optimizer='adam', loss='categorical_crossentropy', sample_weight_mode='temporal')

		early_stopping = EarlyStopping(monitor='val_loss', patience=10)
		model.fit(self.train_x, self.train_y,
		                epochs=self.epochs,
		                batch_size=self.batch_size,
		                sample_weight=self.train_mask,
		                shuffle=True, 
		                callbacks=[early_stopping, checkpoint],
		                validation_data=(self.val_x, self.val_y, self.val_mask))

		self.test_model() 
Example #12
Source File: train.py    From YOLO-3D-Box with MIT License
def train(model, image_data, y_true, log_dir='logs/'):
    '''retrain/fine-tune the model'''
    model.compile(optimizer='adam', loss={
        # use custom yolo_loss Lambda layer.
        'yolo_loss': lambda y_true, y_pred: y_pred})

    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(log_dir + "ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5",
        monitor='val_loss', save_weights_only=True, save_best_only=True)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=1, mode='auto')

    model.fit([image_data, *y_true],
              np.zeros(len(image_data)),
              validation_split=.1,
              batch_size=32,
              epochs=30,
              callbacks=[logging, checkpoint, early_stopping])
    model.save_weights(log_dir + 'trained_weights.h5')
    # Further training. 
Example #13
Source File: train.py    From keras-ctpn with Apache License 2.0
def get_call_back():
    """
    Define the training callbacks.
    :return: list of callbacks
    """
    checkpoint = ModelCheckpoint(filepath='/tmp/ctpn.{epoch:03d}.h5',
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=False,
                                 save_weights_only=True,
                                 period=5)

    # reduce the learning rate when the monitored loss stops improving
    lr_reducer = ReduceLROnPlateau(monitor='loss',
                                   factor=0.1,
                                   cooldown=0,
                                   patience=10,
                                   min_lr=1e-4)
    log = TensorBoard(log_dir='log')
    return [lr_reducer, checkpoint, log] 
Example #14
Source File: train.py    From Dog-Cat-Classifier with Apache License 2.0
def train_model(model, X, X_test, Y, Y_test):
    checkpoints = []
    if not os.path.exists('Data/Checkpoints/'):
        os.makedirs('Data/Checkpoints/')
    checkpoints.append(ModelCheckpoint('Data/Checkpoints/best_weights.h5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1))
    checkpoints.append(TensorBoard(log_dir='Data/Checkpoints/logs', histogram_freq=0, write_graph=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None))

    # Data augmentation: generates extra training samples on the fly.
    # It improves generalization, at the cost of a longer training run.

    # If you don't want augmentation, use this instead:
    # model.fit(X, Y, batch_size=10, epochs=25, validation_data=(X_test, Y_test), shuffle=True, callbacks=checkpoints)

    from keras.preprocessing.image import ImageDataGenerator
    generated_data = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=0, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, vertical_flip=False)
    generated_data.fit(X)
    model.fit_generator(generated_data.flow(X, Y, batch_size=8), steps_per_epoch=X.shape[0] // 8, epochs=25, validation_data=(X_test, Y_test), callbacks=checkpoints)

    return model 
Example #15
Source File: run_utils.py    From deep-mlsa with Apache License 2.0
def get_callbacks(config_data, appendix=''):
    ret_callbacks = []
    model_stored = False
    callbacks = config_data['callbacks']
    if K._BACKEND == 'tensorflow':
        tensor_board = TensorBoard(log_dir=os.path.join('logging', config_data['tb_log_dir']), histogram_freq=10)
        ret_callbacks.append(tensor_board)
    for callback in callbacks:
        if callback['name'] == 'early_stopping':
            ret_callbacks.append(EarlyStopping(monitor=callback['monitor'], patience=callback['patience'], verbose=callback['verbose'], mode=callback['mode']))
        elif callback['name'] == 'model_checkpoit':
            model_stored = True
            path = config_data['output_path']
            basename = config_data['output_basename']
            base_path = os.path.join(path, basename)
            opath = os.path.join(base_path, 'best_model{}.h5'.format(appendix))
            save_best = bool(callback['save_best_only'])
            ret_callbacks.append(ModelCheckpoint(filepath=opath, verbose=callback['verbose'], save_best_only=save_best, monitor=callback['monitor'], mode=callback['mode']))
    return ret_callbacks, model_stored 
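The function reads only a handful of keys from config_data, so a hypothetical config fragment that exercises both branches might look like the following; note that the checkpoint entry has to use the misspelled name 'model_checkpoit', because that is the literal string the code compares against:

config_data = {
    'tb_log_dir': 'run1',
    'output_path': 'output',
    'output_basename': 'experiment',
    'callbacks': [
        {'name': 'early_stopping', 'monitor': 'val_loss',
         'patience': 5, 'verbose': 1, 'mode': 'min'},
        {'name': 'model_checkpoit',  # sic: matches the string checked above
         'monitor': 'val_loss', 'verbose': 1, 'mode': 'min',
         'save_best_only': 1},
    ],
}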
Example #16
Source File: deepae.py    From KATE with BSD 3-Clause "New" or "Revised" License
def fit(self, train_X, val_X, nb_epoch=50, batch_size=100, feature_weights=None):
        print('Training autoencoder')
        optimizer = Adadelta(lr=1.5)
        # optimizer = Adam()
        # optimizer = Adagrad()
        if feature_weights is None:
            self.autoencoder.compile(optimizer=optimizer, loss='binary_crossentropy') # kld, binary_crossentropy, mse
        else:
            print('Using weighted loss')
            self.autoencoder.compile(optimizer=optimizer, loss=weighted_binary_crossentropy(feature_weights)) # kld, binary_crossentropy, mse

        self.autoencoder.fit(train_X[0], train_X[1],
                        nb_epoch=nb_epoch,
                        batch_size=batch_size,
                        shuffle=True,
                        validation_data=(val_X[0], val_X[1]),
                        callbacks=[
                                    ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.01),
                                    EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, verbose=1, mode='auto'),
                                    # ModelCheckpoint(self.model_save_path, monitor='val_loss', save_best_only=True, verbose=0),
                        ]
                        )

        return self 
Example #17
Source File: trainer.py    From segmentation-unet-maskrcnn with MIT License
def train_net(weights_folder, logs_folder, progress_predict_dir, config, loss_mode):

    print("start train net")
    train_dataset = build_dataset(config.TRAIN_DIR)
    val_dataset = build_dataset(config.VAL_DIR)
    x_trn, y_trn = get_patches_dataset(train_dataset, config, shuffleOn=True, amt=config.AMT_TRAIN)
    x_val, y_val = get_patches_dataset(val_dataset, config, shuffleOn=False, amt=config.AMT_VAL)
    model = get_unet(config, loss_mode)
    os.makedirs(weights_folder, exist_ok=True)
    #model.load_weights('weights/unet_cl2_step0_e5_tr600_v600_jk0.6271')
    model_checkpoint = ModelCheckpoint(os.path.join(weights_folder,'unet_tmp.hdf5'), monitor='loss', save_best_only=True)
    tb_callback = TensorBoard(log_dir=logs_folder, histogram_freq=0, batch_size=config.BATCH_SIZE,
                              write_graph=True, write_grads=False, write_images=True,
                              embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)
    start_time = time.time()
    for i in range(config.N_STEPS):
        print("Step i", i)
        model.fit(x_trn, y_trn, batch_size=config.BATCH_SIZE, epochs=config.EPOCS, verbose=1, shuffle=True,
                  callbacks=[model_checkpoint, tb_callback], validation_data=(x_val, y_val))

        print("---  Training for %s seconds ---" % (time.time() - start_time))
        score, trs = calc_jacc_img_msk(model, x_trn, y_trn, config.BATCH_SIZE, config.NUM_CLASSES)
        print('train jk', score)

        score, trs = calc_jacc_img_msk(model, x_val, y_val, config.BATCH_SIZE, config.NUM_CLASSES)
        print('val jk', score)
        score_str = '%.4f' % score
        model_name = 'unet_cl{0}_step{1}_e{2}_tr{3}_v{4}_jk{5}'.format(config.NUM_CLASSES, i, config.EPOCS,
                                                                       config.AMT_TRAIN, config.AMT_VAL,score_str)
        print("Weights: ", model_name)
        model.save_weights(os.path.join(weights_folder, model_name))

        #if (i % 10 == 0):
        check_predict_gold(model, model_name, progress_predict_dir, config, loss_mode)
        check_predict_small_test(model, model_name, progress_predict_dir, config, loss_mode)

        #Get ready for next step
        del x_trn
        del y_trn
        x_trn, y_trn = get_patches_dataset(train_dataset, config, shuffleOn=True, amt=config.AMT_TRAIN)
    return model 
Example #18
Source File: punchline_extractor.py    From robotreviewer with GNU General Public License v3.0
def train_simple_inference_net(n_epochs=30):
    inf_net = SimpleInferenceNet()
    tr_ids, val_ids, te_ids = train_document_ids(), validation_document_ids(), test_document_ids()
    tr_ids = list(train_document_ids())
    train_Xy, inference_vectorizer = get_train_Xy(tr_ids, sections_of_interest=None, vocabulary_file=None, include_sentence_span_splits=False, include_raw_texts=True)

    X_k, y_k = make_Xy_inference(train_Xy, inf_net.bc)
    print("train data for inference task loaded!")

    val_Xy = get_Xy(val_ids, inference_vectorizer,  include_raw_texts=True)
    X_kv, y_kv = make_Xy_inference(val_Xy, inf_net.bc)
    print("val data loaded!")

    filepath="inference.weights.best.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
    callbacks_list = [checkpoint]

    with open("inference_model.json", "w") as outf:
        outf.write(inf_net.model.to_json())

    print("fitting inference model!")
    inf_net.model.fit(X_k, y_k, validation_data=(X_kv, y_kv), callbacks=callbacks_list, epochs=n_epochs) 
Example #19
Source File: model.py    From PyMLProjects with MIT License
def init_logging_callbacks(self,log_dir=LOG_DIR_ROOT):

		self.checkpoint = ModelCheckpoint(filepath="%s/weights-improvement-{epoch:02d}-{loss:.4f}.hdf5" % (log_dir),\
														monitor='loss',\
														verbose=1,\
														save_best_only=True,\
														mode='min')

		self.early_stopping = EarlyStopping(monitor='loss',\
													min_delta=0,\
													patience=PATIENCE,\
													verbose=0,\
													mode='auto')	

		now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
		log_dir = "{}/run/{}".format(log_dir, now)  # build on the log_dir argument instead of ignoring it
		self.tensorboard = TensorBoard(log_dir=log_dir,\
											write_graph=True,\
											write_images=True)
		
		self.callbacks = [self.early_stopping,\
								self.tensorboard,\
								self.checkpoint] 
Example #20
Source File: dl_image_toolbox_utils.py    From dataiku-contrib with Apache License 2.0
def threadsafe_generator(f):
    """A decorator that takes a generator function and makes it thread-safe.
    """
    def g(*a, **kw):
        return ThreadsafeIterator(f(*a, **kw))
    return g

###############################################################
## MODEL CHECKPOINT FOR MULTI GPU
## When using multiple GPUs, we need to save the base model,
## not the one defined by multi_gpu_model
## see example: https://keras.io/utils/#multi_gpu_model
## Therefore, to save the model after each epoch by leveraging 
## ModelCheckpoint callback, we need to adapt it to save the 
## base model. To do so, we pass the base model to the callback.
## Inspired by: 
##   https://github.com/keras-team/keras/issues/8463#issuecomment-345914612
############################################################### 
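The comment block describes the adaptation but stops short of showing it, so here is a minimal sketch in the spirit of the linked issue; the class name and the swap-on-save approach are illustrative, not this project's actual implementation:

from keras.callbacks import ModelCheckpoint

class MultiGpuModelCheckpoint(ModelCheckpoint):
    """A ModelCheckpoint variant that saves the base model instead of
    the wrapper returned by multi_gpu_model."""

    def __init__(self, filepath, base_model, **kwargs):
        super(MultiGpuModelCheckpoint, self).__init__(filepath, **kwargs)
        self.base_model = base_model

    def on_epoch_end(self, epoch, logs=None):
        # temporarily point self.model at the base model so the parent
        # class saves the single-GPU weights, then restore the wrapper
        wrapper = self.model
        self.model = self.base_model
        super(MultiGpuModelCheckpoint, self).on_epoch_end(epoch, logs)
        self.model = wrapper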
Example #21
Source File: chatbot.py    From Intelligent-Projects-Using-Python with MIT License
def train_model(self, model, X_train, X_test, y_train, y_test):
        input_y_train = self.include_start_token(y_train)
        print(input_y_train.shape)
        input_y_test = self.include_start_token(y_test)
        print(input_y_test.shape)
        early = EarlyStopping(monitor='val_loss', patience=10, mode='auto')

        checkpoint = ModelCheckpoint(self.outpath + 's2s_model_' + str(self.version) + '_.h5', monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
        lr_reduce = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2, verbose=0, mode='auto')
        model.fit([X_train, input_y_train], y_train,
                  epochs=self.epochs,
                  batch_size=self.batch_size,
                  validation_data=([X_test, input_y_test], y_test),
                  callbacks=[early, checkpoint, lr_reduce],
                  shuffle=True)
        return model 
Example #22
Source File: unet.py    From detect-cell-edge-use-unet with GNU General Public License v2.0
def train(self):
        print("loading data")
        imgs_train, imgs_mask_train, imgs_test = self.load_data()
        print("loading data done")
        model = self.get_unet()
        print("got unet")

        # the checkpoint saves both the model and its weights
        model_checkpoint = ModelCheckpoint('unet.hdf5', monitor='loss', verbose=1, save_best_only=True)
        print('Fitting model...')
        model.fit(imgs_train, imgs_mask_train, batch_size=1, epochs=10, verbose=1, shuffle=True,
                  callbacks=[model_checkpoint])

        print('predict test data')
        imgs_mask_test = model.predict(imgs_test, batch_size=1, verbose=1)
        np.save('imgs_mask_test.npy', imgs_mask_test) 
Example #23
Source File: textcaps_emnist_bal.py    From textcaps with MIT License
def train(model, data, args):
    """
    Training a CapsuleNet
    :param model: the CapsuleNet model
    :param data: a tuple containing training and testing data, like `((x_train, y_train), (x_test, y_test))`
    :param args: arguments
    :return: The trained model
    """
    (x_train, y_train), (x_test, y_test) = data

    log = callbacks.CSVLogger(args.save_dir + '/log.csv')
    checkpoint = callbacks.ModelCheckpoint(args.save_dir + '/weights-{epoch:02d}.h5', monitor='val_capsnet_acc',
                                           save_best_only=False, save_weights_only=True, verbose=1)
    lr_decay = callbacks.LearningRateScheduler(schedule=lambda epoch: args.lr * (args.lr_decay ** epoch))

    model.compile(optimizer=optimizers.Adam(lr=args.lr),
                  loss=[margin_loss, 'mse'],
                  loss_weights=[1., args.lam_recon],
                  metrics={'capsnet': 'accuracy'})

    def train_generator(x, y, batch_size, shift_fraction=0.):
        train_datagen = ImageDataGenerator(width_shift_range=shift_fraction,
                                           height_shift_range=shift_fraction)
        generator = train_datagen.flow(x, y, batch_size=batch_size)
        while True:
            x_batch, y_batch = next(generator)
            yield ([x_batch, y_batch], [y_batch, x_batch])

    model.fit_generator(generator=train_generator(x_train, y_train, args.batch_size, args.shift_fraction),
                        steps_per_epoch=int(y_train.shape[0] / args.batch_size),
                        epochs=args.epochs,
                        shuffle=True,
                        validation_data=[[x_test, y_test], [y_test, x_test]],
                        callbacks=snapshot.get_callbacks(log, model_prefix=model_prefix))

    model.save_weights(args.save_dir + '/trained_model.h5')
    print('Trained model saved to \'%s/trained_model.h5\'' % args.save_dir)

    return model 
Example #24
Source File: model.py    From WaterNet with MIT License
def train_model(model,
                features,
                labels,
                tile_size,
                model_id,
                nb_epoch=10,
                checkpoints=False,
                tensorboard=False):
    """Train a model with the given features and labels."""

    # The features and labels are a list of triples when passed
    # to the function. Each triple contains the tile and information
    # about its source image and its position in the source. To train
    # the model we extract just the tiles.
    X, y = get_matrix_form(features, labels, tile_size)

    X = normalise_input(X)

    # Directory which is used to store the model and its weights.
    model_dir = os.path.join(MODELS_DIR, model_id)

    checkpointer = None
    if checkpoints:
        checkpoints_file = os.path.join(model_dir, "weights.hdf5")
        checkpointer = ModelCheckpoint(checkpoints_file)

    tensorboarder = None
    if tensorboard:
        log_dir = os.path.join(TENSORBOARD_DIR, model_id)
        tensorboarder = TensorBoard(log_dir=log_dir)

    callbacks = [c for c in [checkpointer, tensorboarder] if c]

    print("Start training.")
    model.fit(X, y, nb_epoch=nb_epoch, callbacks=callbacks, validation_split=0.1)

    save_model(model, model_dir)
    return model 
Example #25
Source File: train_segmentation.py    From Person-Segmentation-Keras with Apache License 2.0
def main():
    nClasses = args.nClasses
    train_batch_size = 16
    val_batch_size = 16
    epochs = 50
    img_height = 256
    img_width = 256
    root_path = '../../datasets/segmentation/'
    mode = 'seg' if nClasses == 2 else 'parse'
    train_file = './data/{}_train.txt'.format(mode)
    val_file = './data/{}_test.txt'.format(mode)
    if args.model == 'unet':
        model = unet.Unet(nClasses, input_height=img_height, input_width=img_width)
    elif args.model == 'segnet':
        model = segnet.SegNet(nClasses, input_height=img_height, input_width=img_width)
    else:
        raise ValueError('Does not support {}, only supports unet and segnet now'.format(args.model))

    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=1e-4),
                  metrics=['accuracy'])
    model.summary()

    train = segdata_generator.generator(root_path, train_file, train_batch_size, nClasses, img_height, img_width)

    val = segdata_generator.generator(root_path, val_file, val_batch_size, nClasses, img_height, img_width, train=False)

    if not os.path.exists('./results/'):
        os.mkdir('./results')
    if not os.path.exists('./weights/'):
        os.mkdir('./weights')  # ModelCheckpoint needs the target directory to exist
    save_file = './weights/{}_seg_weights.h5'.format(args.model) if nClasses == 2 \
        else './weights/{}_parse_weights.h5'.format(args.model)
    checkpoint = ModelCheckpoint(save_file, monitor='val_acc', save_best_only=True, save_weights_only=True, verbose=1)
    history = model.fit_generator(train,
                                  steps_per_epoch=12706 // train_batch_size,
                                  validation_data=val,
                                  validation_steps=5000 // val_batch_size,
                                  epochs=epochs,
                                  callbacks=[checkpoint])
    plot_history(history, './results/', args.model)
    save_history(history, './results/', args.model) 
Example #26
Source File: lstm.py    From Classical-Piano-Composer with MIT License
def train(model, network_input, network_output):
    """ train the neural network """
    filepath = "weights-improvement-{epoch:02d}-{loss:.4f}-bigger.hdf5"
    checkpoint = ModelCheckpoint(
        filepath,
        monitor='loss',
        verbose=0,
        save_best_only=True,
        mode='min'
    )
    callbacks_list = [checkpoint]

    model.fit(network_input, network_output, epochs=200, batch_size=128, callbacks=callbacks_list) 
Example #27
Source File: nn.py    From mljar-supervised with MIT License
def fit(self, X, y, X_validation=None, y_validation=None, log_to_file=None):

        if self.model is None:
            self.create_model(input_dim=X.shape[1])
        
        batch_size = 1024
        if X.shape[0] < batch_size * 5:
            batch_size = 32
        
        self.model.fit(X, y, batch_size=batch_size, epochs=self.rounds, verbose=False)
        
        """
        # Experimental ...
        es = EarlyStopping(monitor="val_loss", mode="min", verbose=1, patience=50)
        mc = ModelCheckpoint(
            "best_model.h5",
            monitor="val_loss",
            mode="min",
            verbose=0,
            save_best_only=True,
        )
        self.model.fit(
            X,
            y,
            validation_data=(X_validation, y_validation),
            batch_size=4096,
            epochs=1000,
            verbose=False,
            callbacks=[es, mc],
        )
        self.model = load_model("best_model.h5")
        """ 
Example #28
Source File: model.py    From PyMLProjects with MIT License
def fit(self):
		print "[*] training model..." 
		self.checkpoint = ModelCheckpoint(filepath=self.filepath,monitor=self.monitor,verbose=self.verbose,save_best_only=self.save_best_only,mode=self.mode)
		self.model.fit(self.X,self.y,epochs=self.epochs,batch_size=self.batch_size,callbacks=[self.checkpoint]) 
Example #29
Source File: fit_train.py    From PyMLProjects with MIT License
def fit(dataset, model):
	X = dataset[:,0:9] # use the first 9 fields (columns 0-8) as inputs
	Y = dataset[:,9] # use the 10th field (column 9) as the binary label (1 or 0)

	filepath="9-5-1_weights-improvement-{epoch:02d}-{loss:.4f}.hdf5"
	checkpoint = ModelCheckpoint(filepath,monitor='loss',verbose=1,save_best_only=True,mode='min',period=120)
	callback_list = [checkpoint]


	model.fit(X, Y, epochs=800, batch_size=100, callbacks=callback_list)
	return model