Python tensorflow.keras.callbacks.ReduceLROnPlateau() Examples
The following are 10 code examples of tensorflow.keras.callbacks.ReduceLROnPlateau(), collected from open-source projects; the source file and license are noted above each example.
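Before the project-specific examples below, here is a minimal, self-contained sketch of the callback in isolation. The toy model, the random data, and the hyperparameter values (factor=0.5, patience=3, min_lr=1e-5) are illustrative choices, not taken from any of the projects.

import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import ReduceLROnPlateau

# Toy regression model and data, purely for demonstration.
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer="adam", loss="mse")

x = np.random.rand(256, 4)
y = np.random.rand(256, 1)

# Halve the learning rate whenever val_loss has not improved for 3 epochs,
# but never drop below 1e-5.
reduce_lr = ReduceLROnPlateau(monitor="val_loss", factor=0.5,
                              patience=3, min_lr=1e-5, verbose=1)

model.fit(x, y, validation_split=0.2, epochs=20, callbacks=[reduce_lr])

By default the callback monitors val_loss and infers the improvement direction automatically; several examples below monitor metrics such as val_dice or val_iou and pass mode="max" explicitly.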
Example #1
Source File: train.py From object-localization with MIT License
def main():
    model = create_model(trainable=TRAINABLE)
    model.summary()

    if TRAINABLE:
        model.load_weights(WEIGHTS)

    train_datagen = DataGenerator(TRAIN_CSV)
    validation_datagen = Validation(generator=DataGenerator(VALIDATION_CSV))

    optimizer = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    model.compile(loss=loss, optimizer=optimizer, metrics=[])

    checkpoint = ModelCheckpoint("model-{val_dice:.2f}.h5", monitor="val_dice", verbose=1,
                                 save_best_only=True, save_weights_only=True, mode="max")
    stop = EarlyStopping(monitor="val_dice", patience=PATIENCE, mode="max")
    # mode="max" because a higher Dice score is better; the LR is multiplied by 0.2
    # after 5 epochs without improvement, down to a floor of 1e-6.
    reduce_lr = ReduceLROnPlateau(monitor="val_dice", factor=0.2, patience=5,
                                  min_lr=1e-6, verbose=1, mode="max")

    model.fit_generator(generator=train_datagen,
                        epochs=EPOCHS,
                        callbacks=[validation_datagen, checkpoint, reduce_lr, stop],
                        workers=THREADS,
                        use_multiprocessing=MULTI_PROCESSING,
                        shuffle=True,
                        verbose=1)
Example #2
Source File: train.py From object-localization with MIT License
def main():
    model = create_model()

    train_datagen = DataGenerator(TRAIN_CSV)
    validation_datagen = Validation(generator=DataGenerator(VALIDATION_CSV))

    optimizer = Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    model.compile(loss={"coords": log_mse, "classes": focal_loss()},
                  loss_weights={"coords": 1, "classes": 1},
                  optimizer=optimizer, metrics=[])

    checkpoint = ModelCheckpoint("model-{val_iou:.2f}.h5", monitor="val_iou", verbose=1,
                                 save_best_only=True, save_weights_only=True, mode="max")
    stop = EarlyStopping(monitor="val_iou", patience=PATIENCE, mode="max")
    reduce_lr = ReduceLROnPlateau(monitor="val_iou", factor=0.2, patience=10,
                                  min_lr=1e-7, verbose=1, mode="max")

    model.summary()

    model.fit_generator(generator=train_datagen,
                        epochs=EPOCHS,
                        callbacks=[validation_datagen, checkpoint, reduce_lr, stop],
                        workers=THREADS,
                        use_multiprocessing=MULTI_PROCESSING,
                        shuffle=True,
                        verbose=1)
Example #3
Source File: train.py From object-localization with MIT License
def main():
    model = create_model()
    model.summary()

    train_datagen = DataGenerator(TRAIN_CSV)
    validation_datagen = Validation(generator=DataGenerator(VALIDATION_CSV))

    model.compile(loss="mean_squared_error", optimizer="adam", metrics=[])

    checkpoint = ModelCheckpoint("model-{val_iou:.2f}.h5", monitor="val_iou", verbose=1,
                                 save_best_only=True, save_weights_only=True, mode="max")
    stop = EarlyStopping(monitor="val_iou", patience=PATIENCE, mode="max")
    reduce_lr = ReduceLROnPlateau(monitor="val_iou", factor=0.2, patience=10,
                                  min_lr=1e-7, verbose=1, mode="max")

    model.fit_generator(generator=train_datagen,
                        epochs=EPOCHS,
                        callbacks=[validation_datagen, checkpoint, reduce_lr, stop],
                        workers=THREADS,
                        use_multiprocessing=MULTI_PROCESSING,
                        shuffle=True,
                        verbose=1)
Example #4
Source File: test_multinetwork.py From timeserio with MIT License
def _callbacks(
        self, *,
        es_params={'patience': 20, 'monitor': 'val_loss'},
        lr_params={'monitor': 'val_loss', 'patience': 4, 'factor': 0.2}
):
    early_stopping = EarlyStopping(**es_params)
    learning_rate_reduction = ReduceLROnPlateau(**lr_params)
    return {
        'forecaster': [],
        'embedder': [],
        'combined': [early_stopping, learning_rate_reduction]
    }
Example #5
Source File: train.py From object-localization with MIT License
def main():
    model = create_model(trainable=TRAINABLE)
    model.summary()

    if TRAINABLE:
        model.load_weights(WEIGHTS)

    train_datagen = DataGenerator(TRAIN_CSV)
    val_generator = DataGenerator(VALIDATION_CSV, rnd_rescale=False, rnd_multiply=False,
                                  rnd_crop=False, rnd_flip=False, debug=False)
    validation_datagen = Validation(generator=val_generator)

    # Use a 10x smaller learning rate when fine-tuning pre-trained weights.
    learning_rate = LEARNING_RATE
    if TRAINABLE:
        learning_rate /= 10

    optimizer = SGD(lr=learning_rate, decay=LR_DECAY, momentum=0.9, nesterov=False)
    model.compile(loss=detection_loss(), optimizer=optimizer, metrics=[])

    checkpoint = ModelCheckpoint("model-{val_iou:.2f}.h5", monitor="val_iou", verbose=1,
                                 save_best_only=True, save_weights_only=True, mode="max")
    stop = EarlyStopping(monitor="val_iou", patience=PATIENCE, mode="max")
    reduce_lr = ReduceLROnPlateau(monitor="val_iou", factor=0.6, patience=5,
                                  min_lr=1e-6, verbose=1, mode="max")

    model.fit_generator(generator=train_datagen,
                        epochs=EPOCHS,
                        callbacks=[validation_datagen, checkpoint, reduce_lr, stop],
                        workers=THREADS,
                        use_multiprocessing=MULTITHREADING,
                        shuffle=True,
                        verbose=1)
Example #6
Source File: train.py From bootcamp with Apache License 2.0
def fit_model_softmax(dsm: DeepSpeakerModel, kx_train, ky_train, kx_test, ky_test,
                      batch_size=BATCH_SIZE, max_epochs=1000, initial_epoch=0):
    checkpoint_name = dsm.m.name + '_checkpoint'
    checkpoint_filename = os.path.join(CHECKPOINTS_SOFTMAX_DIR, checkpoint_name + '_{epoch}.h5')
    checkpoint = ModelCheckpoint(monitor='val_accuracy', filepath=checkpoint_filename, save_best_only=True)

    # If the accuracy does not increase by 0.1% over 20 epochs, we stop the training.
    early_stopping = EarlyStopping(monitor='val_accuracy', min_delta=0.001, patience=20, verbose=1, mode='max')

    # If the accuracy does not increase over 10 epochs, we reduce the learning rate by half.
    reduce_lr = ReduceLROnPlateau(monitor='val_accuracy', factor=0.5, patience=10, min_lr=0.0001, verbose=1)

    # Truncate both sets so their length is a multiple of the batch size.
    max_len_train = len(kx_train) - len(kx_train) % batch_size
    kx_train = kx_train[0:max_len_train]
    ky_train = ky_train[0:max_len_train]
    max_len_test = len(kx_test) - len(kx_test) % batch_size
    kx_test = kx_test[0:max_len_test]
    ky_test = ky_test[0:max_len_test]

    dsm.m.fit(x=kx_train,
              y=ky_train,
              batch_size=batch_size,
              epochs=initial_epoch + max_epochs,
              initial_epoch=initial_epoch,
              verbose=1,
              shuffle=True,
              validation_data=(kx_test, ky_test),
              callbacks=[early_stopping, reduce_lr, checkpoint])
Example #7
Source File: test_multinetwork.py From timeserio with MIT License
def _callbacks(
        self, *,
        lr_params=dict(monitor='loss', patience=1, factor=0.01)
):
    learning_rate_reduction = ReduceLROnPlateau(**lr_params)
    return {
        'forecaster': [learning_rate_reduction],
    }
Example #8
Source File: kashgari_intent_classifier.py From rasa_nlu_gq with Apache License 2.0
def train(self, training_data, cfg, **kwargs):
    classifier_model = eval("clf." + self.classifier_model)

    epochs = self.component_config.get('epochs')
    batch_size = self.component_config.get('batch_size')
    validation_split = self.component_config.get('validation_split')
    patience = self.component_config.get('patience')
    factor = self.component_config.get('factor')
    verbose = self.component_config.get('verbose')

    X, Y = [], []
    for msg in training_data.intent_examples:
        X.append(self.tokenizer.tokenize(msg.text))
        Y.append(msg.get('intent'))

    train_x, validate_x, train_y, validate_y = train_test_split(
        X, Y, test_size=validation_split, random_state=100)

    self.bert_embedding.processor.add_bos_eos = False
    self.model = classifier_model(self.bert_embedding)

    checkpoint = ModelCheckpoint(
        'intent_weights.h5',
        monitor='val_loss',
        save_best_only=True,
        save_weights_only=False,
        verbose=verbose)
    early_stopping = EarlyStopping(
        monitor='val_loss',
        patience=patience)
    reduce_lr = ReduceLROnPlateau(
        monitor='val_loss',
        factor=factor,
        patience=patience,
        verbose=verbose)

    self.model.fit(
        train_x,
        train_y,
        validate_x,
        validate_y,
        epochs=epochs,
        batch_size=batch_size,
        callbacks=[checkpoint, early_stopping, reduce_lr]
    )
Example #9
Source File: kashgari_entity_extractor.py From rasa_nlu_gq with Apache License 2.0
def train(self, training_data, cfg, **kwargs):
    labeling_model = eval("labeling." + self.labeling_model)

    epochs = self.component_config.get('epochs')
    batch_size = self.component_config.get('batch_size')
    validation_split = self.component_config.get('validation_split')
    patience = self.component_config.get('patience')
    factor = self.component_config.get('factor')
    verbose = self.component_config.get('verbose')

    filtered_entity_examples = self.filter_trainable_entities(training_data.training_examples)
    X, Y = self._create_dataset(filtered_entity_examples)

    train_x, validate_x, train_y, validate_y = train_test_split(
        X, Y, test_size=validation_split, random_state=100)

    self.model = labeling_model(self.bert_embedding)

    checkpoint = ModelCheckpoint(
        'entity_weights.h5',
        monitor='val_loss',
        save_best_only=True,
        save_weights_only=False,
        verbose=verbose)
    early_stopping = EarlyStopping(
        monitor='val_loss',
        patience=patience)
    reduce_lr = ReduceLROnPlateau(
        monitor='val_loss',
        factor=factor,
        patience=patience,
        verbose=verbose)

    self.model.fit(
        train_x,
        train_y,
        validate_x,
        validate_y,
        epochs=epochs,
        batch_size=batch_size,
        callbacks=[checkpoint, early_stopping, reduce_lr]
    )
Example #10
Source File: train.py From keras-mobile-detectnet with MIT License
def main(batch_size: int = 24,
         epochs: int = 384,
         train_path: str = 'train',
         val_path: str = 'val',
         weights=None,
         workers: int = 8):

    # We use an extra input during training to discount bounding box loss when a class is not present in an image.
    discount_input = Input(shape=(7, 7), name='discount')

    keras_model = MobileDetectNetModel.complete_model(extra_inputs=[discount_input])
    keras_model.summary()

    if weights is not None:
        keras_model.load_weights(weights, by_name=True)

    train_seq = MobileDetectNetSequence(train_path, stage="train", batch_size=batch_size)
    val_seq = MobileDetectNetSequence(val_path, stage="val", batch_size=batch_size)

    callbacks = []

    def region_loss(classes):
        def loss_fn(y_true, y_pred):
            # Don't penalize bounding box errors when there is no object present
            return 10 * (classes * K.abs(y_pred[:, :, :, 0] - y_true[:, :, :, 0]) +
                         classes * K.abs(y_pred[:, :, :, 1] - y_true[:, :, :, 1]) +
                         classes * K.abs(y_pred[:, :, :, 2] - y_true[:, :, :, 2]) +
                         classes * K.abs(y_pred[:, :, :, 3] - y_true[:, :, :, 3]))
        return loss_fn

    keras_model.compile(optimizer=Nadam(lr=0.001),
                        loss=['mean_absolute_error', region_loss(discount_input), 'binary_crossentropy'])

    filepath = "weights-{epoch:02d}-{val_loss:.4f}-multi-gpu.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
    callbacks.append(checkpoint)

    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_lr=0.00001, verbose=1)
    callbacks.append(reduce_lr)

    try:
        os.mkdir('logs')
    except FileExistsError:
        pass

    tensorboard = TensorBoard(log_dir='logs/%s' % time.strftime("%Y-%m-%d_%H-%M-%S"))
    callbacks.append(tensorboard)

    keras_model.fit_generator(train_seq,
                              validation_data=val_seq,
                              epochs=epochs,
                              steps_per_epoch=np.ceil(len(train_seq) / batch_size),
                              validation_steps=np.ceil(len(val_seq) / batch_size),
                              callbacks=callbacks,
                              use_multiprocessing=True,
                              workers=workers,
                              shuffle=True)