Python tensorflow.keras.callbacks.EarlyStopping() Examples
The following are 11 code examples of tensorflow.keras.callbacks.EarlyStopping(), taken from open-source projects; each example lists its source file, project, and license.
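Before the project examples, here is a minimal, self-contained sketch of typical EarlyStopping usage. The tiny model, synthetic data, and parameter choices are illustrative assumptions, not taken from any project below.

import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping

# Synthetic data and a tiny model, purely for illustration.
x = np.random.rand(256, 10).astype("float32")
y = np.random.randint(0, 2, size=(256, 1))

model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation="relu", input_shape=(10,)),
    tf.keras.layers.Dense(1, activation="sigmoid"),
])
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

# Stop once val_loss has failed to improve by at least min_delta for `patience`
# consecutive epochs, and roll back to the best weights seen during training.
early_stop = EarlyStopping(monitor="val_loss", min_delta=1e-3, patience=5,
                           restore_best_weights=True, verbose=1)

model.fit(x, y, validation_split=0.2, epochs=100,
          callbacks=[early_stop], verbose=0)

The project examples below follow the same pattern, usually pairing EarlyStopping with ModelCheckpoint and ReduceLROnPlateau on a shared monitored metric.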
Example #1
Source File: deep_classifier.py From nlp-journey with Apache License 2.0
def train(self, batch_size=512, epochs=20):
    model = self.build_model()
    # early_stop is used together with the checkpoint, so the saved model is the one with the lowest val_loss
    early_stop = EarlyStopping(patience=3, verbose=1)
    checkpoint = ModelCheckpoint(os.path.join(self.model_path, 'weights.{epoch:03d}-{val_loss:.3f}.h5'),
                                 verbose=1,
                                 monitor='val_loss',
                                 save_best_only=True)
    history = model.fit(self.x_train,
                        self.y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=1,
                        callbacks=[checkpoint, early_stop],
                        validation_data=(self.x_test, self.y_test))
    plot(history)
    return model
Example #2
Source File: train.py From object-localization with MIT License
def main():
    model = create_model(trainable=TRAINABLE)
    model.summary()

    if TRAINABLE:
        model.load_weights(WEIGHTS)

    train_datagen = DataGenerator(TRAIN_CSV)
    validation_datagen = Validation(generator=DataGenerator(VALIDATION_CSV))

    optimizer = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    model.compile(loss=loss, optimizer=optimizer, metrics=[])

    checkpoint = ModelCheckpoint("model-{val_dice:.2f}.h5", monitor="val_dice", verbose=1,
                                 save_best_only=True, save_weights_only=True, mode="max")
    # mode="max": a larger val_dice is better, so "no improvement" means no increase.
    stop = EarlyStopping(monitor="val_dice", patience=PATIENCE, mode="max")
    reduce_lr = ReduceLROnPlateau(monitor="val_dice", factor=0.2, patience=5,
                                  min_lr=1e-6, verbose=1, mode="max")

    model.fit_generator(generator=train_datagen,
                        epochs=EPOCHS,
                        callbacks=[validation_datagen, checkpoint, reduce_lr, stop],
                        workers=THREADS,
                        use_multiprocessing=MULTI_PROCESSING,
                        shuffle=True,
                        verbose=1)
Example #3
Source File: train.py From object-localization with MIT License
def main():
    model = create_model()

    train_datagen = DataGenerator(TRAIN_CSV)
    validation_datagen = Validation(generator=DataGenerator(VALIDATION_CSV))

    optimizer = Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    model.compile(loss={"coords": log_mse, "classes": focal_loss()},
                  loss_weights={"coords": 1, "classes": 1},
                  optimizer=optimizer, metrics=[])

    checkpoint = ModelCheckpoint("model-{val_iou:.2f}.h5", monitor="val_iou", verbose=1,
                                 save_best_only=True, save_weights_only=True, mode="max")
    stop = EarlyStopping(monitor="val_iou", patience=PATIENCE, mode="max")
    reduce_lr = ReduceLROnPlateau(monitor="val_iou", factor=0.2, patience=10,
                                  min_lr=1e-7, verbose=1, mode="max")

    model.summary()

    model.fit_generator(generator=train_datagen,
                        epochs=EPOCHS,
                        callbacks=[validation_datagen, checkpoint, reduce_lr, stop],
                        workers=THREADS,
                        use_multiprocessing=MULTI_PROCESSING,
                        shuffle=True,
                        verbose=1)
Example #4
Source File: train.py From object-localization with MIT License
def main():
    model = create_model()
    model.summary()

    train_datagen = DataGenerator(TRAIN_CSV)
    validation_datagen = Validation(generator=DataGenerator(VALIDATION_CSV))

    model.compile(loss="mean_squared_error", optimizer="adam", metrics=[])

    checkpoint = ModelCheckpoint("model-{val_iou:.2f}.h5", monitor="val_iou", verbose=1,
                                 save_best_only=True, save_weights_only=True, mode="max")
    stop = EarlyStopping(monitor="val_iou", patience=PATIENCE, mode="max")
    reduce_lr = ReduceLROnPlateau(monitor="val_iou", factor=0.2, patience=10,
                                  min_lr=1e-7, verbose=1, mode="max")

    model.fit_generator(generator=train_datagen,
                        epochs=EPOCHS,
                        callbacks=[validation_datagen, checkpoint, reduce_lr, stop],
                        workers=THREADS,
                        use_multiprocessing=MULTI_PROCESSING,
                        shuffle=True,
                        verbose=1)
Example #5
Source File: test_multinetwork.py From timeserio with MIT License
def _callbacks(
    self,
    *,
    es_params={'patience': 20, 'monitor': 'val_loss'},
    lr_params={'monitor': 'val_loss', 'patience': 4, 'factor': 0.2}
):
    early_stopping = EarlyStopping(**es_params)
    learning_rate_reduction = ReduceLROnPlateau(**lr_params)
    return {
        'forecaster': [],
        'embedder': [],
        'combined': [early_stopping, learning_rate_reduction]
    }
Example #6
Source File: siamese_similarity.py From nlp-journey with Apache License 2.0
def train(self, weights_only=True, call_back=False):
    model = self._build_model()
    if call_back:
        early_stopping = EarlyStopping(monitor='val_loss', patience=30)
        stamp = 'lstm_%d' % self.n_hidden
        checkpoint_dir = os.path.join(
            self.model_path, 'checkpoints/' + str(int(time.time())) + '/')
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        bst_model_path = checkpoint_dir + stamp + '.h5'
        if weights_only:
            model_checkpoint = ModelCheckpoint(
                bst_model_path, save_best_only=True, save_weights_only=True)
        else:
            model_checkpoint = ModelCheckpoint(
                bst_model_path, save_best_only=True)
        tensor_board = TensorBoard(
            log_dir=checkpoint_dir + "logs/{}".format(time.time()))
        callbacks = [early_stopping, model_checkpoint, tensor_board]
    else:
        callbacks = None
    model_trained = model.fit([self.x_train['left'], self.x_train['right']],
                              self.y_train,
                              batch_size=self.batch_size,
                              epochs=self.epochs,
                              validation_data=([self.x_val['left'], self.x_val['right']], self.y_val),
                              verbose=1,
                              callbacks=callbacks)
    if weights_only and not call_back:
        model.save_weights(os.path.join(self.model_path, 'weights_only.h5'))
    elif not weights_only and not call_back:
        model.save(os.path.join(self.model_path, 'model.h5'))
    self._save_config()
    plot(model_trained)
    return model
Example #7
Source File: train.py From object-localization with MIT License
def main():
    model = create_model(trainable=TRAINABLE)
    model.summary()

    if TRAINABLE:
        model.load_weights(WEIGHTS)

    train_datagen = DataGenerator(TRAIN_CSV)
    val_generator = DataGenerator(VALIDATION_CSV, rnd_rescale=False, rnd_multiply=False,
                                  rnd_crop=False, rnd_flip=False, debug=False)
    validation_datagen = Validation(generator=val_generator)

    learning_rate = LEARNING_RATE
    if TRAINABLE:
        learning_rate /= 10

    optimizer = SGD(lr=learning_rate, decay=LR_DECAY, momentum=0.9, nesterov=False)
    model.compile(loss=detection_loss(), optimizer=optimizer, metrics=[])

    checkpoint = ModelCheckpoint("model-{val_iou:.2f}.h5", monitor="val_iou", verbose=1,
                                 save_best_only=True, save_weights_only=True, mode="max")
    stop = EarlyStopping(monitor="val_iou", patience=PATIENCE, mode="max")
    reduce_lr = ReduceLROnPlateau(monitor="val_iou", factor=0.6, patience=5,
                                  min_lr=1e-6, verbose=1, mode="max")

    model.fit_generator(generator=train_datagen,
                        epochs=EPOCHS,
                        callbacks=[validation_datagen, checkpoint, reduce_lr, stop],
                        workers=THREADS,
                        use_multiprocessing=MULTITHREADING,
                        shuffle=True,
                        verbose=1)
Example #8
Source File: train.py From bootcamp with Apache License 2.0
def fit_model_softmax(dsm: DeepSpeakerModel, kx_train, ky_train, kx_test, ky_test,
                      batch_size=BATCH_SIZE, max_epochs=1000, initial_epoch=0):
    checkpoint_name = dsm.m.name + '_checkpoint'
    checkpoint_filename = os.path.join(CHECKPOINTS_SOFTMAX_DIR, checkpoint_name + '_{epoch}.h5')
    checkpoint = ModelCheckpoint(monitor='val_accuracy', filepath=checkpoint_filename, save_best_only=True)

    # if the accuracy does not increase by 0.1% over 20 epochs, we stop the training.
    early_stopping = EarlyStopping(monitor='val_accuracy', min_delta=0.001, patience=20, verbose=1, mode='max')

    # if the accuracy does not increase over 10 epochs, we reduce the learning rate by half.
    reduce_lr = ReduceLROnPlateau(monitor='val_accuracy', factor=0.5, patience=10, min_lr=0.0001, verbose=1)

    max_len_train = len(kx_train) - len(kx_train) % batch_size
    kx_train = kx_train[0:max_len_train]
    ky_train = ky_train[0:max_len_train]
    max_len_test = len(kx_test) - len(kx_test) % batch_size
    kx_test = kx_test[0:max_len_test]
    ky_test = ky_test[0:max_len_test]

    dsm.m.fit(x=kx_train,
              y=ky_train,
              batch_size=batch_size,
              epochs=initial_epoch + max_epochs,
              initial_epoch=initial_epoch,
              verbose=1,
              shuffle=True,
              validation_data=(kx_test, ky_test),
              callbacks=[early_stopping, reduce_lr, checkpoint])
Example #9
Source File: train.py From TF.Keras-Commonly-used-models with Apache License 2.0
def train():
    with open('config.json', 'r') as f:
        cfg = json.load(f)

    save_dir = cfg['save_dir']
    shape = (int(cfg['height']), int(cfg['width']), 3)
    n_class = int(cfg['class_number'])
    batch = int(cfg['batch'])

    if not os.path.exists(save_dir):
        os.mkdir(save_dir)

    if cfg['model'] == 'large':
        from mobilenet_v3_large import MobileNetV3_Large
        model = MobileNetV3_Large(shape, n_class).build()
    if cfg['model'] == 'small':
        from mobilenet_v3_small import MobileNetV3_Small
        model = MobileNetV3_Small(shape, n_class).build()

    opt = Adam(lr=float(cfg['learning_rate']))
    # Note: newer tf.keras versions report this metric as 'val_accuracy' rather than 'val_acc'.
    earlystop = EarlyStopping(monitor='val_acc', patience=5, verbose=0, mode='auto')
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])

    train_generator, validation_generator, count1, count2 = generate(batch, shape[:2], cfg['train_dir'], cfg['eval_dir'])

    hist = model.fit_generator(
        train_generator,
        validation_data=validation_generator,
        steps_per_epoch=count1 // batch,
        validation_steps=count2 // batch,
        epochs=cfg['epochs'],
        callbacks=[earlystop])

    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv(os.path.join(save_dir, 'hist.csv'), encoding='utf-8', index=False)
    model.save_weights(os.path.join(save_dir, '{}_weights.h5'.format(cfg['model'])))
Example #10
Source File: kashgari_intent_classifier.py From rasa_nlu_gq with Apache License 2.0
def train(self, training_data, cfg, **kwargs):
    classifier_model = eval("clf." + self.classifier_model)

    epochs = self.component_config.get('epochs')
    batch_size = self.component_config.get('batch_size')
    validation_split = self.component_config.get('validation_split')
    patience = self.component_config.get('patience')
    factor = self.component_config.get('factor')
    verbose = self.component_config.get('verbose')

    X, Y = [], []
    for msg in training_data.intent_examples:
        X.append(self.tokenizer.tokenize(msg.text))
        Y.append(msg.get('intent'))

    train_x, validate_x, train_y, validate_y = train_test_split(
        X, Y, test_size=validation_split, random_state=100)

    self.bert_embedding.processor.add_bos_eos = False
    self.model = classifier_model(self.bert_embedding)

    checkpoint = ModelCheckpoint(
        'intent_weights.h5',
        monitor='val_loss',
        save_best_only=True,
        save_weights_only=False,
        verbose=verbose)
    early_stopping = EarlyStopping(
        monitor='val_loss',
        patience=patience)
    reduce_lr = ReduceLROnPlateau(
        monitor='val_loss',
        factor=factor,
        patience=patience,
        verbose=verbose)

    self.model.fit(
        train_x,
        train_y,
        validate_x,
        validate_y,
        epochs=epochs,
        batch_size=batch_size,
        callbacks=[checkpoint, early_stopping, reduce_lr]
    )
Example #11
Source File: kashgari_entity_extractor.py From rasa_nlu_gq with Apache License 2.0
def train(self, training_data, cfg, **kwargs):
    labeling_model = eval("labeling." + self.labeling_model)

    epochs = self.component_config.get('epochs')
    batch_size = self.component_config.get('batch_size')
    validation_split = self.component_config.get('validation_split')
    patience = self.component_config.get('patience')
    factor = self.component_config.get('factor')
    verbose = self.component_config.get('verbose')

    filtered_entity_examples = self.filter_trainable_entities(training_data.training_examples)
    X, Y = self._create_dataset(filtered_entity_examples)

    train_x, validate_x, train_y, validate_y = train_test_split(
        X, Y, test_size=validation_split, random_state=100)

    self.model = labeling_model(self.bert_embedding)

    checkpoint = ModelCheckpoint(
        'entity_weights.h5',
        monitor='val_loss',
        save_best_only=True,
        save_weights_only=False,
        verbose=verbose)
    early_stopping = EarlyStopping(
        monitor='val_loss',
        patience=patience)
    reduce_lr = ReduceLROnPlateau(
        monitor='val_loss',
        factor=factor,
        patience=patience,
        verbose=verbose)

    self.model.fit(
        train_x,
        train_y,
        validate_x,
        validate_y,
        epochs=epochs,
        batch_size=batch_size,
        callbacks=[checkpoint, early_stopping, reduce_lr]
    )