Python tensorflow.keras.callbacks.LearningRateScheduler() Examples
The following are 7 code examples of tensorflow.keras.callbacks.LearningRateScheduler(), taken from open-source projects.
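LearningRateScheduler takes a schedule function that maps the epoch index (and, in TF 2.x, optionally the current learning rate) to a new learning rate, and applies it at the start of every epoch. Before the project examples, here is a minimal self-contained sketch; the toy model, data, and decay rule are illustrative only, not taken from any of the projects below:

import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import LearningRateScheduler

def schedule(epoch, lr):
    # Hold the initial rate for 10 epochs, then decay by 10% per epoch.
    return lr if epoch < 10 else lr * 0.9

# Illustrative toy model and data.
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer="adam", loss="mse")

x = np.random.rand(64, 4).astype("float32")
y = np.random.rand(64, 1).astype("float32")

# verbose=1 makes the callback print the rate it sets at each epoch.
model.fit(x, y, epochs=15,
          callbacks=[LearningRateScheduler(schedule, verbose=1)])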
Example #1
Source File: train.py From bcnn with MIT License
def train(weights_path, epochs, batch_size, initial_epoch,
          kl_start_epoch, kl_alpha_increase_per_epoch):
    """Trains a model."""
    print('loading data...')
    # Loads or creates training data.
    input_shape, train, valid, train_targets, valid_targets = get_train_data()

    print('getting model...')
    # Loads or creates model.
    model, checkpoint_path, kl_alpha = get_model(
        input_shape, scale_factor=len(train) / batch_size,
        weights_path=weights_path)

    # Sets callbacks.
    checkpointer = ModelCheckpoint(checkpoint_path, verbose=1,
                                   save_weights_only=True,
                                   save_best_only=True)
    scheduler = LearningRateScheduler(schedule)
    annealer = (Callback() if kl_alpha is None else
                AnnealingCallback(kl_alpha, kl_start_epoch,
                                  kl_alpha_increase_per_epoch))

    print('fitting model...')
    # Trains model.
    model.fit(train, train_targets, batch_size, epochs,
              initial_epoch=initial_epoch,
              callbacks=[checkpointer, scheduler, annealer],
              validation_data=(valid, valid_targets))
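The `schedule` function passed to LearningRateScheduler above is defined elsewhere in train.py and is not shown in this snippet. A hypothetical epoch-indexed stand-in compatible with the call might look like this; the decay form and constants are assumptions, not bcnn's actual schedule:

import math

# Hypothetical stand-in for the project's `schedule`; the real
# function in bcnn's train.py may differ.
def schedule(epoch):
    return 1e-3 * math.exp(-0.1 * epoch)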
Example #2
Source File: cnn_cifar_optuna_affinity.py From affinity-loss with MIT License
def train(lambd, sigma, n_centers, trial):
    K.clear_session()
    (X_train, y_train), (X_test, y_test) = inbalanced_cifar(200)
    model = create_models(sigma, n_centers)
    model.compile("adam", affinity_loss(lambd), [acc])

    tf.logging.set_verbosity(tf.logging.FATAL)  # suppress log output so it does not flood the console

    tpu_grpc_url = "grpc://" + os.environ["COLAB_TPU_ADDR"]
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(tpu_grpc_url)
    strategy = keras_support.TPUDistributionStrategy(tpu_cluster_resolver)
    model = tf.contrib.tpu.keras_to_tpu_model(model, strategy=strategy)

    scheduler = LearningRateScheduler(step_decay)
    f1 = F1Callback(model, X_test, y_test, trial)

    history = model.fit(X_train, y_train, callbacks=[scheduler, f1],
                        batch_size=640, epochs=100, verbose=0).history

    max_f1 = max(f1.f1_log)
    print(f"lambda:{lambd:.04}, sigma:{sigma:.04} n_centers:{n_centers} / f1 = {max_f1:.04}")
    return max_f1
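`step_decay` is likewise defined elsewhere in the project. A conventional step-decay schedule of the kind the name suggests is sketched below; the constants are assumptions, not values from the affinity-loss repository. Examples #3 and #4 below reuse the same `step_decay` callback.

import math

# Hypothetical step decay; the constants are assumptions, not taken
# from the affinity-loss repository.
def step_decay(epoch):
    initial_lr = 1e-3   # assumed starting rate
    drop = 0.5          # multiplicative drop at each step
    epochs_drop = 25    # epochs between drops
    return initial_lr * math.pow(drop, math.floor(epoch / epochs_drop))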
Example #3
Source File: cnn_cifar_softmax.py From affinity-loss with MIT License
def train(inbalance_size):
    (X_train, y_train), (X_test, y_test) = inbalanced_cifar(inbalance_size)
    y_train, y_test = y_train[:, :10], y_test[:, :10]
    model = create_models()
    model.compile("adam", "categorical_crossentropy", ["acc"])

    tf.logging.set_verbosity(tf.logging.FATAL)
    tpu_grpc_url = "grpc://" + os.environ["COLAB_TPU_ADDR"]
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(tpu_grpc_url)
    strategy = keras_support.TPUDistributionStrategy(tpu_cluster_resolver)
    model = tf.contrib.tpu.keras_to_tpu_model(model, strategy=strategy)

    scheduler = LearningRateScheduler(step_decay)
    f1 = F1Callback(model, X_test, y_test)

    history = model.fit(X_train, y_train, validation_data=(X_test, y_test),
                        callbacks=[scheduler, f1],
                        batch_size=640, epochs=100, verbose=0).history

    max_acc = max(history["val_acc"])
    max_f1 = max(f1.f1_log)
    print(f"{inbalance_size} {max_acc:.04} {max_f1:.04}")
Example #4
Source File: cnn_cifar_affinity.py From affinity-loss with MIT License
def train(inbalance_size):
    (X_train, y_train), (X_test, y_test) = inbalanced_cifar(inbalance_size)
    model = create_models()
    model.compile("adam", affinity_loss(0.43), [acc])

    tf.logging.set_verbosity(tf.logging.FATAL)
    tpu_grpc_url = "grpc://" + os.environ["COLAB_TPU_ADDR"]
    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(tpu_grpc_url)
    strategy = keras_support.TPUDistributionStrategy(tpu_cluster_resolver)
    model = tf.contrib.tpu.keras_to_tpu_model(model, strategy=strategy)

    scheduler = LearningRateScheduler(step_decay)
    f1 = F1Callback(model, X_test, y_test)

    history = model.fit(X_train, y_train, validation_data=(X_test, y_test),
                        callbacks=[scheduler, f1],
                        batch_size=640, epochs=100, verbose=0).history

    max_acc = max(history["val_acc"])
    max_f1 = max(f1.f1_log)
    print(f"{inbalance_size} {max_acc:.04} {max_f1:.04}")
Example #5
Source File: fcn-12.3.1.py From Advanced-Deep-Learning-with-Keras with MIT License
def train(self):
    """Train an FCN."""
    optimizer = Adam(lr=1e-3)
    loss = 'categorical_crossentropy'
    self.fcn.compile(optimizer=optimizer, loss=loss)

    log = "# of classes %d" % self.n_classes
    print_log(log, self.args.verbose)
    log = "Batch size: %d" % self.args.batch_size
    print_log(log, self.args.verbose)

    # Prepare callbacks for saving model weights
    # and the learning rate scheduler.
    # Model weights are saved when the test IoU is highest.
    # The learning rate decreases by 50% every 20 epochs
    # after the 40th epoch.
    accuracy = AccuracyCallback(self)
    scheduler = LearningRateScheduler(lr_scheduler)
    callbacks = [accuracy, scheduler]

    # Train the FCN network.
    self.fcn.fit(x=self.train_generator,
                 use_multiprocessing=False,
                 callbacks=callbacks,
                 epochs=self.args.epochs)
                 # workers=self.args.workers)
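`lr_scheduler` itself is not shown in this snippet, but the comment above pins down its intended behavior. A sketch consistent with it, starting from the 1e-3 the optimizer was compiled with:

# Sketch based on the comment above: start from 1e-3, then halve the
# rate every 20 epochs once epoch 40 is reached. The book's actual
# lr_scheduler may be implemented differently.
def lr_scheduler(epoch):
    lr = 1e-3
    if epoch >= 40:
        lr *= 0.5 ** ((epoch - 40) // 20 + 1)
    return lr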
Example #6
Source File: iic-13.5.1.py From Advanced-Deep-Learning-with-Keras with MIT License
def train(self):
    """Trains the model using the data generator, accuracy
    computation, and learning rate scheduler callbacks.
    """
    accuracy = AccuracyCallback(self)
    lr_scheduler = LearningRateScheduler(lr_schedule, verbose=1)
    callbacks = [accuracy, lr_scheduler]
    self._model.fit(x=self.train_gen,
                    use_multiprocessing=False,
                    epochs=self.args.epochs,
                    callbacks=callbacks,
                    shuffle=True)
Example #7
Source File: mine-13.8.1.py From Advanced-Deep-Learning-with-Keras with MIT License
def train(self):
    """Train MINE to estimate MI between X and Y
    (e.g., an MNIST image and its transformed version).
    """
    accuracy = AccuracyCallback(self)
    lr_scheduler = LearningRateScheduler(lr_schedule, verbose=1)
    callbacks = [accuracy, lr_scheduler]
    self._model.fit(x=self.train_gen,
                    use_multiprocessing=False,
                    epochs=self.args.epochs,
                    callbacks=callbacks,
                    shuffle=True)
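Examples #6 and #7 share the same `lr_schedule`, which is defined elsewhere in the book's code and not shown here. Because the callback is constructed with verbose=1, it prints the learning rate it sets at the start of each epoch. A hypothetical staged schedule consistent with the call:

# Hypothetical staged schedule; the actual lr_schedule in the book's
# code may use different breakpoints and rates.
def lr_schedule(epoch):
    lr = 1e-3
    if epoch >= 80:
        lr = 1e-5
    elif epoch >= 40:
        lr = 1e-4
    return lr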