Python keras.callbacks.Callback() Examples
The following are 30 code examples of keras.callbacks.Callback(), drawn from open-source projects. Each example lists its source file, originating project, and license.
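Every example below subclasses keras.callbacks.Callback, which works by overriding one or more of its hooks (on_train_begin, on_epoch_end, on_batch_end, and so on); Keras attaches the running model to the callback as self.model before training starts. As a point of reference, a minimal sketch (the class name and print format are illustrative, not taken from the examples):

from keras.callbacks import Callback

class EpochLossLogger(Callback):
    # Print the training loss after every epoch.
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        print('epoch %d: loss=%.4f' % (epoch + 1, logs.get('loss', float('nan'))))

A callback like this is passed to training as model.fit(..., callbacks=[EpochLossLogger()]).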
Example #1
Source File: callbacks.py From visual_turing_test-tutorial with MIT License
def __init__(self, filepath, epoch_interval, verbose=0):
    """
    In:
        filepath - formattable filepath; possibilities:
            * weights.{epoch:02d}
            * weights.{era:02d}
        epoch_interval - number of epochs that must be passed from the previous saving
        verbose - if nonzero then print out information on stdout; by default 0
    """
    super(KerasCallback, self).__init__()
    self.filepath = filepath
    self.epoch_interval = epoch_interval
    self.verbose = verbose
    self.era = 0
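The constructor above only stores configuration; the saving logic is not shown in this excerpt. A hedged sketch of what the matching on_epoch_end hook could look like, assuming the era counter is advanced elsewhere (the body is an illustration, not the project's actual code):

def on_epoch_end(self, epoch, logs=None):
    # Save only every `epoch_interval`-th epoch.
    if (epoch + 1) % self.epoch_interval == 0:
        path = self.filepath.format(epoch=epoch + 1, era=self.era)
        self.model.save_weights(path, overwrite=True)
        if self.verbose:
            print('Saved weights to %s' % path)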
Example #2
Source File: callbacks.py From visual_turing_test-tutorial with MIT License
def __init__(self, patience=0, reduce_rate=0.5, reduce_nb=10,
             is_early_stopping=True, verbose=1):
    """
    In:
        patience - number of beginning epochs without reduction; by default 0
        reduce_rate - multiplicative rate reducer; by default 0.5
        reduce_nb - maximal number of reductions performed; by default 10
        is_early_stopping - if true then early stopping is applied when
            reduce_nb is reached; by default True
        verbose - verbosity level; by default 1
    """
    super(KerasCallback, self).__init__()
    self.patience = patience
    self.wait = 0
    self.best_score = -1.
    self.reduce_rate = reduce_rate
    self.current_reduce_nb = 0
    self.reduce_nb = reduce_nb
    self.is_early_stopping = is_early_stopping
    self.verbose = verbose
    self.epsilon = 0.1e-10
Example #3
Source File: callbacks.py From visual_turing_test-tutorial with MIT License
def __init__(self, patience=0, reduce_rate=0.5, reduce_nb=10, verbose=1):
    """
    In:
        patience - number of epochs in stagnation; by default 0
        reduce_rate - multiplicative rate reducer; by default 0.5
        reduce_nb - maximal number of reductions performed; by default 10
        verbose - verbosity level; by default 1
    """
    super(KerasCallback, self).__init__()
    self.patience = patience
    self.wait = 0
    self.best_score = -1.
    self.reduce_rate = reduce_rate
    self.current_reduce_nb = 0
    self.reduce_nb = reduce_nb
    self.is_early_stopping = False
    self.verbose = verbose
    self.epsilon = 0.1e-10
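Examples #2 and #3 only show the state being initialized; the fields suggest a patience-based schedule that multiplies the learning rate by reduce_rate when the monitored score stagnates. A hedged sketch of such a hook, assuming the score is read from the Keras logs dict (the metric key and overall body are illustrative assumptions, not the tutorial's code):

import keras.backend as K

def on_epoch_end(self, epoch, logs=None):
    score = (logs or {}).get('val_acc', 0.)
    if score > self.best_score + self.epsilon:
        self.best_score = score          # improvement: reset stagnation tracking
        self.wait = 0
    elif epoch >= self.patience:
        if self.current_reduce_nb < self.reduce_nb:
            self.current_reduce_nb += 1  # shrink the learning rate multiplicatively
            lr = K.get_value(self.model.optimizer.lr)
            K.set_value(self.model.optimizer.lr, lr * self.reduce_rate)
        elif self.is_early_stopping:
            self.model.stop_training = True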
Example #4
Source File: train.py From landmark-recognition-challenge with GNU General Public License v3.0
def process_item_worker_triplet(worker_id, lock, shared_mem_X, shared_mem_y, jobs, results):
    # make sure augmentations are different for each worker
    np.random.seed()
    random.seed()
    while True:
        items, augs, training, predict = jobs.get()
        img_p1, one_hot_class_idx_p1, item_p1 = process_item(items[0], augs[0], training, predict)
        img_p2, one_hot_class_idx_p2, item_p2 = process_item(items[1], augs[1], training, predict)
        img_n1, one_hot_class_idx_n1, item_n1 = process_item(items[2], augs[2], training, predict)
        is_good_item = False
        if (one_hot_class_idx_p1 is not None) and (one_hot_class_idx_p2 is not None) \
                and (one_hot_class_idx_n1 is not None):
            lock.acquire()
            shared_mem_X[worker_id, ..., 0] = img_p1
            shared_mem_X[worker_id, ..., 1] = img_p2
            shared_mem_X[worker_id, ..., 2] = img_n1
            is_good_item = True
        results.put((worker_id, is_good_item, (item_p1, item_p2, item_n1)))

# Callback to monitor accuracy on a per-batch basis
Example #5
Source File: approximateMatch.py From twitter-adr-blstm with GNU General Public License v3.0
def __init__(self, valid_toks, valid_y, valid_x, idx2label,
             pred_dir='./predictions', fileprefix=''):
    super(Callback, self).__init__()
    self.valid_toks = valid_toks
    self.valid_y = valid_y
    self.valid_x = valid_x
    self.padlen = valid_x.shape[1]
    self.fileprefix = fileprefix
    self.idx2label = idx2label
    self.pred_dir = pred_dir
    try:
        os.makedirs(pred_dir)
    except:
        pass
Example #6
Source File: approximateMatch.py From twitter-adr-blstm with GNU General Public License v3.0
def __init__(self, valid_toks, valid_y, X_valid, padlen, idx2label,
             pred_dir='./predictions'):
    super(Callback, self).__init__()
    self.valid_toks = valid_toks
    self.valid_y = valid_y
    self.X_valid = X_valid
    self.padlen = padlen
    assert X_valid.shape[0] == padlen * len(valid_toks)
    self.window = X_valid.shape[1]
    self.idx2label = idx2label
    self.pred_dir = pred_dir
    try:
        os.makedirs(pred_dir)
    except:
        pass
Example #7
Source File: train.py From deconvfaces with MIT License
def __init__(self, output_dir, num_identities, batch_size=32, use_yale=False, use_jaffe=False):
    """ Constructor for a GenerateIntermediate object.
    Args:
        output_dir (str): Directory to save intermediate results in.
        num_identities (int): Number of identities in the training set.
    Args: (optional)
        batch_size (int): Batch size to use when generating images.
    """
    super(Callback, self).__init__()
    self.output_dir = output_dir
    self.num_identities = num_identities
    self.batch_size = batch_size
    self.use_yale = use_yale
    self.use_jaffe = use_jaffe
    self.parameters = dict()
    # Sweep through identities
    self.parameters['identity'] = np.eye(num_identities)
    if use_yale:
        # Use pose 0, lighting at 0deg azimuth and elevation
        self.parameters['pose'] = np.zeros((num_identities, NUM_YALE_POSES))
        self.parameters['lighting'] = np.zeros((num_identities, 4))
        for i in range(0, num_identities):
            self.parameters['pose'][i, 0] = 0
            self.parameters['lighting'][i, 1] = 1
            self.parameters['lighting'][i, 3] = 1
    else:
        # Make all have neutral expressions, front-facing
        self.parameters['emotion'] = np.empty((num_identities, Emotion.length()))
        self.parameters['orientation'] = np.zeros((num_identities, 2))
        for i in range(0, num_identities):
            self.parameters['emotion'][i, :] = Emotion.neutral
            self.parameters['orientation'][i, 1] = 1
Example #8
Source File: train.py From landmark-recognition-challenge with GNU General Public License v3.0
def reset_accuracy(self, group=-1, save=False):
    self.accuracy_reached = False
    self.last_accuracies = np.zeros(AccuracyReset.N_BATCHES)
    self.last_accuracies_i = 0
    if group != -1 and save:
        self.model.save(
            self.filepath.format(group=group, epoch=self.epoch + 1),
            overwrite=True)
    return

# Callback to monitor accuracy on a per-batch basis
Example #9
Source File: net.py From speechless with MIT License
def create_callbacks(self, callback: Callable[[], None],
                     tensor_board_log_directory: Path, net_directory: Path,
                     callback_step: int = 1, save_step: int = 1) -> List[Callback]:
    class CustomCallback(Callback):
        def on_epoch_end(self_callback, epoch, logs=()):
            if epoch % callback_step == 0:
                callback()
            if epoch % save_step == 0 and epoch > 0:
                mkdir(net_directory)
                self.predictive_net.save_weights(
                    str(net_directory / self.model_file_name(epoch)))

    tensorboard_if_running_tensorflow = [TensorBoard(
        log_dir=str(tensor_board_log_directory),
        write_images=True)] if backend.backend() == 'tensorflow' else []
    return tensorboard_if_running_tensorflow + [CustomCallback()]
Example #10
Source File: base.py From stacking with MIT License
def __init__(self, validation_data=(), interval=10):
    super(Callback, self).__init__()
    self.interval = interval
    self.X_val, self.y_val = validation_data
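A callback holding a validation set like this is typically paired with a hook that scores it every `interval` epochs. A hedged sketch of such a hook, assuming ROC-AUC is the metric of interest (the metric choice is an assumption, not taken from the stacking project):

from sklearn.metrics import roc_auc_score

def on_epoch_end(self, epoch, logs=None):
    if epoch % self.interval == 0:
        # Score the held-out set periodically rather than every epoch.
        y_pred = self.model.predict(self.X_val, verbose=0)
        score = roc_auc_score(self.y_val, y_pred)
        print('interval evaluation - epoch %d: AUC %.6f' % (epoch, score))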
Example #11
Source File: protocol_core.py From hyperparameter_hunter with MIT License
def set_dimensions(self):
    """Locate given hyperparameters that are `space` choice declarations and
    add them to :attr:`dimensions`"""
    all_dimension_choices = []

    #################### Remap Extra Objects ####################
    if self.module_name == "keras":
        from keras.initializers import Initializer as KerasInitializer
        from keras.callbacks import Callback as KerasCB

        self.init_iter_attrs.append(lambda _p, _k, _v: isinstance(_v, KerasInitializer))
        self.extra_iter_attrs.append(lambda _p, _k, _v: isinstance(_v, KerasCB))

    #################### Collect Choice Dimensions ####################
    init_dim_choices = get_choice_dimensions(self.model_init_params, self.init_iter_attrs)
    extra_dim_choices = get_choice_dimensions(self.model_extra_params, self.extra_iter_attrs)
    fe_dim_choices = get_choice_dimensions(self.feature_engineer, self.fe_iter_attrs)

    for (path, choice) in init_dim_choices:
        choice._name = ("model_init_params",) + path
        all_dimension_choices.append(choice)

    for (path, choice) in extra_dim_choices:
        choice._name = ("model_extra_params",) + path
        all_dimension_choices.append(choice)

    for (path, choice) in fe_dim_choices:
        choice._name = ("feature_engineer",) + path
        all_dimension_choices.append(choice)

    self.dimensions = all_dimension_choices

    if self.module_name == "keras":
        self.model_extra_params = link_choice_ids(
            self.dummy_layers,
            self.dummy_compile_params,
            self.model_extra_params,
            self.dimensions,
        )
Example #12
Source File: train.py From EAST with GNU General Public License v3.0
def __init__(self, validation_data, validation_log_dir, period=5):
    super(Callback, self).__init__()
    self.period = period
    self.validation_data = validation_data
    self.validation_log_dir = validation_log_dir
    self.val_writer = tf.summary.FileWriter(self.validation_log_dir)
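The FileWriter created above is the TF1-style summary writer. A hedged sketch of how a validation scalar could be pushed to it every `period` epochs; the tag name, the (inputs, targets) unpacking, and the loss computation are illustrative assumptions rather than the EAST project's code:

def on_epoch_end(self, epoch, logs=None):
    if (epoch + 1) % self.period == 0:
        x_val, y_val = self.validation_data   # assumed (inputs, targets) pair
        val_loss = self.model.evaluate(x_val, y_val, verbose=0)
        # Wrap the scalar in a TF1 Summary proto so TensorBoard can plot it.
        summary = tf.Summary(value=[
            tf.Summary.Value(tag='val_loss', simple_value=float(val_loss))])
        self.val_writer.add_summary(summary, epoch)
        self.val_writer.flush()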
Example #13
Source File: DEC.py From DEC-keras with MIT License
def pretrain(self, x, y=None, optimizer='adam', epochs=200, batch_size=256,
             save_dir='results/temp'):
    print('...Pretraining...')
    self.autoencoder.compile(optimizer=optimizer, loss='mse')

    csv_logger = callbacks.CSVLogger(save_dir + '/pretrain_log.csv')
    cb = [csv_logger]
    if y is not None:
        class PrintACC(callbacks.Callback):
            def __init__(self, x, y):
                self.x = x
                self.y = y
                super(PrintACC, self).__init__()

            def on_epoch_end(self, epoch, logs=None):
                if int(epochs / 10) != 0 and epoch % int(epochs / 10) != 0:
                    return
                feature_model = Model(self.model.input,
                                      self.model.get_layer(
                                          'encoder_%d' % (int(len(self.model.layers) / 2) - 1)).output)
                features = feature_model.predict(self.x)
                km = KMeans(n_clusters=len(np.unique(self.y)), n_init=20, n_jobs=4)
                y_pred = km.fit_predict(features)
                # print()
                print(' ' * 8 + '|==> acc: %.4f, nmi: %.4f <==|'
                      % (metrics.acc(self.y, y_pred), metrics.nmi(self.y, y_pred)))

        cb.append(PrintACC(x, y))

    # begin pretraining
    t0 = time()
    self.autoencoder.fit(x, x, batch_size=batch_size, epochs=epochs, callbacks=cb)
    print('Pretraining time: %ds' % round(time() - t0))
    self.autoencoder.save_weights(save_dir + '/ae_weights.h5')
    print('Pretrained weights are saved to %s/ae_weights.h5' % save_dir)
    self.pretrained = True
Example #14
Source File: train.py From CNN_keras with MIT License
def train(pic_folder, weight_folder):
    if not os.path.exists(weight_folder):
        os.makedirs(weight_folder)
    x_train, y_train, x_test, y_test = prepare_data(pic_folder)
    model = build_model()
    print('... compile models')
    model.compile(
        optimizer='adadelta',
        loss=['categorical_crossentropy'],
        metrics=['accuracy'],
    )
    print('... begin train')
    check_point = ModelCheckpoint(
        os.path.join(weight_folder, '{epoch:02d}.hdf5'))

    class TestAcc(Callback):
        def on_epoch_end(self, epoch, logs=None):
            weight_file = os.path.join(
                weight_folder, '{epoch:02d}.hdf5'.format(epoch=epoch + 1))
            model.load_weights(weight_file)
            out = model.predict(x_test, verbose=1)
            predict = np.array([np.argmax(i) for i in out])
            answer = np.array([np.argmax(i) for i in y_test])
            acc = np.sum(predict == answer) / len(predict)
            print('Single letter test accuracy: {:.2%}'.format(acc))
            print('Picture accuracy: {:.2%}'.format(np.power(acc, 5)))
            print('----------------------------------\n')

    model.fit(
        x_train, y_train, batch_size=128, epochs=100,
        validation_split=0.1, callbacks=[check_point, TestAcc()],
    )
Example #15
Source File: optimize.py From reversi-alpha-zero with MIT License
def training(self):
    self.compile_model()
    total_steps = self.config.trainer.start_total_steps
    save_model_callback = PerStepCallback(self.config.trainer.save_model_steps,
                                          self.save_current_model,
                                          self.config.trainer.wait_after_save_model_ratio)
    callbacks = [save_model_callback]  # type: list[Callback]
    tb_callback = None  # type: TensorBoardStepCallback
    if self.config.trainer.use_tensorboard:
        tb_callback = TensorBoardStepCallback(
            log_dir=self.config.resource.tensorboard_log_dir,
            logging_per_steps=self.config.trainer.logging_per_steps,
            step=total_steps,
        )
        callbacks.append(tb_callback)

    while True:
        self.load_play_data()
        if self.dataset_size < self.config.trainer.min_data_size_to_learn:
            logger.info(f"dataset_size={self.dataset_size} is less than "
                        f"{self.config.trainer.min_data_size_to_learn}")
            sleep(10)
            continue
        self.update_learning_rate(total_steps)
        total_steps += self.train_epoch(self.config.trainer.epoch_to_checkpoint, callbacks)
        self.count_up_training_count_and_delete_self_play_data_files()

    if tb_callback:  # This code is never reached. But potentially this is required.
        tb_callback.close()
Example #16
Source File: callback.py From CheXNet-Keras with MIT License
def __init__(self, sequence, class_names, weights_path, stats=None, workers=1):
    super(Callback, self).__init__()
    self.sequence = sequence
    self.workers = workers
    self.class_names = class_names
    self.weights_path = weights_path
    self.best_weights_path = os.path.join(
        os.path.split(weights_path)[0],
        f"best_{os.path.split(weights_path)[1]}",
    )
    self.best_auroc_log_path = os.path.join(
        os.path.split(weights_path)[0],
        "best_auroc.log",
    )
    self.stats_output_path = os.path.join(
        os.path.split(weights_path)[0], ".training_stats.json"
    )
    # for resuming previous training
    if stats:
        self.stats = stats
    else:
        self.stats = {"best_mean_auroc": 0}
    # aurocs log
    self.aurocs = {}
    for c in self.class_names:
        self.aurocs[c] = []
Example #17
Source File: callback.py From CheXNet-Keras with MIT License
def __init__(self, filepath, base_model, monitor='val_loss', verbose=0,
             save_best_only=False, save_weights_only=False,
             mode='auto', period=1):
    super(Callback, self).__init__()
    self.base_model = base_model
    self.monitor = monitor
    self.verbose = verbose
    self.filepath = filepath
    self.save_best_only = save_best_only
    self.save_weights_only = save_weights_only
    self.period = period
    self.epochs_since_last_save = 0

    if mode not in ['auto', 'min', 'max']:
        warnings.warn('ModelCheckpoint mode %s is unknown, '
                      'fallback to auto mode.' % (mode),
                      RuntimeWarning)
        mode = 'auto'

    if mode == 'min':
        self.monitor_op = np.less
        self.best = np.Inf
    elif mode == 'max':
        self.monitor_op = np.greater
        self.best = -np.Inf
    else:
        if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
            self.monitor_op = np.greater
            self.best = -np.Inf
        else:
            self.monitor_op = np.less
            self.best = np.Inf
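This is essentially Keras's own ModelCheckpoint with one twist: it checkpoints base_model rather than self.model, the usual workaround when training a multi-GPU wrapper but wanting the single-GPU weights on disk. A hedged, condensed sketch of the corresponding save step (the real hook likely carries more bookkeeping; this body is an illustration built from the fields above):

def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    self.epochs_since_last_save += 1
    if self.epochs_since_last_save >= self.period:
        self.epochs_since_last_save = 0
        filepath = self.filepath.format(epoch=epoch + 1, **logs)
        current = logs.get(self.monitor)
        if not self.save_best_only or (current is not None
                                       and self.monitor_op(current, self.best)):
            if current is not None:
                self.best = current
            # Save the wrapped base model, not the (possibly multi-GPU) training model.
            if self.save_weights_only:
                self.base_model.save_weights(filepath, overwrite=True)
            else:
                self.base_model.save(filepath, overwrite=True)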
Example #18
Source File: models.py From DiscriminativeActiveLearning with MIT License
def __init__(self, monitor='acc', threshold=0.98, verbose=0):
    super(Callback, self).__init__()
    self.monitor = monitor
    self.threshold = threshold
    self.verbose = verbose
    self.improved = 0
Example #19
Source File: 03_autoencoding_and_tsne.py From Convolutional-Autoencoder-Music-Similarity with MIT License
def __init__(self, monitor='val_loss', value=0.1, verbose=0):
    super(Callback, self).__init__()
    self.monitor = monitor
    self.value = value
    self.verbose = verbose
Example #20
Source File: DenseNet_CIFAR10.py From hacktoberfest2018 with GNU General Public License v3.0
def __init__(self, monitor='val_acc', mode='max', value=0.92, verbose=0):
    super(Callback, self).__init__()
    self.monitor = monitor
    self.value = value
    self.verbose = verbose
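Examples #18 through #20 all follow the same stop-on-threshold pattern: watch one metric and halt training once it crosses a fixed value. A hedged sketch of the shared hook, written for the 'max' case of Example #20 (a loss-style monitor as in Example #19 would flip the comparison; the body is an illustration, not any project's exact code):

def on_epoch_end(self, epoch, logs=None):
    current = (logs or {}).get(self.monitor)
    if current is None:
        return
    # 'max' mode: stop once the metric rises to `value` (e.g. val_acc >= 0.92).
    if current >= self.value:
        if self.verbose:
            print('Epoch %d: %s reached %.4f, stopping training'
                  % (epoch, self.monitor, current))
        self.model.stop_training = True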
Example #21
Source File: train.py From PJ_NLP with Apache License 2.0
def __init__(self, validation_generate, steps_per_epoch):
    super(Callback, self).__init__()
    self.validation_generate = validation_generate
    self.steps_per_epoch = steps_per_epoch
Example #22
Source File: callbacks.py From PSPNet-Keras-tensorflow with MIT License
def __init__(self, base_lr=0.01, max_epoch=150, power=0.9, verbose=1):
    super(Callback, self).__init__()
    self.max_epoch = max_epoch
    self.power = power
    self.verbose = verbose
    self.base_lr = base_lr
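The fields above parameterize the "poly" learning-rate policy common in PSPNet-style training: lr = base_lr * (1 - epoch/max_epoch)**power. A hedged sketch of the schedule applied at each epoch boundary (the hook body is an illustration of that formula, not the project's verbatim code):

import keras.backend as K

def on_epoch_begin(self, epoch, logs=None):
    # Poly decay: lr = base_lr * (1 - epoch/max_epoch) ** power
    lr = self.base_lr * (1.0 - float(epoch) / self.max_epoch) ** self.power
    K.set_value(self.model.optimizer.lr, lr)
    if self.verbose:
        print('Epoch %d: learning rate set to %.6f' % (epoch, lr))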
Example #23
Source File: training.py From deep_complex_networks with MIT License
def __init__(self, patience=float(50000), division_cst=10.0, epsilon=1e-03,
             verbose=1, epoch_checkpoints={41, 61}):
    super(Callback, self).__init__()
    self.patience = patience
    self.checkpoints = epoch_checkpoints
    self.wait = 0
    self.previous_score = 0.
    self.division_cst = division_cst
    self.epsilon = epsilon
    self.verbose = verbose
    self.iterations = 0
Example #24
Source File: roc_auc.py From deep-mil-for-whole-mammogram-classification with MIT License
def __init__(self, filepath, validation_data=(), interval=1, mymil=False):
    super(Callback, self).__init__()
    self.interval = interval
    self.filepath = filepath
    self.acc = 0
    self.X_val, self.y_val = validation_data
    self.mymil = mymil
Example #25
Source File: roc_auc.py From deep-mil-for-whole-mammogram-classification with MIT License
def __init__(self, filepath, validation_data=(), interval=1, mymil=False):
    super(Callback, self).__init__()
    self.interval = interval
    self.filepath = filepath
    self.f1 = 0
    self.X_val, self.y_val = validation_data
    self.mymil = mymil
Example #26
Source File: roc_auc.py From deep-mil-for-whole-mammogram-classification with MIT License
def __init__(self, filepath, validation_data=(), interval=1, mymil=False):
    super(Callback, self).__init__()
    self.interval = interval
    self.filepath = filepath
    self.reca = 0
    self.X_val, self.y_val = validation_data
    self.mymil = mymil
Example #27
Source File: roc_auc.py From deep-mil-for-whole-mammogram-classification with MIT License
def __init__(self, filepath, validation_data=(), interval=1, mymil=False):
    super(Callback, self).__init__()
    self.interval = interval
    self.prec = 0
    self.X_val, self.y_val = validation_data
    self.filepath = filepath
    self.mymil = mymil
Example #28
Source File: roc_auc.py From deep-mil-for-whole-mammogram-classification with MIT License
def __init__(self, filepath, validation_data=(), interval=1, mymil=False):
    super(Callback, self).__init__()
    self.interval = interval
    self.auc = 0
    self.X_val, self.y_val = validation_data
    self.filepath = filepath
    self.mymil = mymil
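Examples #24 through #28 are five variants of the same evaluation callback, differing only in the tracked metric (accuracy, F1, recall, precision, AUC) and therefore in which attribute stores the running best. A hedged sketch of the AUC variant's hook, assuming the model is checkpointed whenever the score improves (an illustration of the pattern, not the project's exact code):

from sklearn.metrics import roc_auc_score

def on_epoch_end(self, epoch, logs=None):
    if epoch % self.interval == 0:
        y_pred = self.model.predict(self.X_val, verbose=0)
        score = roc_auc_score(self.y_val, y_pred)
        if score > self.auc:  # new best: remember it and checkpoint the model
            self.auc = score
            self.model.save_weights(self.filepath, overwrite=True)
        print('epoch %d: val AUC %.6f (best %.6f)' % (epoch, score, self.auc))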
Example #29
Source File: libraries.py From vergeml with MIT License
def callback(env, display_progress, stats):
    from keras.callbacks import Callback

    class KerasCallback(Callback):

        def __init__(self, env, display_progress, stats):
            self.env = env
            self.display_progress = display_progress
            self.stats = stats
            self.callback = None
            self.current_epoch = 0
            self.current_step = 0

        def on_train_begin(self, logs=None):
            logs = KerasCallback._xform_logs(logs)
            self.callback = env.progress_callback(self.params['epochs'],
                                                  self.params['steps'],
                                                  self.display_progress,
                                                  self.stats)

        def on_train_end(self, logs=None):
            logs = KerasCallback._xform_logs(logs)
            self.callback(self.current_epoch, self.current_step, **logs)

        def on_epoch_begin(self, epoch, logs=None):
            logs = KerasCallback._xform_logs(logs)
            self.callback(self.current_epoch, self.current_step, **logs)

        def on_epoch_end(self, epoch, logs=None):
            logs = KerasCallback._xform_logs(logs)
            self.current_epoch += 1
            self.callback(self.current_epoch, self.current_step, **logs)

        def on_batch_begin(self, batch, logs=None):
            logs = KerasCallback._xform_logs(logs)
            self.callback(self.current_epoch, self.current_step, **logs)

        def on_batch_end(self, batch, logs=None):
            logs = KerasCallback._xform_logs(logs)
            self.current_step += 1
            self.callback(self.current_epoch, self.current_step, **logs)

        @staticmethod
        def _xform_logs(logs):
            from copy import deepcopy
            logs = deepcopy(logs or {})
            for k in ('size', 'batch', 'epoch'):
                if k in logs:
                    del logs[k]
            return {k.replace('_', '-'): v for k, v in logs.items()}

    return KerasCallback(env, display_progress, stats)
Example #30
Source File: retain_train.py From retain-keras with Apache License 2.0
def create_callbacks(model, data, ARGS):
    """Create the checkpoint and logging callbacks"""

    class LogEval(Callback):
        """Logging Callback"""

        def __init__(self, filepath, model, data, ARGS, interval=1):
            super(Callback, self).__init__()
            self.filepath = filepath
            self.interval = interval
            self.data_test, self.y_test = data
            self.generator = SequenceBuilder(data=self.data_test,
                                             target=self.y_test,
                                             batch_size=ARGS.batch_size,
                                             ARGS=ARGS,
                                             target_out=False)
            self.model = model

        def on_epoch_end(self, epoch, logs={}):
            # Compute ROC-AUC and average precision on the validation data
            # every `interval` epochs
            if epoch % self.interval == 0:
                # Compute predictions of the model
                y_pred = [x[-1] for x in
                          self.model.predict_generator(self.generator,
                                                       verbose=0,
                                                       use_multiprocessing=True,
                                                       workers=5,
                                                       max_queue_size=5)]
                score_roc = roc_auc_score(self.y_test, y_pred)
                score_pr = average_precision_score(self.y_test, y_pred)
                # Create the log file if it doesn't exist, otherwise append to it
                if os.path.exists(self.filepath):
                    append_write = 'a'
                else:
                    append_write = 'w'
                with open(self.filepath, append_write) as file_output:
                    file_output.write("\nEpoch: {:d}- ROC-AUC: {:.6f} ; PR-AUC: {:.6f}"
                                      .format(epoch, score_roc, score_pr))
                print("\nEpoch: {:d} - ROC-AUC: {:.6f} PR-AUC: {:.6f}"
                      .format(epoch, score_roc, score_pr))

    # Create callbacks
    if not os.path.exists(ARGS.directory):
        os.makedirs(ARGS.directory)
    checkpoint = ModelCheckpoint(filepath=ARGS.directory + '/weights.{epoch:02d}.hdf5')
    log = LogEval(ARGS.directory + '/log.txt', model, data, ARGS)
    return (checkpoint, log)
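A hedged usage sketch for the function above; the model, the (data_test, y_test) pair, the training generator, and the attributes on the parsed ARGS namespace are hypothetical stand-ins for what the rest of retain_train.py supplies, not verified call sites:

# Hypothetical wiring: all names below come from the surrounding script.
checkpoint, log = create_callbacks(model, (data_test, y_test), ARGS)
model.fit_generator(train_generator,
                    epochs=ARGS.epochs,  # assumed attribute on the parsed args
                    callbacks=[checkpoint, log])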