Python keras.backend.set_value() Examples
The following are 30 code examples of keras.backend.set_value().
You may also want to check out all available functions and classes of the module keras.backend.
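As a quick orientation before the examples: K.set_value(x, value) assigns a numpy value to a backend variable in place, and K.get_value(x) reads it back. A minimal standalone sketch (not taken from any of the projects below):

import numpy as np
from keras import backend as K

# Create a backend variable and overwrite its contents in place.
v = K.variable(np.zeros((2, 2)))
K.set_value(v, np.ones((2, 2)))
print(K.get_value(v))  # -> [[1. 1.], [1. 1.]]

The most common use of this pair, and the one most examples below illustrate, is changing an optimizer hyperparameter such as model.optimizer.lr in the middle of training.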
Example #1
Source File: clr.py From keras-one-cycle with MIT License
def on_batch_end(self, epoch, logs=None):
    logs = logs or {}
    self.clr_iterations += 1
    new_lr = self.compute_lr()
    self.history.setdefault('lr', []).append(
        K.get_value(self.model.optimizer.lr))
    K.set_value(self.model.optimizer.lr, new_lr)

    if self._update_momentum:
        if not hasattr(self.model.optimizer, 'momentum'):
            raise ValueError("Momentum can be updated only on SGD optimizer!")
        new_momentum = self.compute_momentum()
        self.history.setdefault('momentum', []).append(
            K.get_value(self.model.optimizer.momentum))
        K.set_value(self.model.optimizer.momentum, new_momentum)

    for k, v in logs.items():
        self.history.setdefault(k, []).append(v)
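The same get/set pattern drops into any custom callback. A hypothetical minimal example of the technique (the class below is illustrative, not part of keras-one-cycle):

from keras import backend as K
from keras.callbacks import Callback

class HalveEveryTenEpochs(Callback):
    """Hypothetical callback: halve the learning rate every 10 epochs."""
    def on_epoch_begin(self, epoch, logs=None):
        if epoch > 0 and epoch % 10 == 0:
            old_lr = K.get_value(self.model.optimizer.lr)
            K.set_value(self.model.optimizer.lr, old_lr * 0.5)

# Usage: model.fit(x_train, y_train, epochs=50, callbacks=[HalveEveryTenEpochs()])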
Example #2
Source File: lr_finder.py From keras_lr_finder with MIT License
def on_batch_end(self, batch, logs):
    # Log the learning rate
    lr = K.get_value(self.model.optimizer.lr)
    self.lrs.append(lr)

    # Log the loss
    loss = logs['loss']
    self.losses.append(loss)

    # Check whether the loss got too large or NaN
    if batch > 5 and (math.isnan(loss) or loss > self.best_loss * 4):
        self.model.stop_training = True
        return

    if loss < self.best_loss:
        self.best_loss = loss

    # Increase the learning rate for the next batch
    lr *= self.lr_mult
    K.set_value(self.model.optimizer.lr, lr)
Example #3
Source File: snapshot.py From keras-snapshot_ensembles with MIT License
def on_epoch_end(self, epoch, logs=None):
    if epoch == 0 or (epoch + 1) % self.period != 0:
        return  # Only save at the end of a cycle, not at the beginning

    if not os.path.exists(self.folder_path):
        os.makedirs(self.folder_path)

    cycle = int(epoch / self.period)
    cycle_str = str(cycle).rjust(self.nb_digits, '0')
    self.model.save_weights(self.path_format.format(cycle_str), overwrite=True)

    # Resetting the learning rate
    K.set_value(self.model.optimizer.lr, self.base_lr)

    if self.verbose > 0:
        print('\nEpoch %05d: Reached %d-th cycle, saving model.' % (epoch, cycle))
Example #4
Source File: rnnrbm.py From keras_bn_library with MIT License
def reset_states(self):
    assert self.stateful, 'Layer must be stateful.'
    input_shape = self.input_spec[0].shape
    if not input_shape[0]:
        raise Exception('If a RNN is stateful, a complete '
                        'input_shape must be provided (including batch size).')

    if hasattr(self, 'states'):
        K.set_value(self.states[0],
                    np.zeros((input_shape[0], self.hidden_recurrent_dim)))
        K.set_value(self.states[1],
                    np.zeros((input_shape[0], self.input_dim)))
        K.set_value(self.states[2],
                    np.zeros((input_shape[0], self.hidden_dim)))
    else:
        self.states = [K.zeros((input_shape[0], self.hidden_recurrent_dim)),
                       K.zeros((input_shape[0], self.input_dim)),
                       K.zeros((input_shape[0], self.hidden_dim))]
Example #5
Source File: recurrent.py From keras_bn_library with MIT License
def reset_states(self):
    assert self.stateful, 'Layer must be stateful.'
    input_shape = self.input_spec[0].shape
    if not input_shape[0]:
        raise ValueError('If a RNN is stateful, it needs to know '
                         'its batch size. Specify the batch size '
                         'of your input tensors: \n'
                         '- If using a Sequential model, '
                         'specify the batch size by passing '
                         'a `batch_input_shape` '
                         'argument to your first layer.\n'
                         '- If using the functional API, specify '
                         'the time dimension by passing a '
                         '`batch_shape` argument to your Input layer.')

    if hasattr(self, 'states'):
        K.set_value(self.states[0],
                    np.zeros((input_shape[0], self.input_dim)))
        K.set_value(self.states[1],
                    np.zeros((input_shape[0], self.output_dim)))
    else:
        self.states = [K.zeros((input_shape[0], self.input_dim)),
                       K.zeros((input_shape[0], self.output_dim))]
Example #6
Source File: training.py From deep_complex_networks with MIT License
def on_epoch_end(self, epoch, logs={}):
    current_score = logs.get('val_acc')
    divide = False
    if (epoch + 1) in self.checkpoints:
        divide = True
    elif (current_score >= self.previous_score - self.epsilon and
          current_score <= self.previous_score + self.epsilon):
        self.wait += 1
        if self.wait == self.patience:
            divide = True
    else:
        self.wait = 0
    if divide:
        K.set_value(self.model.optimizer.lr,
                    K.get_value(self.model.optimizer.lr) / self.division_cst)
        self.wait = 0
        if self.verbose > 0:
            L.getLogger("train").info(
                "Current learning rate is divided by " + str(self.division_cst) +
                " and its value is equal to: " +
                str(K.get_value(self.model.optimizer.lr)))
    self.previous_score = current_score

    #
    # Also evaluate performance on test set at each epoch end.
    #
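For comparison, Keras ships a built-in callback that implements the same idea: keras.callbacks.ReduceLROnPlateau watches a monitored metric and multiplies the learning rate by a fixed factor once the metric stops improving for a set number of epochs.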
Example #7
Source File: callbacks.py From training_results_v0.6 with Apache License 2.0
def _average_metrics_in_place(self, logs):
    logs = logs or {}
    reduced_logs = {}
    # Reduce every metric among workers. Sort metrics by name
    # to ensure consistent order.
    for metric, value in sorted(logs.items()):
        if metric not in self.variables:
            self.variables[metric], self.allreduce_ops[metric] = \
                self._make_variable(metric, value)
        else:
            K.set_value(self.variables[metric], value)
        reduced_logs[metric] = \
            K.get_session().run(self.allreduce_ops[metric])
    # Override the reduced values back into logs dictionary
    # for other callbacks to use.
    for metric, value in reduced_logs.items():
        logs[metric] = value
Example #8
Source File: DenseNet_CIFAR10.py From hacktoberfest2018 with GNU General Public License v3.0
def on_epoch_end(self, epoch, logs={}):
    current = logs.get(self.monitor)
    lr = self.model.optimizer.lr
    # If you want to apply decay.
    if k.get_value(self.model.optimizer.iterations) == 100:
        k.set_value(self.model.optimizer.lr, 0.01)
        print("Updating Learning rate", 0.01)
        print("Current learning rate", k.get_value(self.model.optimizer.lr))
    if current is None:
        warnings.warn("Early stopping requires %s available!" % self.monitor,
                      RuntimeWarning)
    # if k.get_value(self.model.optimizer.iterations) % 5 == 0:
    #     save_to_drive(k.get_value(self.model.optimizer.iterations))
    if current >= self.value:
        if self.verbose > 0:
            print("Epoch %05d: early stopping THR" % epoch)
        self.model.stop_training = True


# Load CIFAR10 Data
Example #9
Source File: optimize.py From connect4-alpha-zero with MIT License
def update_learning_rate(self, total_steps):
    # The deepmind paper says
    # ~400k: 1e-2
    # 400k~600k: 1e-3
    # 600k~: 1e-4

    if total_steps < 500:
        lr = 1e-2
    elif total_steps < 2000:
        lr = 1e-3
    elif total_steps < 9000:
        lr = 1e-4
    else:
        lr = 2.5e-5  # means (1e-4 / 4): the paper batch size=2048, ours is 512
    K.set_value(self.optimizer.lr, lr)
    logger.debug(f"total step={total_steps}, set learning rate to {lr}")
Example #10
Source File: dnn_train.py From FractalAI with GNU Affero General Public License v3.0
def train_on_batch(self, action, n_repeat_action: int = 1, *args, **kwargs):
    losses, metrics = [], []
    # for i in range(n_repeat_action):
    data = self.get_next_batch(train=True)
    if not self.is_int(n_repeat_action):
        K.set_value(self.model.optimizer.lr, n_repeat_action)
    rate = n_repeat_action if self.is_int(n_repeat_action) else 10
    for i in range(rate):
        while len(data) < self.batch_size:
            data = self.get_next_batch(train=True)
        X, y = list(zip(*data))
        self.X, self.y = np.array(X), np.array(y)
        # Dynamic learning rate
        loss, metric = self.model.train_on_batch(self.X, self.y, *args, **kwargs)
        losses.append(loss)
        metrics.append(metric)
    old_weights = self.model.get_weights()
    new_weights = self.update_weights(old_weights, action, n_repeat_action)
    self.model.set_weights(new_weights)
    self.loss, self.metric = np.mean(losses), np.mean(metrics)
    return self.metric  # / self.loss
Example #11
Source File: layers.py From keras-gp with MIT License
def batch_sz(self, value):
    K.set_value(self._batch_sz, value)
Example #12
Source File: metrics.py From keras-metrics with MIT License
def reset_states(self):
    """Reset the state of the metric."""
    K.set_value(self.tp, 0)
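For reset_states to have anything to reset, self.tp must be a backend variable created up front, typically in the metric's constructor. A hypothetical skeleton of that setup (illustrative only, not the actual keras-metrics class):

from keras import backend as K

class TruePositives:
    """Hypothetical stateful metric skeleton."""
    def __init__(self):
        # Running counter kept as a backend variable so it persists
        # across batches and can be zeroed between epochs.
        self.tp = K.variable(0, dtype='int32')

    def reset_states(self):
        """Reset the state of the metric."""
        K.set_value(self.tp, 0)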
Example #13
Source File: layers.py From keras-gp with MIT License
def batch_ids(self, value):
    K.set_value(self._batch_ids, value)
Example #14
Source File: callbacks.py From bi-lstm-crf with Apache License 2.0
def on_train_begin(self, logs=None):
    '''Initialize the learning rate to the minimum value at the start of training.'''
    logs = logs or {}
    K.set_value(self.model.optimizer.lr, self.min_lr)
Example #15
Source File: callbacks.py From bi-lstm-crf with Apache License 2.0
def on_epoch_end(self, epoch, logs=None):
    lr = float(K.get_value(self.model.optimizer.lr))
    watch_value = logs.get(self.watch)
    if watch_value is None:
        raise ValueError(f"Watched value '{self.watch}' doesn't exist")
    if lr <= self.min_lr:
        return
    self.history_cache.put(watch_value)
    if watch_value > self.history_cache.mean():
        lr = self.schedule(epoch, lr)
        print(f"Update learning rate: {lr}")
        K.set_value(self.model.optimizer.lr, lr)
Example #16
Source File: callbacks.py From bi-lstm-crf with Apache License 2.0
def on_train_begin(self, logs=None):
    logs = logs or {}
    K.set_value(self.model.optimizer.lr, self.max_lr)
Example #17
Source File: utils.py From kaggle-freesound-audio-tagging with MIT License
def on_batch_end(self, epoch, logs=None):
    logs = logs or {}
    self.trn_iterations += 1
    self.clr_iterations += 1
    ktf.set_value(self.model.optimizer.lr, self.clr())
    self.history.setdefault('lr', []).append(ktf.get_value(self.model.optimizer.lr))
    self.history.setdefault('iterations', []).append(self.trn_iterations)
    for k, v in logs.items():
        self.history.setdefault(k, []).append(v)
Example #18
Source File: training.py From aiexamples with Apache License 2.0
def on_batch_begin(self, batch, logs=None):
    self.batch = batch
    steps_per_epoch = self.params['steps']
    iteration = self.epoch * steps_per_epoch + batch
    lr = self.compute_learning_rate(iteration, self.methode)
    K.set_value(self.model.optimizer.lr, lr)
Example #19
Source File: recurrent_highway_networks.py From recurrentshop with MIT License
def on_epoch_begin(self, epoch, logs=None):
    if epoch > 5:
        lr = self.lr / 1.04
        K.set_value(self.model.optimizer.lr, lr)


###########################################
# Build Model
###########################################
Example #20
Source File: save_load_utils_test.py From keras-contrib with MIT License
def test_save_and_load_all_weights():
    '''
    Test save_all_weights and load_all_weights.
    Save and load optimizer and model weights but not configuration.
    '''
    def make_model():
        _x = Input((10,))
        _y = Dense(10)(_x)
        _m = Model(_x, _y)
        _m.compile('adam', 'mean_squared_error')
        _m._make_train_function()
        return _m

    # make a model
    m1 = make_model()
    # set weights
    w1 = m1.layers[1].kernel  # dense layer
    w1value = K.get_value(w1)
    w1value[0, 0:4] = [1, 3, 3, 7]
    K.set_value(w1, w1value)
    # set optimizer weights
    ow1 = m1.optimizer.weights[3]  # momentum weights
    ow1value = K.get_value(ow1)
    ow1value[0, 0:3] = [4, 2, 0]
    K.set_value(ow1, ow1value)
    # save all weights
    save_all_weights(m1, 'model.h5')
    # new model
    m2 = make_model()
    # load all weights
    load_all_weights(m2, 'model.h5')
    # check weights
    assert_allclose(K.get_value(m2.layers[1].kernel)[0, 0:4], [1, 3, 3, 7])
    # check optimizer weights
    assert_allclose(K.get_value(m2.optimizer.weights[3])[0, 0:3], [4, 2, 0])
    os.remove('model.h5')
Example #21
Source File: cyclical_learning_rate.py From keras-contrib with MIT License
def on_batch_end(self, epoch, logs=None):
    logs = logs or {}
    self.trn_iterations += 1
    self.clr_iterations += 1
    K.set_value(self.model.optimizer.lr, self.clr())
    self.history.setdefault('lr', []).append(
        K.get_value(self.model.optimizer.lr))
    self.history.setdefault('iterations', []).append(self.trn_iterations)
    for k, v in logs.items():
        self.history.setdefault(k, []).append(v)
Example #22
Source File: cyclical_learning_rate.py From keras-contrib with MIT License
def on_train_begin(self, logs={}):
    logs = logs or {}
    if self.clr_iterations == 0:
        K.set_value(self.model.optimizer.lr, self.base_lr)
    else:
        K.set_value(self.model.optimizer.lr, self.clr())
Example #23
Source File: train_DGS.py From pOSAL with MIT License
def change_learning_rate_D(model, base_lr, iter, max_iter, power):
    new_lr = lr_poly(base_lr, iter, max_iter, power)
    K.set_value(model.optimizer.lr, new_lr)
    return K.get_value(model.optimizer.lr)
Example #24
Source File: metrics.py From keras-metrics with MIT License
def reset_states(self):
    """Reset the state of the metric."""
    K.set_value(self.tn, 0)
Example #25
Source File: train_DGS.py From pOSAL with MIT License
def change_learning_rate(model, base_lr, iter, max_iter, power):
    new_lr = lr_poly(base_lr, iter, max_iter, power)
    K.set_value(model.optimizer.lr, new_lr)
    return K.get_value(model.optimizer.lr)
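Both pOSAL helpers above call an lr_poly function that is not shown in the excerpt. Polynomial ("poly") decay is conventionally defined as base_lr * (1 - iter/max_iter)^power, as popularized by segmentation work such as DeepLab; a sketch under that assumption:

def lr_poly(base_lr, iter, max_iter, power):
    # Assumed definition of the standard poly decay schedule;
    # the original helper is not shown in this excerpt.
    return base_lr * (1 - float(iter) / max_iter) ** power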
Example #26
Source File: metrics.py From keras-metrics with MIT License
def reset_states(self):
    """Reset the state of the metric."""
    K.set_value(self.fn, 0)
Example #27
Source File: metrics.py From keras-metrics with MIT License
def reset_states(self):
    """Reset the state of the metric."""
    K.set_value(self.fp, 0)
Example #28
Source File: model.py From CycleGAN-Keras with GNU General Public License v3.0
def update_lr(self, model, decay):
    new_lr = K.get_value(model.optimizer.lr) - decay
    if new_lr < 0:
        new_lr = 0
    # print(K.get_value(model.optimizer.lr))
    K.set_value(model.optimizer.lr, new_lr)
Example #29
Source File: metrics.py From keras-metrics with MIT License
def reset_states(self):
    K.set_value(self.tp, [0] * self.labels)
    K.set_value(self.fn, [0] * self.labels)
Example #30
Source File: callbacks.py From bi-lstm-crf with Apache License 2.0
def on_batch_end(self, epoch, logs=None):
    '''Record previous batch statistics and update the learning rate.'''
    logs = logs or {}
    self.iteration += 1
    self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
    self.history.setdefault('iterations', []).append(self.iteration)
    for k, v in logs.items():
        self.history.setdefault(k, []).append(v)
    K.set_value(self.model.optimizer.lr, self.clr())