Python keras.backend.get_value() Examples

The following are code examples of keras.backend.get_value(), collected from open-source projects. The project and source file for each example are listed above it. You may also want to check out all available functions and classes of the module keras.backend, or try the search function.
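In short, K.get_value() pulls the current value of a backend variable (for example an optimizer's learning rate) back into a NumPy value, and K.set_value() writes a new value into it. A minimal sketch, assuming the Keras 2 backend API:

import numpy as np
from keras import backend as K

v = K.variable(0.1)            # a backend variable, e.g. a learning rate
print(K.get_value(v))          # -> 0.1 (returned as a NumPy value)
K.set_value(v, 0.01)           # assign a new value in place
print(float(K.get_value(v)))   # -> 0.01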
Example #1
Source File: test_callbacks.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_LearningRateScheduler():
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5)
    assert abs(float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon()
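For reference, the schedule passed to LearningRateScheduler above is simply 1 / (1 + epoch), so over the 5 training epochs the learning rate steps down to 0.2, which is the value the assertion reads back with K.get_value():

schedule = lambda epoch: 1. / (1. + epoch)
print([schedule(e) for e in range(5)])   # [1.0, 0.5, 0.333..., 0.25, 0.2]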
Example #2
Source File: test_bbox.py    From maskrcnn with MIT License
def test_get_iou(self):
        gtbox = K.variable([[1, 1, 3, 3], [2, 2, 4, 4]])
        anchor = K.variable([
            [1, 1, 3, 3],  # Exactly matches gtbox[0], so IoU = 1.
                           # Overlaps gtbox[1] by 1/4, so IoU = 1/7.
            [1, 0, 3, 2],  # Overlaps gtbox[0] by half, so IoU = 1/3.
            [2, 2, 4, 4],  # Overlaps gtbox[0] by 1/4, so IoU = 1/7. Exactly matches gtbox[1].
            [0, 3, 2, 5],  # Adjacent to gtbox[0], no overlap.
            [4, 3, 6, 5],  # No contact with gtbox[0].
        ])
        expected = np.array([
            [1, 1 / 7],
            [1 / 3, 0],
            [1 / 7, 1],
            [0, 0],
            [0, 0],
        ])
        iou = K.get_value(bbox.get_iou(anchor, gtbox))
        np.testing.assert_almost_equal(iou, expected, decimal=5) 
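As a quick sanity check of one of the expected values above, the IoU of [1, 1, 3, 3] and [2, 2, 4, 4] can be computed directly with NumPy (this is an independent check, not the project's bbox.get_iou, which is assumed to implement the same arithmetic):

import numpy as np

a, g = np.array([1, 1, 3, 3]), np.array([2, 2, 4, 4])
iw = max(0, min(a[2], g[2]) - max(a[0], g[0]))   # overlap along the first axis  = 1
ih = max(0, min(a[3], g[3]) - max(a[1], g[1]))   # overlap along the second axis = 1
inter = iw * ih                                  # intersection area = 1
union = (a[2] - a[0]) * (a[3] - a[1]) + (g[2] - g[0]) * (g[3] - g[1]) - inter   # 4 + 4 - 1 = 7
print(inter / float(union))                      # 1/7 ~= 0.142857, matching expected[0][1]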
Example #3
Source File: DenseNet_CIFAR10.py    From hacktoberfest2018 with GNU General Public License v3.0
def on_epoch_end(self, epoch, logs={}):
        current = logs.get(self.monitor)
        lr = self.model.optimizer.lr
        # If you want to apply decay.
        if k.get_value(self.model.optimizer.iterations) == 100:
            k.set_value(self.model.optimizer.lr, 0.01)
            print("Updating Learning rate", 0.01)
        print("Current learning rate", k.get_value(self.model.optimizer.lr))
        if current is None:
            warnings.warn("Early stopping requires %s available!" % self.monitor, RuntimeWarning)
            return
        #if k.get_value(self.model.optimizer.iterations)%5 == 0:
        #save_to_drive(k.get_value(self.model.optimizer.iterations))
        if current >= self.value:
            if self.verbose > 0:
                print("Epoch %05d: early stopping THR" % epoch)
            self.model.stop_training = True

# Load CIFAR10 Data 
Example #4
Source File: initializers_test.py    From keras-contrib with MIT License
def _runner(init, shape, target_mean=None, target_std=None,
            target_max=None, target_min=None, upper_bound=None, lower_bound=None):
    variable = init(shape)
    if not isinstance(variable, np.ndarray):
        output = K.get_value(variable)
    else:
        output = variable

    lim = 1e-2
    if target_std is not None:
        assert abs(output.std() - target_std) < lim
    if target_mean is not None:
        assert abs(output.mean() - target_mean) < lim
    if target_max is not None:
        assert abs(output.max() - target_max) < lim
    if target_min is not None:
        assert abs(output.min() - target_min) < lim
    if upper_bound is not None:
        assert output.max() < upper_bound
    if lower_bound is not None:
        assert output.min() > lower_bound 
Example #5
Source File: clr.py    From keras-one-cycle with MIT License
def on_batch_end(self, epoch, logs=None):
        logs = logs or {}

        self.clr_iterations += 1
        new_lr = self.compute_lr()

        self.history.setdefault('lr', []).append(
            K.get_value(self.model.optimizer.lr))
        K.set_value(self.model.optimizer.lr, new_lr)

        if self._update_momentum:
            if not hasattr(self.model.optimizer, 'momentum'):
                raise ValueError("Momentum can be updated only on SGD optimizer !")

            new_momentum = self.compute_momentum()

            self.history.setdefault('momentum', []).append(
                K.get_value(self.model.optimizer.momentum))
            K.set_value(self.model.optimizer.momentum, new_momentum)

        for k, v in logs.items():
            self.history.setdefault(k, []).append(v) 
Example #6
Source File: recurrent.py    From keras_bn_library with MIT License
def build(self, input_shape):
		self.input_spec = [InputSpec(shape=input_shape)]
		self.input_dim = input_shape[2]

		self.W = self.init((self.output_dim, 4 * self.input_dim),
		                   name='{}_W'.format(self.name))
		self.U = self.inner_init((self.input_dim, 4 * self.input_dim),
		                         name='{}_U'.format(self.name))
		self.b = K.variable(np.hstack((np.zeros(self.input_dim),
		                               K.get_value(self.forget_bias_init((self.input_dim,))),
		                               np.zeros(self.input_dim),
		                               np.zeros(self.input_dim))),
		                    name='{}_b'.format(self.name))

		self.A = self.init((self.input_dim, self.output_dim),
		                    name='{}_A'.format(self.name))
		self.ba = K.zeros((self.output_dim,), name='{}_ba'.format(self.name))


		self.trainable_weights = [self.W, self.U, self.b, self.A, self.ba]

		if self.initial_weights is not None:
			self.set_weights(self.initial_weights)
			del self.initial_weights 
Example #7
Source File: test_callbacks.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_ReduceLROnPlateau_patience():
    class DummyOptimizer(object):
        def __init__(self):
            self.lr = K.variable(1.0)

    class DummyModel(object):
        def __init__(self):
            self.optimizer = DummyOptimizer()

    reduce_on_plateau = callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                    patience=2)
    reduce_on_plateau.model = DummyModel()

    losses = [0.0860, 0.1096, 0.1040]
    lrs = []

    for epoch in range(len(losses)):
        reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
        lrs.append(K.get_value(reduce_on_plateau.model.optimizer.lr))

    # The learning rates should be 1.0 except the last one
    assert all([lr == 1.0 for lr in lrs[:-1]]) and lrs[-1] < 1.0 
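The expected behaviour follows from the patience bookkeeping: with patience=2 and losses [0.0860, 0.1096, 0.1040], the monitored value never improves after the first epoch, so the wait counter only reaches the patience threshold on the third call. A rough sketch of that bookkeeping (an illustration, not the actual Keras implementation):

best, wait, patience = float('inf'), 0, 2
for epoch, loss in enumerate([0.0860, 0.1096, 0.1040]):
    if loss < best:
        best, wait = loss, 0
    else:
        wait += 1
        if wait >= patience:
            print('epoch %d: reduce lr' % epoch)   # only fires on the last epoch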
Example #8
Source File: optimizers_225.py    From keras-adamw with MIT License
def get_config(self):
        config = {
            'lr': float(K.get_value(self.lr)),
            'momentum': float(K.get_value(self.momentum)),
            'decay': float(K.get_value(self.decay)),
            'nesterov': self.nesterov,
            'batch_size': int(self.batch_size),
            'total_iterations': int(self.total_iterations),
            'weight_decays': self.weight_decays,
            'lr_multipliers': self.lr_multipliers,
            'use_cosine_annealing': self.use_cosine_annealing,
            't_cur': int(K.get_value(self.t_cur)),
            'eta_t': float(K.eval(self.eta_t)),
            'eta_min': float(K.get_value(self.eta_min)),
            'eta_max': float(K.get_value(self.eta_max)),
            'init_verbose': self.init_verbose
        }
        base_config = super(SGDW, self).get_config()
        return dict(list(base_config.items()) + list(config.items())) 
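get_config() returns plain Python values (note the float(K.get_value(...)) conversions) so the optimizer state can be serialized and later rebuilt. The same round trip with the stock Keras 2 SGD optimizer looks like this (a sketch using the plain SGD class, not the SGDW subclass shown above):

from keras import optimizers

opt = optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
cfg = opt.get_config()        # plain dict: {'lr': ..., 'momentum': ..., 'decay': ..., 'nesterov': ...}
opt2 = optimizers.SGD(**cfg)  # an equivalent optimizer rebuilt from the config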
Example #9
Source File: optimizers_225.py    From keras-adamw with MIT License
def get_config(self):
        config = {
            'lr': float(K.get_value(self.lr)),
            'beta_1': float(K.get_value(self.beta_1)),
            'beta_2': float(K.get_value(self.beta_2)),
            'epsilon': self.epsilon,
            'schedule_decay': self.schedule_decay,
            'batch_size': int(self.batch_size),
            'total_iterations': int(self.total_iterations),
            'weight_decays': self.weight_decays,
            'lr_multipliers': self.lr_multipliers,
            'use_cosine_annealing': self.use_cosine_annealing,
            't_cur': int(K.get_value(self.t_cur)),
            'eta_t': float(K.eval(self.eta_t)),
            'eta_min': float(K.get_value(self.eta_min)),
            'eta_max': float(K.get_value(self.eta_max)),
            'init_verbose': self.init_verbose
        }
        base_config = super(NadamW, self).get_config()
        return dict(list(base_config.items()) + list(config.items())) 
Example #10
Source File: optimizers_225.py    From keras-adamw with MIT License
def get_config(self):
        config = {
            'lr': float(K.get_value(self.lr)),
            'beta_1': float(K.get_value(self.beta_1)),
            'beta_2': float(K.get_value(self.beta_2)),
            'decay': float(K.get_value(self.decay)),
            'batch_size': int(self.batch_size),
            'total_iterations': int(self.total_iterations),
            'weight_decays': self.weight_decays,
            'lr_multipliers': self.lr_multipliers,
            'use_cosine_annealing': self.use_cosine_annealing,
            't_cur': int(K.get_value(self.t_cur)),
            'eta_t': float(K.eval(self.eta_t)),
            'eta_min': float(K.get_value(self.eta_min)),
            'eta_max': float(K.get_value(self.eta_max)),
            'init_verbose': self.init_verbose,
            'epsilon': self.epsilon,
            'amsgrad': self.amsgrad
        }
        base_config = super(AdamW, self).get_config()
        return dict(list(base_config.items()) + list(config.items())) 
Example #11
Source File: lr_finder.py    From keras_lr_finder with MIT License
def on_batch_end(self, batch, logs):
        # Log the learning rate
        lr = K.get_value(self.model.optimizer.lr)
        self.lrs.append(lr)

        # Log the loss
        loss = logs['loss']
        self.losses.append(loss)

        # Check whether the loss got too large or NaN
        if batch > 5 and (math.isnan(loss) or loss > self.best_loss * 4):
            self.model.stop_training = True
            return

        if loss < self.best_loss:
            self.best_loss = loss

        # Increase the learning rate for the next batch
        lr *= self.lr_mult
        K.set_value(self.model.optimizer.lr, lr) 
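The per-batch multiplier self.lr_mult used above is typically derived from the desired start and end learning rates and the number of batches in the range test; a minimal sketch of that setup (start_lr, end_lr and num_batches are illustrative names, not part of the excerpt):

start_lr, end_lr, num_batches = 1e-7, 10., 500
lr_mult = (end_lr / start_lr) ** (1. / num_batches)   # geometric growth factor applied after every batch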
Example #12
Source File: callbacks.py    From convnet-study with MIT License
def change_lr(self, new_lr):
        old_lr = K.get_value(self.model.optimizer.lr)
        K.set_value(self.model.optimizer.lr, new_lr)
        if self.verbose == 1:
            print('Learning rate is %g' %new_lr) 
Example #13
Source File: plot_cam.py    From convnet-study with MIT License
def maps_pred_fun(checkpoint):
    # Load model
    model = load_model(checkpoint)
    x = model.input
    # Get feature maps before GAP
    o = [l for l in model.layers if type(l) == GlobalAveragePooling2D][-1].input

    # Setup CAM
    dense_list = [l for l in model.layers if type(l) == Dense]
    num_dense = len(dense_list)
    if num_dense > 1:
        raise ValueError('Expected only one dense layer, found %d' %num_dense)
    # If there is no dense layer after (NiN), the maps are already class maps
    if num_dense: # Apply CAM if there is a dense layer
        dense_layer = dense_list[0]
        # Get dense layer weights
        W = K.get_value(dense_layer.W)[None, None] # (1, 1, ?, ?)
        b = K.get_value(dense_layer.b)

        # Transform it into a 1x1 conv
        # This convolution will map the feature maps into class 'heatmaps'
        o = Convolution2D(W.shape[-1], 1, 1, border_mode='valid', weights=[W, b])(o)

    # Resize with bilinear method
    maps = tf.image.resize_images(o, K.shape(x)[1:3], method=tf.image.ResizeMethod.BILINEAR)
    return K.function([x, K.learning_phase()], [maps, model.output]) 
Example #14
Source File: training.py    From keras_BEGAN with MIT License
def k(self):
        return K.get_value(self.k_var) 
Example #15
Source File: save_load_utils_test.py    From keras-contrib with MIT License
def test_save_and_load_all_weights():
    '''
    Test save_all_weights and load_all_weights.
    Save and load optimizer and model weights but not configuration.
    '''

    def make_model():
        _x = Input((10,))
        _y = Dense(10)(_x)
        _m = Model(_x, _y)
        _m.compile('adam', 'mean_squared_error')
        _m._make_train_function()
        return _m

    # make a model
    m1 = make_model()
    # set weights
    w1 = m1.layers[1].kernel  # dense layer
    w1value = K.get_value(w1)
    w1value[0, 0:4] = [1, 3, 3, 7]
    K.set_value(w1, w1value)
    # set optimizer weights
    ow1 = m1.optimizer.weights[3]  # momentum weights
    ow1value = K.get_value(ow1)
    ow1value[0, 0:3] = [4, 2, 0]
    K.set_value(ow1, ow1value)
    # save all weights
    save_all_weights(m1, 'model.h5')
    # new model
    m2 = make_model()
    # load all weights
    load_all_weights(m2, 'model.h5')
    # check weights
    assert_allclose(K.get_value(m2.layers[1].kernel)[0, 0:4], [1, 3, 3, 7])
    # check optimizer weights
    assert_allclose(K.get_value(m2.optimizer.weights[3])[0, 0:3], [4, 2, 0])
    os.remove('model.h5') 
Example #16
Source File: train.py    From reloading with MIT License
def set_learning_rate(model):
    # Change the below value during training and see how it updates
    K.set_value(model.optimizer.lr, 1e-3)
    print('Set LR to', K.get_value(model.optimizer.lr)) 
Example #17
Source File: cyclical_learning_rate.py    From keras-contrib with MIT License
def on_batch_end(self, epoch, logs=None):

        logs = logs or {}
        self.trn_iterations += 1
        self.clr_iterations += 1
        K.set_value(self.model.optimizer.lr, self.clr())

        self.history.setdefault(
            'lr', []).append(
            K.get_value(
                self.model.optimizer.lr))
        self.history.setdefault('iterations', []).append(self.trn_iterations)

        for k, v in logs.items():
            self.history.setdefault(k, []).append(v) 
Example #18
Source File: ftml.py    From keras-contrib with MIT License
def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'beta_1': float(K.get_value(self.beta_1)),
                  'beta_2': float(K.get_value(self.beta_2)),
                  'decay': float(K.get_value(self.decay)),
                  'epsilon': self.epsilon}
        base_config = super(FTML, self).get_config()
        return dict(list(base_config.items()) + list(config.items())) 
Example #19
Source File: yogi.py    From keras-contrib with MIT License
def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'beta_1': float(K.get_value(self.beta_1)),
                  'beta_2': float(K.get_value(self.beta_2)),
                  'decay': float(K.get_value(self.decay)),
                  'epsilon': self.epsilon}
        base_config = super(Yogi, self).get_config()
        return dict(list(base_config.items()) + list(config.items())) 
Example #20
Source File: lars.py    From keras-contrib with MIT License
def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'momentum': float(K.get_value(self.momentum)),
                  'weight_decay': float(K.get_value(self.weight_decay)),
                  'epsilon': self.epsilon,
                  'eeta': float(K.get_value(self.eeta)),
                  'nesterov': self.nesterov}
        base_config = super(LARS, self).get_config()
        return dict(list(base_config.items()) + list(config.items())) 
Example #21
Source File: padam.py    From keras-contrib with MIT License
def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'beta_1': float(K.get_value(self.beta_1)),
                  'beta_2': float(K.get_value(self.beta_2)),
                  'decay': float(K.get_value(self.decay)),
                  'epsilon': self.epsilon,
                  'amsgrad': self.amsgrad,
                  'partial': self.partial}
        base_config = super(Padam, self).get_config()
        return dict(list(base_config.items()) + list(config.items())) 
Example #22
Source File: train_DGS.py    From pOSAL with MIT License
def change_learning_rate_D(model, base_lr, iter, max_iter, power):
    new_lr = lr_poly(base_lr, iter, max_iter, power)
    K.set_value(model.optimizer.lr, new_lr)
    return K.get_value(model.optimizer.lr) 
Example #23
Source File: train_DGS.py    From pOSAL with MIT License
def change_learning_rate(model, base_lr, iter, max_iter, power):
    new_lr = lr_poly(base_lr, iter, max_iter, power)
    K.set_value(model.optimizer.lr, new_lr)
    return K.get_value(model.optimizer.lr) 
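lr_poly itself is not part of the excerpt; a common definition used for this kind of polynomial ("poly") decay schedule, and presumably what these helpers call, is sketched below (the hyperparameter values in the usage comment are illustrative):

def lr_poly(base_lr, iter, max_iter, power):
    # polynomial decay: the learning rate shrinks from base_lr towards 0 as iter approaches max_iter
    return base_lr * ((1 - float(iter) / max_iter) ** power)

# e.g. inside the training loop:
# new_lr = change_learning_rate(model, base_lr=2.5e-4, iter=step, max_iter=20000, power=0.9)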
Example #24
Source File: training.py    From keras_BEGAN with MIT License
def loss_gen_x(self):
        return K.get_value(self.loss_gen_x_var) 
Example #25
Source File: training.py    From keras_BEGAN with MIT License
def loss_real_x(self):
        return K.get_value(self.loss_real_x_var) 
Example #26
Source File: training.py    From keras_BEGAN with MIT License
def m_global(self):
        return K.get_value(self.m_global_var) 
Example #27
Source File: test_callbacks.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_ReduceLROnPlateau():
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    def make_model():
        np.random.seed(1337)
        model = Sequential()
        model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
        model.add(Dense(num_classes, activation='softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers.SGD(lr=0.1),
                      metrics=['accuracy'])
        return model

    model = make_model()

    # This should reduce the LR after the first epoch (because min_delta is set very high).
    cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, min_delta=10, patience=1, cooldown=5)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2)
    assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.01, atol=K.epsilon())

    model = make_model()
    cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, min_delta=0, patience=1, cooldown=5)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2)
    assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.1, atol=K.epsilon())