Python keras.backend.gradients() Examples
The following are code examples of keras.backend.gradients(), collected from open-source projects. You can go to the original project or source file by following the links above each example.
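Before the examples, here is a minimal, self-contained sketch (illustrative only, not taken from any of the projects below) of the pattern most of them share: K.gradients returns a list of symbolic gradient tensors, which is typically indexed with [0] and wrapped in K.function so the gradient can be evaluated on concrete NumPy inputs. The sketch assumes the graph-mode (TensorFlow 1.x style) Keras backend that these examples were written for.

import numpy as np
from keras import backend as K
from keras.layers import Dense
from keras.models import Sequential

# Toy model: an 8-feature input and a single regression output (purely illustrative)
model = Sequential([Dense(4, activation='relu', input_shape=(8,)), Dense(1)])

# Scalar loss built from the symbolic model output
loss = K.mean(K.square(model.output))

# K.gradients returns a list of tensors; take the gradient w.r.t. the model input
grads = K.gradients(loss, model.input)[0]

# Compile a callable that maps concrete inputs to (loss, gradient) values
evaluate = K.function([model.input], [loss, grads])

x = np.random.rand(2, 8).astype('float32')
loss_value, grads_value = evaluate([x])
print(loss_value, grads_value.shape)  # grads_value has the same shape as x: (2, 8)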
Example #1
Source File: 7_visualize_filters.py From deep-learning-note with MIT License | 8 votes |
def generate_pattern(layer_name, filter_index, size=150):
    # Filter visualization: maximize the mean activation of one filter in the given layer
    layer_output = model.get_layer(layer_name).output
    loss = K.mean(layer_output[:, :, :, filter_index])
    # Gradient of the loss w.r.t. the input image, L2-normalized for stable ascent steps
    grads = K.gradients(loss, model.input)[0]
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
    iterate = K.function([model.input], [loss, grads])
    # Start from a noisy gray image and run 40 gradient-ascent steps
    input_img_data = np.random.random((1, size, size, 3)) * 20 + 128.
    step = 1
    for _ in range(40):
        loss_value, grads_value = iterate([input_img_data])
        input_img_data += grads_value * step
    img = input_img_data[0]
    return deprocess_image(img)
Example #2
Source File: gen_adversarial2.py From MalConv-keras with MIT License | 6 votes |
def fgsm(model, inp, pad_idx, pad_len, e, step_size=0.001, target_class=1):
    adv = inp.copy()
    loss = K.mean(model.output[:, target_class])
    grads = K.gradients(loss, model.layers[1].output)[0]
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-8)

    mask = np.zeros(model.layers[1].output.shape[1:])  # embedding layer output shape
    mask[pad_idx:pad_idx+pad_len] = 1
    grads *= K.constant(mask)

    iterate = K.function([model.layers[1].output], [loss, grads])
    g = 0.
    step = int(1/step_size)*10
    for _ in range(step):
        loss_value, grads_value = iterate([adv])
        grads_value *= step_size
        g += grads_value
        adv += grads_value
        #print (e, loss_value, grads_value.mean(), end='\r')
        if loss_value >= 0.9:
            break
    return adv, g, loss_value
Example #3
Source File: wgan_gp.py From Keras-GAN with MIT License | 6 votes |
def gradient_penalty_loss(self, y_true, y_pred, averaged_samples):
    """
    Computes gradient penalty based on prediction and weighted real / fake samples
    """
    gradients = K.gradients(y_pred, averaged_samples)[0]
    # compute the euclidean norm by squaring ...
    gradients_sqr = K.square(gradients)
    #   ... summing over the rows ...
    gradients_sqr_sum = K.sum(gradients_sqr,
                              axis=np.arange(1, len(gradients_sqr.shape)))
    #   ... and sqrt
    gradient_l2_norm = K.sqrt(gradients_sqr_sum)
    # compute lambda * (1 - ||grad||)^2 still for each single sample
    gradient_penalty = K.square(1 - gradient_l2_norm)
    # return the mean as loss over all the batch samples
    return K.mean(gradient_penalty)
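As a rough illustration of what this loss computes (a standalone sketch with a toy critic and made-up names, not the Keras-GAN training setup itself), the same penalty can be evaluated directly on randomly interpolated samples:

import numpy as np
from keras import backend as K
from keras.layers import Dense, Flatten, Input
from keras.models import Model

# Toy critic: flattens an image and scores it with a single linear unit
img = Input(shape=(28, 28, 1))
critic = Model(img, Dense(1)(Flatten()(img)))

real = K.constant(np.random.rand(8, 28, 28, 1).astype('float32'))
fake = K.constant(np.random.rand(8, 28, 28, 1).astype('float32'))
alpha = K.random_uniform((8, 1, 1, 1))
interpolated = alpha * real + (1 - alpha) * fake      # random points between real and fake

# Gradient of the critic score w.r.t. the interpolated samples
grads = K.gradients(critic(interpolated), [interpolated])[0]
grad_l2 = K.sqrt(K.sum(K.square(grads), axis=np.arange(1, 4)))
penalty = K.mean(K.square(1 - grad_l2))               # drives ||grad|| towards 1

print(K.eval(penalty))

In the Keras-GAN script itself, the interpolation is produced by a RandomWeightedAverage layer inside the critic graph, and averaged_samples is bound to the loss with functools.partial before model.compile; the sketch above only isolates the gradient-penalty arithmetic.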
Example #4
Source File: keras_yolov3.py From perceptron-benchmark with Apache License 2.0 | 6 votes |
def predictions_and_gradient(self, image, criterion):
    """ Returns both predictions and gradients, and
    potentially loss w.r.t. to certain criterion.
    """
    input_shape = image.shape
    px, dpdx = self._process_input(image)

    if isinstance(criterion, TargetClassMiss) or \
            isinstance(criterion, RegionalTargetClassMiss):
        boxes, scores, classes, loss, gradient = \
            self._tgt_cls_pred_and_grad_fn(
                [px[np.newaxis], criterion.target_class()])
    else:
        raise NotImplementedError

    prediction = {}
    num = (scores[0] > 0.).sum()
    prediction['boxes'] = boxes[0][:num].tolist()
    prediction['scores'] = scores[0][:num].tolist()
    prediction['classes'] = classes[0][:num].tolist()

    gradient = self._process_gradient(dpdx, gradient)
    assert gradient.shape == input_shape
    return prediction, loss, gradient
Example #5
Source File: metrics.py From voxelmorph with GNU General Public License v3.0 | 6 votes |
def loss(self, y_true, y_pred):
    # get the value for the true and fake images
    disc_true = self.disc(y_true)
    disc_pred = self.disc(y_pred)

    # sample a x_hat by sampling along the line between true and pred
    # z = tf.placeholder(tf.float32, shape=[None, 1])
    # shp = y_true.get_shape()[0]
    # WARNING: SHOULD REALLY BE shape=[batch_size, 1] !!!
    # self.batch_size does not work, since it's not None!!!
    alpha = K.random_uniform(shape=[K.shape(y_pred)[0], 1, 1, 1])
    diff = y_pred - y_true
    interp = y_true + alpha * diff

    # take gradient of D(x_hat)
    gradients = K.gradients(self.disc(interp), [interp])[0]
    grad_pen = K.mean(K.square(K.sqrt(K.sum(K.square(gradients), axis=1))-1))

    # compute loss
    return (K.mean(disc_pred) - K.mean(disc_true)) + self.lambda_gp * grad_pen
Example #6
Source File: active_nn.py From Kitchen2D with MIT License | 6 votes |
def query(self, context):
    x0, x0context = helper.find_closest_positive_context_param(
        context, self.xx, self.yy, self.func.param_idx, self.func.context_idx)
    g = kb.gradients(self.model.outputs[0], self.model.inputs)
    gfn = kb.function(self.model.inputs, g)

    def fn(param):
        x = np.hstack((param, np.tile(context, (param.shape[0], 1))))
        return -self.model.predict(x).astype(np.float64)

    def fgfn(param):
        x = np.hstack((param, context))
        return -self.model.predict(np.array([x]))[0].astype(np.float64), \
               -gfn([np.array([x])])[0][0, self.func.param_idx].astype(np.float64)

    x_range = self.func.x_range
    guesses = helper.grid_around_point(
        x0, 0.5*(x_range[1]-x_range[0]), 5, x_range)
    x_star, y_star = helper.global_minimize(
        fn, fgfn, x_range[:, self.func.param_idx], 10000, guesses)
    print('x_star={}, y_star={}'.format(x_star, y_star))
    return np.hstack((x_star, context))
Example #7
Source File: gen_adversarial.py From MalConv-keras with MIT License | 6 votes |
def fgsm(model, inp, pad_idx, pad_len, e, step_size=0.001):
    adv = inp.copy()
    loss = K.mean(model.output[:, 0])
    grads = K.gradients(loss, model.layers[1].output)[0]
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-8)

    mask = np.zeros(model.layers[1].output.shape[1:])  # embedding layer output shape
    mask[pad_idx:pad_idx+pad_len] = 1
    grads *= K.constant(mask)

    iterate = K.function([model.layers[1].output], [loss, grads])
    g = 0.
    step = int(1/step_size)*10
    for _ in range(step):
        loss_value, grads_value = iterate([adv])
        grads_value *= step_size
        g += grads_value
        adv += grads_value
        #print (e, loss_value, end='\r')
        if loss_value >= 0.9:
            break
    return adv, g, loss_value
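The two MalConv fgsm functions above apply the perturbation in embedding space and restrict it to padding bytes with a mask. For comparison, a generic single-step FGSM on image-like inputs follows the same K.gradients / K.function recipe but perturbs the model input directly with the sign of the gradient. The toy classifier and all names below are invented for the sketch; this is not code from MalConv-keras.

import numpy as np
from keras import backend as K
from keras.layers import Dense, Flatten, Input
from keras.models import Model

# Toy 10-class classifier (illustrative only)
inp = Input(shape=(28, 28))
out = Dense(10, activation='softmax')(Flatten()(inp))
model = Model(inp, out)

def fgsm_image(model, x, true_class, epsilon=0.1):
    # Cross-entropy of the true class; maximizing it pushes the prediction away from it
    loss = -K.log(model.output[:, true_class] + K.epsilon())
    grads = K.gradients(loss, model.input)[0]
    iterate = K.function([model.input], [loss, grads])
    _, grads_value = iterate([x])
    # Single FGSM step: move each pixel by epsilon in the direction of the gradient sign
    return x + epsilon * np.sign(grads_value)

x = np.random.rand(1, 28, 28).astype('float32')
x_adv = fgsm_image(model, x, true_class=3)
print(np.abs(x_adv - x).max())   # bounded by epsilon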
Example #8
Source File: reversing_gan.py From gandlf with MIT License | 6 votes |
def reverse_generator(generator, X_sample, y_sample, title):
    """Gradient descent to map images back to their latent vectors."""
    latent_vec = np.random.normal(size=(1, 100))

    # Function for figuring out how to bump the input.
    target = K.placeholder()
    loss = K.sum(K.square(generator.outputs[0] - target))
    grad = K.gradients(loss, generator.inputs[0])[0]
    update_fn = K.function(generator.inputs + [target], [grad])

    # Repeatedly apply the update rule.
    xs = []
    for i in range(60):
        print('%d: latent_vec mean=%f, std=%f'
              % (i, np.mean(latent_vec), np.std(latent_vec)))
        xs.append(generator.predict_on_batch([latent_vec, y_sample]))
        for _ in range(10):
            update_vec = update_fn([latent_vec, y_sample, X_sample])[0]
            latent_vec -= update_vec * update_rate

    # Plots the samples.
    xs = np.concatenate(xs, axis=0)
    plot_as_gif(xs, X_sample, title)
Example #9
Source File: dream1.py From keras-examples with MIT License | 6 votes |
def render_naive(layer_name, filter_index, img0=img_noise, iter_n=20, step=1.0):
    if layer_name not in layer_dict:
        print("ERROR: invalid layer name: %s" % layer_name)
        return
    layer = layer_dict[layer_name]
    print("{} < {}".format(filter_index, layer.output_shape[-1]))
    activation = K.mean(layer.output[:, :, :, filter_index])
    grads = K.gradients(activation, input_tensor)[0]
    # Networks containing Dropout or BatchNorm need K.learning_phase() as an input
    iterate = K.function([input_tensor, K.learning_phase()], [activation, grads])
    img = img0.copy()
    for i in range(iter_n):
        # Pass 0 for the learning phase since we are not training
        activation_value, grads_value = iterate([img, 0])
        grads_value /= K.std(grads_value) + 1e-8
        img += grads_value * step
        print(i, activation_value)
Example #10
Source File: neural_style_transfer.py From DeepLearning_Wavelet-LSTM with MIT License | 6 votes |
def eval_loss_and_grads(x):
    if K.image_data_format() == 'channels_first':
        x = x.reshape((1, 3, img_nrows, img_ncols))
    else:
        x = x.reshape((1, img_nrows, img_ncols, 3))
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values

# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
Example #11
Source File: deep_dream.py From costar_plan with Apache License 2.0 | 6 votes |
def eval_loss_and_grads(x):
    x = x.reshape((1,) + img_size)
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values

# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
Example #12
Source File: neural_style_transfer.py From pCVR with Apache License 2.0 | 6 votes |
def eval_loss_and_grads(x):
    if K.image_data_format() == 'channels_first':
        x = x.reshape((1, 3, img_nrows, img_ncols))
    else:
        x = x.reshape((1, img_nrows, img_ncols, 3))
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values

# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
Example #13
Source File: optimizer.py From keras-vis with MIT License | 6 votes |
def _rmsprop(self, grads, cache=None, decay_rate=0.95):
    """Uses RMSProp to compute step from gradients.

    Args:
        grads: numpy array of gradients.
        cache: numpy array of same shape as `grads` as RMSProp cache
        decay_rate: How fast to decay cache

    Returns:
        A tuple of
            step: numpy array of the same shape as `grads` giving the step.
                Note that this does not yet take the learning rate into account.
            cache: Updated RMSProp cache.
    """
    if cache is None:
        cache = np.zeros_like(grads)
    cache = decay_rate * cache + (1 - decay_rate) * grads ** 2
    step = -grads / np.sqrt(cache + K.epsilon())
    return step, cache
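A quick standalone illustration of how such an RMSProp step behaves (a hypothetical sketch, not keras-vis code): repeatedly applying the step to the gradient of a simple quadratic walks the parameter toward its minimum, with the cache smoothing the per-coordinate step sizes.

import numpy as np

def rmsprop_step(grads, cache=None, decay_rate=0.95, eps=1e-7):
    # Same update rule as above, with a plain float epsilon instead of K.epsilon()
    if cache is None:
        cache = np.zeros_like(grads)
    cache = decay_rate * cache + (1 - decay_rate) * grads ** 2
    step = -grads / np.sqrt(cache + eps)
    return step, cache

x = np.array([5.0, -3.0])          # parameters of f(x) = 0.5 * ||x||^2
cache = None
for _ in range(200):
    grads = x                      # gradient of 0.5 * ||x||^2 is x
    step, cache = rmsprop_step(grads, cache)
    x += 0.05 * step               # learning rate applied outside, as the docstring notes
print(x)                           # close to the minimum at [0, 0]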
Example #14
Source File: gram.py From subjective-functions with MIT License | 6 votes |
def gram_loss_callable(gram_model, target_grams, shape):
    ''' Returns a function which takes in an image and outputs both the
    gram-matrix loss of that image relative to the targets, and the gradients
    of that loss with respect to the image pixels'''
    loss = diff_loss(gram_model, target_grams)
    gradients = K.gradients(loss, gram_model.input)
    if keras.backend.backend() == 'tensorflow':
        gradients = gradients[0]  # This is a Keras inconsistency between theano and tf backends

    loss_and_gradients = K.function([gram_model.input], [loss, gradients])

    def callable(x):
        deflattened = x.reshape([-1] + list(shape) + [3])
        loss, grad = loss_and_gradients([deflattened])
        #print(formatter.format("{:q} ", float(loss)), end=' | ', flush=True)
        return loss.astype('float64'), np.ravel(grad.astype('float64'))

    return callable
Example #15
Source File: gram.py From subjective-functions with MIT License | 6 votes |
def loss_and_gradients_callable(loss_model, shape):
    loss = loss_model.output
    gradients = K.gradients(loss, loss_model.input)
    if keras.backend.backend() == 'tensorflow':
        gradients = gradients[0]  # This is a Keras inconsistency between theano and tf backends

    loss_and_gradients = K.function([loss_model.input], [loss, gradients])

    def callable(x):
        deflattened = x.reshape([-1] + list(shape) + [3])
        loss, grad = loss_and_gradients([deflattened])
        #print(formatter.format("{:q} ", float(loss)), end=' | ', flush=True)
        return loss.astype('float64'), np.ravel(grad.astype('float64'))

    return callable
Example #16
Source File: custom_gradients.py From DeepIV with MIT License | 6 votes |
def get_gradients(self, loss, params):
    '''
    Replacement for the default keras get_gradients() function.
    Modification: checks if the object has the attribute grads and
    returns that rather than calculating the gradients using automatic
    differentiation.
    '''
    if hasattr(self, 'grads'):
        grads = self.grads
    else:
        grads = K.gradients(loss, params)
    if hasattr(self, 'clipnorm') and self.clipnorm > 0:
        norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
        grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
    if hasattr(self, 'clipvalue') and self.clipvalue > 0:
        grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
    return grads
Example #17
Source File: test_deform_conv.py From deform-conv with MIT License | 5 votes |
def test_tf_batch_map_offsets_grad():
    np.random.seed(42)
    input = np.random.random((4, 100, 100))
    offsets = np.random.random((4, 100, 100, 2)) * 2

    input = K.variable(input)
    offsets = K.variable(offsets)

    tf_mapped_vals = tf_batch_map_offsets(input, offsets)
    grad = K.gradients(tf_mapped_vals, input)[0]
    grad = K.eval(grad)
    assert not np.allclose(grad, 0)
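The test above only asserts that the gradient is non-zero. A common complementary check (a generic sketch, not part of the deform-conv test suite) compares K.gradients against a known analytic gradient and a finite-difference estimate on a simple differentiable expression:

import numpy as np
from keras import backend as K

x_val = np.random.random((3, 4)).astype('float32')
x = K.variable(x_val)

y = K.sum(K.square(x))                    # y = sum(x**2), so dy/dx = 2*x
grad = K.eval(K.gradients(y, x)[0])

# Central finite difference on one entry as a sanity check
eps = 1e-3
bumped = x_val.copy()
bumped[0, 0] += eps
dropped = x_val.copy()
dropped[0, 0] -= eps
fd = ((bumped ** 2).sum() - (dropped ** 2).sum()) / (2 * eps)

assert np.allclose(grad, 2 * x_val, atol=1e-5)
assert np.isclose(fd, grad[0, 0], atol=1e-2)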
Example #18
Source File: callbacks.py From deform-conv with MIT License | 5 votes |
def set_model(self, model):
    self.model = model
    self.sess = K.get_session()
    total_loss = self.model.total_loss
    if self.histogram_freq and self.merged is None:
        for layer in self.model.layers:
            for weight in layer.weights:
                # dense_1/bias:0 > dense_1/bias_0
                name = weight.name.replace(':', '_')
                tf.summary.histogram(name, weight)
                tf.summary.histogram(
                    '{}_gradients'.format(name),
                    K.gradients(total_loss, [weight])[0]
                )
                if self.write_images:
                    w_img = tf.squeeze(weight)
                    shape = w_img.get_shape()
                    if len(shape) > 1 and shape[0] > shape[1]:
                        w_img = tf.transpose(w_img)
                    if len(shape) == 1:
                        w_img = tf.expand_dims(w_img, 0)
                    w_img = tf.expand_dims(tf.expand_dims(w_img, 0), -1)
                    tf.summary.image(name, w_img)

            if hasattr(layer, 'output'):
                tf.summary.histogram('{}_out'.format(layer.name), layer.output)

    self.merged = tf.summary.merge_all()
    if self.write_graph:
        self.writer = tf.summary.FileWriter(self.log_dir, self.sess.graph)
    else:
        self.writer = tf.summary.FileWriter(self.log_dir)
Example #19
Source File: critic.py From Python-Reinforcement-Learning-Projects with MIT License | 5 votes |
def build_model(self):
    """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
    # Define input layers
    states = layers.Input(shape=(self.state_size,), name='states')
    actions = layers.Input(shape=(self.action_size,), name='actions')

    net_states = layers.Dense(units=16, kernel_regularizer=layers.regularizers.l2(1e-6))(states)
    net_states = layers.BatchNormalization()(net_states)
    net_states = layers.Activation("relu")(net_states)
    net_states = layers.Dense(units=32, kernel_regularizer=layers.regularizers.l2(1e-6))(net_states)

    net_actions = layers.Dense(units=32, kernel_regularizer=layers.regularizers.l2(1e-6))(actions)

    net = layers.Add()([net_states, net_actions])
    net = layers.Activation('relu')(net)

    Q_values = layers.Dense(units=1, name='q_values',
                            kernel_initializer=layers.initializers.RandomUniform(minval=-0.003, maxval=0.003))(net)

    self.model = models.Model(inputs=[states, actions], outputs=Q_values)

    optimizer = optimizers.Adam(lr=0.001)
    self.model.compile(optimizer=optimizer, loss='mse')

    action_gradients = K.gradients(Q_values, actions)

    self.get_action_gradients = K.function(
        inputs=[*self.model.input, K.learning_phase()],
        outputs=action_gradients)
Example #20
Source File: stylegan.py From StyleGAN-Keras with MIT License | 5 votes |
def gradient_penalty_loss(y_true, y_pred, averaged_samples, weight):
    gradients = K.gradients(y_pred, averaged_samples)[0]
    gradients_sqr = K.square(gradients)
    gradient_penalty = K.sum(gradients_sqr,
                             axis=np.arange(1, len(gradients_sqr.shape)))

    # weight * ||grad||^2
    # Penalize the gradient norm
    return K.mean(gradient_penalty * weight)

#Upsample, Convolution, AdaIN, Noise, Activation, Convolution, AdaIN, Noise, Activation
Example #21
Source File: mixed-stylegan.py From StyleGAN-Keras with MIT License | 5 votes |
def gradient_penalty_loss(y_true, y_pred, averaged_samples, weight):
    gradients = K.gradients(y_pred, averaged_samples)[0]
    gradients_sqr = K.square(gradients)
    gradient_penalty = K.sum(gradients_sqr,
                             axis=np.arange(1, len(gradients_sqr.shape)))

    # weight * ||grad||^2
    # Penalize the gradient norm
    return K.mean(gradient_penalty * weight)

#Upsample, Convolution, AdaIN, Noise, Activation, Convolution, AdaIN, Noise, Activation
Example #22
Source File: grad_cam.py From emotion_recognition with MIT License | 5 votes |
def compile_saliency_function(model, activation_layer='conv2d_7'):
    input_image = model.input
    layer_output = model.get_layer(activation_layer).output
    max_output = K.max(layer_output, axis=3)
    saliency = K.gradients(K.sum(max_output), input_image)[0]
    return K.function([input_image, K.learning_phase()], [saliency])
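A hypothetical way to exercise compile_saliency_function as defined above, using a toy convnet in place of the trained emotion-recognition model; the layer name and shapes below are invented for the sketch:

import numpy as np
from keras.layers import Conv2D, Input
from keras.models import Model

# Toy grayscale convnet standing in for the real model
inp = Input(shape=(48, 48, 1))
out = Conv2D(8, (3, 3), padding='same', name='conv_last')(Conv2D(8, (3, 3), padding='same')(inp))
toy_model = Model(inp, out)

saliency_fn = compile_saliency_function(toy_model, activation_layer='conv_last')

image = np.random.rand(1, 48, 48, 1).astype('float32')
saliency = saliency_fn([image, 0])[0]   # 0 = test phase for K.learning_phase()
print(saliency.shape)                   # (1, 48, 48, 1): one value per input pixel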
Example #23
Source File: grad_cam.py From emotion_recognition with MIT License | 5 votes |
def compile_gradient_function(input_model, category_index, layer_name):
    model = Sequential()
    model.add(input_model)

    num_classes = model.output_shape[1]
    target_layer = lambda x: target_category_loss(x, category_index, num_classes)
    model.add(Lambda(target_layer,
                     output_shape=target_category_loss_output_shape))

    loss = K.sum(model.layers[-1].output)
    conv_output = model.layers[0].get_layer(layer_name).output
    gradients = normalize(K.gradients(loss, conv_output)[0])
    gradient_function = K.function([model.layers[0].input, K.learning_phase()],
                                   [conv_output, gradients])
    return gradient_function
Example #24
Source File: layers.py From Keras-progressive_growing_of_gans with MIT License | 5 votes |
def call(self, inputs):
    target, wrt = inputs
    grads = K.gradients(target, wrt)
    assert len(grads) == 1
    grad = grads[0]
    return K.sqrt(K.sum(K.batch_flatten(K.square(grad)), axis=1, keepdims=True))
Example #25
Source File: attack_utils.py From blackbox-attacks with MIT License | 5 votes |
def gen_grad_ens(x, logits, y):
    adv_loss = K.categorical_crossentropy(logits[0], y, from_logits=True)
    if len(logits) >= 1:
        for i in range(1, len(logits)):
            adv_loss += K.categorical_crossentropy(logits[i], y, from_logits=True)
    grad = K.gradients(adv_loss, [x])[0]
    return adv_loss, grad