Python tensorflow.keras.backend.random_normal() Examples
The following are 11 code examples of tensorflow.keras.backend.random_normal(). Each example notes the original project and source file it was taken from.
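As a quick orientation before the project excerpts, the short sketch below shows the basic call signature (shape, mean, stddev, and an optional seed). The shapes and values here are purely illustrative and are not taken from any of the projects.

import tensorflow as tf
from tensorflow.keras import backend as K

# draw a (4, 3) tensor of samples from N(0, 1); mean and stddev default to 0.0 and 1.0
eps = K.random_normal(shape=(4, 3), mean=0.0, stddev=1.0)

# an optional seed makes the draw reproducible
eps_seeded = K.random_normal(shape=(4, 3), seed=42)
print(eps.shape)  # (4, 3)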
Example #1
Source File: layers.py From neuron with GNU General Public License v3.0 (7 votes)
def _sample(self, args):
    """
    sample from a normal distribution
    args should be [mu, log_var], where log_var is the log of the squared sigma

    This is probably equivalent to
        K.random_normal(shape, args[0], exp(args[1]/2.0))
    """
    mu, log_var = args

    # sample from N(0, 1)
    # note: tf.random_normal is the TensorFlow 1.x name; in TF 2.x the equivalent call is tf.random.normal
    noise = tf.random_normal(tf.shape(mu), 0, 1, dtype=tf.float32)

    # make it a sample from N(mu, sigma^2)
    z = mu + tf.exp(log_var / 2.0) * noise
    return z
Example #2
Source File: vae-cnn-mnist-8.1.2.py From Advanced-Deep-Learning-with-Keras with MIT License (7 votes)
def sampling(args):
    """Reparameterization trick by sampling from an isotropic unit Gaussian.

    # Arguments:
        args (tensor): mean and log of variance of Q(z|X)

    # Returns:
        z (tensor): sampled latent vector
    """
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
Example #3
Source File: vae-mlp-mnist-8.1.1.py From Advanced-Deep-Learning-with-Keras with MIT License (6 votes)
def sampling(args):
    """Reparameterization trick by sampling from an isotropic unit Gaussian.

    # Arguments:
        args (tensor): mean and log of variance of Q(z|X)

    # Returns:
        z (tensor): sampled latent vector
    """
    z_mean, z_log_var = args
    # K is the keras backend
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
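In VAE code like the two examples above, a sampling function of this kind is typically wrapped in a Lambda layer so the draw happens inside the model graph. The sketch below illustrates that wiring; the 784-dimensional input, the hidden width, and latent_dim are assumptions made for the example, and it relies on the sampling function and the K backend import from Example #3 being in scope.

from tensorflow.keras.layers import Input, Dense, Lambda
from tensorflow.keras.models import Model

latent_dim = 2  # assumed latent size, for illustration only
inputs = Input(shape=(784,), name='encoder_input')
h = Dense(512, activation='relu')(inputs)
z_mean = Dense(latent_dim, name='z_mean')(h)
z_log_var = Dense(latent_dim, name='z_log_var')(h)

# the sampling function defined above draws z = mean + sigma * epsilon inside the graph
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])
encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')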
Example #4
Source File: cvae-cnn-mnist-8.2.1.py From Advanced-Deep-Learning-with-Keras with MIT License (6 votes)
def sampling(args):
    """Implements reparameterization trick by sampling
    from a gaussian with zero mean and std=1.

    Arguments:
        args (tensor): mean and log of variance of Q(z|X)

    Returns:
        sampled latent vector (tensor)
    """
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
Example #5
Source File: cmvae.py From AirSim-Drone-Racing-VAE-Imitation with MIT License (6 votes)
def call(self, x, mode):
    # Possible modes for reconstruction:
    # 0: img -> img + gate
    # 1: img -> img
    # 2: img -> gate
    x = self.q_img(x)
    means = self.mean_params(x)
    stddev = tf.math.exp(0.5 * self.stddev_params(x))
    eps = random_normal(tf.shape(stddev))
    z = means + eps * stddev
    if mode == 0:
        img_recon = self.p_img(z)
        gate_recon = self.p_gate(z)
        return img_recon, gate_recon, means, stddev, z
    elif mode == 1:
        img_recon = self.p_img(z)
        gate_recon = False
        return img_recon, gate_recon, means, stddev, z
    elif mode == 2:
        img_recon = False
        gate_recon = self.p_gate(z)
        return img_recon, gate_recon, means, stddev, z
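The excerpt calls random_normal without a module prefix; given the topic of this page, it is presumably imported straight from the Keras backend, along these lines (the import itself is not shown in the excerpt):

from tensorflow.keras.backend import random_normal

eps = random_normal((8, 10))  # standard-normal noise, same call as in the excerpt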
Example #6
Source File: cmvae.py From AirSim-Drone-Racing-VAE-Imitation with MIT License (6 votes)
def call(self, x, mode):
    # Possible modes for reconstruction:
    # 0: img -> img + gate
    # 1: img -> img
    # 2: img -> gate
    x = self.q_img(x)
    means = self.mean_params(x)
    stddev = tf.math.exp(0.5 * self.stddev_params(x))
    eps = random_normal(tf.shape(stddev))
    z = means + eps * stddev
    r_params, theta_params, psi_params, phi_params = self.extract_gate_params(z)
    if mode == 0:
        gate_recon = tf.keras.layers.concatenate(
            [self.p_R(r_params), self.p_Theta(theta_params),
             self.p_Psi(psi_params), self.p_Phi(phi_params)], axis=1)
        img_recon = self.p_img(z)
        return img_recon, gate_recon, means, stddev, z
    elif mode == 1:
        img_recon = self.p_img(z)
        gate_recon = False
        return img_recon, gate_recon, means, stddev, z
    elif mode == 2:
        img_recon = False
        gate_recon = tf.keras.layers.concatenate(
            [self.p_R(r_params), self.p_Theta(theta_params),
             self.p_Psi(psi_params), self.p_Phi(phi_params)], axis=1)
        return img_recon, gate_recon, means, stddev, z
Example #7
Source File: augmentation.py From kapre with MIT License (5 votes)
def call(self, x):
    if self.random_gain:
        noise_x = x + K.random_normal(
            shape=K.shape(x), mean=0.0, stddev=np.random.uniform(0.0, self.power)
        )
    else:
        noise_x = x + K.random_normal(shape=K.shape(x), mean=0.0, stddev=self.power)

    return K.in_train_phase(noise_x, x)
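The K.in_train_phase(noise_x, x) call at the end is what makes the noise training-only: it returns its first argument in the training phase and its second argument otherwise. A small standalone sketch of that switch, not taken from kapre:

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.ones((2, 4))
noise_x = x + K.random_normal(shape=K.shape(x), mean=0.0, stddev=0.1)

y_train = K.in_train_phase(noise_x, x, training=True)   # noisy tensor
y_infer = K.in_train_phase(noise_x, x, training=False)  # clean input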
Example #8
Source File: networks.py From brainstorm with MIT License (5 votes)
def call(self, inputs):
    if self.n_dims == 2:
        rand_flow = K.random_normal(
            shape=tf.convert_to_tensor(
                [tf.shape(inputs)[0], tf.shape(inputs)[1], tf.shape(inputs)[2], self.n_dims]),
            mean=0., stddev=1., dtype='float32')
        rand_flow = tf.nn.depthwise_conv2d(rand_flow, self.blur_kernel,
                                           strides=[1] * (self.n_dims + 2), padding='SAME')
    elif self.n_dims == 3:
        rand_flow = K.random_normal(
            shape=tf.convert_to_tensor(
                [tf.shape(inputs)[0], tf.shape(inputs)[1], tf.shape(inputs)[2],
                 tf.shape(inputs)[3], self.n_dims]),
            mean=0., stddev=1., dtype='float32')

        if self.blur_kernel is not None:
            rand_flow_list = tf.unstack(rand_flow, num=3, axis=-1)
            flow_chans = []
            for c in range(self.n_dims):
                flow_chan = tf.nn.conv3d(tf.expand_dims(rand_flow_list[c], axis=-1),
                                         self.blur_kernel,
                                         strides=[1] * (self.n_dims + 2), padding='SAME')
                flow_chans.append(flow_chan[:, :, :, :, 0])
            rand_flow = tf.stack(flow_chans, axis=-1)

    if self.normalize_max:
        rand_flow = K.cast(
            tf.add_n([rand_flow * 0,
                      rand_flow / tf.reduce_max(tf.abs(rand_flow)) * self.flow_sigma]),
            dtype='float32')
    else:
        rand_flow = K.cast(rand_flow * self.flow_sigma, dtype='float32')
    return rand_flow
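In the 2-D branch above, the freshly sampled flow field is smoothed channel-by-channel with tf.nn.depthwise_conv2d. The sketch below shows that smoothing step in isolation, with assumed concrete shapes and a simple box-blur kernel standing in for the layer's own self.blur_kernel:

import tensorflow as tf
from tensorflow.keras import backend as K

# a random 2-channel flow field for one 32x32 image (illustrative shapes)
rand_flow = K.random_normal(shape=(1, 32, 32, 2), mean=0., stddev=1.)

# 5x5 box blur applied per channel; filter shape is [h, w, in_channels, channel_multiplier]
blur_kernel = tf.ones((5, 5, 2, 1)) / 25.0
smooth_flow = tf.nn.depthwise_conv2d(rand_flow, blur_kernel,
                                     strides=[1, 1, 1, 1], padding='SAME')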
Example #9
Source File: cmvae.py From AirSim-Drone-Racing-VAE-Imitation with MIT License (5 votes)
def encode(self, x):
    x = self.q_img(x)
    means = self.mean_params(x)
    stddev = tf.math.exp(0.5 * self.stddev_params(x))
    eps = random_normal(tf.shape(stddev))
    z = means + eps * stddev
    return z, means, stddev
Example #10
Source File: cmvae.py From AirSim-Drone-Racing-VAE-Imitation with MIT License (5 votes)
def encode(self, x):
    x = self.q_img(x)
    means = self.mean_params(x)
    stddev = tf.math.exp(0.5 * self.stddev_params(x))
    eps = random_normal(tf.shape(stddev))
    z = means + eps * stddev
    return z, means, stddev
Example #11
Source File: stylegan_two.py From StyleGAN2-Tensorflow-2.0 with MIT License (4 votes)
def train_step(self, images, style, noise, perform_gp=True, perform_pl=False):

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        # Get style information
        w_space = []
        pl_lengths = self.pl_mean
        for i in range(len(style)):
            w_space.append(self.GAN.S(style[i]))

        # Generate images
        generated_images = self.GAN.G(w_space + [noise])

        # Discriminate
        real_output = self.GAN.D(images, training=True)
        fake_output = self.GAN.D(generated_images, training=True)

        # Hinge loss function
        gen_loss = K.mean(fake_output)
        divergence = K.mean(K.relu(1 + real_output) + K.relu(1 - fake_output))
        disc_loss = divergence

        if perform_gp:
            # R1 gradient penalty
            disc_loss += gradient_penalty(images, real_output, 10)

        if perform_pl:
            # Slightly adjust W space
            w_space_2 = []
            for i in range(len(style)):
                std = 0.1 / (K.std(w_space[i], axis=0, keepdims=True) + 1e-8)
                w_space_2.append(w_space[i] + K.random_normal(tf.shape(w_space[i])) / (std + 1e-8))

            # Generate from slightly adjusted W space
            pl_images = self.GAN.G(w_space_2 + [noise])

            # Get distance after adjustment (path length)
            delta_g = K.mean(K.square(pl_images - generated_images), axis=[1, 2, 3])
            pl_lengths = delta_g

            if self.pl_mean > 0:
                gen_loss += K.mean(K.square(pl_lengths - self.pl_mean))

    # Get gradients for respective areas
    gradients_of_generator = gen_tape.gradient(gen_loss, self.GAN.GM.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, self.GAN.D.trainable_variables)

    # Apply gradients
    self.GAN.GMO.apply_gradients(zip(gradients_of_generator, self.GAN.GM.trainable_variables))
    self.GAN.DMO.apply_gradients(zip(gradients_of_discriminator, self.GAN.D.trainable_variables))

    return disc_loss, gen_loss, divergence, pl_lengths
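Here K.random_normal sits inside the path-length regularization branch: each W-space tensor is perturbed by noise whose scale depends on the per-channel spread of W in the batch. A self-contained sketch of just that perturbation, with the batch size and latent width assumed for illustration:

import tensorflow as tf
from tensorflow.keras import backend as K

w = tf.random.normal((8, 512))  # assumed batch of W-space latents

# same scaling as in the excerpt: the noise is divided by std, which is inversely
# proportional to the per-channel standard deviation, so channels with more spread
# receive a proportionally larger perturbation
std = 0.1 / (K.std(w, axis=0, keepdims=True) + 1e-8)
w_perturbed = w + K.random_normal(tf.shape(w)) / (std + 1e-8)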