Python keras.backend.random_normal() Examples

The following are 24 code examples of keras.backend.random_normal(). The original project and source file are noted above each example. You may also want to check out all available functions/classes of the module keras.backend.
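For reference, the Keras 2 backend signature is keras.backend.random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None). A minimal self-contained sketch (the shape and seed values here are arbitrary):

from keras import backend as K

# draw a (4, 3) tensor of samples from N(0, 1); the seed is optional
eps = K.random_normal(shape=(4, 3), mean=0.0, stddev=1.0, seed=42)
print(K.eval(eps).shape)  # (4, 3)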
Example #1
Source File: dagmm.py    From AnomalyDetectionTransformations with MIT License
def call(self, inputs, training=None):
        z, gamma_k = inputs

        gamma_k_sum = K.sum(gamma_k)
        est_phi = K.mean(gamma_k, axis=0)
        est_mu = K.dot(K.transpose(gamma_k), z) / gamma_k_sum
        est_sigma = K.dot(K.transpose(z - est_mu),
                          gamma_k * (z - est_mu)) / gamma_k_sum

        # small random jitter on the diagonal keeps est_sigma invertible
        est_sigma = est_sigma + (K.random_normal(shape=(K.int_shape(z)[1], 1),
                                                 mean=1e-3, stddev=1e-4)
                                 * K.eye(K.int_shape(z)[1]))

        self.add_update(K.update(self.phi, est_phi), inputs)
        self.add_update(K.update(self.mu, est_mu), inputs)
        self.add_update(K.update(self.sigma, est_sigma), inputs)

        est_sigma_diag_inv = K.eye(K.int_shape(self.sigma)[0]) / est_sigma
        self.add_loss(self.lambd_diag * K.sum(est_sigma_diag_inv), inputs)

        phi = K.in_train_phase(est_phi, self.phi, training)
        mu = K.in_train_phase(est_mu, self.mu, training)
        sigma = K.in_train_phase(est_sigma, self.sigma, training)
        return GaussianMixtureComponent._calc_component_density(z, phi, mu, sigma) 
Example #2
Source File: attention_decoder.py    From keras-monotonic-attention with GNU Affero General Public License v3.0
def _compute_probabilities(self, energy, previous_attention=None):
        if self.is_monotonic:
            # add presigmoid noise to encourage discreteness
            sigmoid_noise = K.in_train_phase(1., 0.)
            noise = K.random_normal(K.shape(energy), mean=0.0, stddev=sigmoid_noise)
            # encourage discreteness in train
            energy = K.in_train_phase(energy + noise, energy)

            p = K.in_train_phase(K.sigmoid(energy),
                                 K.cast(energy > 0, energy.dtype))
            p = K.squeeze(p, -1)
            p_prev = K.squeeze(previous_attention, -1)
            # monotonic attention function from tensorflow (tf.contrib, so TF 1.x only)
            at = K.in_train_phase(
                tf.contrib.seq2seq.monotonic_attention(p, p_prev, 'parallel'),
                tf.contrib.seq2seq.monotonic_attention(p, p_prev, 'hard'))
            at = K.expand_dims(at, -1)
        else:
            # softmax
            at = keras.activations.softmax(energy, axis=1)

        return at 
Example #3
Source File: _vae_keras.py    From scgen with GNU General Public License v3.0
def _sample_z(args):
        """
            Samples from standard Normal distribution with shape [size, z_dim] and
            applies re-parametrization trick. It is actually sampling from latent
            space distributions with N(mu, var) computed in `_encoder` function.
            Parameters
            ----------
            No parameters are needed.
            Returns
            -------
            The computed Tensor of samples with shape [size, z_dim].
        """
        mu, log_var = args
        batch_size = K.shape(mu)[0]
        z_dim = K.shape(mu)[1]
        eps = K.random_normal(shape=[batch_size, z_dim])
        return mu + K.exp(log_var / 2) * eps 
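In the source this function feeds a Lambda layer; a minimal self-contained sketch of that wiring (the 8-unit input and 2-dimensional latent space are assumptions, and _sample_z is assumed to be in scope as a plain function):

import numpy as np
from keras.layers import Input, Dense, Lambda
from keras.models import Model

z_dim = 2  # assumed latent size
x = Input(shape=(8,))
mu = Dense(z_dim)(x)
log_var = Dense(z_dim)(x)
z = Lambda(_sample_z, output_shape=(z_dim,))([mu, log_var])
encoder = Model(x, z)
print(encoder.predict(np.zeros((5, 8))).shape)  # (5, 2)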
Example #4
Source File: example_aae_cifar10.py    From keras-adversarial with MIT License
def model_encoder(latent_dim, input_shape, units=512, reg=lambda: l1l2(l1=1e-7, l2=1e-7), dropout=0.5):
    k = 5
    x = Input(input_shape)
    h = Convolution2D(units // 4, k, k, border_mode='same', W_regularizer=reg())(x)
    # h = SpatialDropout2D(dropout)(h)
    h = MaxPooling2D(pool_size=(2, 2))(h)
    h = LeakyReLU(0.2)(h)
    h = Convolution2D(units // 2, k, k, border_mode='same', W_regularizer=reg())(h)
    # h = SpatialDropout2D(dropout)(h)
    h = MaxPooling2D(pool_size=(2, 2))(h)
    h = LeakyReLU(0.2)(h)
    h = Convolution2D(units // 2, k, k, border_mode='same', W_regularizer=reg())(h)
    # h = SpatialDropout2D(dropout)(h)
    h = MaxPooling2D(pool_size=(2, 2))(h)
    h = LeakyReLU(0.2)(h)
    h = Convolution2D(units, k, k, border_mode='same', W_regularizer=reg())(h)
    # h = SpatialDropout2D(dropout)(h)
    h = LeakyReLU(0.2)(h)
    h = Flatten()(h)
    mu = Dense(latent_dim, name="encoder_mu", W_regularizer=reg())(h)
    log_sigma_sq = Dense(latent_dim, name="encoder_log_sigma_sq", W_regularizer=reg())(h)
    z = Lambda(lambda p: p[0] + K.random_normal(K.shape(p[0])) * K.exp(p[1] / 2),
               output_shape=lambda p: p[0])([mu, log_sigma_sq])
    return Model(x, z, name="encoder") 
Example #5
Source File: example_bigan.py    From keras-adversarial with MIT License
def model_encoder(latent_dim, input_shape, hidden_dim=1024, reg=lambda: l1l2(1e-5, 0), batch_norm_mode=0):
    x = Input(input_shape, name="x")
    h = Flatten()(x)
    h = Dense(hidden_dim, name="encoder_h1", W_regularizer=reg())(h)
    h = BatchNormalization(mode=batch_norm_mode)(h)
    h = LeakyReLU(0.2)(h)
    h = Dense(hidden_dim // 2, name="encoder_h2", W_regularizer=reg())(h)
    h = BatchNormalization(mode=batch_norm_mode)(h)
    h = LeakyReLU(0.2)(h)
    h = Dense(hidden_dim // 4, name="encoder_h3", W_regularizer=reg())(h)
    h = BatchNormalization(mode=batch_norm_mode)(h)
    h = LeakyReLU(0.2)(h)
    mu = Dense(latent_dim, name="encoder_mu", W_regularizer=reg())(h)
    log_sigma_sq = Dense(latent_dim, name="encoder_log_sigma_sq", W_regularizer=reg())(h)
    z = merge([mu, log_sigma_sq], mode=lambda p: p[0] + K.random_normal(K.shape(p[0])) * K.exp(p[1] / 2),
              output_shape=lambda x: x[0])
    return Model(x, z, name="encoder") 
Example #6
Source File: vae.py    From pyod with BSD 2-Clause "Simplified" License
def sampling(self, args):
        """Reparametrisation by sampling from Gaussian, N(0,I)
        To sample from epsilon = Norm(0,I) instead of from likelihood Q(z|X)
        with latent variables z: z = z_mean + sqrt(var) * epsilon

        Parameters
        ----------
        args : tensor
            Mean and log of variance of Q(z|X).
    
        Returns
        -------
        z : tensor
            Sampled latent variable.
        """

        z_mean, z_log = args
        batch = K.shape(z_mean)[0]  # batch size
        dim = K.int_shape(z_mean)[1]  # latent dimension
        epsilon = K.random_normal(shape=(batch, dim))  # mean=0, std=1.0

        return z_mean + K.exp(0.5 * z_log) * epsilon 
Example #7
Source File: chapter_06_001.py    From Python-Deep-Learning-Second-Edition with MIT License
def sampling(args: tuple):
    """
    Reparameterization trick by sampling z from unit Gaussian
    :param args: (tensor, tensor) mean and log of variance of q(z|x)
    :returns tensor: sampled latent vector z
    """

    # unpack the input tuple
    z_mean, z_log_var = args

    # mini-batch size
    mb_size = K.shape(z_mean)[0]

    # latent space size
    dim = K.int_shape(z_mean)[1]

    # random normal vector with mean=0 and std=1.0
    epsilon = K.random_normal(shape=(mb_size, dim))

    return z_mean + K.exp(0.5 * z_log_var) * epsilon 
Example #8
Source File: example_bigan_unrolled.py    From keras-adversarial with MIT License
def model_encoder(latent_dim, input_shape, hidden_dim=1024, reg=lambda: l1(1e-5), batch_norm_mode=2):
    x = Input(input_shape, name="x")
    h = Flatten()(x)
    h = Dense(hidden_dim, name="encoder_h1", W_regularizer=reg())(h)
    h = BatchNormalization(mode=batch_norm_mode)(h)
    h = LeakyReLU(0.2)(h)
    h = Dense(hidden_dim // 2, name="encoder_h2", W_regularizer=reg())(h)
    h = BatchNormalization(mode=batch_norm_mode)(h)
    h = LeakyReLU(0.2)(h)
    h = Dense(hidden_dim // 4, name="encoder_h3", W_regularizer=reg())(h)
    h = BatchNormalization(mode=batch_norm_mode)(h)
    h = LeakyReLU(0.2)(h)
    mu = Dense(latent_dim, name="encoder_mu", W_regularizer=reg())(h)
    log_sigma_sq = Dense(latent_dim, name="encoder_log_sigma_sq", W_regularizer=reg())(h)
    z = merge([mu, log_sigma_sq], mode=lambda p: p[0] + K.random_normal(K.shape(p[0])) * K.exp(p[1] / 2),
              output_shape=lambda x: x[0])
    return Model(x, z, name="encoder") 
Example #9
Source File: aae.py    From Keras-GAN with MIT License
def build_encoder(self):
        # Encoder

        img = Input(shape=self.img_shape)

        h = Flatten()(img)
        h = Dense(512)(h)
        h = LeakyReLU(alpha=0.2)(h)
        h = Dense(512)(h)
        h = LeakyReLU(alpha=0.2)(h)
        mu = Dense(self.latent_dim)(h)
        log_var = Dense(self.latent_dim)(h)
        latent_repr = merge([mu, log_var],
                mode=lambda p: p[0] + K.random_normal(K.shape(p[0])) * K.exp(p[1] / 2),
                output_shape=lambda p: p[0])

        return Model(img, latent_repr) 
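Note that the lowercase merge function is the Keras 1 API and was removed in Keras 2; an equivalent line under Keras 2, as a hedged sketch (with Lambda imported from keras.layers), wraps the same lambda in a Lambda layer:

from keras.layers import Lambda

latent_repr = Lambda(lambda p: p[0] + K.random_normal(K.shape(p[0])) * K.exp(p[1] / 2))([mu, log_var])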
Example #10
Source File: model.py    From keras-molecules with MIT License
def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std = 0.01):
        h = Convolution1D(9, 9, activation = 'relu', name='conv_1')(x)
        h = Convolution1D(9, 9, activation = 'relu', name='conv_2')(h)
        h = Convolution1D(10, 11, activation = 'relu', name='conv_3')(h)
        h = Flatten(name='flatten_1')(h)
        h = Dense(435, activation = 'relu', name='dense_1')(h)

        def sampling(args):
            z_mean_, z_log_var_ = args
            batch_size = K.shape(z_mean_)[0]
            epsilon = K.random_normal(shape=(batch_size, latent_rep_size), mean=0., stddev=epsilon_std)
            return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

        z_mean = Dense(latent_rep_size, name='z_mean', activation = 'linear')(h)
        z_log_var = Dense(latent_rep_size, name='z_log_var', activation = 'linear')(h)

        def vae_loss(x, x_decoded_mean):
            x = K.flatten(x)
            x_decoded_mean = K.flatten(x_decoded_mean)
            xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
            kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis = -1)
            return xent_loss + kl_loss

        return (vae_loss, Lambda(sampling, output_shape=(latent_rep_size,), name='lambda')([z_mean, z_log_var])) 
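In math form, with $\sigma^2 = \exp(\texttt{z\_log\_var})$, vae_loss above is the standard negative ELBO per sample:

\mathcal{L} = \mathrm{max\_length} \cdot \mathrm{BCE}(x, \hat{x}) \;-\; \frac{1}{2}\,\mathrm{mean}_j\!\left(1 + \log\sigma_j^2 - \mu_j^2 - \sigma_j^2\right)

where the second term is the closed-form KL divergence from N(mu, sigma^2 I) to N(0, I), averaged over latent dimensions j.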
Example #11
Source File: note-generator.py    From Hands-On-Deep-Learning-for-Games with MIT License
def sampling(args):
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    seq = K.int_shape(z_mean)[1]
    dim = K.int_shape(z_mean)[2]
    
    epsilon = K.random_normal(shape=(batch, seq, dim))
        
    return z_mean + K.exp(0.5 * z_log_var) * epsilon

# pitches VAE loss 
Example #12
Source File: train_mnist_vae.py    From uncertainty-adversarial-paper with MIT License
def define_VAE(optim='adagrad', latent_dim=2):
    inputs = keras.layers.Input(shape=(28, 28, 1))
    x = Flatten()(inputs)
    enc_1 = Dense(400, activation='elu')(x)
    enc_2 = Dense(256, activation='elu')(enc_1)

    z_mu = Dense(latent_dim)(enc_2)
    z_logsigma = Dense(latent_dim)(enc_2)

    encoder = Model(inputs=inputs, outputs=z_mu)  # represent the latent space by the mean

    def sample_z(args):
        mu, logsigma = args
        # note: the leading 0.5 damps the noise; the standard reparameterization is mu + exp(logsigma / 2) * eps
        return 0.5 * K.exp(logsigma / 2) * K.random_normal(shape=(K.shape(mu)[0], latent_dim)) + mu

    z = Lambda(sample_z, output_shape=(latent_dim,))([z_mu, z_logsigma])

    dec_input = keras.layers.Input(shape=(latent_dim,))
    dec_1 = Dense(256, activation='elu')(dec_input)
    dec_2 = Dense(400, activation='elu')(dec_1)
    dec_output = Dense(784, activation='sigmoid')(dec_2)

    dec_reshaped = Reshape((28, 28, 1))(dec_output)
    decoder = Model(inputs=dec_input, outputs=dec_reshaped)

    reconstruction = decoder(z)

    VAE = Model(inputs=inputs, outputs=reconstruction)

    def vae_loss(inputs, reconstruction):
        x = K.flatten(inputs)
        rec = K.flatten(reconstruction)
        x_ent = keras.metrics.binary_crossentropy(x, rec)
        kl_div = 0.5 * K.sum(K.exp(z_logsigma) + K.square(z_mu) - z_logsigma - 1, axis=-1)
        return 28 * 28 * x_ent + kl_div

    VAE.compile(optimizer=optim, loss=vae_loss)

    return VAE, encoder, decoder 
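A hedged usage sketch for the function above (the random x_train stand-in and the epoch/batch settings are placeholders, not from the source):

import numpy as np

x_train = np.random.rand(256, 28, 28, 1).astype('float32')  # hypothetical stand-in data
VAE, encoder, decoder = define_VAE(optim='adagrad', latent_dim=2)
VAE.fit(x_train, x_train, epochs=1, batch_size=64)  # autoencoder: target equals input
z_mu = encoder.predict(x_train[:8])                 # latent means, shape (8, 2)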
Example #13
Source File: variational_autoencoder.py    From DeepLearning_Wavelet-LSTM with MIT License
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean=0.,
                              stddev=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend 
Example #14
Source File: variational_autoencoder.py    From keras-autoencoder with GNU General Public License v3.0
def sampling(args):
        z_mean, z_log_std = args
        epsilon = K.random_normal(shape=(batch_size, latent_dim),
                                  mean=0., stddev=epsilon_std)
        return z_mean + K.exp(z_log_std) * epsilon

    # note that "output_shape" isn't necessary with the TensorFlow backend
    # so you could write `Lambda(sampling)([z_mean, z_log_std])` 
Example #15
Source File: pitch-generator.py    From Hands-On-Deep-Learning-for-Games with MIT License
def sampling(args):
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon

# VAE network loss (reconstruction + KL-divergence) 
Example #16
Source File: samplers.py    From DeepIV with MIT License
def random_gmm(pi, mu, sig):
    '''
    Sample from a Gaussian mixture model. Returns one sample for each row in
    the pi, mu and sig matrices... this is potentially wasteful (because you have to repeat
    the matrices n times if you want to get n samples), but makes it easy to implement
    code where the parameters vary as they are conditioned on different datapoints.
    '''
    normals = random_normal(K.shape(mu), mu, sig)
    k = random_multinomial(pi)
    return K.sum(normals * k, axis=1, keepdims=True) 
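A minimal sketch of calling it, assuming random_gmm and its helper random_multinomial are both imported from DeepIV's samplers module (the component values below are arbitrary):

from keras import backend as K

pi = K.constant([[0.2, 0.5, 0.3]])   # mixture weights; each row sums to 1
mu = K.constant([[-1., 0., 2.]])     # per-component means
sig = K.constant([[0.5, 1.0, 0.3]])  # per-component standard deviations
sample = random_gmm(pi, mu, sig)     # one draw per row, shape (1, 1)
print(K.eval(sample))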
Example #17
Source File: wide_residual_network.py    From AnomalyDetectionTransformations with MIT License
def _conv_kernel_initializer(shape, dtype=None):
    fan_in, fan_out = _compute_fans(shape)
    stddev = np.sqrt(2. / fan_in)  # He normal initialization, scaled by fan-in for ReLU nets
    return K.random_normal(shape, 0., stddev, dtype) 
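Keras 2 accepts a plain callable with this (shape, dtype) signature as an initializer; a hedged wiring sketch (the layer sizes are arbitrary, and _compute_fans is assumed imported from keras.initializers as in the source module):

from keras.layers import Input, Conv2D
from keras.models import Model

x = Input(shape=(32, 32, 3))
y = Conv2D(16, (3, 3), padding='same',
           kernel_initializer=_conv_kernel_initializer)(x)
model = Model(x, y)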
Example #18
Source File: samplers.py    From DeepIV with MIT License
def random_normal(shape, mean=0.0, std=1.0):
    return K.random_normal(shape, mean, std) 
Example #19
Source File: layers.py    From Keras-progressive_growing_of_gans with MIT License
def call(self, input, deterministic=False, **kwargs):
        if self.gain is not None:
            input = input * self.gain
        if deterministic or not self.strength:
            return input

        in_shape  = self.input_shape
        in_axes   = range(len(in_shape))
        in_shape  = [in_shape[axis] if in_shape[axis] is not None else input.shape[axis] for axis in in_axes] # None => Theano expr
        rnd_shape = [in_shape[axis] for axis in self.axes]
        broadcast = [self.axes.index(axis) if axis in self.axes else 'x' for axis in in_axes]
        one       = K.constant(1)

        if self.mode == 'drop':
            p = one - self.strength
            rnd = K.random_binomial(tuple(rnd_shape), p=p, dtype=input.dtype) / p

        elif self.mode == 'mul':
            rnd = (one + self.strength) ** K.random_normal(tuple(rnd_shape), dtype=input.dtype)

        elif self.mode == 'prop':
            coef = self.strength * K.constant(np.sqrt(np.float32(self.input_shape[1])))
            rnd = K.random_normal(tuple(rnd_shape), dtype=input.dtype) * coef + one

        else:
            raise ValueError('Invalid GDropLayer mode', self.mode)

        if self.normalize:
            rnd = rnd / K.sqrt(K.mean(rnd ** 2, axis=3, keepdims=True))
        return input * K.permute_dimensions(rnd,broadcast) 
Example #20
Source File: variational_autoencoder_deconv.py    From keras_experiments with The Unlicense
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
                              mean=0., stddev=epsilon_std)
    return z_mean + K.exp(z_log_var) * epsilon


# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_var])` 
Example #21
Source File: vae_keras.py    From VAE with MIT License
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean=0.,
                              stddev=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon 
Example #22
Source File: variational_autoencoder_deconv.py    From pCVR with Apache License 2.0
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(batch_size, latent_dim),
                              mean=0., stddev=epsilon_std)
    return z_mean + K.exp(z_log_var) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_var])` 
Example #23
Source File: variational_autoencoder.py    From pCVR with Apache License 2.0
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(batch_size, latent_dim), mean=0.,
                              stddev=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend 
Example #24
Source File: variational_autoencoder_deconv.py    From DeepLearning_Wavelet-LSTM with MIT License
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
                              mean=0., stddev=epsilon_std)
    return z_mean + K.exp(z_log_var) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_var])` 