Python keras.backend.random_uniform() Examples

The following are 23 code examples of keras.backend.random_uniform(), collected from open-source projects. The source file, project, and license are noted above each example.
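For reference, K.random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None) returns a tensor of the given shape filled with samples drawn uniformly from [minval, maxval). A minimal sketch of a call, assuming a TensorFlow backend:

from keras import backend as K

# Draw a 2x3 tensor of samples from the uniform distribution on [-1, 1).
samples = K.random_uniform((2, 3), minval=-1.0, maxval=1.0)

# Under the TensorFlow backend, K.eval() materializes the tensor as a numpy array.
print(K.eval(samples))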
Example #1
Source File: metrics.py    From voxelmorph with GNU General Public License v3.0
def loss(self, y_true, y_pred):

        # get the value for the true and fake images
        disc_true = self.disc(y_true)
        disc_pred = self.disc(y_pred)

        # sample x_hat along the line between the true and predicted images
        # z = tf.placeholder(tf.float32, shape=[None, 1])
        # shp = y_true.get_shape()[0]
        # WARNING: SHOULD REALLY BE shape=[batch_size, 1] !!!
        # self.batch_size does not work, since it's not None!!!
        alpha = K.random_uniform(shape=[K.shape(y_pred)[0], 1, 1, 1])
        diff = y_pred - y_true
        interp = y_true + alpha * diff

        # take gradient of D(x_hat)
        gradients = K.gradients(self.disc(interp), [interp])[0]
        grad_pen = K.mean(K.square(K.sqrt(K.sum(K.square(gradients), axis=1))-1))

        # compute loss
        return (K.mean(disc_pred) - K.mean(disc_true)) + self.lambda_gp * grad_pen 
Example #2
Source File: temporal_mean_rate_theano.py    From snn_toolbox with MIT License
def softmax_activation(self, mem):
        """Softmax activation."""

        # spiking_samples = k.less_equal(k.random_uniform([self.config.getint(
        #     'simulation', 'batch_size'), 1]), 300 * self.dt / 1000.)
        # spiking_neurons = k.T.repeat(spiking_samples, 10, axis=1)
        # activ = k.T.nnet.softmax(mem)
        # max_activ = k.max(activ, axis=1, keepdims=True)
        # output_spikes = k.equal(activ, max_activ).astype(k.floatx())
        # output_spikes = k.T.set_subtensor(output_spikes[k.equal(
        #     spiking_neurons, 0).nonzero()], 0.)
        # new_and_reset_mem = k.T.set_subtensor(mem[spiking_neurons.nonzero()],
        #                                       0.)
        # self.add_update([(self.mem, new_and_reset_mem)])
        # return output_spikes

        return k.T.mul(k.less_equal(k.random_uniform(mem.shape),
                                    k.softmax(mem)), self.v_thresh) 
Example #3
Source File: per_sample_dropout.py    From perfect_match with MIT License
def call(self, inputs, training=None):
        def dropped_inputs():
            # self.rate holds one dropout rate per sample, so keep_prob has
            # shape (batch_size,).
            keep_prob = 1. - self.rate
            # Tile and reshape the per-sample keep probabilities so they cover
            # the feature axis, giving keep_prob shape (batch_size, num_features).
            tile_shape = tf.expand_dims(tf.shape(inputs)[-1], axis=0)
            tiled_keep_prob = K.tile(keep_prob, tile_shape)
            keep_prob = tf.transpose(K.reshape(tiled_keep_prob, [tile_shape[0], tf.shape(keep_prob)[0]]))
            # floor(keep_prob + U(0, 1)) is 1 with probability keep_prob, else 0.
            binary_tensor = tf.floor(keep_prob + K.random_uniform(shape=tf.shape(inputs)))
            return inputs * binary_tensor
        return K.in_train_phase(dropped_inputs, inputs,
                                training=training)
Example #4
Source File: tgru_k2_gpu.py    From chemical_vae with Apache License 2.0
def get_initial_states(self, x):
        # build an all-zero tensor of shape [(samples, output_dim), (samples, output_dim)]
        initial_state = K.zeros_like(x)  # (samples, timesteps, input_dim)
        initial_state = K.sum(initial_state, axis=1)  # (samples, input_dim)
        reducer = K.random_uniform((self.input_dim, self.units))
        reducer = reducer / K.exp(reducer)

        initial_state = K.dot(initial_state, reducer)  # (samples, output_dim)
        initial_states = [K.stack([initial_state, initial_state]) for _ in range(len(self.states))]
        return initial_states 
Example #5
Source File: cyclegan.py    From MLPrimitives with MIT License
def _merge_function(self, inputs):
        alpha = K.random_uniform((64, 1, 1))
        return (alpha * inputs[0]) + ((1 - alpha) * inputs[1]) 
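This _merge_function pattern recurs in several of the WGAN-GP examples below: a merge layer draws one uniform weight per sample (here with a hard-coded batch size of 64) and returns the corresponding point on the straight line between the real and generated batches, which is where the gradient penalty is evaluated.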
Example #6
Source File: samplers.py    From DeepIV with MIT License
def random_laplace(shape, mu=0., b=1.):
    '''
    Draw random samples from a Laplace distribution.

    See: https://en.wikipedia.org/wiki/Laplace_distribution#Generating_random_variables_according_to_the_Laplace_distribution
    '''
    U = K.random_uniform(shape, -0.5, 0.5)
    return mu - b * K.sign(U) * K.log(1 - 2 * K.abs(U)) 
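A quick sanity check of the sampler (the sample count is illustrative; assumes a TensorFlow backend so K.eval() can materialize the result). Since E|X| = b for a Laplace(mu=0, b) variable, the empirical mean absolute value should be close to 1:

samples = random_laplace((100000, 1))    # Laplace(mu=0, b=1)
print(K.eval(K.mean(K.abs(samples))))    # approximately 1.0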
Example #7
Source File: wide_residual_network.py    From AnomalyDetectionTransformations with MIT License
def _dense_kernel_initializer(shape, dtype=None):
    fan_in, fan_out = _compute_fans(shape)
    stddev = 1. / np.sqrt(fan_in)
    return K.random_uniform(shape, -stddev, stddev, dtype) 
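_compute_fans is not shown in this excerpt; it mirrors the private helper of the same name in keras.initializers. A minimal sketch covering only the 2D dense-kernel case (an assumption; the real helper also handles convolutional kernel shapes):

def _compute_fans(shape):
    # For a dense kernel of shape (input_dim, output_dim), fan_in is the
    # number of inputs feeding each unit and fan_out the number of units.
    fan_in, fan_out = shape[0], shape[1]
    return fan_in, fan_out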
Example #8
Source File: adversarial_utils.py    From keras-adversarial with MIT License
def uniform_latent_sampling(latent_shape, low=0.0, high=1.0):
    """
    Sample from uniform distribution
    :param latent_shape: batch shape
    :return: normal samples, shape=(n,)+latent_shape
    """
    return Lambda(lambda x: K.random_uniform((K.shape(x)[0],) + latent_shape, low, high),
                  output_shape=lambda x: ((x[0],) + latent_shape)) 
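A usage sketch (the latent shape and the input tensor name are illustrative): the Lambda layer reads the batch size from whatever tensor it is called on, so every batch draws a matching number of fresh uniform latents.

sample_z = uniform_latent_sampling((100,), low=-1.0, high=1.0)
z = sample_z(real_images)  # z has shape (batch_size, 100), resampled on every call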
Example #9
Source File: resnet_wgan_gp_cifar10_train.py    From Hands-On-Generative-Adversarial-Networks-with-Keras with MIT License
def _merge_function(self, inputs):
        weights = K.random_uniform((BATCH_SIZE, 1, 1, 1))
        return (weights * inputs[0]) + ((1 - weights) * inputs[1]) 
Example #10
Source File: train_wgan_gp.py    From Hands-On-Generative-Adversarial-Networks-with-Keras with MIT License
def _merge_function(self, inputs):
        weights = K.random_uniform((BATCH_SIZE, 1, 1, 1))
        return (weights * inputs[0]) + ((1 - weights) * inputs[1]) 
Example #11
Source File: train_wgan_gp.py    From Hands-On-Generative-Adversarial-Networks-with-Keras with MIT License
def _merge_function(self, inputs):
        weights = K.random_uniform((BATCH_SIZE, 1, 1, 1))
        return (weights * inputs[0]) + ((1 - weights) * inputs[1]) 
Example #12
Source File: train_wgan_gp.py    From Hands-On-Generative-Adversarial-Networks-with-Keras with MIT License
def _merge_function(self, inputs):
        weights = K.random_uniform((BATCH_SIZE, 1, 1, 1))
        return (weights * inputs[0]) + ((1 - weights) * inputs[1]) 
Example #13
Source File: layers.py    From Keras-progressive_growing_of_gans with MIT License
def __init__(self, mode='mul', strength=0.4, axes=(0, 3), normalize=False, **kwargs):
        super(GDropLayer, self).__init__(**kwargs)
        assert mode in ('drop', 'mul', 'prop')
        #self.random     = K.random_uniform(1, minval=1, maxval=2147462579, dtype=tf.float32, seed=None, name=None)
        self.mode       = mode
        self.strength   = strength
        self.axes       = [axes] if isinstance(axes, int) else list(axes)
        self.normalize  = normalize # If true, retain overall signal variance.
        self.gain       = None      # For experimentation. 
Example #14
Source File: custom_layers.py    From inpainting-gmcnn-keras with MIT License
def _merge_function(self, inputs):
    weights = K.random_uniform((1, 1, 1, 1))
    return (weights * inputs[0]) + ((1 - weights) * inputs[1]) 
Example #15
Source File: core.py    From gandlf with MIT License
def call(self, x, mask=None):
        sims = []
        for n, sim in zip(self.n, self.similarities):
            for _ in range(n):
                batch_size = K.shape(x)[0]
                # Draw one random partner index per sample (note the Keras 1.x
                # low/high arguments; Keras 2 renamed them minval/maxval).
                idx = K.random_uniform((batch_size,), low=0, high=batch_size,
                                       dtype='int32')
                x_shuffled = K.gather(x, idx)
                pair_sim = sim(x, x_shuffled)
                for _ in range(K.ndim(x) - 1):
                    pair_sim = K.expand_dims(pair_sim, dim=1)
                sims.append(pair_sim)

        return K.concatenate(sims, axis=-1) 
Example #16
Source File: improved_wgan.py    From keras-contrib with MIT License
def _merge_function(self, inputs):
        weights = K.random_uniform((BATCH_SIZE, 1, 1, 1))
        return (weights * inputs[0]) + ((1 - weights) * inputs[1]) 
Example #17
Source File: run_wgan-gp_se.py    From se_relativisticgan with MIT License
def _merge_function(self, inputs):
        weights = K.random_uniform((BATCH_SIZE, 1, 1))
        return (weights * inputs[0]) + ((1 - weights) * inputs[1]) 
Example #18
Source File: run_rsgan-gp_se.py    From se_relativisticgan with MIT License
def _merge_function(self, inputs):
        weights = K.random_uniform((BATCH_SIZE, 1, 1))
        return (weights * inputs[0]) + ((1 - weights) * inputs[1]) 
Example #19
Source File: StarGAN.py    From StarGAN-Keras with MIT License
def _merge_function(self, inputs):
        alpha = K.random_uniform((self.bs, 1, 1, 1))
        return (alpha * inputs[0]) + ((1 - alpha) * inputs[1]) 
Example #20
Source File: custom_objects.py    From keras-efficientnets with MIT License
def call(self, inputs, training=None):

        def drop_connect():
            keep_prob = 1.0 - self.drop_connect_rate

            # Compute the drop-connect mask: keep_prob + U(0, 1) floors to 1
            # with probability keep_prob, giving a per-example Bernoulli mask.
            batch_size = tf.shape(inputs)[0]
            random_tensor = keep_prob
            random_tensor += K.random_uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)
            binary_tensor = tf.floor(random_tensor)
            output = (inputs / keep_prob) * binary_tensor
            return output

        return K.in_train_phase(drop_connect, inputs, training=training) 
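A quick numpy check of the rescaling (values are illustrative): because surviving examples are scaled by 1/keep_prob, the expected output equals the input, so no correction is needed at inference time.

import numpy as np

keep_prob = 0.8
x = np.ones((10000, 1, 1, 1), dtype=np.float32)
mask = np.floor(keep_prob + np.random.uniform(size=x.shape))
print(((x / keep_prob) * mask).mean())  # close to 1.0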
Example #21
Source File: custom_objects.py    From keras-efficientnets with MIT License
def __call__(self, shape, dtype=None):
        dtype = dtype or K.floatx()

        init_range = 1.0 / np.sqrt(shape[1])
        return K.random_uniform(shape, -init_range, init_range, dtype=dtype)


# Obtained from https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py 
Example #22
Source File: wgan_gp.py    From Keras-GAN with MIT License
def _merge_function(self, inputs):
        alpha = K.random_uniform((32, 1, 1, 1))
        return (alpha * inputs[0]) + ((1 - alpha) * inputs[1]) 
Example #23
Source File: temporal_mean_rate_theano.py    From snn_toolbox with MIT License
def init_membrane_potential(self, output_shape=None, mode='zero'):
        """Initialize membrane potential.

        Helpful for avoiding a transient response at the beginning of the
        simulation. Not needed when the reset between frames is turned off,
        e.g. with a video data set.

        Parameters
        ----------

        output_shape: Optional[tuple]
            Output shape
        mode: str
            Initialization mode.

            - ``'uniform'``: Random numbers from uniform distribution in
              ``[-thr, thr]``.
            - ``'bias'``: Negative bias.
            - ``'zero'``: Zero (default).

        Returns
        -------

        init_mem: ndarray
            A tensor of ``self.output_shape`` (same as layer).
        """

        if output_shape is None:
            output_shape = self.output_shape

        if mode == 'uniform':
            init_mem = k.random_uniform(output_shape,
                                        -self._v_thresh, self._v_thresh)
        elif mode == 'bias':
            init_mem = np.zeros(output_shape, k.floatx())
            if hasattr(self, 'b'):
                b = self.get_weights()[1]
                for i in range(len(b)):
                    init_mem[:, i, Ellipsis] = -b[i]
        else:  # mode == 'zero':
            init_mem = np.zeros(output_shape, k.floatx())
        return init_mem