Python keras.backend.cumsum() Examples

The following are 16 code examples of keras.backend.cumsum(), drawn from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the module keras.backend, or try the search function.
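Before diving into the project examples, here is a minimal sketch of what keras.backend.cumsum() computes (this assumes a working Keras installation with a TensorFlow backend; the numbers are made up for illustration):

from keras import backend as K

x = K.constant([[1., 2., 3.],
                [4., 5., 6.]])
# Cumulative sum along the last axis: each entry becomes the running
# total of the entries up to and including it.
print(K.eval(K.cumsum(x, axis=-1)))
# [[ 1.  3.  6.]
#  [ 4.  9. 15.]]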
Example #1
Source File: self_attention.py    From nlp_toolkit with MIT License
def Mask(self, inputs, seq_len, mode='mul'):
        """
        # Arguments:
            inputs: input tensor with shape (batch_size, seq_len, input_size)
            seq_len: each sequence's actual length, with shape (batch_size, 1)
            mode:
                mul: zero out the padded timesteps; use before a fully-connected layer
                add: subtract a large constant from the padded timesteps; use before a softmax layer
        # Returns:
            A masked tensor with the same shape as the input tensor
        """
        if seq_len is None:
            return inputs
        else:
            mask = K.one_hot(seq_len[:, 0], K.shape(inputs)[1])
            mask = 1 - K.cumsum(mask, 1)
            for _ in range(len(inputs.shape) - 2):
                mask = K.expand_dims(mask, 2)
            if mode == 'mul':
                return inputs * mask
            if mode == 'add':
                return inputs - (1 - mask) * 1e12 
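The masking idiom above is compact enough to deserve a note: K.one_hot puts a 1 at each sequence's first padded timestep, K.cumsum spreads that 1 to every later timestep, and 1 - cumsum flips the result into a mask that is 1 on valid positions and 0 on padding. A standalone sketch (the lengths and max length are made up):

from keras import backend as K

seq_len = K.constant([[2], [3]], dtype='int32')  # true lengths, shape (batch_size, 1)
one_hot = K.one_hot(seq_len[:, 0], 4)            # 1 at the first padded timestep
mask = 1 - K.cumsum(one_hot, 1)                  # 1 on valid steps, 0 on padding
print(K.eval(mask))
# [[1. 1. 0. 0.]
#  [1. 1. 1. 0.]]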
Example #2
Source File: position_embedding.py    From nlp_toolkit with MIT License
def call(self, x):
        if (self.size is None) or (self.mode == 'sum'):
            self.size = int(x.shape[-1])
        batch_size, seq_len = K.shape(x)[0], K.shape(x)[1]
        position_j = 1. / K.pow(10000.,
                                2 * K.arange(self.size / 2, dtype='float32'
                                             ) / self.size)
        position_j = K.expand_dims(position_j, 0)
        # K.arange does not support a variable-length range, so the positions are generated this way instead
        position_i = K.cumsum(K.ones_like(x[:, :, 0]), 1) - 1
        position_i = K.expand_dims(position_i, 2)
        position_ij = K.dot(position_i, position_j)
        position_ij = K.concatenate(
            [K.cos(position_ij), K.sin(position_ij)], 2)
        if self.mode == 'sum':
            return position_ij + x
        elif self.mode == 'concat':
            return K.concatenate([position_ij, x], 2) 
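The position_i line is the trick the (translated) comment refers to: K.arange cannot produce a range whose length is only known at graph-execution time, so a cumulative sum of ones, minus one, builds the indices 0, 1, ..., seq_len - 1 for every batch row. A standalone sketch with made-up shapes:

from keras import backend as K

x = K.constant(0.0, shape=(2, 5, 8))  # stand-in for a (batch, seq_len, size) input
position_i = K.cumsum(K.ones_like(x[:, :, 0]), 1) - 1
print(K.eval(position_i))
# [[0. 1. 2. 3. 4.]
#  [0. 1. 2. 3. 4.]]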
Example #3
Source File: tgru_k2_gpu.py    From chemical_vae with Apache License 2.0
def output_sampling(self, output, rand_matrix):
        # Generates a sampled selection based on raw output state vector
        # Creates a cdf vector and compares against a randomly generated vector
        # Requires a pre-generated rand_matrix (i.e. generated outside step function)

        sampled_output = output / K.sum(output, axis=-1, keepdims=True)  # (batch_size, self.units)
        mod_sampled_output = sampled_output / K.exp(self.temperature)
        norm_exp_sampled_output = mod_sampled_output / K.sum(mod_sampled_output, axis=-1, keepdims=True)

        cdf_vector = K.cumsum(norm_exp_sampled_output, axis=-1)
        cdf_minus_vector = cdf_vector - norm_exp_sampled_output

        rand_matrix = K.stack([rand_matrix], axis=0)
        rand_matrix = K.stack([rand_matrix], axis=2)

        compared_greater_output = K.cast(K.greater(cdf_vector, rand_matrix), dtype='float32')
        compared_lesser_output = K.cast(K.less(cdf_minus_vector, rand_matrix), dtype='float32')

        final_output = compared_greater_output * compared_lesser_output
        return final_output 
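The two comparisons implement inverse-CDF (categorical) sampling: a uniform draw r lands in exactly one interval [cdf - p, cdf), and the elementwise AND of the comparisons picks out that interval as a one-hot vector. A NumPy sketch of the same idea with toy probabilities (not the project's code):

import numpy as np

p = np.array([0.2, 0.5, 0.3])           # normalized probabilities
cdf = np.cumsum(p)                      # [0.2, 0.7, 1.0]
cdf_minus = cdf - p                     # [0.0, 0.2, 0.7]
r = 0.65                                # pretend uniform(0, 1) draw
one_hot = (cdf > r) & (cdf_minus < r)   # True only in the bucket containing r
print(one_hot.astype('float32'))        # [0. 1. 0.] -> category 1 sampled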
Example #4
Source File: model.py    From attention-is-all-you-need-keras with MIT License
def _get_pos_seq(x, null_token_value=0):
    mask = K.cast(K.not_equal(x, null_token_value), 'float32')
    pos = K.cumsum(K.ones_like(x, 'float32'), 1)
    return pos * mask 
Example #5
Source File: pretrain_inception_resnet.py    From neural-image-assessment with MIT License
def earth_mover_loss(y_true, y_pred):
    cdf_ytrue = K.cumsum(y_true, axis=-1)
    cdf_ypred = K.cumsum(y_pred, axis=-1)
    samplewise_emd = K.sqrt(K.mean(K.square(K.abs(cdf_ytrue - cdf_ypred)), axis=-1))
    return K.mean(samplewise_emd) 
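This loss (repeated verbatim in the next few training/pretraining scripts of the same project) compares the two score distributions through their CDFs, a squared earth mover's distance approximation. A quick NumPy check with made-up distributions:

import numpy as np

y_true = np.array([[0.1, 0.6, 0.3]])
y_pred = np.array([[0.2, 0.5, 0.3]])
cdf_true = np.cumsum(y_true, axis=-1)   # [[0.1, 0.7, 1.0]]
cdf_pred = np.cumsum(y_pred, axis=-1)   # [[0.2, 0.7, 1.0]]
emd = np.sqrt(np.mean((cdf_true - cdf_pred) ** 2, axis=-1))
print(emd)                              # [0.0577...] = sqrt(mean([0.01, 0, 0]))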
Example #6
Source File: train_nasnet_mobile.py    From neural-image-assessment with MIT License
def earth_mover_loss(y_true, y_pred):
    cdf_ytrue = K.cumsum(y_true, axis=-1)
    cdf_ypred = K.cumsum(y_pred, axis=-1)
    samplewise_emd = K.sqrt(K.mean(K.square(K.abs(cdf_ytrue - cdf_ypred)), axis=-1))
    return K.mean(samplewise_emd) 
Example #7
Source File: pretrain_nasnet_mobile.py    From neural-image-assessment with MIT License
def earth_mover_loss(y_true, y_pred):
    cdf_ytrue = K.cumsum(y_true, axis=-1)
    cdf_ypred = K.cumsum(y_pred, axis=-1)
    samplewise_emd = K.sqrt(K.mean(K.square(K.abs(cdf_ytrue - cdf_ypred)), axis=-1))
    return K.mean(samplewise_emd) 
Example #8
Source File: train_inception_resnet.py    From neural-image-assessment with MIT License
def earth_mover_loss(y_true, y_pred):
    cdf_ytrue = K.cumsum(y_true, axis=-1)
    cdf_ypred = K.cumsum(y_pred, axis=-1)
    samplewise_emd = K.sqrt(K.mean(K.square(K.abs(cdf_ytrue - cdf_ypred)), axis=-1))
    return K.mean(samplewise_emd) 
Example #9
Source File: pretrain_nasnet_large.py    From neural-image-assessment with MIT License
def earth_mover_loss(y_true, y_pred):
    cdf_ytrue = K.cumsum(y_true, axis=-1)
    cdf_ypred = K.cumsum(y_pred, axis=-1)
    samplewise_emd = K.sqrt(K.mean(K.square(K.abs(cdf_ytrue - cdf_ypred)), axis=-1))
    return K.mean(samplewise_emd) 
Example #10
Source File: train_mobilenet.py    From neural-image-assessment with MIT License
def earth_mover_loss(y_true, y_pred):
    cdf_ytrue = K.cumsum(y_true, axis=-1)
    cdf_ypred = K.cumsum(y_pred, axis=-1)
    samplewise_emd = K.sqrt(K.mean(K.square(K.abs(cdf_ytrue - cdf_ypred)), axis=-1))
    return K.mean(samplewise_emd) 
Example #11
Source File: positional_encoder.py    From deep_qa with Apache License 2.0
def call(self, inputs, mask=None):
        # pylint: disable=redefined-variable-type

        # This section applies the positional encoder to all the vectors at once.
        # The general idea is to use matrices of ones, shaped like `inputs`, to
        # build an index for each word.

        if mask is None:
            ones_like_x = K.ones_like(inputs)
        else:
            float_mask = K.cast(mask, 'float32')
            ones_like_x = K.ones_like(inputs) * K.expand_dims(float_mask, 2)

        # This is an odd way to get the number of words (i.e. the size of the word
        # dimension of inputs). However, if the input is masked, reading the dimension
        # directly does not give the correct number of words, so we instead sum over a
        # row of ones to which the mask has been applied.
        masked_m = K.expand_dims(K.sum(ones_like_x, 1), 1)

        if mask is None:
            one_over_m = ones_like_x / masked_m
            j_index = K.cumsum(ones_like_x, 1)
        else:
            one_over_m = switch(ones_like_x, ones_like_x/masked_m, K.zeros_like(ones_like_x))

            j_index = K.cumsum(ones_like_x, 1) * K.expand_dims(float_mask, 2)

        k_over_d = K.cumsum(ones_like_x, 2) * 1.0/K.cast(K.shape(inputs)[2], 'float32')

        l_weighting_vectors = (ones_like_x - (j_index * one_over_m)) - \
                              (k_over_d * (ones_like_x - 2 * j_index * one_over_m))

        return K.sum(l_weighting_vectors * inputs, 1) 
Example #12
Source File: envelope.py    From deep_qa with Apache License 2.0
def call(self, inputs, mask=None):
        span_begin, span_end = inputs
        after_span_begin = K.cumsum(span_begin, axis=-1)
        after_span_end = K.cumsum(span_end, axis=-1)
        before_span_end = 1.0 - after_span_end
        return after_span_begin * before_span_end 
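Given (one-hot or soft) begin and end distributions over positions, the cumulative sums form an envelope: cumsum(span_begin) is 1 at and after the begin index, and 1 - cumsum(span_end) is 1 strictly before the end index, so their product marks the span between them. A NumPy sketch with made-up positions:

import numpy as np

span_begin = np.array([0., 1., 0., 0., 0.])  # begin predicted at index 1
span_end   = np.array([0., 0., 0., 1., 0.])  # end predicted at index 3
envelope = np.cumsum(span_begin) * (1.0 - np.cumsum(span_end))
print(envelope)                              # [0. 1. 1. 0. 0.]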
Example #13
Source File: core.py    From transformer-keras with Apache License 2.0
def sequence_mask(seq):
    """

    :param seq: shape of [N, T_q]
    :return:
    """
    seq_len = K.shape(seq)[1]
    batch_size = K.shape(seq)[:1]
    return K.cast(K.cumsum(tf.eye(seq_len, batch_shape=batch_size), axis=1), dtype='float32') 
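Why this works: cumulatively summing an identity matrix down its rows fills in everything on or below the diagonal, giving the lower-triangular look-ahead mask used for causal self-attention. A small check (assumes a TensorFlow backend so that tf.eye and K.eval are available):

import tensorflow as tf
from keras import backend as K

mask = K.cumsum(tf.eye(3, batch_shape=[1]), axis=1)
print(K.eval(mask)[0])
# [[1. 0. 0.]
#  [1. 1. 0.]
#  [1. 1. 1.]]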
Example #14
Source File: core.py    From transformer-keras with Apache License 2.0
def get_pos_seq(self, x):
        mask = K.cast(K.not_equal(x, 0), dtype="int32")
        pos = K.cumsum(K.ones_like(x, dtype='int32'), axis=1)
        return mask * pos 
Example #15
Source File: attention.py    From Self-Attention-Keras with Apache License 2.0
def call(self, x):
        if (self.size is None) or (self.mode == 'sum'):
            self.size = int(x.shape[-1])
        batch_size, seq_len = K.shape(x)[0], K.shape(x)[1]
        position_j = 1. / K.pow(10000., 2 * K.arange(self.size / 2, dtype='float32') / self.size)
        position_j = K.expand_dims(position_j, 0)
        position_i = K.cumsum(K.ones_like(x[:, :, 0]), 1) - 1  # K.arange does not support a variable-length range, so the positions are generated this way instead
        position_i = K.expand_dims(position_i, 2)
        position_ij = K.dot(position_i, position_j)
        position_ij = K.concatenate([K.cos(position_ij), K.sin(position_ij)], 2)
        if self.mode == 'sum':
            return position_ij + x
        elif self.mode == 'concat':
            return K.concatenate([position_ij, x], 2) 
Example #16
Source File: attention.py    From Self-Attention-Keras with Apache License 2.0
def Mask(self, inputs, seq_len, mode='mul'):
        if seq_len is None:
            return inputs
        else:
            mask = K.one_hot(seq_len[:, 0], K.shape(inputs)[1])
            mask = 1 - K.cumsum(mask, 1)
            for _ in range(len(inputs.shape) - 2):
                mask = K.expand_dims(mask, 2)
            if mode == 'mul':
                return inputs * mask
            if mode == 'add':
                return inputs - (1 - mask) * 1e12