Python keras.backend.zeros_like() Examples

The following are 30 code examples of keras.backend.zeros_like(), drawn from open-source projects. The project and source file for each example are listed in its header. You may also want to check out all available functions and classes of the keras.backend module, or try the search function.
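As a quick orientation before the project examples, here is a minimal, self-contained sketch (assuming a Keras 2.x install with the TensorFlow backend; not taken from any of the projects below). K.zeros_like(x) returns an all-zeros tensor with the same shape and dtype as its argument:

import numpy as np
from keras import backend as K

x = K.variable(np.arange(6, dtype='float32').reshape(2, 3))
z = K.zeros_like(x)   # zeros with the same shape/dtype as x
print(K.eval(z))      # [[0. 0. 0.]
                      #  [0. 0. 0.]]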
Example #1
Source File: load_trained_models.py    From Coloring-greyscale-images with MIT License    6 votes
def __init__(self,
             resource_path='./resources/',
             learning_rate=0.0002,
             decay_rate=2e-6,
             gpus=1):

        self.gpus = gpus
        self.learning_rate = learning_rate
        self.decay_rate = decay_rate

        def zero_loss(y_true, y_pred):
            return K.zeros_like(y_true)

        discriminator_full = load_model(
            resource_path + 'discriminator_full.h5',
            custom_objects={'Conv2D_r': Conv2D_r,
                            'InstanceNormalization': InstanceNormalization,
                            'tf': tf,
                            'zero_loss': zero_loss,
                            'ConvSN2D': ConvSN2D,
                            'DenseSN': DenseSN})

        discriminator_full.trainable = True
        discriminator_full.name = "discriminator_full"

        self.model = discriminator_full
        self.save_model = discriminator_full
Example #2
Source File: evaluation_metrics.py    From deep-mlsa with Apache License 2.0    6 votes
def precision_keras(y_true, y_pred):
    #convert probas to a 0/1 one-hot matrix; symbolic tensors do not support
    #item assignment, so a one-hot mask is added onto the zeros instead
    #(assumes the number of classes is statically known)
    y_pred_ones = K.zeros_like(y_true) + K.one_hot(K.argmax(y_pred, axis=-1),
                                                   K.int_shape(y_true)[-1])

    #where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true*y_pred_ones, axis=0)

    #for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    #precision for each class (zero where the class was never predicted)
    precision = K.switch(K.equal(pred_cnt, 0), K.zeros_like(pred_cnt),
                         y_true_pred/pred_cnt)

    #return average precision over all classes
    return K.mean(precision)
Example #3
Source File: crf.py    From keras-contrib with MIT License    6 votes
def call(self, X, mask=None):
        if mask is not None:
            assert K.ndim(mask) == 2, 'Input mask to CRF must have dim 2 if not None'

        if self.test_mode == 'viterbi':
            test_output = self.viterbi_decoding(X, mask)
        else:
            test_output = self.get_marginal_prob(X, mask)

        self.uses_learning_phase = True
        if self.learn_mode == 'join':
            train_output = K.zeros_like(K.dot(X, self.kernel))
            out = K.in_train_phase(train_output, test_output)
        else:
            if self.test_mode == 'viterbi':
                train_output = self.get_marginal_prob(X, mask)
                out = K.in_train_phase(train_output, test_output)
            else:
                out = test_output
        return out 
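The K.zeros_like(K.dot(X, self.kernel)) line above is a shape trick: in joint learning mode the CRF loss is computed from the layer's internal energies, so during the training phase the layer only needs a dummy output of the right shape, which zeros_like of the dot product provides for free. A minimal sketch of the idea (hypothetical shapes, Keras 2.x with the TensorFlow backend):

from keras import backend as K

X = K.placeholder(shape=(None, 7, 5))    # (batch, timesteps, input_dim)
kernel = K.ones((5, 4))                  # (input_dim, n_classes)
dummy = K.zeros_like(K.dot(X, kernel))   # all zeros, shaped like the CRF scores
print(K.int_shape(dummy))                # (None, 7, 4)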
Example #4
Source File: model_loss.py    From speech_separation with MIT License    6 votes
def audio_discriminate_loss2(gamma=0.1, beta=2 * 0.1, num_speaker=2):
    def loss_func(S_true, S_pred, gamma=gamma, beta=beta, num_speaker=num_speaker):
        sum_mtr = K.zeros_like(S_true[:, :, :, :, 0])
        for i in range(num_speaker):
            sum_mtr += K.square(S_true[:, :, :, :, i] - S_pred[:, :, :, :, i])
            for j in range(num_speaker):
                if i != j:
                    sum_mtr -= gamma * K.square(S_true[:, :, :, :, i] - S_pred[:, :, :, :, j])

        # cross-speaker penalty terms from the original source, currently disabled:
        # for i in range(num_speaker):
        #     for j in range(i + 1, num_speaker):
        #         sum_mtr -= beta * K.square(S_pred[:, :, :, i] - S_pred[:, :, :, j])
        #         sum_mtr += beta * K.square(S_true[:, :, :, :, i] - S_true[:, :, :, :, j])
        # sum = K.sum(K.maximum(K.flatten(sum_mtr), 0))

        loss = K.mean(K.flatten(sum_mtr))

        return loss
    return loss_func
Example #5
Source File: multi_perspective_layer.py    From MatchZoo with Apache License 2.0    6 votes
def collect_final_step_of_lstm(lstm_representation, lengths):
    """
    Collect final step of lstm.

    :param lstm_representation: [batch_size, len_rt, dim]
    :param lengths: [batch_size]
    :return: [batch_size, dim]
    """
    lengths = tf.maximum(lengths, K.zeros_like(lengths))

    batch_size = tf.shape(lengths)[0]
    # shape (batch_size)
    batch_nums = tf.range(0, limit=batch_size)
    # shape (batch_size, 2)
    indices = tf.stack((batch_nums, lengths), axis=1)
    result = tf.gather_nd(lstm_representation, indices,
                          name='last-forward-lstm')
    # [batch_size, dim]
    return result 
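For intuition, a standalone sketch of the gather pattern above (TensorFlow 1.x style, made-up sizes; not from the MatchZoo source): lengths holds the index of the final valid step of each sequence, and gather_nd pulls out that row per batch element.

import numpy as np
import tensorflow as tf

reps = tf.constant(np.arange(24, dtype='float32').reshape(2, 3, 4))  # [batch, len, dim]
lengths = tf.constant([2, 0])                         # final valid step per sequence
batch_nums = tf.range(0, limit=tf.shape(lengths)[0])  # [0, 1]
indices = tf.stack((batch_nums, lengths), axis=1)     # [[0, 2], [1, 0]]
result = tf.gather_nd(reps, indices)                  # shape [2, 4]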
Example #6
Source File: evaluation_metrics_tf.py    From deep-mlsa with Apache License 2.0    6 votes
def precision_keras(y_true, y_pred):
    # convert probas to a 0/1 one-hot matrix; symbolic tensors do not support
    # item assignment, so a one-hot mask is added onto the zeros instead
    # (assumes the number of classes is statically known)
    y_pred_ones = K.zeros_like(y_true) + K.one_hot(K.argmax(y_pred, axis=-1),
                                                   K.int_shape(y_true)[-1])

    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # precision for each class (zero where the class was never predicted)
    precision = K.switch(K.equal(pred_cnt, 0), K.zeros_like(pred_cnt),
                         y_true_pred / pred_cnt)

    # return average precision over all classes
    return K.mean(precision)
Example #7
Source File: evaluation_metrics_tf.py    From deep-mlsa with Apache License 2.0    6 votes
def f1_score_taskB(y_true, y_pred):
    # convert probas to a 0/1 one-hot matrix; symbolic tensors do not support
    # item assignment, so a one-hot mask is added onto the zeros instead
    # (assumes the number of classes is statically known)
    y_pred_ones = K.zeros_like(y_true) + K.one_hot(K.argmax(y_pred, axis=-1),
                                                   K.int_shape(y_true)[-1])

    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class
    precision = K.switch(K.equal(pred_cnt, 0), K.zeros_like(pred_cnt),
                         y_true_pred / pred_cnt)

    # recall for each class
    recall = K.switch(K.equal(gold_cnt, 0), K.zeros_like(gold_cnt),
                      y_true_pred / gold_cnt)

    # f1 for each class
    f1_class = K.switch(K.equal(precision + recall, 0), K.zeros_like(precision),
                        2 * (precision * recall) / (precision + recall))

    # return the f1 score of each class
    return f1_class
Example #8
Source File: evaluation_metrics_theano.py    From deep-mlsa with Apache License 2.0    6 votes
def precision_keras(y_true, y_pred):
    #convert probas to 0,1; Theano tensors do not support item assignment,
    #so use set_subtensor as in the other metrics of this file
    y_ppred = K.zeros_like(y_true)
    y_pred_ones = K.T.set_subtensor(y_ppred[K.T.arange(y_true.shape[0]), K.argmax(y_pred, axis=-1)], 1)

    #where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true*y_pred_ones, axis=0)

    #for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    #precision for each class
    precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred/pred_cnt)

    #return average precision over all classes
    return K.mean(precision)
Example #9
Source File: evaluation_metrics_theano.py    From deep-mlsa with Apache License 2.0    6 votes
def f1_score_semeval(y_true, y_pred):
    # convert probas to 0,1
    y_ppred = K.zeros_like(y_true)
    y_pred_ones = K.T.set_subtensor(y_ppred[K.T.arange(y_true.shape[0]), K.argmax(y_pred, axis=-1)], 1)

    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class
    precision = K.T.switch(K.T.eq(pred_cnt, 0), 0, y_true_pred / pred_cnt)

    # recall for each class
    recall = K.T.switch(K.T.eq(gold_cnt, 0), 0, y_true_pred / gold_cnt)

    # f1 for each class
    f1_class = K.T.switch(K.T.eq(precision + recall, 0), 0, 2 * (precision * recall) / (precision + recall))

    # return the average f1 score of classes 0 and 2
    return (f1_class[0] + f1_class[2])/2.0 
Example #10
Source File: evaluation_metrics_theano.py    From deep-mlsa with Apache License 2.0    6 votes
def f1_score_taskB(y_true, y_pred):
    #convert probas to 0,1; Theano tensors do not support item assignment,
    #so use set_subtensor as in the other metrics of this file
    y_ppred = K.zeros_like(y_true)
    y_pred_ones = K.T.set_subtensor(y_ppred[K.T.arange(y_true.shape[0]), K.argmax(y_pred, axis=-1)], 1)

    #where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true*y_pred_ones, axis=0)

    #for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    #for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    #precision for each class
    precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred/pred_cnt)

    #recall for each class
    recall = K.switch(K.equal(gold_cnt, 0), 0, y_true_pred/gold_cnt)

    #f1 for each class
    f1_class = K.switch(K.equal(precision + recall, 0), 0, 2*(precision*recall)/(precision+recall))

    #return the f1 score of each class
    return f1_class
Example #11
Source File: evaluation_metrics_theano.py    From deep-mlsa with Apache License 2.0    6 votes
def f1_score_keras(y_true, y_pred):
    #convert probas to 0,1
    y_ppred = K.zeros_like(y_true)
    y_pred_ones = K.T.set_subtensor(y_ppred[K.T.arange(y_true.shape[0]), K.argmax(y_pred, axis=-1)], 1)

    #where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true*y_pred_ones, axis=0)

    #for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    #for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    #precision for each class
    precision = K.T.switch(K.T.eq(pred_cnt, 0), 0, y_true_pred/pred_cnt)

    #recall for each class
    recall = K.T.switch(K.T.eq(gold_cnt, 0), 0, y_true_pred/gold_cnt)

    #f1 for each class
    f1_class = K.T.switch(K.T.eq(precision + recall, 0), 0, 2*(precision*recall)/(precision+recall))

    #return average f1 score over all classes
    return K.mean(f1_class) 
Example #12
Source File: ChainCRF.py    From naacl18-multitask_argument_mining with Apache License 2.0    6 votes
def _forward(x, reduce_step, initial_states, U, mask=None):
    '''Forward recurrence of the linear chain crf.'''

    def _forward_step(energy_matrix_t, states):
        alpha_tm1 = states[-1]
        new_states = reduce_step(K.expand_dims(alpha_tm1, 2) + energy_matrix_t)
        return new_states[0], new_states

    U_shared = K.expand_dims(K.expand_dims(U, 0), 0)

    if mask is not None:
        mask = K.cast(mask, K.floatx())
        mask_U = K.expand_dims(K.expand_dims(mask[:, :-1] * mask[:, 1:], 2), 3)
        U_shared = U_shared * mask_U

    inputs = K.expand_dims(x[:, 1:, :], 2) + U_shared
    inputs = K.concatenate([inputs, K.zeros_like(inputs[:, -1:, :, :])], axis=1)

    last, values, _ = K.rnn(_forward_step, inputs, initial_states)
    return last, values 
Example #13
Source File: ChainCRF.py    From elmo-bilstm-cnn-crf with Apache License 2.0    6 votes
def add_boundary_energy(x, b_start=None, b_end=None, mask=None):
    '''Given the observations x, add the start boundary energy b_start (resp.
    the end boundary energy b_end) to the start (resp. end) elements and
    multiply by the mask.'''
    if mask is None:
        if b_start is not None:
            x = K.concatenate([x[:, :1, :] + b_start, x[:, 1:, :]], axis=1)
        if b_end is not None:
            x = K.concatenate([x[:, :-1, :], x[:, -1:, :] + b_end], axis=1)
    else:
        mask = K.cast(mask, K.floatx())
        mask = K.expand_dims(mask, 2)
        x *= mask
        if b_start is not None:
            mask_r = K.concatenate([K.zeros_like(mask[:, :1]), mask[:, :-1]], axis=1)
            start_mask = K.cast(K.greater(mask, mask_r), K.floatx())
            x = x + start_mask * b_start
        if b_end is not None:
            mask_l = K.concatenate([mask[:, 1:], K.zeros_like(mask[:, -1:])], axis=1)
            end_mask = K.cast(K.greater(mask, mask_l), K.floatx())
            x = x + end_mask * b_end
    return x 
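To see what the masked branch computes, consider a small sketch (assumed Keras 2.x; made-up mask): comparing the mask against a right-shifted copy of itself flags the first valid timestep of each sequence, which is exactly where b_start is added.

import numpy as np
from keras import backend as K

mask = K.variable(np.array([[0, 1, 1, 1, 0]], dtype='float32'))
mask = K.expand_dims(mask, 2)
mask_r = K.concatenate([K.zeros_like(mask[:, :1]), mask[:, :-1]], axis=1)
start_mask = K.cast(K.greater(mask, mask_r), K.floatx())
print(K.eval(K.squeeze(start_mask, 2)))   # [[0. 1. 0. 0. 0.]]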
Example #14
Source File: ChainCRF.py    From naacl18-multitask_argument_mining with Apache License 2.0    6 votes
def add_boundary_energy(x, b_start=None, b_end=None, mask=None):
    '''Given the observations x, add the start boundary energy b_start (resp.
    the end boundary energy b_end) to the start (resp. end) elements and
    multiply by the mask.'''
    if mask is None:
        if b_start is not None:
            x = K.concatenate([x[:, :1, :] + b_start, x[:, 1:, :]], axis=1)
        if b_end is not None:
            x = K.concatenate([x[:, :-1, :], x[:, -1:, :] + b_end], axis=1)
    else:
        mask = K.cast(mask, K.floatx())
        mask = K.expand_dims(mask, 2)
        x *= mask
        if b_start is not None:
            mask_r = K.concatenate([K.zeros_like(mask[:, :1]), mask[:, :-1]], axis=1)
            start_mask = K.cast(K.greater(mask, mask_r), K.floatx())
            x = x + start_mask * b_start
        if b_end is not None:
            mask_l = K.concatenate([mask[:, 1:], K.zeros_like(mask[:, -1:])], axis=1)
            end_mask = K.cast(K.greater(mask, mask_l), K.floatx())
            x = x + end_mask * b_end
    return x 
Example #15
Source File: ChainCRF.py    From elmo-bilstm-cnn-crf with Apache License 2.0    6 votes
def _forward(x, reduce_step, initial_states, U, mask=None):
    '''Forward recurrence of the linear chain crf.'''

    def _forward_step(energy_matrix_t, states):
        alpha_tm1 = states[-1]
        new_states = reduce_step(K.expand_dims(alpha_tm1, 2) + energy_matrix_t)
        return new_states[0], new_states

    U_shared = K.expand_dims(K.expand_dims(U, 0), 0)

    if mask is not None:
        mask = K.cast(mask, K.floatx())
        mask_U = K.expand_dims(K.expand_dims(mask[:, :-1] * mask[:, 1:], 2), 3)
        U_shared = U_shared * mask_U

    inputs = K.expand_dims(x[:, 1:, :], 2) + U_shared
    inputs = K.concatenate([inputs, K.zeros_like(inputs[:, -1:, :, :])], axis=1)

    last, values, _ = K.rnn(_forward_step, inputs, initial_states)
    return last, values 
Example #16
Source File: ChainCRF.py    From elmo-bilstm-cnn-crf with Apache License 2.0    6 votes
def _backward(gamma, mask):
    '''Backward recurrence of the linear chain crf.'''
    gamma = K.cast(gamma, 'int32')

    def _backward_step(gamma_t, states):
        y_tm1 = K.squeeze(states[0], 0)
        y_t = batch_gather(gamma_t, y_tm1)
        return y_t, [K.expand_dims(y_t, 0)]

    initial_states = [K.expand_dims(K.zeros_like(gamma[:, 0, 0]), 0)]
    _, y_rev, _ = K.rnn(_backward_step,
                        gamma,
                        initial_states,
                        go_backwards=True)
    y = K.reverse(y_rev, 1)

    if mask is not None:
        mask = K.cast(mask, dtype='int32')
        # mask output
        y *= mask
        # set masked values to -1
        y += -(1 - mask)
    return y 
Example #17
Source File: ChainCRF.py    From naacl18-multitask_argument_mining with Apache License 2.0    6 votes
def _backward(gamma, mask):
    '''Backward recurrence of the linear chain crf.'''
    gamma = K.cast(gamma, 'int32')

    def _backward_step(gamma_t, states):
        y_tm1 = K.squeeze(states[0], 0)
        y_t = batch_gather(gamma_t, y_tm1)
        return y_t, [K.expand_dims(y_t, 0)]

    initial_states = [K.expand_dims(K.zeros_like(gamma[:, 0, 0]), 0)]
    _, y_rev, _ = K.rnn(_backward_step,
                        gamma,
                        initial_states,
                        go_backwards=True)
    y = K.reverse(y_rev, 1)

    if mask is not None:
        mask = K.cast(mask, dtype='int32')
        # mask output
        y *= mask
        # set masked values to -1
        y += -(1 - mask)
    return y 
Example #18
Source File: capsule.py    From Keras-TextClassification with MIT License    6 votes
def call(self, u_vecs):
        if self.share_weights:
            u_hat_vecs = K.conv1d(u_vecs, self.W)
        else:
            u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])

        batch_size = K.shape(u_vecs)[0]
        input_num_capsule = K.shape(u_vecs)[1]
        u_hat_vecs = K.reshape(u_hat_vecs, (batch_size, input_num_capsule,
                                            self.num_capsule, self.dim_capsule))
        u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))
        # final u_hat_vecs.shape = [None, num_capsule, input_num_capsule, dim_capsule]

        b = K.zeros_like(u_hat_vecs[:, :, :, 0])  # shape = [None, num_capsule, input_num_capsule]
        outputs = None
        for i in range(self.routings):
            b = K.permute_dimensions(b, (0, 2, 1))  # shape = [None, input_num_capsule, num_capsule]
            c = K.softmax(b)
            c = K.permute_dimensions(c, (0, 2, 1))
            b = K.permute_dimensions(b, (0, 2, 1))
            outputs = self.activation(K.batch_dot(c, u_hat_vecs, [2, 2]))
            if i < self.routings - 1:
                b = K.batch_dot(outputs, u_hat_vecs, [2, 3])

        return outputs 
Example #19
Source File: models.py    From DigiX_HuaWei_Population_Age_Attribution_Predict with MIT License    6 votes
def call(self, u_vecs):
        if self.share_weights:
            u_hat_vecs = K.conv1d(u_vecs, self.W)
        else:
            u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])

        batch_size = K.shape(u_vecs)[0]
        input_num_capsule = K.shape(u_vecs)[1]
        u_hat_vecs = K.reshape(u_hat_vecs, (batch_size, input_num_capsule,
                                            self.num_capsule, self.dim_capsule))
        u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))

        b = K.zeros_like(u_hat_vecs[:, :, :, 0])  # shape = [None, num_capsule, input_num_capsule]
        for i in range(self.routings):
            b = K.permute_dimensions(b, (0, 2, 1))  # shape = [None, input_num_capsule, num_capsule]
            c = K.softmax(b)
            c = K.permute_dimensions(c, (0, 2, 1))
            b = K.permute_dimensions(b, (0, 2, 1))
            outputs = self.activation(K.batch_dot(c, u_hat_vecs, [2, 2]))
            if i < self.routings - 1:
                b = K.batch_dot(outputs, u_hat_vecs, [2, 3])

        return outputs 
Example #20
Source File: load_trained_models.py    From Coloring-greyscale-images with MIT License    6 votes
def __init__(self,
             resource_path='./resources/',
             learning_rate=0.0002,
             decay_rate=2e-6,
             gpus=0):

        self.gpus = gpus
        self.learning_rate = learning_rate
        self.decay_rate = decay_rate

        def zero_loss(y_true, y_pred):
            return K.zeros_like(y_true)

        discriminator_low = load_model(
            resource_path + 'discriminator_low.h5',
            custom_objects={'Conv2D_r': Conv2D_r,
                            'InstanceNormalization': InstanceNormalization,
                            'tf': tf,
                            'zero_loss': zero_loss,
                            'ConvSN2D': ConvSN2D,
                            'DenseSN': DenseSN})

        discriminator_low.trainable = True
        discriminator_low.name = "discriminator_low"

        self.model = discriminator_low
        self.save_model = discriminator_low
Example #21
Source File: evaluation_metrics_theano.py    From deep-mlsa with Apache License 2.0    6 votes
def f1_score_task3(y_true, y_pred):
    #convert probas to 0,1
    y_ppred = K.zeros_like(y_true)
    y_pred_ones = K.T.set_subtensor(y_ppred[K.T.arange(y_true.shape[0]), K.argmax(y_pred, axis=-1)], 1)

    #where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true*y_pred_ones, axis=0)

    #for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    #for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    #precision for each class
    precision = K.T.switch(K.T.eq(pred_cnt, 0), 0, y_true_pred/pred_cnt)

    #recall for each class
    recall = K.T.switch(K.T.eq(gold_cnt, 0), 0, y_true_pred/gold_cnt)

    #f1 for each class
    f1_class = K.T.switch(K.T.eq(precision + recall, 0), 0, 2*(precision*recall)/(precision+recall))

    #return the f1 score of class 1
    return f1_class[1] 
Example #22
Source File: crf.py    From keras-contrib with MIT License    5 votes
def shift_right(x, offset=1):
        assert offset > 0
        return K.concatenate([K.zeros_like(x[:, :offset]), x[:, :-offset]], axis=1) 
Example #23
Source File: network.py    From ecg with GNU General Public License v3.0    5 votes
def resnet_block(
        layer,
        num_filters,
        subsample_length,
        block_index,
        **params):
    from keras.layers import Add 
    from keras.layers import MaxPooling1D
    from keras.layers.core import Lambda

    def zeropad(x):
        y = K.zeros_like(x)
        return K.concatenate([x, y], axis=2)

    def zeropad_output_shape(input_shape):
        shape = list(input_shape)
        assert len(shape) == 3
        shape[2] *= 2
        return tuple(shape)

    shortcut = MaxPooling1D(pool_size=subsample_length)(layer)
    zero_pad = (block_index % params["conv_increase_channels_at"]) == 0 \
        and block_index > 0
    if zero_pad is True:
        shortcut = Lambda(zeropad, output_shape=zeropad_output_shape)(shortcut)

    for i in range(params["conv_num_skip"]):
        if not (block_index == 0 and i == 0):
            layer = _bn_relu(
                layer,
                dropout=params["conv_dropout"] if i > 0 else 0,
                **params)
        layer = add_conv_weight(
            layer,
            params["conv_filter_length"],
            num_filters,
            subsample_length if i == 0 else 1,
            **params)
    layer = Add()([shortcut, layer])
    return layer 
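The zeropad shortcut above doubles the channel dimension by concatenating a zeros_like copy of the input, so the shortcut matches the widened convolutional path. A shape-only sketch (made-up sizes, Keras 2.x; not part of the ecg source):

import numpy as np
from keras import backend as K

x = K.variable(np.ones((1, 8, 32)))             # (batch, steps, channels)
y = K.concatenate([x, K.zeros_like(x)], axis=2)
print(K.int_shape(y))                           # (1, 8, 64)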
Example #24
Source File: train.py    From BERT-keras with GNU General Public License v3.0    5 votes
def _mask_loss(y_true, y_pred, y_mask, element_wise_loss):
    l = K.switch(y_mask, element_wise_loss(y_true, y_pred), K.zeros_like(y_mask, dtype=K.floatx()))
    return K.sum(l) / (K.cast(K.sum(y_mask), dtype='float32') + K.epsilon()) 
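A hypothetical usage sketch (the tensors and the cross-entropy choice are illustrative, not from the BERT-keras source): element_wise_loss must return a tensor shaped like y_mask, so that K.switch can zero out the masked positions before averaging.

# y_true, y_pred: (batch, seq_len, vocab); y_mask: (batch, seq_len)
masked_loss = _mask_loss(y_true, y_pred, y_mask,
                         lambda t, p: K.categorical_crossentropy(t, p))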
Example #25
Source File: deep_residual_learning_blocks.py    From CNNArt with Apache License 2.0    5 votes
def zeropad(x):
    y = K.zeros_like(x)
    return K.concatenate([x, y], axis=1) 
Example #26
Source File: transform_rnn.py    From View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition with MIT License    5 votes
def _trans(theta):
    tx = theta[:,3:4]
    ty = theta[:,4:5]
    tz = theta[:,5:6]
    zero = K.zeros_like(tx)
    one = K.ones_like(tx)
    first = K.reshape(K.concatenate([one,zero,zero,tx],axis=1),(-1,1,4))
    second = K.reshape(K.concatenate([zero,one,zero,ty],axis=1),(-1,1,4))
    third = K.reshape(K.concatenate([zero,zero,one,tz],axis=1),(-1,1,4))
    trans = K.concatenate([first,second,third],axis=1)
    # K.reshape works on every backend; the Theano-only .reshape method does not
    trans = K.reshape(trans, (-1, 3, 4))

    return trans 
Example #27
Source File: head.py    From NTM-Keras with MIT License    5 votes
def batch_reading(head_num, memory_size, memory_dim, memory_t, weight_t):
    """
    Reading memory.
    :param head_num:
    :param memory_size:
    :param memory_dim:
    :param memory_t: the $N \times M$ memory matrix at time $t$, where $N$
    is the number of memory locations, and $M$ is the vector size at each
    location.
    :param weight_t: $w_t$ is a vector of weightings over the $N$ locations
    emitted by a reading head at time $t$.

    Since all weightings are normalized, the $N$ elements $w_t(i)$ of
    $\textbf{w}_t$ obey the following constraints:
    $$\sum_{i=1}^{N} w_t(i) = 1, 0 \le w_t(i) \le 1,\forall i$$

    The length $M$ read vector $r_t$ returned by the head is defined as a
    convex combination of the row-vectors $M_t(i)$ in memory:
    $$\textbf{r}_t \leftarrow \sum_{i=1}^{N}w_t(i)\textbf{M}_t(i)$$
    :return: the content reading from memory.
    """
    # K.zeros_like expects a tensor rather than a shape, and symbolic tensors
    # do not support item assignment, so collect the per-head read vectors in
    # a Python list and concatenate them instead
    r_t_list = []

    for i in range(head_num):
        begin = i * memory_size
        end = begin + memory_size
        r_t_list.append(reading(memory_t, weight_t[begin:end]))

    return K.concatenate(r_t_list, axis=0)
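To make the read equation in the docstring concrete, a pure-NumPy sketch with made-up sizes (not part of the NTM-Keras source):

import numpy as np

N, M = 4, 3                                   # memory locations, vector size
memory_t = np.random.rand(N, M)               # M_t
w_t = np.random.rand(N)
w_t /= w_t.sum()                              # normalized weighting over locations
r_t = (w_t[:, None] * memory_t).sum(axis=0)   # r_t = sum_i w_t(i) * M_t(i)
print(r_t.shape)                              # (3,)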
Example #28
Source File: Seq2Seq.py    From dts with MIT License    5 votes
def _format_encoder_states(self, encoder_states, use_first=True):
        """
        Format the encoder states in such a way that only the last state from the first layer of the encoder
        is used to init the first layer of the decoder.
        If the cell type used is LSTM then both c and h are kept.
        :param encoder_states: Keras tensors
            (last) hidden states of the encoder
        :param use_first: bool
            if True, use only the last hidden state from the first layer of the
            encoder and initialize the others to zero;
            if False, use the last hidden state for all layers
        :return:
            masked encoder states
        """
        if use_first:
            # Keras version 2.1.4 has encoder states reversed w.r.t later versions
            if keras.__version__ < '2.2':
                if self.cell == 'lstm':
                    encoder_states = [Lambda(lambda x: K.zeros_like(x))(s) for s in encoder_states[:-2]] + [
                        encoder_states[-2]]
                else:
                    encoder_states = [Lambda(lambda x: K.zeros_like(x))(s) for s in encoder_states[:-1]] + [
                        encoder_states[-1]]
            else:
                if self.cell == 'lstm':
                    encoder_states = encoder_states[:2] + [Lambda(lambda x: K.zeros_like(x))(s) for s in
                                                                encoder_states[2:]]
                else:
                    encoder_states = encoder_states[:1] + [Lambda(lambda x: K.zeros_like(x))(s) for s in
                                                                encoder_states[1:]]
        return encoder_states 
Example #29
Source File: seg_util.py    From aitom with GNU General Public License v3.0    5 votes
def weighted_mean_squared_error(y_true, y_pred, weights):
    # build a per-element weight mask; an adhoc way to work around the fact
    # that tensor objects do not support item assignment
    mask = K.zeros_like(y_true)
    for lbl in weights:
        mask += K.cast(K.equal(y_true, lbl), dtype='float32') * (K.zeros_like(y_true) + weights[lbl])
    return K.mean(K.square(y_pred - y_true) * mask, axis=-1)
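A hypothetical usage sketch (the label weights and model are made up): weights maps a label value to the weight its elements receive in the loss.

def seg_loss(y_true, y_pred):
    return weighted_mean_squared_error(y_true, y_pred, {0: 0.5, 1: 2.0})

model.compile(optimizer='adam', loss=seg_loss)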
Example #30
Source File: deep_residual_learning_blocks.py    From CNNArt with Apache License 2.0    5 votes
def zeropad(x):
    y = K.zeros_like(x)
    return K.concatenate([x, y], axis=1)