Python keras.backend.batch_flatten() Examples

The following are 30 code examples of keras.backend.batch_flatten(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.backend, or try the search function.
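Before the examples, a minimal sketch of what the function does: batch_flatten keeps the batch axis and collapses every remaining axis, so a tensor of shape (n, a, b) becomes (n, a*b).

import numpy as np
from keras import backend as K

x = K.constant(np.arange(24).reshape(2, 3, 4))  # shape (2, 3, 4)
flat = K.batch_flatten(x)                       # collapses all but the batch axis
print(K.int_shape(flat))                        # -> (2, 12)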
Example #1
Source File: capsulelayers.py    From CapsNet-Keras with MIT License
def call(self, inputs, **kwargs):
        if type(inputs) is list:  # true label is provided with shape = [None, n_classes], i.e. one-hot code.
            assert len(inputs) == 2
            inputs, mask = inputs
        else:  # if no true label, mask by the max length of capsules. Mainly used for prediction
            # compute lengths of capsules
            x = K.sqrt(K.sum(K.square(inputs), -1))
            # generate the mask which is a one-hot code.
            # mask.shape=[None, n_classes]=[None, num_capsule]
            mask = K.one_hot(indices=K.argmax(x, 1), num_classes=x.get_shape().as_list()[1])

        # inputs.shape=[None, num_capsule, dim_capsule]
        # mask.shape=[None, num_capsule]
        # masked.shape=[None, num_capsule * dim_capsule]
        masked = K.batch_flatten(inputs * K.expand_dims(mask, -1))
        return masked 
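In CapsNet-Keras this Mask layer is used in two modes: masked by the true labels during training, and by the predicted capsule lengths at inference. A rough usage sketch, where n_class and digitcaps (shape (None, n_class, dim_capsule)) are hypothetical names standing in for the surrounding model:

from keras import layers

y = layers.Input(shape=(n_class,))      # one-hot true labels (hypothetical input)
masked_by_y = Mask()([digitcaps, y])    # training branch: mask with the labels
masked = Mask()(digitcaps)              # inference branch: mask with capsule lengths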
Example #2
Source File: nse.py    From neural-semantic-encoders with Apache License 2.0
def compose_and_write_step(self, o_t, states):
        flattened_mem_tm1, flattened_shared_mem_tm1, writer_h_tm1, writer_c_tm1 = states
        input_mem_shape = K.shape(flattened_mem_tm1)
        mem_shape = (input_mem_shape[0], input_mem_shape[1] // self.output_dim, self.output_dim)  # integer division so reshape gets an int dimension
        mem_tm1 = K.reshape(flattened_mem_tm1, mem_shape)
        shared_mem_tm1 = K.reshape(flattened_shared_mem_tm1, mem_shape)
        z_t, m_rt = self.summarize_memory(o_t, mem_tm1)
        shared_z_t, shared_m_rt = self.summarize_memory(o_t, shared_mem_tm1)
        c_t = self.compose_memory_and_output([o_t, m_rt, shared_m_rt])
        # Collecting the necessary variables to directly call writer's step function.
        writer_constants = self.writer.get_constants(c_t)  # returns dropouts for W and U (all 1s, see init)
        writer_states = [writer_h_tm1, writer_c_tm1] + writer_constants
        # Making a call to writer's step function, Equation 5
        h_t, [_, writer_c_t] = self.writer.step(c_t, writer_states)  # h_t, writer_c_t: (batch_size, output_dim)
        mem_t = self.update_memory(z_t, h_t, mem_tm1)
        shared_mem_t = self.update_memory(shared_z_t, h_t, shared_mem_tm1)
        return h_t, [K.batch_flatten(mem_t), K.batch_flatten(shared_mem_t), h_t, writer_c_t] 
Example #3
Source File: model.py    From V-GAN with MIT License
def discriminator_dummy(img_size, n_filters, init_lr, name='d'):    # naive unet without GAN
    # set image specifics
    img_ch=3 # image channels
    out_ch=1 # output channel
    img_height, img_width = img_size[0], img_size[1]

    inputs = Input((img_height, img_width, img_ch + out_ch))

    d = Model(inputs, inputs, name=name)

    def d_loss(y_true, y_pred):
        L = objectives.binary_crossentropy(K.batch_flatten(y_true),
                                            K.batch_flatten(y_pred))
#         L = objectives.mean_squared_error(K.batch_flatten(y_true),
#                                            K.batch_flatten(y_pred))
        return L
    
    d.compile(optimizer=Adam(lr=init_lr, beta_1=0.5), loss=d_loss, metrics=['accuracy'])
    
    return d, d.layers[-1].output_shape[1:] 
Example #4
Source File: capsulelayers.py    From Multi-level-DCNet with GNU General Public License v3.0
def call(self, inputs, **kwargs):
        if type(inputs) is list:  # true label is provided with shape = [None, n_classes], i.e. one-hot code.
            assert len(inputs) == 2
            inputs, mask = inputs
        else:  # if no true label, mask by the max length of capsules. Mainly used for prediction
            # compute lengths of capsules
            x = K.sqrt(K.sum(K.square(inputs), -1))
            # generate the mask which is a one-hot code.
            # mask.shape=[None, n_classes]=[None, num_capsule]
            mask = K.one_hot(indices=K.argmax(x, 1), num_classes=x.get_shape().as_list()[1])

        # inputs.shape=[None, num_capsule, dim_capsule]
        # mask.shape=[None, num_capsule]
        # masked.shape=[None, num_capsule * dim_capsule]
        masked = K.batch_flatten(inputs * K.expand_dims(mask, -1))
        return masked 
Example #5
Source File: nse.py    From neural-semantic-encoders with Apache License 2.0
def read(self, nse_input, input_mask=None):
        '''
        This method produces the 'read' output (equation 1 in the paper) for all timesteps
        and initializes the memory slot mem_0.

        Input: nse_input (batch_size, input_length, input_dim)
        Outputs:
            o (batch_size, input_length, output_dim)
            flattened_mem_0 (batch_size, input_length * output_dim)
 
        While this method simply copies input to mem_0, variants that inherit from this class can do
        something fancier.
        '''
        input_to_read = nse_input
        mem_0 = input_to_read
        flattened_mem_0 = K.batch_flatten(mem_0)
        o = self.reader.call(input_to_read, input_mask)
        o_mask = self.reader.compute_mask(input_to_read, input_mask)
        return o, [flattened_mem_0], o_mask 
Example #6
Source File: gram.py    From subjective-functions with MIT License
def l2_diff(octaves, frame_step=1):
    ''' Model which takes the l2 distance between frames frame_step apart'''
    octave_diffs = []
    for frames in octaves:
        # Take the difference between the frames
        out = Lambda(lambda frames, frame_step=frame_step:
                K.batch_flatten(frames - K.concatenate([frames[frame_step:], frames[0:frame_step]], axis=0)))(frames)

        # square
        out = Lambda(lambda x: K.square(x), name=make_name("l2_diff_square"))(out)
        
        # mean instead of sum so we can ignore pixel count
        out = Lambda(lambda x: K.mean(x, axis=1), name=make_name("l2_diff_mean"))(out)

        # sqrt
        out = Lambda(lambda x: K.sqrt(x), name=make_name("l2_diff_sqrt"))(out)

        # (frames,) list of l2 distances

        octave_diffs.append(out)
    return octave_diffs # [(frames, ) ...] list of lists of l2 distances 
Example #7
Source File: gram.py    From subjective-functions with MIT License
def lap1_diff(laplacian, frame_step=1):
    ''' Model which takes the lap-1 distance between frames `frame_step` apart
    in the batch '''
    deltas = []
    for i, lap_level in enumerate(laplacian):
        # Take the difference of the Laplacian pyramid of this layer vs. the next
        diff = Lambda(lambda lap_level, frame_step=frame_step:
                K.batch_flatten(
                    lap_level - K.concatenate([lap_level[frame_step:], lap_level[0:frame_step]], axis=0)))(lap_level)
        # scale for good measure
        diff = Lambda(lambda x, scale = 2.**-(i-1): scale*x)(diff)
        #diff = K.batch_flatten(lap_layer - K.concatenate([lap_layer[frame_step:], lap_layer[0:frame_step]], axis=0))
        deltas.append(diff) # diff: (frames, lap-pixels)

    out = keras.layers.concatenate(deltas, axis=1) # (frames, lap-pixels)
    # I use mean here instead of sum to make it more agnostic to total pixel count.
    out = Lambda(lambda x: K.mean(K.abs(x), axis=1))(out) # (frames,)
    return out 
Example #8
Source File: losses.py    From style-transfer with MIT License
def gram_matrix(x):
	"""
	Computes the outer-product of the input tensor x.

	Input
	-----
	- x: input tensor of shape (C x H x W)

	Returns
	-------
	- x . x^T

	Note that this can be computed efficiently if x is reshaped
	as a tensor of shape (C x H*W).
	"""
	# assert K.ndim(x) == 3
	if K.image_dim_ordering() == 'th':
		features = K.batch_flatten(x)
	else:
		features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
	return K.dot(features, K.transpose(features)) 
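As the docstring notes, batch_flatten is repurposed here: with a channels-first tensor of shape (C, H, W), the channel axis plays the role of the batch axis, so features has shape (C, H*W) and the result is a C x C Gram matrix. A NumPy sketch of the same shape arithmetic:

import numpy as np

x = np.random.rand(64, 7, 7).astype("float32")  # (C, H, W)
features = x.reshape(64, -1)                    # (C, H*W) = (64, 49)
gram = features @ features.T                    # (C, C)   = (64, 64)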
Example #9
Source File: capslayers.py    From deepcaps with MIT License
def call(self, inputs, **kwargs):
        if isinstance(inputs, list):  # true label is provided with shape = [None, n_classes], i.e. one-hot code.
            assert len(inputs) == 2
            inputs, mask = inputs
        else:  # if no true label, mask by the max length of capsules. Mainly used for prediction
            # compute lengths of capsules
            x = K.sqrt(K.sum(K.square(inputs), -1))
            # generate the mask which is a one-hot code.
            # mask.shape=[None, n_classes]=[None, num_capsule]
            mask = K.one_hot(indices=K.argmax(x, 1), num_classes=x.get_shape().as_list()[1])

        # inputs.shape=[None, num_capsule, dim_capsule]
        # mask.shape=[None, num_capsule]
        # masked.shape=[None, num_capsule * dim_capsule]
        masked = K.batch_flatten(inputs * K.expand_dims(mask, -1))
        return masked 
Example #10
Source File: capsule.py    From Keras-TextClassification with MIT License
def call(self, inputs, **kwargs):
        if type(inputs) is list:  # true label is provided with shape = [None, n_classes], i.e. one-hot code.
            assert len(inputs) == 2
            inputs, mask = inputs
        else:  # if no true label, mask by the max length of capsules. Mainly used for prediction
            # compute lengths of capsules
            x = K.sqrt(K.sum(K.square(inputs), -1))
            # generate the mask which is a one-hot code.
            # mask.shape=[None, n_classes]=[None, num_capsule]
            mask = K.one_hot(indices=K.argmax(x, 1), num_classes=x.get_shape().as_list()[1])

        # inputs.shape=[None, num_capsule, dim_capsule]
        # mask.shape=[None, num_capsule]
        # masked.shape=[None, num_capsule * dim_capsule]
        masked = K.batch_flatten(inputs * K.expand_dims(mask, -1))
        return masked 
Example #11
Source File: models.py    From sam with MIT License
def nss(y_true, y_pred):
    max_y_pred = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.max(K.max(y_pred, axis=2), axis=2)), 
                                                                   shape_r_out, axis=-1)), shape_c_out, axis=-1)
    y_pred /= max_y_pred
    y_pred_flatten = K.batch_flatten(y_pred)

    y_mean = K.mean(y_pred_flatten, axis=-1)
    y_mean = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.expand_dims(y_mean)), 
                                                               shape_r_out, axis=-1)), shape_c_out, axis=-1)

    y_std = K.std(y_pred_flatten, axis=-1)
    y_std = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.expand_dims(y_std)), 
                                                              shape_r_out, axis=-1)), shape_c_out, axis=-1)

    y_pred = (y_pred - y_mean) / (y_std + K.epsilon())

    return -(K.sum(K.sum(y_true * y_pred, axis=2), axis=2) / K.sum(K.sum(y_true, axis=2), axis=2))


# Gaussian priors initialization 
Example #12
Source File: capsule_layers.py    From SegCaps with Apache License 2.0
def call(self, inputs, **kwargs):
        if type(inputs) is list:
            assert len(inputs) == 2
            input, mask = inputs
            _, hei, wid, _, _ = input.get_shape()
            if self.resize_masks:
                mask = tf.image.resize_bicubic(mask, (hei.value, wid.value))
            mask = K.expand_dims(mask, -1)
            if input.get_shape().ndims == 3:
                masked = K.batch_flatten(mask * input)
            else:
                masked = mask * input

        else:
            if inputs.get_shape().ndims == 3:
                x = K.sqrt(K.sum(K.square(inputs), -1))
                mask = K.one_hot(indices=K.argmax(x, 1), num_classes=x.get_shape().as_list()[1])
                masked = K.batch_flatten(K.expand_dims(mask, -1) * inputs)
            else:
                masked = inputs

        return masked 
Example #13
Source File: nse.py    From onto-lstm with Apache License 2.0
def get_initial_states(self, nse_input, input_mask=None):
        '''
        This method produces the 'read' mask for all timesteps
        and initializes the memory slot mem_0.

        Input: nse_input (batch_size, input_length, input_dim)
        Output: list[Tensors]:
                h_0 (batch_size, output_dim)
                c_0 (batch_size, output_dim)
                flattened_mem_0 (batch_size, input_length * output_dim)
 
        While this method simply copies input to mem_0, variants that inherit from this class can do
        something fancier.
        '''
        input_to_read = nse_input
        mem_0 = input_to_read
        flattened_mem_0 = K.batch_flatten(mem_0)
        initial_states = self.reader.get_initial_states(nse_input)
        initial_states += [flattened_mem_0]
        return initial_states 
Example #14
Source File: nse.py    From onto-lstm with Apache License 2.0
def get_initial_states(self, nse_input, input_mask=None):
        '''
        Read input in MMA-NSE will be of shape (batch_size, read_input_length*2, input_dim), a concatenation of
        the actual input to this NSE and the output from a different NSE. The latter will be used to initialize
        the shared memory. The former will be passed to the read LSTM and also used to initialize the current
        memory.
        '''
        input_length = K.shape(nse_input)[1]
        read_input_length = input_length // 2  # integer division so the slice index stays an int
        input_to_read = nse_input[:, :read_input_length, :]
        initial_shared_memory = K.batch_flatten(nse_input[:, read_input_length:, :])
        mem_0 = K.batch_flatten(input_to_read)
        o_mask = self.reader.compute_mask(input_to_read, input_mask)
        reader_states = self.reader.get_initial_states(nse_input)
        initial_states = reader_states + [mem_0, initial_shared_memory]
        return initial_states, o_mask 
Example #15
Source File: SelfAttnGRU.py    From R-NET-in-Keras with MIT License
def step(self, inputs, states):
        vP_t = inputs
        hP_tm1 = states[0]
        _ = states[1:3] # ignore internal dropout/masks 
        vP, WP_v, WPP_v, v, W_g2 = states[3:8]
        vP_mask, = states[8:]

        WP_v_Dot = K.dot(vP, WP_v)
        WPP_v_Dot = K.dot(K.expand_dims(vP_t, axis=1), WPP_v)

        s_t_hat = K.tanh(WPP_v_Dot + WP_v_Dot)
        s_t = K.dot(s_t_hat, v)
        s_t = K.batch_flatten(s_t)

        a_t = softmax(s_t, mask=vP_mask, axis=1)

        c_t = K.batch_dot(a_t, vP, axes=[1, 1])
        
        GRU_inputs = K.concatenate([vP_t, c_t])
        g = K.sigmoid(K.dot(GRU_inputs, W_g2))
        GRU_inputs = g * GRU_inputs
        
        hP_t, s = super(SelfAttnGRU, self).step(GRU_inputs, states)

        return hP_t, s 
Example #16
Source File: PointerGRU.py    From R-NET-in-Keras with MIT License
def step(self, inputs, states):
        # input
        ha_tm1 = states[0] # (B, 2H)
        _ = states[1:3] # ignore internal dropout/masks
        hP, WP_h, Wa_h, v = states[3:7] # (B, P, 2H)
        hP_mask, = states[7:8]

        WP_h_Dot = K.dot(hP, WP_h) # (B, P, H)
        Wa_h_Dot = K.dot(K.expand_dims(ha_tm1, axis=1), Wa_h) # (B, 1, H)

        s_t_hat = K.tanh(WP_h_Dot + Wa_h_Dot) # (B, P, H)
        s_t = K.dot(s_t_hat, v) # (B, P, 1)
        s_t = K.batch_flatten(s_t) # (B, P)
        a_t = softmax(s_t, mask=hP_mask, axis=1) # (B, P)
        c_t = K.batch_dot(hP, a_t, axes=[1, 1]) # (B, 2H)

        GRU_inputs = c_t
        ha_t, (ha_t_,) = super(PointerGRU, self).step(GRU_inputs, states)
        
        return a_t, [ha_t] 
Example #17
Source File: QuestionPooling.py    From R-NET-in-Keras with MIT License
def call(self, inputs, mask=None):
        assert(isinstance(inputs, list) and len(inputs) == 5)
        uQ, WQ_u, WQ_v, v, VQ_r = inputs
        uQ_mask = mask[0] if mask is not None else None

        ones = K.ones_like(K.sum(uQ, axis=1, keepdims=True)) # (B, 1, 2H)
        s_hat = K.dot(uQ, WQ_u)
        s_hat += K.dot(ones, K.dot(WQ_v, VQ_r))
        s_hat = K.tanh(s_hat)
        s = K.dot(s_hat, v)
        s = K.batch_flatten(s)

        a = softmax(s, mask=uQ_mask, axis=1)

        rQ = K.batch_dot(uQ, a, axes=[1, 1])

        return rQ 
Example #18
Source File: attentive_convlstm.py    From sam with MIT License
def step(self, x, states):
        x_shape = K.shape(x)
        h_tm1 = states[0]
        c_tm1 = states[1]

        e = self.V_a(K.tanh(self.W_a(h_tm1) + self.U_a(x)))
        a = K.reshape(K.softmax(K.batch_flatten(e)), (x_shape[0], 1, x_shape[2], x_shape[3]))
        x_tilde = x * K.repeat_elements(a, x_shape[1], 1)

        x_i = self.W_i(x_tilde)
        x_f = self.W_f(x_tilde)
        x_c = self.W_c(x_tilde)
        x_o = self.W_o(x_tilde)

        i = self.inner_activation(x_i + self.U_i(h_tm1))
        f = self.inner_activation(x_f + self.U_f(h_tm1))
        c = f * c_tm1 + i * self.activation(x_c + self.U_c(h_tm1))
        o = self.inner_activation(x_o + self.U_o(h_tm1))

        h = o * self.activation(c)
        return h, [h, c] 
Example #19
Source File: QuestionAttnGRU.py    From R-NET-in-Keras with MIT License
def step(self, inputs, states):
        uP_t = inputs
        vP_tm1 = states[0]
        _ = states[1:3] # ignore internal dropout/masks
        uQ, WQ_u, WP_v, WP_u, v, W_g1 = states[3:9]
        uQ_mask, = states[9:10]

        WQ_u_Dot = K.dot(uQ, WQ_u) #WQ_u
        WP_v_Dot = K.dot(K.expand_dims(vP_tm1, axis=1), WP_v) #WP_v
        WP_u_Dot = K.dot(K.expand_dims(uP_t, axis=1), WP_u) # WP_u

        s_t_hat = K.tanh(WQ_u_Dot + WP_v_Dot + WP_u_Dot)

        s_t = K.dot(s_t_hat, v) # v
        s_t = K.batch_flatten(s_t)
        a_t = softmax(s_t, mask=uQ_mask, axis=1)
        c_t = K.batch_dot(a_t, uQ, axes=[1, 1])

        GRU_inputs = K.concatenate([uP_t, c_t])
        g = K.sigmoid(K.dot(GRU_inputs, W_g1))  # W_g1
        GRU_inputs = g * GRU_inputs
        vP_t, s = super(QuestionAttnGRU, self).step(GRU_inputs, states)

        return vP_t, s 
Example #20
Source File: neural_style_transfer.py    From pCVR with Apache License 2.0
def gram_matrix(x):
    assert K.ndim(x) == 3
    if K.image_data_format() == 'channels_first':
        features = K.batch_flatten(x)
    else:
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    gram = K.dot(features, K.transpose(features))
    return gram

# the "style loss" is designed to maintain
# the style of the reference image in the generated image.
# It is based on the gram matrices (which capture style) of
# feature maps from the style reference image
# and from the generated image 
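In the same script, this Gram matrix feeds a style loss that penalizes the squared difference between the Gram matrices of the two images. A sketch of how that loss is typically written, assuming img_nrows and img_ncols hold the generated image's dimensions:

def style_loss(style, combination):
    # Compare Gram matrices of the style reference and the generated image.
    S = gram_matrix(style)
    C = gram_matrix(combination)
    channels = 3
    size = img_nrows * img_ncols  # assumed to be defined at module level
    return K.sum(K.square(S - C)) / (4. * (channels ** 2) * (size ** 2))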
Example #21
Source File: neural_doodle.py    From pCVR with Apache License 2.0
def gram_matrix(x):
    assert K.ndim(x) == 3
    features = K.batch_flatten(x)
    gram = K.dot(features, K.transpose(features))
    return gram 
Example #22
Source File: neural_style_transfer.py    From DeepLearning_Wavelet-LSTM with MIT License
def gram_matrix(x):
    assert K.ndim(x) == 3
    if K.image_data_format() == 'channels_first':
        features = K.batch_flatten(x)
    else:
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    gram = K.dot(features, K.transpose(features))
    return gram

# the "style loss" is designed to maintain
# the style of the reference image in the generated image.
# It is based on the gram matrices (which capture style) of
# feature maps from the style reference image
# and from the generated image 
Example #23
Source File: neural_style_transfer.py    From Style_Migration_For_Artistic_Font_With_CNN with MIT License
def gram_matrix(x):  # Gram matrix
    assert K.ndim(x) == 3
    if K.image_data_format() == 'channels_first':
        features = K.batch_flatten(x)
    else:
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    gram = K.dot(features, K.transpose(features))
    return gram

# The style loss is the difference between the Gram matrices of the style image and the result image, summed over all elements
Example #24
Source File: neural_doodle.py    From DeepLearning_Wavelet-LSTM with MIT License
def gram_matrix(x):
    assert K.ndim(x) == 3
    features = K.batch_flatten(x)
    gram = K.dot(features, K.transpose(features))
    return gram 
Example #25
Source File: onto_attention.py    From onto-lstm with Apache License 2.0
def step(self, x, states):
        h, c, att = self._step(x, states)
        if self.return_attention:
            # Flattening attention to (batch_size, senses*hyps)
            return K.batch_flatten(att), [h, c]
        else:
            return h, [h, c] 
Example #26
Source File: onto_attention.py    From onto-lstm with Apache License 2.0
def get_initial_states(self, onto_nse_input, input_mask=None):
        input_to_read = onto_nse_input  # (batch_size, num_words, num_senses, num_hyps, output_dim + 1)
        memory_input = input_to_read[:, :, :, :, :-1]  # (bs, words, senses, hyps, output_dim)
        if input_mask is None:
            mem_0 = K.mean(memory_input, axis=(2, 3))  # (batch_size, num_words, output_dim)
        else:
            memory_mask = input_mask
            if K.ndim(onto_nse_input) != K.ndim(input_mask):
                memory_mask = K.expand_dims(input_mask)
            memory_mask = K.cast(memory_mask / (K.sum(memory_mask) + K.epsilon()), 'float32')
            mem_0 = K.sum(memory_input * memory_mask, axis=(2,3))  # (batch_size, num_words, output_dim)
        flattened_mem_0 = K.batch_flatten(mem_0)
        initial_states = self.reader.get_initial_states(input_to_read)
        initial_states += [flattened_mem_0]
        return initial_states 
Example #27
Source File: attention.py    From MusiteDeep with GNU General Public License v2.0
def call(self, x, mask=None):
        x=x[:,:self.mydeletedim]
        return K.batch_flatten(x) 
Example #28
Source File: dagmm.py    From AnomalyDetectionTransformations with MIT License
def create_dagmm_model(encoder, decoder, estimation_encoder, lambd_diag=0.005):
    x_in = Input(batch_shape=encoder.input_shape)
    zc = encoder(x_in)

    decoder.name = 'reconstruction'
    x_rec = decoder(zc)
    euclid_dist = Lambda(lambda args: K.sqrt(K.sum(K.batch_flatten(K.square(args[0] - args[1])),
                                                   axis=-1, keepdims=True) /
                                             K.sum(K.batch_flatten(K.square(args[0])),
                                                   axis=-1, keepdims=True)),
                         output_shape=(1,))([x_in, x_rec])
    cos_sim = Lambda(lambda args: K.batch_dot(K.l2_normalize(K.batch_flatten(args[0]), axis=-1),
                                              K.l2_normalize(K.batch_flatten(args[1]), axis=-1),
                                              axes=-1),
                     output_shape=(1,))([x_in, x_rec])

    zr = concatenate([euclid_dist, cos_sim])
    z = concatenate([zc, zr])

    gamma = estimation_encoder(z)

    gamma_ks = [Lambda(lambda g: g[:, k:k + 1], output_shape=(1,))(gamma)
                for k in range(estimation_encoder.output_shape[-1])]

    components = [GaussianMixtureComponent(lambd_diag)([z, gamma_k])
                  for gamma_k in gamma_ks]
    density = add(components) if len(components) > 1 else components[0]
    energy = Lambda(lambda dens: -K.log(dens), name='energy')(density)

    dagmm = Model(x_in, [x_rec, energy])

    return dagmm 
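The two Lambda layers above build DAGMM's reconstruction features: a relative Euclidean distance and a cosine similarity between the input and its reconstruction, each computed per sample after flattening. A hypothetical NumPy rendering of the same computation, assuming x and x_rec share shape (batch, h, w, c):

import numpy as np

x = np.random.rand(2, 4, 4, 1).astype("float32")
x_rec = np.random.rand(2, 4, 4, 1).astype("float32")
xf = x.reshape(len(x), -1)                      # batch_flatten equivalent
rf = x_rec.reshape(len(x_rec), -1)
euclid = np.sqrt(np.square(xf - rf).sum(1, keepdims=True) /
                 np.square(xf).sum(1, keepdims=True))    # relative L2 error
xn = xf / np.linalg.norm(xf, axis=1, keepdims=True)
rn = rf / np.linalg.norm(rf, axis=1, keepdims=True)
cos = (xn * rn).sum(1, keepdims=True)                    # cosine similarity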
Example #29
Source File: adgan.py    From AnomalyDetectionTransformations with MIT License
def call(self, inputs, **kwargs):
        interp_in, critic_interp_score_in = inputs[0], inputs[1]
        interp_critic_grad = K.batch_flatten(K.gradients(critic_interp_score_in, [interp_in])[0])
        interp_critic_grad_norm = K.sqrt(K.sum(K.square(interp_critic_grad), axis=-1, keepdims=True))
        return K.square(interp_critic_grad_norm - 1.)  # two sided regularisation
        # return K.square(K.relu(interp_critic_grad_norm - 1.))  # one sided regularisation 
Example #30
Source File: layers.py    From Keras-progressive_growing_of_gans with MIT License
def call(self, inputs):
        target, wrt = inputs
        grads = K.gradients(target, wrt)
        assert len(grads) == 1
        grad = grads[0]
        return K.sqrt(K.sum(K.batch_flatten(K.square(grad)), axis=1, keepdims=True))
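This layer computes the per-sample L2 norm of a gradient, the quantity used in WGAN-GP style gradient penalties: flatten each sample's gradient, then take the square root of the sum of squares. A NumPy sketch of what the returned tensor contains, assuming grad has shape (batch, h, w, c):

import numpy as np

grad = np.random.randn(4, 8, 8, 3).astype("float32")
flat = grad.reshape(len(grad), -1)                           # batch_flatten equivalent
norms = np.sqrt(np.square(flat).sum(axis=1, keepdims=True))  # shape (4, 1)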