Python tensorflow.keras.backend.int_shape() Examples

The following are 30 code examples of tensorflow.keras.backend.int_shape(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.keras.backend, or try the search function.
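
Before the examples, a minimal sketch of what the function returns (the tensors below are illustrative): K.int_shape() gives the static shape as a plain Python tuple, with None for any dimension not known at graph-construction time, whereas K.shape() returns a symbolic tensor holding the dynamic shape.

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.zeros((2, 32, 64))
print(K.int_shape(x))      # (2, 32, 64) -- a plain Python tuple

inp = tf.keras.Input(shape=(32, 64))
print(K.int_shape(inp))    # (None, 32, 64) -- the unknown batch dim is None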
Example #1
Source File: matmul.py    From spektral with MIT License
def mixed_mode_dot(a, b):
    """
    Computes the equivalent of `tf.einsum('ij,bjk->bik', a, b)`, but
    works for both dense and sparse inputs.
    :param a: Tensor or SparseTensor with rank 2.
    :param b: Tensor or SparseTensor with rank 3.
    :return: Tensor or SparseTensor with rank 3.
    """
    s_0_, s_1_, s_2_ = K.int_shape(b)
    B_T = ops.transpose(b, (1, 2, 0))
    B_T = ops.reshape(B_T, (s_1_, -1))
    output = dot(a, B_T)
    output = ops.reshape(output, (s_1_, s_2_, -1))
    output = ops.transpose(output, (2, 0, 1))

    return output 
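
A standalone check of the transpose/reshape trick against the docstring's einsum, using plain TF ops in place of spektral's ops wrappers (the tensor sizes are illustrative):

import tensorflow as tf

a = tf.random.normal((5, 5))      # rank 2, e.g. an N x N adjacency matrix
b = tf.random.normal((8, 5, 3))   # rank 3: a batch of node-feature matrices
_, s1, s2 = b.shape
b_t = tf.reshape(tf.transpose(b, (1, 2, 0)), (s1, -1))    # fold batch into columns
out = tf.transpose(tf.reshape(a @ b_t, (s1, s2, -1)), (2, 0, 1))
ref = tf.einsum('ij,bjk->bik', a, b)
print(bool(tf.reduce_all(tf.abs(out - ref) < 1e-5)))      # True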
Example #2
Source File: keras_layers.py    From DeepPavlov with Apache License 2.0
def expand_tile(units, axis):
    """
    Expand and tile a tensor along the given axis

    Args:
        units: tf tensor with dimensions [batch_size, time_steps, n_input_features]
        axis: the axis along which to expand and tile. Must be 1 or 2

    """
    assert axis in (1, 2)
    n_time_steps = K.int_shape(units)[1]
    repetitions = [1, 1, 1, 1]
    repetitions[axis] = n_time_steps
    if axis == 1:
        expanded = Reshape(target_shape=((1,) + K.int_shape(units)[1:]))(units)
    else:
        expanded = Reshape(target_shape=(K.int_shape(units)[1:2] + (1,) + K.int_shape(units)[2:]))(units)
    return K.tile(expanded, repetitions) 
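
A quick shape check, calling expand_tile on an eager tensor (sizes are illustrative; assumes the function above and its Reshape/K imports are in scope):

import tensorflow as tf
from tensorflow.keras import backend as K

units = tf.zeros((4, 7, 16))  # [batch_size=4, time_steps=7, n_input_features=16]
print(K.int_shape(expand_tile(units, axis=1)))  # (4, 7, 7, 16)
print(K.int_shape(expand_tile(units, axis=2)))  # (4, 7, 7, 16)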
Example #3
Source File: vae-cnn-mnist-8.1.2.py    From Advanced-Deep-Learning-with-Keras with MIT License
def sampling(args):
    """Reparameterization trick by sampling 
        from an isotropic unit Gaussian.

    # Arguments:
        args (tensor): mean and log of variance of Q(z|X)

    # Returns:
        z (tensor): sampled latent vector
    """

    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon 
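
A usage sketch wiring sampling into an encoder head through a Lambda layer, the pattern the book's scripts use (latent_dim and the Dense heads below are illustrative, not from the book):

from tensorflow.keras.layers import Input, Dense, Lambda

latent_dim = 2
h = Input(shape=(16,))                        # illustrative encoder features
z_mean = Dense(latent_dim, name='z_mean')(h)
z_log_var = Dense(latent_dim, name='z_log_var')(h)
# draws one sample z ~ Q(z|X) per input row
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])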
Example #4
Source File: custom_activation.py    From Echo with MIT License
def call(self, inputs):
        def brelu(x):
            # get shape of X, we are interested in the last axis, which is constant
            shape = K.int_shape(x)
            # last axis
            dim = shape[-1]
            # half of the last axis (+1 if necessary)
            dim2 = dim // 2
            if dim % 2 != 0:
                dim2 += 1
            # multiplier will be a tensor of alternated +1 and -1
            multiplier = K.ones((dim2,))
            # interleave +1/-1 and flatten to a 1-D alternating pattern
            multiplier = K.reshape(K.stack([multiplier, -multiplier], axis=-1), (-1,))
            if dim % 2 != 0:
                # odd dim: drop the trailing element so lengths match
                multiplier = multiplier[:-1]
            # adjust multiplier shape to the shape of x
            multiplier = K.reshape(multiplier, tuple(1 for _ in shape[:-1]) + (-1,))
            return multiplier * tf.nn.relu(multiplier * x)

        return Lambda(brelu)(inputs) 
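
A minimal eager check of the BReLU pattern for an even feature dimension (a standalone sketch of the math, not the Echo layer itself):

import tensorflow as tf

x = tf.constant([[1.0, 1.0, -1.0, -1.0]])
mult = tf.constant([1.0, -1.0, 1.0, -1.0])    # alternating +1/-1 signs
# even channels keep their positive part, odd channels keep their negative part
print((mult * tf.nn.relu(mult * x)).numpy())  # [[ 1. -0.  0. -1.]]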
Example #5
Source File: layers.py    From thundernet-tensorflow2.0 with MIT License
def channle_shuffle(inputs, group):
    """Shuffle the channel
    Args:
        inputs: 4D Tensor
        group: int, number of groups
    Returns:
        Shuffled 4D Tensor
    """
    #in_shape = inputs.get_shape().as_list()
    h, w, in_channel  = K.int_shape(inputs)[1:]
    #h, w, in_channel = in_shape[1:]
    assert(in_channel % group == 0)
    l = K.reshape(inputs, [-1, h, w, in_channel // group, group])
    l = K.permute_dimensions(l, [0, 1, 2, 4, 3])
    l = K.reshape(l, [-1, h, w, in_channel])

    return l 
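
A quick eager check (sizes illustrative): the shape is preserved while channels are interleaved across groups.

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.reshape(tf.range(16, dtype=tf.float32), (1, 2, 2, 4))
y = channle_shuffle(x, 2)
print(K.int_shape(y))      # (1, 2, 2, 4) -- same shape
print(x[0, 0, 0].numpy())  # [0. 1. 2. 3.]
print(y[0, 0, 0].numpy())  # [0. 2. 1. 3.] -- channels interleaved across the 2 groups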
Example #6
Source File: mobilenet_v3.py    From keras-YOLOv3-model-set with MIT License
def correct_pad(backend, inputs, kernel_size):
    """Returns a tuple for zero-padding for 2D convolution with downsampling.
    # Arguments
        backend: The Keras backend module in use.
        inputs: Input tensor.
        kernel_size: An integer or tuple/list of 2 integers.
    # Returns
        A tuple.
    """
    img_dim = 2 if backend.image_data_format() == 'channels_first' else 1
    input_size = backend.int_shape(inputs)[img_dim:(img_dim + 2)]

    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)

    if input_size[0] is None:
        adjust = (1, 1)
    else:
        adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)

    correct = (kernel_size[0] // 2, kernel_size[1] // 2)

    return ((correct[0] - adjust[0], correct[0]),
            (correct[1] - adjust[1], correct[1])) 
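
A usage sketch pairing correct_pad with ZeroPadding2D ahead of a stride-2 depthwise convolution, the way MobileNet-style models use it (the input size is illustrative):

from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input, ZeroPadding2D, DepthwiseConv2D

inputs = Input(shape=(224, 224, 3))
x = ZeroPadding2D(padding=correct_pad(K, inputs, 3))(inputs)   # pads 224 -> 225
x = DepthwiseConv2D(3, strides=2, padding='valid')(x)
print(K.int_shape(x))  # (None, 112, 112, 3)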
Example #7
Source File: hourglass.py    From Centernet-Tensorflow2.0 with Apache License 2.0
def residual(_x, out_dim, name, stride=1):
  shortcut = _x
  num_channels = K.int_shape(shortcut)[-1]
  _x = ZeroPadding2D(padding=1, name=name + '.pad1')(_x)
  _x = Conv2D(out_dim, 3, strides=stride, use_bias=False, name=name + '.conv1')(_x)
  _x = BatchNormalization(epsilon=1e-5, name=name + '.bn1')(_x)
  _x = Activation('relu', name=name + '.relu1')(_x)

  _x = Conv2D(out_dim, 3, padding='same', use_bias=False, name=name + '.conv2')(_x)
  _x = BatchNormalization(epsilon=1e-5, name=name + '.bn2')(_x)

  if num_channels != out_dim or stride != 1:
    shortcut = Conv2D(out_dim, 1, strides=stride, use_bias=False, name=name + '.shortcut.0')(
        shortcut)
    shortcut = BatchNormalization(epsilon=1e-5, name=name + '.shortcut.1')(shortcut)

  _x = Add(name=name + '.add')([_x, shortcut])
  _x = Activation('relu', name=name + '.relu')(_x)
  return _x 
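
An illustrative call (assuming the Keras layer imports used by hourglass.py): at stride 1 the spatial size is kept, and the 1x1 shortcut projection kicks in because the channel counts differ.

from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input

inp = Input(shape=(64, 64, 128))
out = residual(inp, 256, name='res1')
print(K.int_shape(out))  # (None, 64, 64, 256)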
Example #8
Source File: hourglass.py    From DeepPoseKit with Apache License 2.0
def __call__(self, inputs):
        identity = inputs
        if K.int_shape(identity)[-1] == self.filters:
            identity = self.identity_bn(identity)
        else:
            identity = self.identity_bn(identity)
            identity = self.identity_1x1(identity)

        x = inputs
        x = self.bottleneck_1x1_bn(x)
        x = self.bottleneck_1x1(x)

        x = self.bottleneck_3x3_bn(x)
        x = self.bottleneck_3x3(x)

        x = self.expansion_1x1_bn(x)
        x = self.expansion_1x1(x)

        x = self.residual_add_bn(x)
        return self.residual_add([identity, x]) 
Example #9
Source File: keras_layers.py    From DeepPavlov with Apache License 2.0
def call(self, x, **kwargs):
        assert isinstance(x, list)
        inp_a, inp_b = x

        outp_a = K.l2_normalize(inp_a, -1)
        outp_b = K.l2_normalize(inp_b, -1)
        alpha = K.batch_dot(outp_b, outp_a, axes=[1, 1])
        alpha = K.l2_normalize(alpha, 1)
        hmean = K.batch_dot(outp_b, alpha, axes=[2, 1])
        kcon = K.eye(K.int_shape(inp_a)[1], dtype='float32')

        m = []
        for i in range(self.output_dim):
            outp_a = inp_a * self.W[i]
            outp_hmean = hmean * self.W[i]
            outp_a = K.l2_normalize(outp_a, -1)
            outp_hmean = K.l2_normalize(outp_hmean, -1)
            outp = K.batch_dot(outp_hmean, outp_a, axes=[2, 2])
            outp = K.sum(outp * kcon, -1, keepdims=True)
            m.append(outp)
        if self.output_dim > 1:
            persp = K.concatenate(m, 2)
        else:
            persp = m[0]
        return [persp, persp] 
Example #10
Source File: rnn.py    From cxplain with MIT License
def build(self, input_layer):
        last_layer = input_layer
        input_shape = K.int_shape(input_layer)

        if self.with_embedding:
            if input_shape[-1] != 1:
                raise ValueError("Only one feature (the index) can be used with embeddings, "
                                 "i.e. the input shape should be (num_samples, length, 1). "
                                 "The actual shape was: " + str(input_shape))

            last_layer = Lambda(lambda x: K.squeeze(x, axis=-1),
                                output_shape=K.int_shape(last_layer)[:-1])(last_layer)  # Remove feature dimension.
            last_layer = Embedding(self.embedding_size, self.embedding_dimension,
                                   input_length=input_shape[-2])(last_layer)

        for _ in range(self.num_layers):
            last_layer = Dense(self.num_units, activation=self.activation)(last_layer)
            if self.with_bn:
                last_layer = BatchNormalization()(last_layer)
            if not np.isclose(self.p_dropout, 0):
                last_layer = Dropout(self.p_dropout)(last_layer)
        return last_layer 
Example #11
Source File: cvae-cnn-mnist-8.2.1.py    From Advanced-Deep-Learning-with-Keras with MIT License
def sampling(args):
    """Implements reparameterization trick by sampling
    from a gaussian with zero mean and std=1.

    Arguments:
        args (tensor): mean and log of variance of Q(z|X)

    Returns:
        sampled latent vector (tensor)
    """

    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon 
Example #12
Source File: vae-mlp-mnist-8.1.1.py    From Advanced-Deep-Learning-with-Keras with MIT License
def sampling(args):
    """Reparameterization trick by sampling 
        from an isotropic unit Gaussian.

    # Arguments:
        args (tensor): mean and log of variance of Q(z|X)

    # Returns:
        z (tensor): sampled latent vector
    """

    z_mean, z_log_var = args
    # K is the keras backend
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon 
Example #13
Source File: main.py    From Implementation-CVPR2015-CNN-for-ReID with MIT License
def _upsample_neighbor_function(input_x):
    input_x_pad = K.spatial_2d_padding(input_x, padding=((2, 2), (2, 2)))
    x_length = K.int_shape(input_x)[1]
    y_length = K.int_shape(input_x)[2]
    output_x_list = []
    output_y_list = []
    for i_x in range(2, x_length + 2):
        for i_y in range(2, y_length + 2):
            output_y_list.append(input_x_pad[:, i_x-2:i_x+3, i_y-2:i_y+3, :])
        output_x_list.append(K.concatenate(output_y_list, axis=2))
        output_y_list = []
    return K.concatenate(output_x_list, axis=1) 
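
An eager shape check (illustrative sizes): every spatial position is replaced by its padded 5x5 neighborhood, so height and width each grow by a factor of 5.

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.random.normal((1, 4, 4, 3))
y = _upsample_neighbor_function(x)
print(K.int_shape(y))  # (1, 20, 20, 3)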
Example #14
Source File: attn_augconv.py    From keras-attention-augmented-convs with MIT License
def augmented_conv2d(ip, filters, kernel_size=(3, 3), strides=(1, 1),
                     depth_k=0.2, depth_v=0.2, num_heads=8, relative_encodings=True):
    """
    Builds an Attention Augmented Convolution block.

    Args:
        ip: keras tensor.
        filters: number of output filters.
        kernel_size: convolution kernel size.
        strides: strides of the convolution.
        depth_k: float or int. Number of filters for `k`.
            If passed as float, computed as `filters * depth_k`.
        depth_v: float or int. Number of filters for `v`.
            If passed as float, computed as `filters * depth_v`.
        num_heads: int. Number of attention heads.
            Must be set such that `depth_k // num_heads` is > 0.
        relative_encodings: bool. Whether to use relative
            encodings or not.

    Returns:
        a keras tensor.
    """
    # input_shape = K.int_shape(ip)
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    depth_k, depth_v = _normalize_depth_vars(depth_k, depth_v, filters)

    conv_out = _conv_layer(filters - depth_v, kernel_size, strides)(ip)

    # Augmented Attention Block
    qkv_conv = _conv_layer(2 * depth_k + depth_v, (1, 1), strides)(ip)
    attn_out = AttentionAugmentation2D(depth_k, depth_v, num_heads, relative_encodings)(qkv_conv)
    attn_out = _conv_layer(depth_v, kernel_size=(1, 1))(attn_out)

    output = concatenate([conv_out, attn_out], axis=channel_axis)
    output = BatchNormalization()(output)
    return output 
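
A hedged usage sketch (assumes AttentionAugmentation2D and the module's _conv_layer/_normalize_depth_vars helpers are in scope, as in attn_augconv.py):

from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input

ip = Input(shape=(32, 32, 3))
# 16 conv filters + 4 attention filters are concatenated to 20 channels
x = augmented_conv2d(ip, filters=20, kernel_size=(3, 3),
                     depth_k=4, depth_v=4, num_heads=4)
print(K.int_shape(x))  # (None, 32, 32, 20)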
Example #15
Source File: layers.py    From thundernet-tensorflow2.0 with MIT License
def call(self, inputs, training=False):
        x = self.conv1_bn_relu(inputs, training=training)
        x = self.pool1(x)
        x = self.stage2(x, training=training)
        C4 = self.stage3(x, training=training)
        C5 = self.stage4(C4, training=training)
        #print("C5: ", K.int_shape(C5))
        #x = self.conv5_bn_relu(x, training=training)
        Cglb = self.gap(C5)
        print(K.int_shape(Cglb))
        x = self.linear(Cglb)
        #print(K.int_shape(x))

        return x, C4, C5, Cglb 
Example #16
Source File: layers.py    From thundernet-tensorflow2.0 with MIT License
def call(self, inputs, training=False):
        #print(K.int_shape(inputs))
        # split the channel
        shortcut, x = tf.split(inputs, 2, axis=3)

        x = self.conv1_bn_relu(x, training=training)
        x = self.dconv_bn(x, training=training)
        x = self.conv2_bn_relu(x, training=training)

        x = tf.concat([shortcut, x], axis=3)
        #print(K.int_shape(x))
        x = channle_shuffle(x, 2)
        return x 
Example #17
Source File: mobilenet_base.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def _bottleneck(self, inputs, filters, kernel, e, s, squeeze, nl):
        """Bottleneck
        This function defines a basic bottleneck structure.
        # Arguments
            inputs: Tensor, input tensor of conv layer.
            filters: Integer, the dimensionality of the output space.
            kernel: An integer or tuple/list of 2 integers, specifying the
                width and height of the 2D convolution window.
            e: Integer, expansion factor.
                e is always applied to the input size.
            s: An integer or tuple/list of 2 integers,specifying the strides
                of the convolution along the width and height.Can be a single
                integer to specify the same value for all spatial dimensions.
            squeeze: Boolean, whether to use the squeeze-and-excitation block.
            nl: String, nonlinearity activation type.
        # Returns
            Output tensor.
        """

        channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
        input_shape = K.int_shape(inputs)
        tchannel = input_shape[channel_axis] * e
        r = s == 1 and input_shape[channel_axis] == filters  # residual connection when shapes match

        x = self._conv_block(inputs, tchannel, (1, 1), (1, 1), nl)

        x = DepthwiseConv2D(kernel, strides=(s, s), depth_multiplier=1, padding='same')(x)
        x = BatchNormalization(axis=channel_axis)(x)

        if squeeze:
            x = Lambda(lambda x: x * self._squeeze(x))(x)

        x = self._return_activation(x, nl)

        x = Conv2D(filters, (1, 1), strides=(1, 1), padding='same')(x)
        x = BatchNormalization(axis=channel_axis)(x)

        if r:
            x = Add()([x, inputs])

        return x 
Example #18
Source File: operation_layers.py    From onnx2keras with MIT License
def convert_split(node, params, layers, lambda_func, node_name, keras_names):
    """
    Convert Split layer
    :param node: current operation node
    :param params: operation attributes
    :param layers: available keras layers
    :param lambda_func: function for keras Lambda layer
    :param node_name: internal converter name
    :param keras_names: resulting layer names
    :return: None
    """
    if len(node.input) != 1:
        raise AttributeError('More than 1 input for split layer.')

    input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_names[0])
    splits = params["split"]
    axis = params.get("axis", 0)
    if not isinstance(splits, Iterable):
        # This might not work if `split` is a tensor.
        chunk_size = K.int_shape(input_0)[axis] // splits
        splits = (chunk_size,) * splits

    cur = 0
    for i, split in enumerate(splits):
        node_name = params['_outputs'][i]

        def target_layer(x, axis=axis, start_i=cur, end_i=cur+split):
            slices = [slice(None, None)] * len(K.int_shape(x))
            slices[axis] = slice(start_i, end_i)
            return x[tuple(slices)]

        lambda_layer = keras.layers.Lambda(target_layer, name=keras_names[i])
        layers[node_name] = lambda_layer(input_0)
        lambda_func[keras_names[i]] = target_layer
        cur += split 
Example #19
Source File: base.py    From spektral with MIT License
def call(self, inputs):
        F = K.int_shape(inputs)[-1]
        minkowski_prod_mat = np.eye(F)
        minkowski_prod_mat[-1, -1] = -1.
        minkowski_prod_mat = K.constant(minkowski_prod_mat)
        output = K.dot(inputs, minkowski_prod_mat)
        output = K.dot(output, K.transpose(inputs))
        output = K.clip(output, -10e9, -1.)

        if self.activation is not None:
            output = self.activation(output)

        return output 
Example #20
Source File: gated_graph_conv.py    From spektral with MIT License
def call(self, inputs):
        X, A, E = self.get_inputs(inputs)
        F = K.int_shape(X)[-1]

        to_pad = self.channels - F
        output = tf.pad(X, [[0, 0], [0, to_pad]])
        for i in range(self.n_layers):
            m = tf.matmul(output, self.kernel[i])
            m = self.propagate(m, A)
            output = self.rnn(m, [output])[0]

        output = self.activation(output)
        return output 
Example #21
Source File: affinity_loss_tpu.py    From affinity-loss with MIT License
def call(self, f):
        # Euclidean space similarity measure
        # calculate d(f_i, w_j)
        f_expand = tf.expand_dims(tf.expand_dims(f, axis=1), axis=1)
        w_expand = tf.expand_dims(self.W, axis=0)
        fw_norm = tf.reduce_sum((f_expand-w_expand)**2, axis=-1)
        distance = tf.exp(-fw_norm/self.sigma)
        distance = tf.reduce_max(distance, axis=-1) # (N,C,m)->(N,C)

        # Regularization
        hidden_layers = K.int_shape(self.W)[2]
        mc = self.n_classes * self.n_centers
        w_reshape = tf.reshape(self.W, [mc, hidden_layers])
        w_reshape_expand1 = tf.expand_dims(w_reshape, axis=0)
        w_reshape_expand2 = tf.expand_dims(w_reshape, axis=1)
        w_norm_mat = tf.reduce_sum((w_reshape_expand2 - w_reshape_expand1)**2, axis=-1)
        w_norm_upper = self.upper_triangle(w_norm_mat)
        mu = 2.0 / (mc**2 - mc) * tf.reduce_sum(w_norm_upper)
        residuals = self.upper_triangle((w_norm_upper - mu)**2)
        rw = 2.0 / (mc**2 - mc) * tf.reduce_sum(residuals)

        batch_size = tf.shape(f)[0]
        rw_broadcast = tf.ones((batch_size,1)) * rw

        # outputs distance(N, C) + rw(N,)
        output = tf.concat([distance, rw_broadcast], axis=-1)
        return output 
Example #22
Source File: keras_layers.py    From DeepPavlov with Apache License 2.0
def call(self, x, **kwargs):
        assert isinstance(x, list)
        inp_a, inp_b = x

        outp_a = K.l2_normalize(inp_a, -1)
        outp_b = K.l2_normalize(inp_b, -1)
        alpha = K.batch_dot(outp_b, outp_a, axes=[2, 2])
        alpha = K.l2_normalize(alpha, 1)
        alpha = K.one_hot(K.argmax(alpha, 1), K.int_shape(inp_a)[1])
        hmax = K.batch_dot(alpha, outp_b, axes=[1, 1])
        kcon = K.eye(K.int_shape(inp_a)[1], dtype='float32')

        m = []
        for i in range(self.output_dim):
            outp_a = inp_a * self.W[i]
            outp_hmax = hmax * self.W[i]
            outp_a = K.l2_normalize(outp_a, -1)
            outp_hmax = K.l2_normalize(outp_hmax, -1)
            outp = K.batch_dot(outp_hmax, outp_a, axes=[2, 2])
            outp = K.sum(outp * kcon, -1, keepdims=True)
            m.append(outp)
        if self.output_dim > 1:
            persp = K.concatenate(m, 2)
        else:
            persp = m[0]
        return [persp, persp] 
Example #23
Source File: ecc_conv.py    From spektral with MIT License
def call(self, inputs):
        X = inputs[0]  # (batch_size, N, F)
        A = inputs[1]  # (batch_size, N, N)
        E = inputs[2]  # (n_edges, S) or (batch_size, N, N, S)

        mode = ops.autodetect_mode(A, X)
        if mode == modes.SINGLE:
            return self._call_single(inputs)

        # Parameters
        N = K.shape(X)[-2]
        F = K.int_shape(X)[-1]
        F_ = self.channels

        # Filter network
        kernel_network = E
        for l in self.kernel_network_layers:
            kernel_network = l(kernel_network)

        # Convolution
        target_shape = (-1, N, N, F_, F) if mode == modes.BATCH else (N, N, F_, F)
        kernel = K.reshape(kernel_network, target_shape)
        output = kernel * A[..., None, None]
        output = tf.einsum('abicf,aif->abc', output, X)

        if self.root:
            output += ops.dot(X, self.root_kernel)
        if self.use_bias:
            output = K.bias_add(output, self.bias)
        if self.activation is not None:
            output = self.activation(output)

        return output 
Example #24
Source File: keras_layers.py    From DeepPavlov with Apache License 2.0
def multiplicative_self_attention(units, n_hidden=None, n_output_features=None, activation=None):
    """
    Compute multiplicative self attention for a time series of vectors (with batch dimension)
    using the formula score(h_i, h_j) = <W_1 h_i, W_2 h_j>, where W_1 and W_2 are learnable
    matrices with dimensionality [n_hidden, n_input_features]

    Args:
        units: tf tensor with dimensionality [batch_size, time_steps, n_input_features]
        n_hidden: number of units in hidden representation of similarity measure
        n_output_features: number of features in output dense layer
        activation: activation at the output

    Returns:
        output: self attended tensor with dimensionality [batch_size, time_steps, n_output_features]
    """
    n_input_features = K.int_shape(units)[2]
    if n_hidden is None:
        n_hidden = n_input_features
    if n_output_features is None:
        n_output_features = n_input_features
    exp1 = Lambda(lambda x: expand_tile(x, axis=1))(units)
    exp2 = Lambda(lambda x: expand_tile(x, axis=2))(units)
    queries = Dense(n_hidden)(exp1)
    keys = Dense(n_hidden)(exp2)
    scores = Lambda(lambda x: K.sum(queries * x, axis=3, keepdims=True))(keys)
    attention = Lambda(lambda x: softmax(x, axis=2))(scores)
    mult = Multiply()([attention, exp1])
    attended_units = Lambda(lambda x: K.sum(x, axis=2))(mult)
    output = Dense(n_output_features, activation=activation)(attended_units)
    return output 
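
A hedged shape check (assumes this module's imports, including expand_tile above, and a TF version where these symbolic ops compose; sizes are illustrative):

from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input

units = Input(shape=(10, 32))   # [batch, time_steps=10, n_input_features=32]
attended = multiplicative_self_attention(units, n_hidden=16, n_output_features=24)
print(K.int_shape(attended))    # (None, 10, 24)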
Example #25
Source File: keras_layers.py    From DeepPavlov with Apache License 2.0
def additive_self_attention(units, n_hidden=None, n_output_features=None, activation=None):
    """
    Compute additive self attention for a time series of vectors (with batch dimension)
    using the formula score(h_i, h_j) = <v, tanh(W_1 h_i + W_2 h_j)>, where v is a
    learnable vector of dimensionality n_hidden, and W_1 and W_2 are learnable
    [n_hidden, n_input_features] matrices

    Args:
        units: tf tensor with dimensionality [batch_size, time_steps, n_input_features]
        n_hidden: number of units in the hidden representation of the similarity measure
        n_output_features: number of features in the output dense layer
        activation: activation at the output

    Returns:
        output: self attended tensor with dimensionality [batch_size, time_steps, n_output_features]
    """
    n_input_features = K.int_shape(units)[2]
    if n_hidden is None:
        n_hidden = n_input_features
    if n_output_features is None:
        n_output_features = n_input_features
    exp1 = Lambda(lambda x: expand_tile(x, axis=1))(units)
    exp2 = Lambda(lambda x: expand_tile(x, axis=2))(units)
    units_pairs = Concatenate(axis=3)([exp1, exp2])
    query = Dense(n_hidden, activation="tanh")(units_pairs)
    attention = Dense(1, activation=lambda x: softmax(x, axis=2))(query)
    attended_units = Lambda(lambda x: K.sum(attention * x, axis=2))(exp1)
    output = Dense(n_output_features, activation=activation)(attended_units)
    return output 
Example #26
Source File: base.py    From megnet with BSD 3-Clause "New" or "Revised" License
def check_dimension(self, graph: Dict) -> None:
        """
        Check the model input dimensions against the graph converter dimensions.

        Args:
            graph: structure graph

        Raises:
            ValueError: if a data dimension does not match the model's required shape.
        """
        test_inp = self.graph_converter.graph_to_input(graph)
        input_shapes = [i.shape for i in test_inp]

        model_input_shapes = [int_shape(i) for i in self.model.inputs]

        def _check_match(real_shape, tensor_shape):
            if len(real_shape) != len(tensor_shape):
                return False
            matched = True
            for i, j in zip(real_shape, tensor_shape):
                if j is None:
                    continue
                else:
                    if i == j:
                        continue
                    else:
                        matched = False
            return matched

        for i, j, k in zip(['atom features', 'bond features', 'state features'],
                           input_shapes[:3], model_input_shapes[:3]):
            matched = _check_match(j, k)
            if not matched:
                raise ValueError("The data dimension for %s is %s and does not match model "
                                 "required shape of %s" % (i, str(j), str(k))) 
Example #27
Source File: attention.py    From fancy-nlp with GNU General Public License v3.0
def call(self, inputs, mask=None):
        """
        convert to query, key, value vectors, shaped [batch_size*num_head, time_step, embed_dim]
        """
        multihead_query = K.concatenate(tf.split(K.dot(inputs, self.w_q),
                                                 self.num_heads, axis=2), axis=0)
        multihead_key = K.concatenate(tf.split(K.dot(inputs, self.w_k),
                                               self.num_heads, axis=2), axis=0)
        multihead_value = K.concatenate(tf.split(K.dot(inputs, self.w_v),
                                                 self.num_heads, axis=2), axis=0)

        """scaled dot product"""
        scaled = K.int_shape(inputs)[-1] ** -0.5
        attend = K.batch_dot(multihead_query, multihead_key, axes=2) * scaled
        # apply mask before normalization (softmax)
        if mask is not None:
            multihead_mask = K.tile(mask, [self.num_heads, 1])
            attend *= K.expand_dims(K.cast(multihead_mask, K.floatx()), 2)
            attend *= K.expand_dims(K.cast(multihead_mask, K.floatx()), 1)
        # normalization
        attend = attend / K.cast(K.sum(attend, axis=-1, keepdims=True) + K.epsilon(), K.floatx())
        # apply attention
        attend = K.batch_dot(attend, multihead_value, axes=(2, 1))
        attend = tf.concat(tf.split(attend, self.num_heads, axis=0), axis=2)
        attend = K.dot(attend, self.w_final)

        if self.residual:
            attend = attend + inputs
        if self.normalize:
            mean = K.mean(attend, axis=-1, keepdims=True)
            std = K.std(attend, axis=-1, keepdims=True)
            attend = self.gamma * (attend - mean) / (std + K.epsilon()) + self.beta

        return attend 
Example #28
Source File: vanilla_unet.py    From keras-unet with MIT License
def vanilla_unet(
    input_shape,
    num_classes=1,
    dropout=0.5, 
    filters=64,
    num_layers=4,
    output_activation='sigmoid'): # 'sigmoid' or 'softmax'

    # Build U-Net model
    inputs = Input(input_shape)
    x = inputs   

    down_layers = []
    for l in range(num_layers):
        x = conv2d_block(inputs=x, filters=filters, use_batch_norm=False, dropout=0.0, padding='valid')
        down_layers.append(x)
        x = MaxPooling2D((2, 2), strides=2) (x)
        filters = filters*2 # double the number of filters with each layer

    x = Dropout(dropout)(x)
    x = conv2d_block(inputs=x, filters=filters, use_batch_norm=False, dropout=0.0, padding='valid')

    for conv in reversed(down_layers):
        filters //= 2 # decreasing number of filters with each layer 
        x = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='valid') (x)
        
        ch, cw = get_crop_shape(int_shape(conv), int_shape(x))
        conv = Cropping2D(cropping=(ch, cw))(conv)

        x = concatenate([x, conv])
        x = conv2d_block(inputs=x, filters=filters, use_batch_norm=False, dropout=0.0, padding='valid')
    
    outputs = Conv2D(num_classes, (1, 1), activation=output_activation) (x)    
    
    model = Model(inputs=[inputs], outputs=[outputs])
    return model 
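
A usage sketch, assuming the module's conv2d_block and get_crop_shape helpers are in scope as in keras-unet; 572x572 is the classic valid-padding U-Net input size, at which the crops line up:

model = vanilla_unet(input_shape=(572, 572, 3))
model.summary()  # the output map is smaller than the input because of 'valid' convolutions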
Example #29
Source File: shapelets.py    From tslearn with BSD 2-Clause "Simplified" License
def call(self, x, **kwargs):
        # (x - y)^2 = x^2 + y^2 - 2 * x * y
        x_sq = K.expand_dims(K.sum(x ** 2, axis=2), axis=-1)
        y_sq = K.reshape(K.sum(self.kernel ** 2, axis=1),
                         (1, 1, self.n_shapelets))
        xy = K.dot(x, K.transpose(self.kernel))
        return (x_sq + y_sq - 2 * xy) / K.int_shape(self.kernel)[1] 
Example #30
Source File: resnet.py    From keras-tuner with Apache License 2.0
def block3(x, filters, kernel_size=3, stride=1, groups=32,
           conv_shortcut=True, name=None):
    """A residual block.
    # Arguments
        x: input tensor.
        filters: integer, filters of the bottleneck layer.
        kernel_size: default 3, kernel size of the bottleneck layer.
        stride: default 1, stride of the first layer.
        groups: default 32, group size for grouped convolution.
        conv_shortcut: default True, use convolution shortcut if True,
            otherwise identity shortcut.
        name: string, block label.
    # Returns
        Output tensor for the residual block.
    """
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1

    if conv_shortcut is True:
        shortcut = layers.Conv2D((64 // groups) * filters, 1, strides=stride,
                                 use_bias=False, name=name + '_0_conv')(x)
        shortcut = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                                             name=name + '_0_bn')(shortcut)
    else:
        shortcut = x

    x = layers.Conv2D(filters, 1, use_bias=False, name=name + '_1_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                                  name=name + '_1_bn')(x)
    x = layers.Activation('relu', name=name + '_1_relu')(x)

    c = filters // groups
    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)
    x = layers.DepthwiseConv2D(kernel_size, strides=stride, depth_multiplier=c,
                               use_bias=False, name=name + '_2_conv')(x)
    x_shape = backend.int_shape(x)[1:-1]
    x = layers.Reshape(x_shape + (groups, c, c))(x)
    output_shape = x_shape + (groups,
                              c) if backend.backend() == 'theano' else None

    x = layers.Lambda(lambda x: sum([x[:, :, :, :, i] for i in range(c)]),
                      output_shape=output_shape, name=name + '_2_reduce')(x)

    x = layers.Reshape(x_shape + (filters,))(x)

    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                                  name=name + '_2_bn')(x)

    x = layers.Activation('relu', name=name + '_2_relu')(x)

    x = layers.Conv2D((64 // groups) * filters, 1, use_bias=False,
                      name=name + '_3_conv')(x)

    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                                  name=name + '_3_bn')(x)

    x = layers.Add(name=name + '_add')([shortcut, x])
    x = layers.Activation('relu', name=name + '_out')(x)
    return x
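
An illustrative call (assuming resnet.py's tensorflow.keras layers/backend imports): with the default groups=32 and conv_shortcut=True, a 64-channel input comes out with (64 // groups) * filters = 128 channels.

from tensorflow.keras import backend, layers

inp = layers.Input(shape=(56, 56, 64))
out = block3(inp, filters=64, name='resnext1')
print(backend.int_shape(out))  # (None, 56, 56, 128)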