Python tensorflow.keras.backend.concatenate() Examples

The following are 30 code examples of tensorflow.keras.backend.concatenate(), drawn from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out all available functions and classes of the module tensorflow.keras.backend, or try the search function.
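A quick orientation before the examples: K.concatenate joins a list of tensors along a single axis (the last axis by default), analogous to tf.concat. A minimal sketch of its behavior:

import tensorflow as tf
from tensorflow.keras import backend as K

a = K.constant([[1., 2.], [3., 4.]])     # shape (2, 2)
b = K.constant([[5., 6.], [7., 8.]])     # shape (2, 2)

K.concatenate([a, b])          # axis=-1 by default -> shape (2, 4)
K.concatenate([a, b], axis=0)  # -> shape (4, 2)
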
Example #1
Source File: keras_layers.py    From DeepPavlov with Apache License 2.0
def call(self, x, **kwargs):
        assert isinstance(x, list)
        inp_a, inp_b = x
        last_state = K.expand_dims(inp_b[:, -1, :], 1)
        m = []
        for i in range(self.output_dim):
            outp_a = inp_a * self.W[i]
            outp_last = last_state * self.W[i]
            outp_a = K.l2_normalize(outp_a, -1)
            outp_last = K.l2_normalize(outp_last, -1)
            outp = K.batch_dot(outp_a, outp_last, axes=[2, 2])
            m.append(outp)
        if self.output_dim > 1:
            persp = K.concatenate(m, 2)
        else:
            persp = m[0]
        return [persp, persp] 
Example #2
Source File: nnet_survival.py    From nnet-survival with MIT License
def surv_likelihood(n_intervals):
  """Create custom Keras loss function for neural network survival model. 
  Arguments
      n_intervals: the number of survival time intervals
  Returns
      Custom loss function that can be used with Keras
  """
  def loss(y_true, y_pred):
    """
    Required to have only 2 arguments by Keras.
    Arguments
        y_true: Tensor.
          First half of the values is 1 if the individual survived that interval, 0 if not.
          Second half of the values is for individuals who failed, and is 1 for the time interval during which failure occurred, 0 for other intervals.
          See make_surv_array function.
        y_pred: Tensor, predicted survival probability (1-hazard probability) for each time interval.
    Returns
        Vector of losses for this minibatch.
    """
    cens_uncens = 1. + y_true[:,0:n_intervals] * (y_pred-1.) #component for all individuals
    uncens = 1. - y_true[:,n_intervals:2*n_intervals] * y_pred #component for only uncensored individuals
    return K.sum(-K.log(K.clip(K.concatenate((cens_uncens,uncens)),K.epsilon(),None)),axis=-1) #return -log likelihood
  return loss 
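
The returned closure plugs straight into model.compile. A minimal usage sketch (the network body and the 20-feature input are illustrative assumptions; y_true is built with the repo's make_surv_array helper):

n_intervals = 10
model = tf.keras.Sequential([
    # hypothetical network body; the final layer must emit one survival
    # probability per time interval
    tf.keras.layers.Dense(n_intervals, activation='sigmoid', input_shape=(20,)),
])
model.compile(loss=surv_likelihood(n_intervals), optimizer='adam')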
Example #3
Source File: RigidTransformation3DImputation.py    From aitom with GNU General Public License v3.0
def _mask_rotation_matrix_zyz(self, params):
        phi = params[0] * 2 * np.pi - np.pi
        theta = params[1] * 2 * np.pi - np.pi
        psi_t = params[2] * 2 * np.pi - np.pi
        
        loc_r = params[3:6] * 0    # the magnitude of the Fourier transform is translation-invariant, so the translation is zeroed out
        
        
        a1 = self._rotation_matrix_axis(2, psi_t)
        a2 = self._rotation_matrix_axis(1, theta)
        a3 = self._rotation_matrix_axis(2, phi)     
        rm = K.dot(K.dot(a3,a2),a1)
        
        rm = tf.transpose(rm)
        
        c = K.dot(-rm, K.expand_dims(loc_r))
        
        rm = K.flatten(rm)
        
        theta = K.concatenate([rm[:3], c[0], rm[3:6], c[1], rm[6:9], c[2]])  # flattened 3x4 affine matrix [R | t], row by row

        return theta 
Example #4
Source File: RigidTransformation3DImputation.py    From aitom with GNU General Public License v3.0
def _rotation_matrix_zyz(self, params):
        phi = params[0] * 2 * np.pi - np.pi
        theta = params[1] * 2 * np.pi - np.pi
        psi_t = params[2] * 2 * np.pi - np.pi
        
        loc_r = params[3:6] * 2 - 1
        
        
        a1 = self._rotation_matrix_axis(2, psi_t)       # first rotate about z axis for angle psi_t
        a2 = self._rotation_matrix_axis(1, theta)
        a3 = self._rotation_matrix_axis(2, phi)     
        rm = K.dot(K.dot(a3,a2),a1)
        
        rm = tf.transpose(rm)
        
        c = K.dot(-rm, K.expand_dims(loc_r))
        
        rm = K.flatten(rm)
        
        theta = K.concatenate([rm[:3], c[0], rm[3:6], c[1], rm[6:9], c[2]])  # flattened 3x4 affine matrix [R | t], row by row

        return theta 
Example #5
Source File: keras_layers.py    From DeepPavlov with Apache License 2.0
def call(self, x, **kwargs):
        assert isinstance(x, list)
        inp_a, inp_b = x
        m = []
        for i in range(self.output_dim):
            outp_a = inp_a * self.W[i]
            outp_b = inp_b * self.W[i]
            outp_a = K.l2_normalize(outp_a, -1)
            outp_b = K.l2_normalize(outp_b, -1)
            outp = K.batch_dot(outp_a, outp_b, axes=[2, 2])
            outp = K.max(outp, -1, keepdims=True)
            m.append(outp)
        if self.output_dim > 1:
            persp = K.concatenate(m, 2)
        else:
            persp = m[0]
        return [persp, persp] 
Example #6
Source File: graphsage_conv.py    From spektral with MIT License
def call(self, inputs):
        features = inputs[0]
        fltr = inputs[1]

        # Enforce sparse representation
        if not K.is_sparse(fltr):
            fltr = ops.dense_to_sparse(fltr)

        # Propagation
        indices = fltr.indices
        N = tf.shape(features, out_type=indices.dtype)[0]
        indices = ops.sparse_add_self_loops(indices, N)
        targets, sources = indices[:, -2], indices[:, -1]
        messages = tf.gather(features, sources)
        aggregated = self.aggregate_op(messages, targets, N)
        output = K.concatenate([features, aggregated])
        output = ops.dot(output, self.kernel)

        if self.use_bias:
            output = K.bias_add(output, self.bias)
        output = K.l2_normalize(output, axis=-1)
        if self.activation is not None:
            output = self.activation(output)
        return output 
Example #7
Source File: backend.py    From DeepPoseKit with Apache License 2.0
def _find_maxima(x, coordinate_scale=1, confidence_scale=255.0):

    x = K.cast(x, K.floatx())

    col_max = K.max(x, axis=1)
    row_max = K.max(x, axis=2)

    maxima = K.max(col_max, 1)
    maxima = K.expand_dims(maxima, -2) / confidence_scale

    cols = K.cast(K.argmax(col_max, -2), K.floatx())
    rows = K.cast(K.argmax(row_max, -2), K.floatx())
    cols = K.expand_dims(cols, -2) * coordinate_scale
    rows = K.expand_dims(rows, -2) * coordinate_scale

    maxima = K.concatenate([cols, rows, maxima], -2)

    return maxima 
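
A shape walkthrough may help here, since each reduction targets a different axis. A sketch assuming channels-last confidence maps (tensor sizes are illustrative):

heatmaps = K.random_uniform((8, 64, 64, 5))  # (batch, rows, cols, keypoints)
coords = _find_maxima(heatmaps)              # -> (batch, 3, keypoints)
# coords[:, 0] is the column (x), coords[:, 1] the row (y),
# and coords[:, 2] the peak value divided by confidence_scale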
Example #8
Source File: keras_layers.py    From DeepPavlov with Apache License 2.0
def call(self, x, **kwargs):
        assert isinstance(x, list)
        inp_a, inp_b = x

        outp_a = K.l2_normalize(inp_a, -1)
        outp_b = K.l2_normalize(inp_b, -1)
        alpha = K.batch_dot(outp_b, outp_a, axes=[2, 2])
        alpha = K.l2_normalize(alpha, 1)
        alpha = K.one_hot(K.argmax(alpha, 1), K.int_shape(inp_a)[1])
        hmax = K.batch_dot(alpha, outp_b, axes=[1, 1])
        kcon = K.eye(K.int_shape(inp_a)[1], dtype='float32')

        m = []
        for i in range(self.output_dim):
            outp_a = inp_a * self.W[i]
            outp_hmax = hmax * self.W[i]
            outp_a = K.l2_normalize(outp_a, -1)
            outp_hmax = K.l2_normalize(outp_hmax, -1)
            outp = K.batch_dot(outp_hmax, outp_a, axes=[2, 2])
            outp = K.sum(outp * kcon, -1, keepdims=True)
            m.append(outp)
        if self.output_dim > 1:
            persp = K.concatenate(m, 2)
        else:
            persp = m[0]
        return [persp, persp] 
Example #9
Source File: postprocess.py    From keras-YOLOv3-model-set with MIT License
def yolo3_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[..., ::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[..., ::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs 
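
The tile/reshape pair in yolo3_head is a backend-only meshgrid. A small sketch of what it produces for a 3x2 feature map (sizes chosen for illustration):

gy = K.tile(K.reshape(K.arange(0, stop=3), [-1, 1, 1, 1]), [1, 2, 1, 1])
gx = K.tile(K.reshape(K.arange(0, stop=2), [1, -1, 1, 1]), [3, 1, 1, 1])
grid = K.concatenate([gx, gy])  # shape (3, 2, 1, 2); last axis is (x, y)
# grid[i, j, 0] == [j, i]: each cell holds its own (column, row) offset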
Example #10
Source File: attn_augconv.py    From keras-attention-augmented-convs with MIT License
def rel_to_abs(self, x):
        shape = K.shape(x)
        shape = [shape[i] for i in range(3)]
        B, Nh, L = shape
        col_pad = K.zeros(K.stack([B, Nh, L, 1]))
        x = K.concatenate([x, col_pad], axis=3)
        flat_x = K.reshape(x, [B, Nh, L * 2 * L])
        flat_pad = K.zeros(K.stack([B, Nh, L - 1]))
        flat_x_padded = K.concatenate([flat_x, flat_pad], axis=2)
        final_x = K.reshape(flat_x_padded, [B, Nh, L + 1, 2 * L - 1])
        final_x = final_x[:, :, :L, L - 1:]
        return final_x 
Example #11
Source File: bilstm_siamese_network.py    From DeepPavlov with Apache License 2.0
def _pairwise_distances(self, inputs: List[Tensor]) -> Tensor:
        emb_c, emb_r = inputs
        bs = K.shape(emb_c)[0]
        embeddings = K.concatenate([emb_c, emb_r], 0)
        dot_product = K.dot(embeddings, K.transpose(embeddings))
        square_norm = K.batch_dot(embeddings, embeddings, axes=1)
        distances = K.transpose(square_norm) - 2.0 * dot_product + square_norm
        distances = distances[0:bs, bs:bs+bs]
        distances = K.clip(distances, 0.0, None)
        mask = K.cast(K.equal(distances, 0.0), K.dtype(distances))
        distances = distances + mask * 1e-16
        distances = K.sqrt(distances)
        distances = distances * (1.0 - mask)
        return distances 
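
The function above relies on the identity ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2, evaluated for all pairs at once via broadcasting; the mask guards the sqrt gradient at exact zeros. A tiny numeric check (toy values assumed):

emb = K.constant([[0., 0.], [3., 4.]])
dot = K.dot(emb, K.transpose(emb))        # all pairwise dot products
sq = K.batch_dot(emb, emb, axes=1)        # squared norms, shape (2, 1)
K.sqrt(K.transpose(sq) - 2.0 * dot + sq)  # off-diagonal entries are 5.0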
Example #12
Source File: bilstm_siamese_network.py    From DeepPavlov with Apache License 2.0
def _diff_mult_dist(self, inputs: List[Tensor]) -> Tensor:
        input1, input2 = inputs
        a = K.abs(input1 - input2)
        b = Multiply()(inputs)
        return K.concatenate([input1, input2, a, b]) 
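
This is the common [u, v, |u - v|, u * v] matching heuristic for encoded pairs; the four blocks are simply concatenated along the feature axis. A shape sketch (sizes assumed):

u = K.random_uniform((32, 300))  # (batch, hidden)
v = K.random_uniform((32, 300))
K.concatenate([u, v, K.abs(u - v), u * v])  # -> (32, 1200)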
Example #13
Source File: bilstm_gru_siamese_network.py    From DeepPavlov with Apache License 2.0
def create_model(self) -> Model:
        input = []
        if self.use_matrix:
            for i in range(self.num_context_turns + 1):
                input.append(Input(shape=(self.max_sequence_length,)))
            context = input[:self.num_context_turns]
            response = input[-1]
            emb_layer = self.embedding_layer()
            emb_c = [emb_layer(el) for el in context]
            emb_r = emb_layer(response)
        else:
            for i in range(self.num_context_turns + 1):
                input.append(Input(shape=(self.max_sequence_length, self.embedding_dim,)))
            context = input[:self.num_context_turns]
            response = input[-1]
            emb_c = context
            emb_r = response
        lstm_layer = self.lstm_layer()
        lstm_c = [lstm_layer(el) for el in emb_c]
        lstm_r = lstm_layer(emb_r)
        pooling_layer = GlobalMaxPooling1D(name="pooling")
        lstm_c = [pooling_layer(el) for el in lstm_c]
        lstm_r = pooling_layer(lstm_r)
        lstm_c = [Lambda(lambda x: K.expand_dims(x, 1))(el) for el in lstm_c]
        lstm_c = Lambda(lambda x: K.concatenate(x, 1))(lstm_c)
        gru_layer = GRU(2 * self.hidden_dim, name="gru")
        gru_c = gru_layer(lstm_c)

        if self.triplet_mode:
            dist = Lambda(self._pairwise_distances)([gru_c, lstm_r])
        else:
            dist = Lambda(self._diff_mult_dist)([gru_c, lstm_r])
            dist = Dense(1, activation='sigmoid', name="score_model")(dist)
        model = Model(context + [response], dist)
        return model 
Example #14
Source File: set2set.py    From megnet with BSD 3-Clause "New" or "Revised" License
def call(self, inputs, mask=None):
        features, feature_graph_index = inputs
        feature_graph_index = tf.reshape(feature_graph_index, (-1,))
        _, _, count = tf.unique_with_counts(feature_graph_index)
        m = kb.dot(features, self.m_weight)
        if self.use_bias:
            m += self.m_bias

        self.h = tf.zeros(tf.stack(
            [tf.shape(input=features)[0], tf.shape(input=count)[0], self.n_hidden]))
        self.c = tf.zeros(tf.stack(
            [tf.shape(input=features)[0], tf.shape(input=count)[0], self.n_hidden]))
        q_star = tf.zeros(tf.stack(
            [tf.shape(input=features)[0], tf.shape(input=count)[0], 2 * self.n_hidden]))
        for i in range(self.T):
            self.h, c = self._lstm(q_star, self.c)
            e_i_t = tf.reduce_sum(
                input_tensor=m * repeat_with_index(self.h, feature_graph_index), axis=-1)
            exp = tf.exp(e_i_t)
            # print('exp shape ', exp.shape)
            seg_sum = tf.transpose(
                a=tf.math.segment_sum(
                    tf.transpose(a=exp, perm=[1, 0]),
                    feature_graph_index),
                perm=[1, 0])
            seg_sum = tf.expand_dims(seg_sum, axis=-1)
            # print('seg_sum shape', seg_sum.shape)
            interm = repeat_with_index(seg_sum, feature_graph_index)
            # print('interm shape', interm.shape)
            a_i_t = exp / interm[..., 0]
            # print(a_i_t.shape)
            r_t = tf.transpose(a=tf.math.segment_sum(
                tf.transpose(a=tf.multiply(m, a_i_t[:, :, None]), perm=[1, 0, 2]),
                feature_graph_index), perm=[1, 0, 2])
            q_star = kb.concatenate([self.h, r_t], axis=-1)
        return q_star 
Example #15
Source File: postprocess.py    From keras-YOLOv3-model-set with MIT License
def yolo3_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    '''Get corrected boxes'''
    input_shape = K.cast(input_shape, K.dtype(box_xy))
    image_shape = K.cast(image_shape, K.dtype(box_xy))

    #reshape the image_shape tensor to align with boxes dimension
    image_shape = K.reshape(image_shape, [-1, 1, 1, 1, 2])

    new_shape = K.round(image_shape * K.min(input_shape/image_shape))
    offset = (input_shape-new_shape)/2./input_shape
    scale = input_shape/new_shape
    # reverse offset/scale to match (w,h) order
    offset = offset[..., ::-1]
    scale = scale[..., ::-1]

    box_xy = (box_xy - offset) * scale
    box_wh *= scale

    box_mins = box_xy - (box_wh / 2.)
    box_maxes = box_xy + (box_wh / 2.)
    boxes =  K.concatenate([
        box_mins[..., 0:1],  # x_min
        box_mins[..., 1:2],  # y_min
        box_maxes[..., 0:1],  # x_max
        box_maxes[..., 1:2]  # y_max
    ])

    # Scale boxes back to original image shape.
    image_wh = image_shape[..., ::-1]
    boxes *= K.concatenate([image_wh, image_wh])
    return boxes 
Example #16
Source File: postprocess.py    From keras-YOLOv3-model-set with MIT License
def yolo2_boxes_to_corners(box_xy, box_wh):
    """Convert YOLOv2 box predictions to bounding box corners."""
    box_mins = box_xy - (box_wh / 2.)
    box_maxes = box_xy + (box_wh / 2.)

    return K.concatenate([
        box_mins[..., 1:2],  # y_min
        box_mins[..., 0:1],  # x_min
        box_maxes[..., 1:2],  # y_max
        box_maxes[..., 0:1]  # x_max
    ]) 
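
A worked toy example of the corner conversion (values assumed): a box centered at (0.5, 0.5) with width 0.2 and height 0.4 becomes (y_min, x_min, y_max, x_max):

box_xy = K.constant([[0.5, 0.5]])
box_wh = K.constant([[0.2, 0.4]])
yolo2_boxes_to_corners(box_xy, box_wh)  # -> [[0.3, 0.4, 0.7, 0.6]]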
Example #17
Source File: attention.py    From fancy-nlp with GNU General Public License v3.0
def call(self, inputs, mask=None):
        """
        Convert inputs to query, key, and value vectors, shaped [batch_size * num_heads, time_step, embed_dim].
        """
        multihead_query = K.concatenate(tf.split(K.dot(inputs, self.w_q),
                                                 self.num_heads, axis=2), axis=0)
        multihead_key = K.concatenate(tf.split(K.dot(inputs, self.w_k),
                                               self.num_heads, axis=2), axis=0)
        multihead_value = K.concatenate(tf.split(K.dot(inputs, self.w_v),
                                                 self.num_heads, axis=2), axis=0)

        """scaled dot product"""
        scaled = K.int_shape(inputs)[-1] ** -0.5
        attend = K.batch_dot(multihead_query, multihead_key, axes=2) * scaled
        # apply mask before normalization (softmax)
        if mask is not None:
            multihead_mask = K.tile(mask, [self.num_heads, 1])
            attend *= K.expand_dims(K.cast(multihead_mask, K.floatx()), 2)
            attend *= K.expand_dims(K.cast(multihead_mask, K.floatx()), 1)
        # normalization
        attend = attend / K.cast(K.sum(attend, axis=-1, keepdims=True) + K.epsilon(), K.floatx())
        # apply attention
        attend = K.batch_dot(attend, multihead_value, axes=(2, 1))
        attend = tf.concat(tf.split(attend, self.num_heads, axis=0), axis=2)
        attend = K.dot(attend, self.w_final)

        if self.residual:
            attend = attend + inputs
        if self.normalize:
            mean = K.mean(attend, axis=-1, keepdims=True)
            std = K.std(attend, axis=-1, keepdims=True)
            attend = self.gamma * (attend - mean) / (std + K.epsilon()) + self.beta

        return attend 
Example #18
Source File: nnet_survival.py    From nnet-survival with MIT License
def surv_likelihood_rnn(n_intervals):
  """Create custom Keras loss function for neural network survival model. Used for recurrent neural networks with time-distributed output.
       This function is very similar to surv_likelihood but deals with the extra dimension of y_true and y_pred that exists because of the time-distributed output.
  """
  def loss(y_true, y_pred):
    cens_uncens = 1. + y_true[0,:,0:n_intervals] * (y_pred-1.) #component for all patients
    uncens = 1. - y_true[0,:,n_intervals:2*n_intervals] * y_pred #component for only uncensored patients
    return K.sum(-K.log(K.clip(K.concatenate((cens_uncens,uncens)),K.epsilon(),None)),axis=-1) #return -log likelihood
  return loss 
Example #19
Source File: postprocess.py    From keras-YOLOv3-model-set with MIT License
def yolo2_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[..., ::-1], K.dtype(feats))
    #box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(grid_shape[..., ::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[..., ::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.softmax(feats[..., 5:])

    if calc_loss:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs 
Example #20
Source File: ttfs_dyn_thresh.py    From snn_toolbox with MIT License
def spike_call(call):
    def decorator(self, x):

        updates = []
        if hasattr(self, 'kernel'):
            store_old_kernel = self._kernel.assign(self.kernel)
            store_old_bias = self._bias.assign(self.bias)
            updates += [store_old_kernel, store_old_bias]
            with tf.control_dependencies(updates):
                new_kernel = k.abs(self.kernel)
                new_bias = k.zeros_like(self.bias)
                assign_new_kernel = self.kernel.assign(new_kernel)
                assign_new_bias = self.bias.assign(new_bias)
                updates += [assign_new_kernel, assign_new_bias]
            with tf.control_dependencies(updates):
                c = call(self, x)[self.batch_size:]
                cc = k.concatenate([c, c], 0)
                updates = [self.missing_impulse.assign(cc)]
                with tf.control_dependencies(updates):
                    updates = [self.kernel.assign(self._kernel),
                               self.bias.assign(self._bias)]
        elif 'AveragePooling' in self.name:
            c = call(self, x)[self.batch_size:]
            cc = k.concatenate([c, c], 0)
            updates = [self.missing_impulse.assign(cc)]
        else:
            updates = []

        with tf.control_dependencies(updates):
            # Only call layer if there are input spikes. This is to prevent
            # accumulation of bias.
            self.impulse = \
                tf.cond(k.any(k.not_equal(x[:self.batch_size], 0)),
                        lambda: call(self, x),
                        lambda: k.zeros_like(self.mem))
            psp = self.update_neurons()[:self.batch_size]

        return k.concatenate([psp,
                              self.prospective_spikes[self.batch_size:]], 0)

    return decorator 
Example #21
Source File: time_frequency.py    From kapre with MIT License
def call(self, x):
        output = self._spectrogram_mono(x[:, 0:1, :])
        if self.is_mono is False:
            for ch_idx in range(1, self.n_ch):
                output = K.concatenate(
                    (output, self._spectrogram_mono(x[:, ch_idx : ch_idx + 1, :])),
                    axis=self.ch_axis_idx,
                )
        if self.power_spectrogram != 2.0:
            output = K.pow(K.sqrt(output), self.power_spectrogram)
        if self.return_decibel_spectrogram:
            output = backend_keras.amplitude_to_decibel(output)
        return output 
Example #22
Source File: attn_augconv.py    From keras-attention-augmented-convs with MIT License
def augmented_conv2d(ip, filters, kernel_size=(3, 3), strides=(1, 1),
                     depth_k=0.2, depth_v=0.2, num_heads=8, relative_encodings=True):
    """
    Builds an Attention Augmented Convolution block.

    Args:
        ip: keras tensor.
        filters: number of output filters.
        kernel_size: convolution kernel size.
        strides: strides of the convolution.
        depth_k: float or int. Number of filters for `k`.
            If passed as a float, computed as `filters * depth_k`.
        depth_v: float or int. Number of filters for `v`.
            If passed as a float, computed as `filters * depth_v`.
        num_heads: int. Number of attention heads.
            Must be set such that `depth_k // num_heads` is > 0.
        relative_encodings: bool. Whether to use relative
            encodings or not.

    Returns:
        a keras tensor.
    """
    # input_shape = K.int_shape(ip)
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    depth_k, depth_v = _normalize_depth_vars(depth_k, depth_v, filters)

    conv_out = _conv_layer(filters - depth_v, kernel_size, strides)(ip)

    # Augmented Attention Block
    qkv_conv = _conv_layer(2 * depth_k + depth_v, (1, 1), strides)(ip)
    attn_out = AttentionAugmentation2D(depth_k, depth_v, num_heads, relative_encodings)(qkv_conv)
    attn_out = _conv_layer(depth_v, kernel_size=(1, 1))(attn_out)

    output = concatenate([conv_out, attn_out], axis=channel_axis)
    output = BatchNormalization()(output)
    return output 
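
A hypothetical wiring of the block into a model (input size and hyperparameters are illustrative, and the repo's helper layers are assumed importable alongside it; depth_k = depth_v = 0.25 yields 16 attention filters, divisible by the 4 heads):

ip = tf.keras.layers.Input(shape=(32, 32, 64))
x = augmented_conv2d(ip, filters=64, kernel_size=(3, 3),
                     depth_k=0.25, depth_v=0.25, num_heads=4)
model = tf.keras.Model(ip, x)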
Example #23
Source File: layers.py    From deepchem with MIT License
def call(self, inputs):
    """Execute this layer on input tensors.

    Parameters
    ----------
    inputs: list
      List of two tensors (X, Xp). X should be of shape (n_test,
      n_feat) and Xp should be of shape (n_support, n_feat) where
      n_test is the size of the test set, n_support that of the support
      set, and n_feat is the number of per-atom features.

    Returns
    -------
    list
      Returns two tensors of same shape as input. Namely the output
      shape will be [(n_test, n_feat), (n_support, n_feat)]
    """
    if len(inputs) != 2:
      raise ValueError("AttnLSTMEmbedding layer must have exactly two parents")
    # x is test set, xp is support set.
    x, xp = inputs

    # Get initializations
    q = self.q_init
    states = self.states_init

    for d in range(self.max_depth):
      # Process using attention
      # Eqn (4), appendix A.1 of Matching Networks paper
      e = _cosine_dist(x + q, xp)
      a = tf.nn.softmax(e)
      r = backend.dot(a, xp)

      # Generate new attention states
      y = backend.concatenate([q, r], axis=1)
      q, states = self.lstm([y] + states)
    return [x + q, xp] 
Example #24
Source File: layers.py    From deepchem with MIT License
def call(self, inputs):
    # check that there isn't just one or zero inputs
    if len(inputs) <= 1:
      raise ValueError("AlphaShare must have more than one input")
    self.num_outputs = len(inputs)
    # create subspaces
    subspaces = []
    original_cols = int(inputs[0].get_shape()[-1])
    subspace_size = int(original_cols / 2)
    for input_tensor in inputs:
      subspaces.append(tf.reshape(input_tensor[:, :subspace_size], [-1]))
      subspaces.append(tf.reshape(input_tensor[:, subspace_size:], [-1]))
    n_alphas = len(subspaces)
    subspaces = tf.reshape(tf.stack(subspaces), [n_alphas, -1])
    subspaces = tf.matmul(self.alphas, subspaces)

    # concatenate subspaces, reshape to size of original input, then stack
    # such that out_tensor has shape (2,?,original_cols)
    count = 0
    out_tensors = []
    tmp_tensor = []
    for row in range(n_alphas):
      tmp_tensor.append(tf.reshape(subspaces[row,], [-1, subspace_size]))
      count += 1
      if count == 2:
        out_tensors.append(tf.concat(tmp_tensor, 1))
        tmp_tensor = []
        count = 0
    return out_tensors 
Example #25
Source File: layers.py    From deepchem with MIT License
def build(self, input_shape):
    init = initializers.get(self.init)
    self.U = init((2 * self.n_hidden, 4 * self.n_hidden))
    self.b = tf.Variable(
        np.concatenate((np.zeros(self.n_hidden), np.ones(self.n_hidden),
                        np.zeros(self.n_hidden), np.zeros(self.n_hidden))),
        dtype=tf.float32)
    self.built = True 
Example #26
Source File: edge_conv.py    From spektral with MIT License
def message(self, X, **kwargs):
        X_i = self.get_i(X)
        X_j = self.get_j(X)
        return self.mlp(K.concatenate((X_i, X_j - X_i))) 
Example #27
Source File: tag_conv.py    From spektral with MIT License
def call(self, inputs, **kwargs):
        X, A, E = self.get_inputs(inputs)
        edge_weight = A.values

        output = [X]
        for k in range(self.K):
            output.append(self.propagate(X, A, E, edge_weight=edge_weight))
        output = K.concatenate(output)

        return self.linear(output) 
Example #28
Source File: crystal_conv.py    From spektral with MIT License
def message(self, X, E=None):
        X_i = self.get_i(X)
        X_j = self.get_j(X)
        Z = K.concatenate((X_i, X_j, E), axis=-1)
        output = self.dense_s(Z) * self.dense_f(Z)

        return output 
Example #29
Source File: utils.py    From neuron with GNU General Public License v3.0
def _concat(lists, dim):
    if lists[0].size == 0:
        lists = lists[1:]

    return np.concatenate(lists, dim) 
Example #30
Source File: OneHotEvolutionaryModel.py    From tape-neurips2019 with MIT License
def call(self, inputs):
        encoder_output = K.one_hot(inputs['primary'], self._n_symbols)
        try: 
            encoder_output = K.concatenate((encoder_output, inputs['evolutionary']))
        except KeyError:
            raise TypeError("Evolutionary inputs not available for this task.")
        inputs['encoder_output'] = encoder_output
        return inputs