Python tensorflow.keras.layers.Multiply() Examples

The following are 9 code examples of tensorflow.keras.layers.Multiply(), collected from open-source projects. The originating project and source file are noted above each example. You may also want to check out all available functions and classes of the tensorflow.keras.layers module.
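Before the project examples, a minimal sketch (not taken from any of the projects below) of what Multiply does: it multiplies a list of tensors element-wise, with broadcasting, which is why it shows up in the attention and gating patterns that follow. The shapes here are chosen purely for illustration.

import tensorflow as tf
from tensorflow.keras.layers import Input, Multiply

features = Input(shape=(8, 8, 32))     # a feature map
gate = Input(shape=(1, 1, 32))         # per-channel weights, e.g. from a sigmoid
scaled = Multiply()([features, gate])  # the gate broadcasts over the 8x8 spatial grid
model = tf.keras.Model([features, gate], scaled)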
Example #1
Source File: yolo3_nano.py    From keras-YOLOv3-model-set with MIT License
def _fca_block(inputs, reduct_ratio, block_id):
    # Fully-connected channel attention: squeeze with global average pooling,
    # then re-weight the input's channels with a sigmoid gate (SE-style).
    in_channels = inputs.shape.as_list()[-1]
    #in_shapes = inputs.shape.as_list()[1:3]
    reduct_channels = int(in_channels // reduct_ratio)
    prefix = 'fca_block_{}_'.format(block_id)
    x = GlobalAveragePooling2D(name=prefix + 'average_pooling')(inputs)
    x = Dense(reduct_channels, activation='relu', name=prefix + 'fc1')(x)
    x = Dense(in_channels, activation='sigmoid', name=prefix + 'fc2')(x)

    x = Reshape((1, 1, in_channels), name=prefix + 'reshape')(x)
    x = Multiply(name=prefix + 'multiply')([x, inputs])
    return x 
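A hypothetical way to call this block (the input shape and reduct_ratio below are illustrative, not from the project):

from tensorflow.keras.layers import Input, GlobalAveragePooling2D, Dense, Reshape, Multiply
from tensorflow.keras.models import Model

inputs = Input(shape=(52, 52, 128))
outputs = _fca_block(inputs, reduct_ratio=8, block_id=1)
model = Model(inputs, outputs)  # attention-weighted feature map, same shape as inputs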
Example #2
Source File: mobilenet_v3.py    From keras-YOLOv3-model-set with MIT License
def _se_block(inputs, filters, se_ratio, prefix):
    # Squeeze-and-excite: global pooling -> two 1x1 convs -> hard-sigmoid channel gate.
    x = GlobalAveragePooling2D(name=prefix + 'squeeze_excite/AvgPool')(inputs)
    if K.image_data_format() == 'channels_first':
        x = Reshape((filters, 1, 1))(x)
    else:
        x = Reshape((1, 1, filters))(x)
    x = Conv2D(_depth(filters * se_ratio),
               kernel_size=1,
               padding='same',
               name=prefix + 'squeeze_excite/Conv')(x)
    x = ReLU(name=prefix + 'squeeze_excite/Relu')(x)
    x = Conv2D(filters,
               kernel_size=1,
               padding='same',
               name=prefix + 'squeeze_excite/Conv_1')(x)
    x = Activation(hard_sigmoid)(x)
    x = Multiply(name=prefix + 'squeeze_excite/Mul')([inputs, x])
    return x 
Example #3
Source File: cells.py    From DeepPavlov with Apache License 2.0
def TemporalDropout(inputs, dropout=0.0):
    """
    Drops with :dropout probability temporal steps of input 3D tensor
    """
    # TO DO: adapt for >3D tensors
    if dropout == 0.0:
        return inputs
    inputs_func = lambda x: K.ones_like(x[:, :, 0:1])
    inputs_mask = Lambda(inputs_func)(inputs)
    inputs_mask = Dropout(dropout)(inputs_mask)
    tiling_shape = [1, 1, K.shape(inputs)[2]] + [1] * (K.ndim(inputs) - 3)
    inputs_mask = Lambda(K.tile, arguments={"n": tiling_shape},
                         output_shape=K.int_shape(inputs)[1:])(inputs_mask)
    answer = Multiply()([inputs, inputs_mask])
    return answer 
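A minimal, hypothetical usage (the shape is illustrative; Lambda, Dropout, Multiply and the backend K are the same imports the function above relies on):

from tensorflow.keras.layers import Input

inputs = Input(shape=(20, 64))                  # (time_steps, features)
dropped = TemporalDropout(inputs, dropout=0.3)  # zeroes out whole timesteps at random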
Example #4
Source File: keras_layers.py    From DeepPavlov with Apache License 2.0
def multiplicative_self_attention(units, n_hidden=None, n_output_features=None, activation=None):
    """
    Computes multiplicative self-attention for a time series of vectors (with a batch
    dimension). The attention score is score(h_i, h_j) = <W_1 h_i, W_2 h_j>, where W_1
    and W_2 are learnable matrices of shape [n_hidden, n_input_features].

    Args:
        units: tf tensor with dimensionality [batch_size, time_steps, n_input_features]
        n_hidden: number of units in hidden representation of similarity measure
        n_output_features: number of features in output dense layer
        activation: activation at the output

    Returns:
        output: self attended tensor with dimensionality [batch_size, time_steps, n_output_features]
    """
    n_input_features = K.int_shape(units)[2]
    if n_hidden is None:
        n_hidden = n_input_features
    if n_output_features is None:
        n_output_features = n_input_features
    exp1 = Lambda(lambda x: expand_tile(x, axis=1))(units)
    exp2 = Lambda(lambda x: expand_tile(x, axis=2))(units)
    queries = Dense(n_hidden)(exp1)
    keys = Dense(n_hidden)(exp2)
    scores = Lambda(lambda x: K.sum(queries * x, axis=3, keepdims=True))(keys)
    attention = Lambda(lambda x: softmax(x, axis=2))(scores)
    mult = Multiply()([attention, exp1])
    attended_units = Lambda(lambda x: K.sum(x, axis=2))(mult)
    output = Dense(n_output_features, activation=activation)(attended_units)
    return output 
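A hypothetical usage, assuming the expand_tile and softmax helpers defined elsewhere in the same keras_layers.py module are available (shapes are illustrative):

from tensorflow.keras.layers import Input

units = Input(shape=(30, 100))  # (time_steps, n_input_features)
attended = multiplicative_self_attention(units, n_hidden=64,
                                         n_output_features=100,
                                         activation='relu')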
Example #5
Source File: squeeze_excitation.py    From DeepPoseKit with Apache License 2.0
def channel_squeeze_excite_block(input, ratio=0.25):
    init = input
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = K.int_shape(init)[channel_axis]  # _keras_shape is not available in tf.keras
    cse_shape = (1, 1, filters)

    cse = layers.GlobalAveragePooling2D()(init)
    cse = layers.Reshape(cse_shape)(cse)
    ratio_filters = max(1, int(np.round(filters * ratio)))  # keep at least one filter
    cse = layers.Conv2D(
        ratio_filters,
        (1, 1),
        padding="same",
        activation="relu",
        kernel_initializer="he_normal",
        use_bias=False,
    )(cse)
    cse = layers.BatchNormalization()(cse)
    cse = layers.Conv2D(
        filters,
        (1, 1),
        activation="sigmoid",
        kernel_initializer="he_normal",
        use_bias=False,
    )(cse)

    if K.image_data_format() == "channels_first":
        cse = layers.Permute((3, 1, 2))(cse)

    cse = layers.Multiply()([init, cse])
    return cse 
Example #6
Source File: squeeze_excitation.py    From DeepPoseKit with Apache License 2.0
def spatial_squeeze_excite_block(input):
    # Spatial squeeze-and-excite: a 1x1 conv yields a per-pixel sigmoid gate
    # that re-weights the input feature map spatially.
    sse = layers.Conv2D(
        1,
        (1, 1),
        activation="sigmoid",
        padding="same",
        kernel_initializer="he_normal",
        use_bias=False,
    )(input)
    sse = layers.Multiply()([input, sse])

    return sse 
Example #7
Source File: mobilenet_v3.py    From keras-YOLOv3-model-set with MIT License
def hard_swish(x):
    # MobileNetV3 hard-swish activation: x * hard_sigmoid(x).
    return Multiply()([Activation(hard_sigmoid)(x), x])
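Note that hard_sigmoid here is not the stock Keras string activation but a helper defined earlier in mobilenet_v3.py. A sketch consistent with the MobileNetV3 definition ReLU6(x + 3) / 6 (the exact implementation in the project may differ slightly):

def hard_sigmoid(x):
    # Piecewise-linear approximation of the sigmoid used by MobileNetV3.
    return ReLU(6.)(x + 3.) * (1. / 6.)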


# This function is taken from the original tf repo.
# It ensures that all layers have a channel number that is divisible by 8.
# It can be seen here:
# https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
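The "divisible by 8" helper that the comment describes is not shown in this excerpt; a sketch following the TensorFlow models repository (locally it is presumably the _depth function used in the SE block of Example #2):

def _make_divisible(v, divisor=8, min_value=None):
    # Round v to the nearest multiple of divisor, with a floor of min_value.
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Make sure that rounding down does not drop the value by more than 10%.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v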
Example #8
Source File: progressive_multitask.py    From deepchem with MIT License
def add_adapter(self, all_layers, task, layer_num):
    """Add an adapter connection for given task/layer combo"""
    i = layer_num
    prev_layers = []
    trainable_layers = []
    # Select sizes and initializers: hidden layers vs. the output layer
    if i < len(self.layer_sizes):
      layer_sizes = self.layer_sizes
      alpha_init_stddev = self.alpha_init_stddevs[i]
      weight_init_stddev = self.weight_init_stddevs[i]
      bias_init_const = self.bias_init_consts[i]
    elif i == len(self.layer_sizes):
      layer_sizes = self.layer_sizes + [self.n_outputs]
      alpha_init_stddev = self.alpha_init_stddevs[-1]
      weight_init_stddev = self.weight_init_stddevs[-1]
      bias_init_const = self.bias_init_consts[-1]
    else:
      raise ValueError("layer_num too large for add_adapter.")
    # Iterate over all previous tasks.
    for prev_task in range(task):
      prev_layers.append(all_layers[(i - 1, prev_task)])
    # prev_layers is a list with elements of size
    # (batch_size, layer_sizes[i-1])
    if len(prev_layers) == 1:
      prev_layer = prev_layers[0]
    else:
      prev_layer = Concatenate(axis=1)(prev_layers)
    alpha = layers.Variable(
        tf.random.truncated_normal((1,), stddev=alpha_init_stddev))
    trainable_layers.append(alpha)

    prev_layer = Multiply()([prev_layer, alpha([prev_layer])])
    dense1 = Dense(
        layer_sizes[i - 1],
        kernel_initializer=tf.keras.initializers.TruncatedNormal(
            stddev=weight_init_stddev),
        bias_initializer=tf.constant_initializer(value=bias_init_const))
    prev_layer = dense1(prev_layer)
    trainable_layers.append(dense1)

    dense2 = Dense(
        layer_sizes[i],
        kernel_initializer=tf.keras.initializers.TruncatedNormal(
            stddev=weight_init_stddev),
        use_bias=False)
    prev_layer = dense2(prev_layer)
    trainable_layers.append(dense2)

    return prev_layer, trainable_layers 
Example #9
Source File: osnet.py    From keras_imagenet with MIT License
def os_bottleneck(x,
                  out_filters,
                  bottleneck_reduction=4):
    """Utility function to implement the OSNet bottleneck module.

    # Arguments
        x: input tensor.
        out_filters: number of output filters.
        bottleneck_reduction: reduction factor for the mid (bottleneck) filters,
            i.e. mid_filters = out_filters // bottleneck_reduction.

    # Returns
        Output tensor after applying the OSNet bottleneck.
    """
    in_filters = x.shape[-1].value  # TF 1.x shape API; in TF 2.x this is just x.shape[-1]
    mid_filters = out_filters // bottleneck_reduction
    identity = x
    x1 = conv2d_bn(x, mid_filters, kernel_size=(1, 1))

    branch1 = light_conv3x3_bn(x1, mid_filters)
    branch2 = light_conv3x3_bn(x1, mid_filters)
    branch2 = light_conv3x3_bn(branch2, mid_filters)
    branch3 = light_conv3x3_bn(x1, mid_filters)
    branch3 = light_conv3x3_bn(branch3, mid_filters)
    branch3 = light_conv3x3_bn(branch3, mid_filters)
    branch4 = light_conv3x3_bn(x1, mid_filters)
    branch4 = light_conv3x3_bn(branch4, mid_filters)
    branch4 = light_conv3x3_bn(branch4, mid_filters)
    branch4 = light_conv3x3_bn(branch4, mid_filters)

    gate = get_aggregation_gate(mid_filters)
    x2 = layers.Add()([
        layers.Multiply()([branch1, gate(branch1)]),
        layers.Multiply()([branch2, gate(branch2)]),
        layers.Multiply()([branch3, gate(branch3)]),
        layers.Multiply()([branch4, gate(branch4)])])

    x3 = conv2d_bn(x2, out_filters, kernel_size=(1, 1), activation=None)
    if in_filters != out_filters:
        identity = conv2d_bn(identity, out_filters, kernel_size=(1, 1), activation=None)

    out = layers.Add()([identity, x3])  # residual connection
    out = layers.Activation('relu')(out)
    return out
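A hypothetical usage, assuming conv2d_bn, light_conv3x3_bn and get_aggregation_gate are available from the same osnet.py module (the input shape and filter count are illustrative):

from tensorflow.keras import layers

inputs = layers.Input(shape=(56, 56, 64))
outputs = os_bottleneck(inputs, out_filters=256)  # (56, 56, 256) feature map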