Python keras.initializers.VarianceScaling() Examples

The following are 6 code examples of keras.initializers.VarianceScaling(). The originating project and source file are noted above each example. You may also want to check out all available functions/classes of the module keras.initializers.
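Before the examples, a quick sketch of what the initializer does (a minimal illustration, assuming the standalone Keras 2 API used throughout this page): VarianceScaling draws initial weights whose variance is scale / n, where n is the layer's fan-in, fan-out, or their average, depending on mode. Several built-in initializers are presets of it:

from keras import initializers
from keras.layers import Dense

# scale=2.0, mode='fan_in', distribution='normal' is the preset behind the
# built-in he_normal initializer (well suited to ReLU activations);
# scale=1.0, mode='fan_avg', distribution='uniform' matches glorot_uniform.
he_like = initializers.VarianceScaling(scale=2.0, mode='fan_in',
                                       distribution='normal', seed=None)

# Pass it anywhere a kernel_initializer (or bias_initializer) is accepted.
layer = Dense(64, activation='relu', kernel_initializer=he_like)
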
Example #1
Source File: inception_v4.py    From FashionAI_Tianchi_2018 with MIT License
# Imports assumed by this snippet (Keras 2 standalone API):
from keras import backend as K
from keras import initializers, regularizers
from keras.layers import Activation, BatchNormalization, Convolution2D


def conv2d_bn(x, nb_filter, num_row, num_col,
              padding='same', strides=(1, 1), use_bias=False):
    """
    Utility function to apply conv + BN.
    (Slightly modified from https://github.com/fchollet/keras/blob/master/keras/applications/inception_v3.py)
    """
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    x = Convolution2D(nb_filter, (num_row, num_col),
                      strides=strides,
                      padding=padding,
                      use_bias=use_bias,
                      kernel_regularizer=regularizers.l2(0.00004),
                      kernel_initializer=initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='normal', seed=None))(x)
    x = BatchNormalization(axis=channel_axis, momentum=0.9997, scale=False)(x)
    x = Activation('relu')(x)
    return x 
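A note on the choices here: scale=2.0 with mode='fan_in' and distribution='normal' is exactly the preset behind Keras's built-in he_normal initializer, a common pairing with the ReLU that follows. The l2(0.00004) weight decay and BatchNormalization momentum of 0.9997 appear to match the defaults of Google's TF-Slim Inception implementation, and scale=False omits BN's learnable gamma, which is redundant when the output feeds a ReLU followed by another convolution that can absorb the scaling. Examples #2 and #3 below are the same helper as ported in other projects and differ only in formatting.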
Example #2
Source File: inception_v4.py    From keras-inceptionV4 with Apache License 2.0
# (Same imports as Example #1.)
def conv2d_bn(x, nb_filter, num_row, num_col,
              padding='same', strides=(1, 1), use_bias=False):
    """
    Utility function to apply conv + BN. 
    (Slightly modified from https://github.com/fchollet/keras/blob/master/keras/applications/inception_v3.py)
    """
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    x = Convolution2D(nb_filter, (num_row, num_col),
                      strides=strides,
                      padding=padding,
                      use_bias=use_bias,
                      kernel_regularizer=regularizers.l2(0.00004),
                      kernel_initializer=initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='normal', seed=None))(x)
    x = BatchNormalization(axis=channel_axis, momentum=0.9997, scale=False)(x)
    x = Activation('relu')(x)
    return x 
Example #3
Source File: inception_v4.py    From Triplet-deep-hash-pytorch with Apache License 2.0
# (Same imports as Example #1.)
def conv2d_bn(x, nb_filter, num_row, num_col,
              padding='same', strides=(1, 1), use_bias=False):
    """
    Utility function to apply conv + BN. 
    (Slightly modified from https://github.com/fchollet/keras/blob/master/keras/applications/inception_v3.py)
    """
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    x = Convolution2D(nb_filter, (num_row, num_col),
                      strides=strides,
                      padding=padding,
                      use_bias=use_bias,
                      kernel_regularizer=regularizers.l2(0.00004),
                      kernel_initializer=initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='normal',
                                                                      seed=None))(x)
    x = BatchNormalization(axis=channel_axis, momentum=0.9997, scale=False)(x)
    x = Activation('relu')(x)
    return x 
Example #4
Source File: nn_blocks.py    From faceswap with GNU General Public License v3.0
    def res_block(self, input_tensor, filters, kernel_size=3, padding="same", **kwargs):
        """ Residual block.

        Parameters
        ----------
        input_tensor: tensor
            The input tensor to the layer
        filters: int
            The dimensionality of the output space (i.e. the number of output filters in the
            convolution)
        kernel_size: int, optional
            An integer or tuple/list of 2 integers, specifying the height and width of the 2D
            convolution window. Can be a single integer to specify the same value for all spatial
            dimensions. Default: 3
        padding: ["valid", "same"], optional
            The padding to use. Default: `"same"`
        kwargs: dict
            Any additional Keras standard layer keyword arguments

        Returns
        -------
        tensor
            The output tensor from the residual block
        """
        logger.debug("input_tensor: %s, filters: %s, kernel_size: %s, kwargs: %s",
                     input_tensor, filters, kernel_size, kwargs)
        name = self._get_name("residual_{}".format(input_tensor.shape[1]))
        var_x = LeakyReLU(alpha=0.2, name="{}_leakyrelu_0".format(name))(input_tensor)
        if self.use_reflect_padding:
            var_x = ReflectionPadding2D(stride=1,
                                        kernel_size=kernel_size,
                                        name="{}_reflectionpadding2d_0".format(name))(var_x)
            padding = "valid"
        var_x = self.conv2d(var_x, filters,
                            kernel_size=kernel_size,
                            padding=padding,
                            name="{}_conv2d_0".format(name),
                            **kwargs)
        var_x = LeakyReLU(alpha=0.2, name="{}_leakyrelu_1".format(name))(var_x)
        if self.use_reflect_padding:
            var_x = ReflectionPadding2D(stride=1,
                                        kernel_size=kernel_size,
                                        name="{}_reflectionpadding2d_1".format(name))(var_x)
            padding = "valid"
        if not self.use_convaware_init:
            original_init = self._switch_kernel_initializer(kwargs, VarianceScaling(
                scale=0.2,
                mode="fan_in",
                distribution="uniform"))
        var_x = self.conv2d(var_x, filters,
                            kernel_size=kernel_size,
                            padding=padding,
                            **kwargs)
        if not self.use_convaware_init:
            self._switch_kernel_initializer(kwargs, original_init)
        var_x = Add()([var_x, input_tensor])
        var_x = LeakyReLU(alpha=0.2, name="{}_leakyrelu_3".format(name))(var_x)
        return var_x

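Example #4 swaps the kernel initializer in kwargs just before building the second convolution, then restores the previous one; presumably the small scale=0.2 keeps the residual branch's initial output weak relative to the identity path. The faceswap helper _switch_kernel_initializer is not shown in this snippet; a plausible reconstruction, hypothetical and inferred only from how it is called here, might look like:

def _switch_kernel_initializer(self, kwargs, initializer):
    """ Hypothetical sketch: store the current kwargs['kernel_initializer'],
    replace it with `initializer`, and return the stored value so the
    caller can restore it after the layer has been built. """
    original = kwargs.get("kernel_initializer", None)
    kwargs["kernel_initializer"] = initializer
    return original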
Example #5
Source File: census_income_demo.py    From keras-mmoe with MIT License
# Imports assumed by this snippet (Keras 2 API). data_preparation and
# ROCCallback are helpers defined elsewhere in the same demo script.
from keras.initializers import VarianceScaling
from keras.layers import Input, Dense
from keras.models import Model
from keras.optimizers import Adam
from mmoe import MMoE  # the MMoE layer from the keras-mmoe project


def main():
    # Load the data
    train_data, train_label, validation_data, validation_label, test_data, test_label, output_info = data_preparation()
    num_features = train_data.shape[1]

    print('Training data shape = {}'.format(train_data.shape))
    print('Validation data shape = {}'.format(validation_data.shape))
    print('Test data shape = {}'.format(test_data.shape))

    # Set up the input layer
    input_layer = Input(shape=(num_features,))

    # Set up MMoE layer
    mmoe_layers = MMoE(
        units=4,
        num_experts=8,
        num_tasks=2
    )(input_layer)

    output_layers = []

    # Build tower layer from MMoE layer
    for index, task_layer in enumerate(mmoe_layers):
        tower_layer = Dense(
            units=8,
            activation='relu',
            kernel_initializer=VarianceScaling())(task_layer)
        output_layer = Dense(
            units=output_info[index][0],
            name=output_info[index][1],
            activation='softmax',
            kernel_initializer=VarianceScaling())(tower_layer)
        output_layers.append(output_layer)

    # Compile model
    model = Model(inputs=[input_layer], outputs=output_layers)
    adam_optimizer = Adam()
    model.compile(
        loss={'income': 'binary_crossentropy', 'marital': 'binary_crossentropy'},
        optimizer=adam_optimizer,
        metrics=['accuracy']
    )

    # Print out model architecture summary
    model.summary()

    # Train the model
    model.fit(
        x=train_data,
        y=train_label,
        validation_data=(validation_data, validation_label),
        callbacks=[
            ROCCallback(
                training_data=(train_data, train_label),
                validation_data=(validation_data, validation_label),
                test_data=(test_data, test_label)
            )
        ],
        epochs=100
    ) 
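Both towers above call VarianceScaling() with no arguments, which in Keras 2 means scale=1.0, mode='fan_in', distribution='normal': the same preset as the built-in lecun_normal initializer.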
Example #6
Source File: synthetic_demo.py    From keras-mmoe with MIT License
# (Same imports as Example #5, plus: from keras import metrics.)
def main():
    # Load the data
    train_data, train_label, validation_data, validation_label, test_data, test_label = data_preparation()
    num_features = train_data.shape[1]

    print('Training data shape = {}'.format(train_data.shape))
    print('Validation data shape = {}'.format(validation_data.shape))
    print('Test data shape = {}'.format(test_data.shape))

    # Set up the input layer
    input_layer = Input(shape=(num_features,))

    # Set up MMoE layer
    mmoe_layers = MMoE(
        units=16,
        num_experts=8,
        num_tasks=2
    )(input_layer)

    output_layers = []

    output_info = ['y0', 'y1']

    # Build tower layer from MMoE layer
    for index, task_layer in enumerate(mmoe_layers):
        tower_layer = Dense(
            units=8,
            activation='relu',
            kernel_initializer=VarianceScaling())(task_layer)
        output_layer = Dense(
            units=1,
            name=output_info[index],
            activation='linear',
            kernel_initializer=VarianceScaling())(tower_layer)
        output_layers.append(output_layer)

    # Compile model
    model = Model(inputs=[input_layer], outputs=output_layers)
    learning_rates = [1e-4, 1e-3, 1e-2]
    adam_optimizer = Adam(lr=learning_rates[0])  # only the first rate (1e-4) is used
    model.compile(
        loss={'y0': 'mean_squared_error', 'y1': 'mean_squared_error'},
        optimizer=adam_optimizer,
        metrics=[metrics.mae]
    )

    # Print out model architecture summary
    model.summary()

    # Train the model
    model.fit(
        x=train_data,
        y=train_label,
        validation_data=(validation_data, validation_label),
        epochs=100
    )
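A porting note: Adam(lr=...) uses the legacy argument name from standalone Keras; in tf.keras and Keras 2.3+ the parameter is called learning_rate, with lr kept as a deprecated alias.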