Python keras.applications.mobilenet.DepthwiseConv2D() Examples

The following are 13 code examples of keras.applications.mobilenet.DepthwiseConv2D(), taken from open-source projects. The source file, project, and license are noted above each example.
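DepthwiseConv2D was originally exposed through keras.applications.mobilenet and became a regular layer in keras.layers around Keras 2.2, which is why several of the examples below guard the import. A minimal sketch of that pattern (which branch succeeds depends on the installed Keras version):

try:
    # Keras <= 2.1.x: the layer lives in the MobileNet application module
    from keras.applications.mobilenet import DepthwiseConv2D
except ImportError:
    # Keras >= 2.2: DepthwiseConv2D is a first-class layer
    from keras.layers import DepthwiseConv2D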
Example #1
Source File: test_keras2_numeric.py    From coremltools with BSD 3-Clause "New" or "Revised" License
def test_tiny_depthwise_conv_valid_pad(self):
        np.random.seed(1988)
        input_dim = 16
        input_shape = (input_dim, input_dim, 3)
        depth_multiplier = 1
        kernel_height = 3
        kernel_width = 3

        # Define a model
        model = Sequential()
        model.add(
            DepthwiseConv2D(
                depth_multiplier=depth_multiplier,
                kernel_size=(kernel_height, kernel_width),
                input_shape=input_shape,
                padding="valid",
                strides=(1, 1),
            )
        )

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_model(model) 
Example #2
Source File: test_keras2_numeric.py    From coremltools with BSD 3-Clause "New" or "Revised" License
def test_tiny_depthwise_conv_same_pad_depth_multiplier(self):
        np.random.seed(1988)
        input_dim = 16
        input_shape = (input_dim, input_dim, 3)
        depth_multiplier = 4
        kernel_height = 3
        kernel_width = 3

        # Define a model
        model = Sequential()
        model.add(
            DepthwiseConv2D(
                depth_multiplier=depth_multiplier,
                kernel_size=(kernel_height, kernel_width),
                input_shape=input_shape,
                padding="same",
                strides=(1, 1),
            )
        )

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_model(model) 
Example #3
Source File: test_keras2_numeric.py    From coremltools with BSD 3-Clause "New" or "Revised" License
def test_tiny_depthwise_conv_valid_pad_depth_multiplier(self):
        np.random.seed(1988)
        input_dim = 16
        input_shape = (input_dim, input_dim, 3)
        depth_multiplier = 2
        kernel_height = 3
        kernel_width = 3

        # Define a model
        model = Sequential()
        model.add(
            DepthwiseConv2D(
                depth_multiplier=depth_multiplier,
                kernel_size=(kernel_height, kernel_width),
                input_shape=input_shape,
                padding="valid",
                strides=(1, 1),
            )
        )

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_model(model) 
Example #4
Source File: keras_to_coreml.py    From face_landmark_dnn with MIT License
def keras_to_coreml():
    with custom_object_scope({'smoothL1': smoothL1, 'relu6': relu6, 'DepthwiseConv2D': mobilenet.DepthwiseConv2D}):
        ml_model = load_model(MODEL_PATH)
    coreml_model = coremltools.converters.keras.convert(ml_model, 
                                                        input_names='image', image_input_names='image', 
                                                        is_bgr=False)
    coreml_model.save(ML_MODEL_PATH) 
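The custom_object_scope is needed here because the model was saved with objects that this Keras version does not register by default: relu6 and the MobileNet DepthwiseConv2D layer, plus the project's own smoothL1 loss (smoothL1 and relu6 are assumed to be defined or imported elsewhere in the same file). Without the mapping, load_model would fail to deserialize the layers and loss that reference them.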
Example #5
Source File: keras2_parser.py    From MMdnn with MIT License
def __init__(self, model):
        super(Keras2Parser, self).__init__()

        # load model files into Keras graph
        if isinstance(model, _string_types):
            try:
                # Keras 2.1.6
                from keras.applications.mobilenet import relu6
                from keras.applications.mobilenet import DepthwiseConv2D
                model = _keras.models.load_model(
                    model,
                    custom_objects={
                        'relu6': _keras.applications.mobilenet.relu6,
                        'DepthwiseConv2D': _keras.applications.mobilenet.DepthwiseConv2D
                    }
                )
            except:
                # Keras 2.2.2
                import keras.layers as layers
                model = _keras.models.load_model(
                    model,
                    custom_objects={
                        'relu6': layers.ReLU(6, name='relu6'),
                        'DepthwiseConv2D': layers.DepthwiseConv2D
                    }
                )
            self.weight_loaded = True

        elif isinstance(model, tuple):
            model = self._load_model(model[0], model[1])

        else:
            assert False

        # _keras.utils.plot_model(model, "model.png", show_shapes = True)

        # Build network graph
        self.data_format = _keras.backend.image_data_format()
        self.keras_graph = Keras2Graph(model)
        self.keras_graph.build()
        self.lambda_layer_count = 0 
Example #6
Source File: keras2_emitter.py    From MMdnn with MIT License
def _emit_convolution(self, IR_node, conv_type):
        self.used_layers.add('Conv')
        # assert IR_node.get_attr('group', 1) == 1
        group = IR_node.get_attr("group", 1)

        if conv_type.endswith('Transpose'):
            filters = IR_node.get_attr('kernel_shape')[-2]
        else:
            filters = IR_node.get_attr('kernel_shape')[-1]

        filters_str = 'filters={}'.format(filters) if not conv_type.endswith('DepthwiseConv2D') else 'depth_multiplier={}'.format(filters)
        # change dw from filters to 1


        input_node, padding = self._defuse_padding(IR_node)

        dilations = IR_node.get_attr('dilations')

        if not dilations or len(dilations) == 2:
            # reset the default dilation
            dilations = [1] * len(IR_node.get_attr('kernel_shape'))

        code = "{:<15} = convolution(weights_dict, name='{}', input={}, group={}, conv_type='{}', {}, kernel_size={}, strides={}, dilation_rate={}, padding='{}', use_bias={})".format(
            IR_node.variable_name,
            IR_node.name,
            input_node,
            group,
            conv_type,
            filters_str,
            tuple(IR_node.get_attr('kernel_shape')[:-2]),
            tuple(IR_node.get_attr('strides')[1:-1]),
            tuple(dilations[1:-1]),
            padding,
            IR_node.get_attr('use_bias'))

        return code 
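For a depthwise layer, the last dimension of the stored kernel is the depth multiplier rather than an output-channel count, so when conv_type ends in DepthwiseConv2D the value read from kernel_shape is emitted as depth_multiplier instead of filters.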
Example #7
Source File: keras2_emitter.py    From MMdnn with MIT License
def emit_DepthwiseConv(self, IR_node, in_scope=False):
        try:
            from keras.applications.mobilenet import DepthwiseConv2D
            return self._emit_convolution(IR_node, 'keras.applications.mobilenet.DepthwiseConv2D')
        except:
            return self._emit_convolution(IR_node, 'layers.DepthwiseConv2D') 
Example #8
Source File: keras2_emitter.py    From MMdnn with MIT License
def _layer_Conv(self):
        self.add_body(0, """
def convolution(weights_dict, name, input, group, conv_type, filters=None, **kwargs):
    if not conv_type.startswith('layer'):
        layer = keras.applications.mobilenet.DepthwiseConv2D(name=name, **kwargs)(input)
        return layer
    elif conv_type == 'layers.DepthwiseConv2D':
        layer = layers.DepthwiseConv2D(name=name, **kwargs)(input)
        return layer
    
    inp_filters = K.int_shape(input)[-1]
    inp_grouped_channels = int(inp_filters / group)
    out_grouped_channels = int(filters / group)
    group_list = []
    if group == 1:
        func = getattr(layers, conv_type.split('.')[-1])
        layer = func(name = name, filters = filters, **kwargs)(input)
        return layer
    weight_groups = list()
    if not weights_dict == None:
        w = np.array(weights_dict[name]['weights'])
        weight_groups = np.split(w, indices_or_sections=group, axis=-1)
    for c in range(group):
        x = layers.Lambda(lambda z: z[..., c * inp_grouped_channels:(c + 1) * inp_grouped_channels])(input)
        x = layers.Conv2D(name=name + "_" + str(c), filters=out_grouped_channels, **kwargs)(x)
        weights_dict[name + "_" + str(c)] = dict()
        weights_dict[name + "_" + str(c)]['weights'] = weight_groups[c]
        group_list.append(x)
    layer = layers.concatenate(group_list, axis = -1)
    if 'bias' in weights_dict[name]:
        b = K.variable(weights_dict[name]['bias'], name = name + "_bias")
        layer = layer + b
    return layer""") 
Example #9
Source File: test_keras2_numeric.py    From coremltools with BSD 3-Clause "New" or "Revised" License
def test_tiny_depthwise_conv_same_pad(self):
        np.random.seed(1988)
        input_dim = 16
        input_shape = (input_dim, input_dim, 3)
        depth_multiplier = 1
        kernel_height = 3
        kernel_width = 3

        # Define a model
        model = Sequential()
        model.add(
            DepthwiseConv2D(
                depth_multiplier=depth_multiplier,
                kernel_size=(kernel_height, kernel_width),
                input_shape=input_shape,
                padding="same",
                strides=(1, 1),
            )
        )

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_model(model) 
Example #10
Source File: mobilenet_v2.py    From FaceRecognition with MIT License
def _bottleneck(inputs, filters, kernel, t, s, r=False):
    """Bottleneck
    This function defines a basic bottleneck structure.

    # Arguments
        inputs: Tensor, input tensor of conv layer.
        filters: Integer, the dimensionality of the output space.
        kernel: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
        t: Integer, expansion factor.
            t is always applied to the input size.
        s: An integer or tuple/list of 2 integers, specifying the strides
            of the convolution along the width and height. Can be a single
            integer to specify the same value for all spatial dimensions.
        r: Boolean, whether to use the residual connection.

    # Returns
        Output tensor.
    """

    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    tchannel = K.int_shape(inputs)[channel_axis] * t

    x = _conv_block(inputs, tchannel, (1, 1), (1, 1))

    x = DepthwiseConv2D(kernel, strides=(s, s),
                        depth_multiplier=1, padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation(relu6)(x)

    x = Conv2D(filters, (1, 1), strides=(1, 1), padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)

    if r:
        x = add([x, inputs])
    return x 
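A typical call sequence for this block in a MobileNetV2-style stem could look like the sketch below; the shapes and hyperparameters are illustrative only, and _conv_block is assumed to be the conv + BN + relu6 helper defined elsewhere in the same file:

inputs = Input(shape=(224, 224, 3))
x = _conv_block(inputs, 32, (3, 3), (2, 2))        # stem: conv + BN + relu6 (defined in the same file)
x = _bottleneck(x, 16, (3, 3), t=1, s=1, r=False)  # changes the channel count, so no residual
x = _bottleneck(x, 16, (3, 3), t=6, s=1, r=True)   # same shape in and out, residual add enabled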
Example #11
Source File: keras2_parser.py    From MMdnn with MIT License
def _load_model(self, model_network_path, model_weight_path):
        """Load a keras model from disk

        Parameters
        ----------
        model_network_path: str
            Path where the model network path is (json file)

        model_weight_path: str
            Path where the model network weights are (hd5 file)

        Returns
        -------
        model: A keras model
        """
        from keras.models import model_from_json

        # Load the model network
        json_file = open(model_network_path, 'r')
        loaded_model_json = json_file.read()
        json_file.close()

        # Load the model weights

        try:
            from keras.applications.mobilenet import relu6
            from keras.applications.mobilenet import DepthwiseConv2D
            loaded_model = model_from_json(loaded_model_json, custom_objects={
                'relu6': _keras.applications.mobilenet.relu6,
                'DepthwiseConv2D': _keras.applications.mobilenet.DepthwiseConv2D})
        except:
            import keras.layers as layers
            loaded_model = model_from_json(loaded_model_json, custom_objects={
                'relu6': layers.ReLU(6, name='relu6'),
                'DepthwiseConv2D': layers.DepthwiseConv2D})


        if model_weight_path:
            if os.path.isfile(model_weight_path):
                loaded_model.load_weights(model_weight_path)
                self.weight_loaded = True
                print("Network file [{}] and [{}] is loaded successfully.".format(model_network_path, model_weight_path))

            else:
                print("Warning: Weights File [%s] is not found." % (model_weight_path))

        return loaded_model 
Example #12
Source File: test_keras2_numeric.py    From coremltools with BSD 3-Clause "New" or "Revised" License
def test_tiny_mobilenet_arch(self, model_precision=_MLMODEL_FULL_PRECISION):
        def ReLU6(x, name):
            if keras.__version__ >= _StrictVersion("2.2.1"):
                return ReLU(6.0, name=name)(x)
            else:
                return Activation(relu6, name=name)(x)

        img_input = Input(shape=(32, 32, 3))
        x = Conv2D(
            4, (3, 3), padding="same", use_bias=False, strides=(2, 2), name="conv1"
        )(img_input)
        x = BatchNormalization(axis=-1, name="conv1_bn")(x)
        x = ReLU6(x, name="conv1_relu")

        x = DepthwiseConv2D(
            (3, 3),
            padding="same",
            depth_multiplier=1,
            strides=(1, 1),
            use_bias=False,
            name="conv_dw_1",
        )(x)
        x = BatchNormalization(axis=-1, name="conv_dw_1_bn")(x)
        x = ReLU6(x, name="conv_dw_1_relu")

        x = Conv2D(
            8, (1, 1), padding="same", use_bias=False, strides=(1, 1), name="conv_pw_1"
        )(x)
        x = BatchNormalization(axis=-1, name="conv_pw_1_bn")(x)
        x = ReLU6(x, name="conv_pw_1_relu")

        x = DepthwiseConv2D(
            (3, 3),
            padding="same",
            depth_multiplier=1,
            strides=(2, 2),
            use_bias=False,
            name="conv_dw_2",
        )(x)
        x = BatchNormalization(axis=-1, name="conv_dw_2_bn")(x)
        x = ReLU6(x, name="conv_dw_2_relu")

        x = Conv2D(
            8, (1, 1), padding="same", use_bias=False, strides=(2, 2), name="conv_pw_2"
        )(x)
        x = BatchNormalization(axis=-1, name="conv_pw_2_bn")(x)
        x = ReLU6(x, name="conv_pw_2_relu")

        model = Model(inputs=[img_input], outputs=[x])

        self._test_model(model, delta=1e-2, model_precision=model_precision) 
Example #13
Source File: MobileNetV2.py    From MobileNet_V2_Keras with MIT License
def InvertedResidualBlock(x, expand, out_channels, repeats, stride, weight_decay, block_id):
    '''
    This function defines a sequence of 1 or more identical layers, referring to Table 2 in the original paper.
    :param x: Input Keras tensor in (B, H, W, C_in)
    :param expand: expansion factor in the bottleneck residual block
    :param out_channels: number of channels in the output tensor
    :param repeats: number of times to repeat the inverted residual blocks including the one that changes the dimensions.
    :param stride: stride for the 1x1 convolution
    :param weight_decay: hyperparameter for the l2 penalty
    :param block_id: index of the block, used to name its layers
    :return: Output tensor (B, H_new, W_new, out_channels)

    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    in_channels = K.int_shape(x)[channel_axis]
    x = Conv2D(expand * in_channels, 1, padding='same', strides=stride, use_bias=False,
                kernel_regularizer=l2(weight_decay), name='conv_%d_0' % block_id)(x)
    x = BatchNormalization(epsilon=1e-5, momentum=0.9, name='conv_%d_0_bn' % block_id)(x)
    x = Relu6(x, name='conv_%d_0_act_1' % block_id)
    x = DepthwiseConv2D((3, 3),
                        padding='same',
                        depth_multiplier=1,
                        strides=1,
                        use_bias=False,
                        kernel_regularizer=l2(weight_decay),
                        name='conv_dw_%d_0' % block_id )(x)
    x = BatchNormalization(axis=channel_axis, epsilon=1e-5, momentum=0.9, name='conv_dw_%d_0_bn' % block_id)(x)
    x = Relu6(x, name='conv_%d_0_act_2' % block_id)
    x = Conv2D(out_channels, 1, padding='same', strides=1, use_bias=False,
               kernel_regularizer=l2(weight_decay), name='conv_bottleneck_%d_0' % block_id)(x)
    x = BatchNormalization(axis=channel_axis, epsilon=1e-5, momentum=0.9, name='conv_bottlenet_%d_0_bn' % block_id)(x)

    for i in range(1, repeats):
        x1 = Conv2D(expand*out_channels, 1, padding='same', strides=1, use_bias=False,
                    kernel_regularizer=l2(weight_decay), name='conv_%d_%d' % (block_id, i))(x)
        x1 = BatchNormalization(axis=channel_axis, epsilon=1e-5,momentum=0.9,name='conv_%d_%d_bn' % (block_id, i))(x1)
        x1 = Relu6(x1,name='conv_%d_%d_act_1' % (block_id, i))
        x1 = DepthwiseConv2D((3, 3),
                            padding='same',
                            depth_multiplier=1,
                            strides=1,
                            use_bias=False,
                            kernel_regularizer=l2(weight_decay),
                            name='conv_dw_%d_%d' % (block_id, i))(x1)
        x1 = BatchNormalization(axis=channel_axis, epsilon=1e-5,momentum=0.9, name='conv_dw_%d_%d_bn' % (block_id, i))(x1)
        x1 = Relu6(x1, name='conv_dw_%d_%d_act_2' % (block_id, i))
        x1 = Conv2D(out_channels, 1, padding='same', strides=1, use_bias=False,
                    kernel_regularizer=l2(weight_decay),name='conv_bottleneck_%d_%d' % (block_id, i))(x1)
        x1 = BatchNormalization(axis=channel_axis, epsilon=1e-5, momentum=0.9, name='conv_bottlenet_%d_%d_bn' % (block_id, i))(x1)
        x = add([x, x1], name='block_%d_%d_output' % (block_id, i))
    return x
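As a rough usage sketch (the hyperparameters are illustrative only; Relu6 and the keras.layers imports are assumed to come from the top of the same source file):

img_input = Input(shape=(224, 224, 3))
x = Conv2D(32, 3, padding='same', strides=2, use_bias=False, name='conv_stem')(img_input)
x = BatchNormalization(epsilon=1e-5, momentum=0.9, name='conv_stem_bn')(x)
x = Relu6(x, name='conv_stem_act')
x = InvertedResidualBlock(x, expand=1, out_channels=16, repeats=1,
                          stride=1, weight_decay=1e-4, block_id=1)
x = InvertedResidualBlock(x, expand=6, out_channels=24, repeats=2,
                          stride=2, weight_decay=1e-4, block_id=2)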