Python keras.layers.core.Permute() Examples

The following are 22 code examples of keras.layers.core.Permute(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.layers.core, or try the search function.
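
Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of what the layer does: Permute reorders the non-batch dimensions of its input according to a 1-indexed pattern, leaving the batch axis untouched.

from keras.models import Sequential
from keras.layers.core import Permute

# Dimension indices in the pattern are 1-based and exclude the batch axis.
model = Sequential()
model.add(Permute((2, 1), input_shape=(10, 64)))
print(model.output_shape)  # expected: (None, 64, 10)
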
Example #1
Source File: __init__.py    From deep_complex_networks with MIT License 6 votes
def get_shallow_convnet(window_size=4096, channels=2, output_size=84):
    inputs = Input(shape=(window_size, channels))

    conv = ComplexConv1D(
        32, 512, strides=16,
        activation='relu')(inputs)
    pool = AveragePooling1D(pool_size=4, strides=2)(conv)

    pool = Permute([2, 1])(pool)
    flattened = Flatten()(pool)

    dense = ComplexDense(2048, activation='relu')(flattened)
    predictions = ComplexDense(
        output_size, 
        activation='sigmoid',
        bias_initializer=Constant(value=-5))(dense)
    predictions = GetReal(predictions)
    model = Model(inputs=inputs, outputs=predictions)

    model.compile(optimizer=Adam(lr=1e-4),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model 
Example #2
Source File: densenet.py    From chinese_ocr with MIT License 5 votes
def dense_cnn(input, nclass):

    _dropout_rate = 0.2 
    _weight_decay = 1e-4

    _nb_filter = 64
    # conv 64 5*5 s=2
    x = Conv2D(_nb_filter, (5, 5), strides=(2, 2), kernel_initializer='he_normal', padding='same',
               use_bias=False, kernel_regularizer=l2(_weight_decay))(input)
   
    # 64 + 8 * 8 = 128
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)
    # 128
    x, _nb_filter = transition_block(x, 128, _dropout_rate, 2, _weight_decay)

    # 128 + 8 * 8 = 192
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)
    # 192 -> 128
    x, _nb_filter = transition_block(x, 128, _dropout_rate, 2, _weight_decay)

    # 128 + 8 * 8 = 192
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)

    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)

    x = Permute((2, 1, 3), name='permute')(x)
    x = TimeDistributed(Flatten(), name='flatten')(x)
    y_pred = Dense(nclass, name='out', activation='softmax')(x)

    # basemodel = Model(inputs=input, outputs=y_pred)
    # basemodel.summary()

    return y_pred 
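
In this DenseNet OCR head, Permute((2, 1, 3)) swaps height and width so that image columns become the time axis, and TimeDistributed(Flatten()) then collapses each column's height x channels values into a single feature vector. A minimal sketch of just that tail, with assumed feature-map dimensions (the real ones depend on the input image size and the dense blocks above) and nclass assumed to be 5000:

from keras.models import Sequential
from keras.layers import Permute, TimeDistributed, Flatten, Dense

# Assumed feature-map shape after the DenseNet body: height=4, width=70, channels=128.
model = Sequential()
model.add(Permute((2, 1, 3), input_shape=(4, 70, 128)))  # -> (None, 70, 4, 128): width becomes the time axis
model.add(TimeDistributed(Flatten()))                    # -> (None, 70, 512): one feature vector per image column
model.add(Dense(5000, activation='softmax'))             # -> (None, 70, 5000): per-column class scores
print(model.output_shape)
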
Example #3
Source File: densenet.py    From chinese_ocr with MIT License 5 votes
def dense_cnn(input, nclass):
    _dropout_rate = 0.2
    _weight_decay = 1e-4

    _nb_filter = 64
    # conv 64 5*5 s=2
    x = Conv2D(_nb_filter, (5, 5), strides=(2, 2), kernel_initializer='he_normal', padding='same',
               use_bias=False, kernel_regularizer=l2(_weight_decay))(input)

    # 64 + 8 * 8 = 128
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)
    # 128
    x, _nb_filter = transition_block(x, 128, _dropout_rate, 2, _weight_decay)

    # 128 + 8 * 8 = 192
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)
    # 192 -> 128
    x, _nb_filter = transition_block(x, 128, _dropout_rate, 2, _weight_decay)

    # 128 + 8 * 8 = 192
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)

    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)

    x = Permute((2, 1, 3), name='permute')(x)
    x = TimeDistributed(Flatten(), name='flatten')(x)
    y_pred = Dense(nclass, name='out', activation='softmax')(x)

    # basemodel = Model(inputs=input, outputs=y_pred)
    # basemodel.summary()

    return y_pred


# input = Input(shape=(32, 280, 1), name='the_input')
# dense_cnn(input, 5000) 
Example #4
Source File: test_keras.py    From coremltools with BSD 3-Clause "New" or "Revised" License 5 votes
def test_permute(self):
        """
        Test the conversion of a Permute layer.
        """
        from keras.layers.core import Permute

        # Create a simple Keras model
        model = Sequential()
        model.add(Permute((3, 2, 1), input_shape=(10, 64, 3)))

        input_names = ["input"]
        output_names = ["output"]
        spec = keras.convert(model, input_names, output_names).get_spec()
        self.assertIsNotNone(spec)

        # Test the model class
        self.assertIsNotNone(spec.description)
        self.assertTrue(spec.HasField("neuralNetwork"))

        # Test the inputs and outputs
        self.assertEquals(len(spec.description.input), len(input_names))
        six.assertCountEqual(
            self, input_names, [x.name for x in spec.description.input]
        )
        self.assertEquals(len(spec.description.output), len(output_names))
        six.assertCountEqual(
            self, output_names, [x.name for x in spec.description.output]
        )

        # Test the layer parameters.
        layers = spec.neuralNetwork.layers
        layer_0 = layers[0]
        self.assertIsNotNone(layer_0.permute) 
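
The test above only asserts that the converted spec contains a permute layer. For reference, a hedged sketch of what the Keras side of that one-layer model produces (this does not exercise the coremltools conversion itself):

from keras.models import Sequential
from keras.layers.core import Permute

model = Sequential()
model.add(Permute((3, 2, 1), input_shape=(10, 64, 3)))  # reverse the three non-batch axes
print(model.output_shape)  # expected: (None, 3, 64, 10)
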
Example #5
Source File: test_keras2.py    From coremltools with BSD 3-Clause "New" or "Revised" License 5 votes
def test_permute(self):
        """
        Test the conversion of a Permute layer.
        """
        from keras.layers.core import Permute

        # Create a simple Keras model
        model = Sequential()
        model.add(Permute((3, 2, 1), input_shape=(10, 64, 3)))

        input_names = ["input"]
        output_names = ["output"]
        spec = keras.convert(model, input_names, output_names).get_spec()
        self.assertIsNotNone(spec)

        # Test the model class
        self.assertIsNotNone(spec.description)
        self.assertTrue(spec.HasField("neuralNetwork"))

        # Test the inputs and outputs
        self.assertEquals(len(spec.description.input), len(input_names))
        self.assertEqual(
            sorted(input_names), sorted(map(lambda x: x.name, spec.description.input))
        )
        self.assertEquals(len(spec.description.output), len(output_names))
        self.assertEqual(
            sorted(output_names), sorted(map(lambda x: x.name, spec.description.output))
        )

        # Test the layer parameters.
        layers = spec.neuralNetwork.layers
        layer_0 = layers[0]
        self.assertIsNotNone(layer_0.permute) 
Example #6
Source File: densenet.py    From chinese_ocr with Apache License 2.0 5 votes
def dense_cnn(input, nclass):

    _dropout_rate = 0.2 
    _weight_decay = 1e-4

    _nb_filter = 64
    # conv 64 5*5 s=2
    x = Conv2D(_nb_filter, (5, 5), strides=(2, 2), kernel_initializer='he_normal', padding='same',
               use_bias=False, kernel_regularizer=l2(_weight_decay))(input)
   
    # 64 + 8 * 8 = 128
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)
    # 128
    x, _nb_filter = transition_block(x, 128, _dropout_rate, 2, _weight_decay)

    # 128 + 8 * 8 = 192
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)
    # 192 -> 128
    x, _nb_filter = transition_block(x, 128, _dropout_rate, 2, _weight_decay)

    # 128 + 8 * 8 = 192
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)

    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)

    x = Permute((2, 1, 3), name='permute')(x)
    x = TimeDistributed(Flatten(), name='flatten')(x)
    y_pred = Dense(nclass, name='out', activation='softmax')(x)

    # basemodel = Model(inputs=input, outputs=y_pred)
    # basemodel.summary()

    return y_pred 
Example #7
Source File: densenet.py    From chinese_ocr with Apache License 2.0 5 votes
def dense_cnn(input, nclass):

    _dropout_rate = 0.2 
    _weight_decay = 1e-4

    _nb_filter = 64
    # conv 64 5*5 s=2
    x = Conv2D(_nb_filter, (5, 5), strides=(2, 2), kernel_initializer='he_normal', padding='same',
               use_bias=False, kernel_regularizer=l2(_weight_decay))(input)
   
    # 64 + 8 * 8 = 128
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)
    # 128
    x, _nb_filter = transition_block(x, 128, _dropout_rate, 2, _weight_decay)

    # 128 + 8 * 8 = 192
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)
    # 192 -> 128
    x, _nb_filter = transition_block(x, 128, _dropout_rate, 2, _weight_decay)

    # 128 + 8 * 8 = 192
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)

    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)

    x = Permute((2, 1, 3), name='permute')(x)
    x = TimeDistributed(Flatten(), name='flatten')(x)
    y_pred = Dense(nclass, name='out', activation='softmax')(x)

    # basemodel = Model(inputs=input, outputs=y_pred)
    # basemodel.summary()

    return y_pred 
Example #8
Source File: densenet.py    From deep_learning with MIT License 5 votes
def dense_cnn(input, nclass):

    _dropout_rate = 0.2
    _weight_decay = 1e-4

    _nb_filter = 64
    # conv 64  5*5 s=2
    x = Conv2D(_nb_filter, (5, 5), strides=(2, 2), kernel_initializer='he_normal', padding='same',
               use_bias=False, kernel_regularizer=l2(_weight_decay))(input)

    # 64 +  8 * 8 = 128
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)
    #128
    x, _nb_filter = transition_block(x, 128, _dropout_rate, 2, _weight_decay)

    #128 + 8 * 8 = 192
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)
    #192->128
    x, _nb_filter = transition_block(x, 128, _dropout_rate, 2, _weight_decay)

    #128 + 8 * 8 = 192
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)

    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)

    x = Permute((2, 1, 3), name='permute')(x)
    x = TimeDistributed(Flatten(), name='flatten')(x)
    y_pred = Dense(nclass, name='out', activation='softmax')(x)

    basemodel = Model(inputs=input,outputs=y_pred)
    basemodel.summary()
    return basemodel 
Example #9
Source File: topcoder_crnn.py    From crnn-lid with GNU General Public License v3.0 5 votes
def create_model(input_shape, config, is_training=True):

    weight_decay = 0.001

    model = Sequential()

    model.add(Convolution2D(16, 7, 7, W_regularizer=l2(weight_decay), activation="relu", input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(32, 5, 5, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(64, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(128, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(256, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # (bs, y, x, c) --> (bs, x, y, c)
    model.add(Permute((2, 1, 3)))

    # (bs, x, y, c) --> (bs, x, y * c)
    bs, x, y, c = model.layers[-1].output_shape
    model.add(Reshape((x, y*c)))

    model.add(Bidirectional(LSTM(512, return_sequences=False), merge_mode="concat"))
    model.add(Dense(config["num_classes"], activation="softmax"))

    return model 
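
The Permute/Reshape pair is what bridges the 2D convolutional features and the recurrent layer: width becomes the time axis and each column's height x channels values become the per-step features. A standalone sketch of that bridge with assumed dimensions (and a plain LSTM standing in for the bidirectional one):

from keras.models import Sequential
from keras.layers import Permute, Reshape, LSTM

# Assumed CNN output shape (excluding batch): height=3, width=20, channels=256;
# the real values depend on input_shape and the pooling stack above.
model = Sequential()
model.add(Permute((2, 1, 3), input_shape=(3, 20, 256)))  # (bs, y, x, c) -> (bs, x, y, c)
_, x, y, c = model.layers[-1].output_shape
model.add(Reshape((x, y * c)))                           # (bs, x, y, c) -> (bs, x, y*c)
model.add(LSTM(512, return_sequences=False))
print(model.output_shape)  # expected: (None, 512)
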
Example #10
Source File: motion_MNetArt.py    From CNNArt with Apache License 2.0 4 votes
def fCreateModel(patchSize, learningRate=1e-3, optimizer='SGD',
        dr_rate=0.0, input_dr_rate=0.0, max_norm=5, iPReLU=0, l2_reg=1e-6):
        l2_reg=1e-4

        #(4 stages-each 2 convs)(378,722 params)(for 40x40x10)
        input_t=Input(shape=(1,int(patchSize[0, 0]),int(patchSize[0, 1]), int(patchSize[0, 2])))
        input2D_t=Permute((4,1,2,3))(input_t)
        input2D_t=Reshape(target_shape=(int(patchSize[0, 2]),int(patchSize[0, 0]), int(patchSize[0, 1])))(
            input2D_t)
        #use zDimension as number of channels
        twoD_t=Conv2D(16,
                      kernel_size=(7,7),
                      padding='same',
                      kernel_initializer='he_normal',
                      kernel_regularizer=l2(l2_reg),
                      strides=(1,1)
                      )(input2D_t)
        twoD_t = Activation('relu')(twoD_t)

        l_w2_t = fCreateMaxPooling2D(twoD_t, stride=(2, 2))
        l_w3_t = fCreateMaxPooling2D(l_w2_t, stride=(2, 2))
        l_w4_t = fCreateMaxPooling2D(l_w3_t, stride=(2, 2))

        stage1_res1_t=fCreateMNet_Block(twoD_t,16,kernel_size=(3,3), forwarding=True, l2_reg=l2_reg)
        stage1_res2_t=fCreateMNet_Block(stage1_res1_t,32,kernel_size=(3,3), forwarding=False, l2_reg=l2_reg)

        stage2_inp_t=fCreateMaxPooling2D(stage1_res2_t, stride=(2,2))
        stage2_inp_t=concatenate([stage2_inp_t,l_w2_t], axis=1)
        stage2_res1_t=fCreateMNet_Block(stage2_inp_t,32,l2_reg=l2_reg)
        stage2_res2_t=fCreateMNet_Block(stage2_res1_t,48, forwarding=False)

        stage3_inp_t=fCreateMaxPooling2D(stage2_res2_t, stride=(2,2))
        stage3_inp_t=concatenate([stage3_inp_t,l_w3_t], axis=1)
        stage3_res1_t=fCreateMNet_Block(stage3_inp_t,48,l2_reg=l2_reg)
        stage3_res2_t = fCreateMNet_Block(stage3_res1_t, 64, forwarding=False,l2_reg=l2_reg)

        stage4_inp_t = fCreateMaxPooling2D(stage3_res2_t, stride=(2, 2))
        stage4_inp_t = concatenate([stage4_inp_t, l_w4_t], axis=1)
        stage4_res1_t = fCreateMNet_Block(stage4_inp_t, 64,l2_reg=l2_reg)
        stage4_res2_t = fCreateMNet_Block(stage4_res1_t, 128, forwarding=False,l2_reg=l2_reg)

        after_flat_t = Flatten()(stage4_res2_t)

        after_dense_t = Dense(units=2,
                              kernel_initializer='he_normal',
                              kernel_regularizer=l2(l2_reg))(after_flat_t)
        output_t = Activation('softmax')(after_dense_t)

        cnn = Model(inputs=[input_t], outputs=[output_t])

        opti, loss = fGetOptimizerAndLoss(optimizer, learningRate=learningRate)
        cnn.compile(optimizer=opti, loss=loss, metrics=['accuracy'])
        sArchiSpecs = '3stages_l2{}'.format(l2_reg) 
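
The Permute((4, 1, 2, 3)) plus Reshape at the top of this model turns a single-channel 3D patch into a stack of 2D slices, so the z dimension can serve as the channel axis of the following Conv2D (the axis=1 concatenations suggest channels_first ordering). A standalone sketch of that reinterpretation with assumed patch dimensions:

from keras.models import Model
from keras.layers import Input, Permute, Reshape

# Assumed 40 x 40 x 10 patch (x, y, z) with a single leading channel,
# matching the "for 40x40x10" comment above.
inp = Input(shape=(1, 40, 40, 10))
x = Permute((4, 1, 2, 3))(inp)  # -> (None, 10, 1, 40, 40): z moved to the front
x = Reshape((10, 40, 40))(x)    # drop the singleton axis: the 10 z-slices become channels
model = Model(inputs=inp, outputs=x)
print(model.output_shape)       # expected: (None, 10, 40, 40)
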
Example #11
Source File: encoder.py    From enet-keras with MIT License 4 votes
def bottleneck(inp, output, internal_scale=4, asymmetric=0, dilated=0, downsample=False, dropout_rate=0.1):
    # main branch
    internal = output // internal_scale
    encoder = inp

    # 1x1
    input_stride = 2 if downsample else 1  # the 1st 1x1 projection is replaced with a 2x2 convolution when downsampling
    encoder = Conv2D(internal, (input_stride, input_stride),
                     # padding='same',
                     strides=(input_stride, input_stride), use_bias=False)(encoder)
    # Batch normalization + PReLU
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # conv
    if not asymmetric and not dilated:
        encoder = Conv2D(internal, (3, 3), padding='same')(encoder)
    elif asymmetric:
        encoder = Conv2D(internal, (1, asymmetric), padding='same', use_bias=False)(encoder)
        encoder = Conv2D(internal, (asymmetric, 1), padding='same')(encoder)
    elif dilated:
        encoder = Conv2D(internal, (3, 3), dilation_rate=(dilated, dilated), padding='same')(encoder)
    else:
        raise(Exception('You shouldn\'t be here'))

    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    
    # 1x1
    encoder = Conv2D(output, (1, 1), use_bias=False)(encoder)

    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    encoder = SpatialDropout2D(dropout_rate)(encoder)

    other = inp
    # other branch
    if downsample:
        other, indices = MaxPoolingWithArgmax2D()(other)

        other = Permute((1, 3, 2))(other)
        pad_feature_maps = output - inp.get_shape().as_list()[3]
        tb_pad = (0, 0)
        lr_pad = (0, pad_feature_maps)
        other = ZeroPadding2D(padding=(tb_pad, lr_pad))(other)
        other = Permute((1, 3, 2))(other)

    encoder = add([encoder, other])
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    if downsample:
        return encoder, indices
    else:
        return encoder 
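
The downsampling branch above needs to grow the channel count of the pooled input to match the main branch, but ZeroPadding2D only pads spatial axes. The Permute((1, 3, 2)) sandwich works around that: channels are temporarily swapped into a spatial position, zero-padded, and swapped back. A minimal standalone sketch of the trick with assumed shapes:

from keras.models import Sequential
from keras.layers import Permute, ZeroPadding2D

# Assumed channels_last input of shape (32, 32, 16), padded up to 64 feature maps.
pad_feature_maps = 64 - 16
model = Sequential()
model.add(Permute((1, 3, 2), input_shape=(32, 32, 16)))            # -> (None, 32, 16, 32): channels on a "spatial" axis
model.add(ZeroPadding2D(padding=((0, 0), (0, pad_feature_maps))))  # pad that axis from 16 to 64
model.add(Permute((1, 3, 2)))                                      # swap back
print(model.output_shape)  # expected: (None, 32, 32, 64)
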
Example #12
Source File: encoder.py    From enet-keras with MIT License 4 votes
def bottleneck(inp, output, internal_scale=4, asymmetric=0, dilated=0, downsample=False, dropout_rate=0.1):
    # main branch
    internal = output // internal_scale
    encoder = inp

    # 1x1
    input_stride = 2 if downsample else 1  # the 1st 1x1 projection is replaced with a 2x2 convolution when downsampling
    encoder = Conv2D(internal, (input_stride, input_stride),
                            # padding='same',
                            strides=(input_stride, input_stride), use_bias=False)(encoder)
    # Batch normalization + PReLU
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # conv
    if not asymmetric and not dilated:
        encoder = Conv2D(internal, (3, 3), padding='same')(encoder)
    elif asymmetric:
        encoder = Conv2D(internal, (1, asymmetric), padding='same', use_bias=False)(encoder)
        encoder = Conv2D(internal, (asymmetric, 1), padding='same')(encoder)
    elif dilated:
        encoder = Conv2D(internal, (3, 3), dilation_rate=(dilated, dilated), padding='same')(encoder)
    else:
        raise(Exception('You shouldn\'t be here'))

    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    
    # 1x1
    encoder = Conv2D(output, (1, 1), use_bias=False)(encoder)

    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = SpatialDropout2D(dropout_rate)(encoder)

    other = inp
    # other branch
    if downsample:
        other = MaxPooling2D()(other)

        other = Permute((1, 3, 2))(other)
        pad_feature_maps = output - inp.get_shape().as_list()[3]
        tb_pad = (0, 0)
        lr_pad = (0, pad_feature_maps)
        other = ZeroPadding2D(padding=(tb_pad, lr_pad))(other)
        other = Permute((1, 3, 2))(other)

    encoder = add([encoder, other])
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    return encoder 
Example #13
Source File: topcoder_crnn_finetune.py    From crnn-lid with GNU General Public License v3.0 4 votes
def create_model(input_shape, config):

    weight_decay = 0.001

    model = Sequential()

    model.add(Convolution2D(16, 7, 7, W_regularizer=l2(weight_decay), activation="relu", input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(32, 5, 5, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(64, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(128, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 1)))

    model.add(Convolution2D(256, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 1)))

    model.load_weights("logs/2017-04-08-13-03-44/weights.08.model", by_name=True)
    # for ref_layer in ref_model.layers:
    #     layer = model.get_layer(ref_layer.name)
    #     if layer:
    #         layer.set_weights(ref_layer.get_weights())

    for layer in model.layers:
        layer.trainable = False

    # (bs, y, x, c) --> (bs, x, y, c)
    model.add(Permute((2, 1, 3)))

    # (bs, x, y, c) --> (bs, x, y * c)
    bs, x, y, c = model.layers[-1].output_shape
    model.add(Reshape((x, y*c)))

    model.add(Bidirectional(LSTM(512, return_sequences=False), merge_mode="concat"))
    model.add(Dense(config["num_classes"], activation="softmax"))

    return model 
Example #14
Source File: crnn.py    From crnn-lid with GNU General Public License v3.0 4 votes
def create_model(input_shape, config, is_training=True):

    weight_decay = 0.001

    model = Sequential()

    model.add(Convolution2D(64, 3, 3, W_regularizer=l2(weight_decay), activation="relu", input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(128, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(256, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    # model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(256, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(512, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    # model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(512, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Convolution2D(512, 3, 3, W_regularizer=l2(weight_decay), activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

    # (bs, y, x, c) --> (bs, x, y, c)
    model.add(Permute((2, 1, 3)))

    # (bs, x, y, c) --> (bs, x, y * c)
    bs, x, y, c = model.layers[-1].output_shape
    model.add(Reshape((x, y*c)))

    model.add(Bidirectional(LSTM(256, return_sequences=False), merge_mode="concat"))
    model.add(Dense(config["num_classes"], activation="softmax"))

    return model 
Example #15
Source File: embeddings.py    From fancy-cnn with MIT License 4 votes
def sentence_embedding(sentence_len, wv_params, wv_size,
                       input_name='sentence_embedding', output_name='vector_embedding'):
    '''
    Creates an embedding of word vectors into a sentence image.

    Args:
    -----
        sentence_len: length of sentences to be passed

        wv_params: a dict of the following format

                        wv_params = {
                            'fixed_wv' : 
                            {
                                'vocab_size' : 1000,
                                'init' : None,
                                'fixed' : True
                            },
                            'floating_wv' : 
                            {
                                'vocab_size' : 1000,
                                'init' : None,
                                'fixed' : False
                            }
                        }
            the keys of the dictionary are the names in the keras graph model, and
            you can have any number of word vector layers encoded.

        input_name: the name of the input node for the graph

        output_name: the name of the output node for the graph

    Returns:
    --------

        a keras container that takes as input an integer array with shape (n_samples, n_words), and returns 
        shape (n_samples, wv_channels, len_sentence, wv_dim)!
    '''
    # -- output is (n_samples, n_channels, n_words, wv_dim)
    g = SubGraph()

    if KERAS_BACKEND:
        g.add_input(input_name, (sentence_len, ), dtype='int')
    else:
        g.add_input(input_name, (-1, ), dtype='int')

    for name, params in wv_params.iteritems():
        # g.add_input(params['input_name'], (-1, ), dtype='int')
        g.add_node(make_embedding(wv_size=wv_size, **params), name=name, input=input_name)

    g.add_node(Reshape((sentence_len, len(wv_params), wv_size)), name='reshape',
               inputs=wv_params.keys(), merge_mode='concat')
    g.add_node(Permute(dims=(2, 1, 3)), name='permute', input='reshape')
    
    # -- output is of shape (nb_samples, nb_wv_channels, len_sentence, wv_dim)
    g.add_output(name=output_name, input='permute')
    return g 
Example #16
Source File: MNetArt.py    From CNNArt with Apache License 2.0 4 votes
def fCreateModel(patchSize, learningRate=1e-3, optimizer='SGD',
                 dr_rate=0.0, input_dr_rate=0.0, max_norm=5, iPReLU=0, l2_reg=1e-6):
    l2_reg = 1e-4

    # (4 stages-each 2 convs)(378,722 params)(for 40x40x10)
    input_t = Input(shape=(1, int(patchSize[0, 0]), int(patchSize[0, 1]), int(patchSize[0, 2])))
    input2D_t = Permute((4, 1, 2, 3))(input_t)
    input2D_t = Reshape(target_shape=(int(patchSize[0, 2]), int(patchSize[0, 0]), int(patchSize[0, 1])))(
        input2D_t)
    # use zDimension as number of channels
    twoD_t = Conv2D(16,
                    kernel_size=(7, 7),
                    padding='same',
                    kernel_initializer='he_normal',
                    kernel_regularizer=l2(l2_reg),
                    strides=(1, 1)
                    )(input2D_t)
    twoD_t = Activation('relu')(twoD_t)

    l_w2_t = fCreateMaxPooling2D(twoD_t, stride=(2, 2))
    l_w3_t = fCreateMaxPooling2D(l_w2_t, stride=(2, 2))
    l_w4_t = fCreateMaxPooling2D(l_w3_t, stride=(2, 2))

    stage1_res1_t = fCreateMNet_Block(twoD_t, 16, kernel_size=(3, 3), forwarding=True, l2_reg=l2_reg)
    stage1_res2_t = fCreateMNet_Block(stage1_res1_t, 32, kernel_size=(3, 3), forwarding=False, l2_reg=l2_reg)

    stage2_inp_t = fCreateMaxPooling2D(stage1_res2_t, stride=(2, 2))
    stage2_inp_t = concatenate([stage2_inp_t, l_w2_t], axis=1)
    stage2_res1_t = fCreateMNet_Block(stage2_inp_t, 32, l2_reg=l2_reg)
    stage2_res2_t = fCreateMNet_Block(stage2_res1_t, 48, forwarding=False)

    stage3_inp_t = fCreateMaxPooling2D(stage2_res2_t, stride=(2, 2))
    stage3_inp_t = concatenate([stage3_inp_t, l_w3_t], axis=1)
    stage3_res1_t = fCreateMNet_Block(stage3_inp_t, 48, l2_reg=l2_reg)
    stage3_res2_t = fCreateMNet_Block(stage3_res1_t, 64, forwarding=False, l2_reg=l2_reg)

    stage4_inp_t = fCreateMaxPooling2D(stage3_res2_t, stride=(2, 2))
    stage4_inp_t = concatenate([stage4_inp_t, l_w4_t], axis=1)
    stage4_res1_t = fCreateMNet_Block(stage4_inp_t, 64, l2_reg=l2_reg)
    stage4_res2_t = fCreateMNet_Block(stage4_res1_t, 128, forwarding=False, l2_reg=l2_reg)

    after_flat_t = Flatten()(stage4_res2_t)

    after_dense_t = Dense(units=2,
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(l2_reg))(after_flat_t)
    output_t = Activation('softmax')(after_dense_t)

    cnn = Model(inputs=[input_t], outputs=[output_t])

    opti, loss = fGetOptimizerAndLoss(optimizer, learningRate=learningRate)
    cnn.compile(optimizer=opti, loss=loss, metrics=['accuracy'])
    sArchiSpecs = '3stages_l2{}'.format(l2_reg) 
Example #17
Source File: MNetArt.py    From CNNArt with Apache License 2.0 4 votes
def fCreateModel(patchSize, learningRate=1e-3, optimizer='SGD',
                 dr_rate=0.0, input_dr_rate=0.0, max_norm=5, iPReLU=0, l2_reg=1e-6):
    l2_reg = 1e-4

    # (4 stages-each 2 convs)(378,722 params)(for 40x40x10)
    input_t = Input(shape=(1, int(patchSize[0, 0]), int(patchSize[0, 1]), int(patchSize[0, 2])))
    input2D_t = Permute((4, 1, 2, 3))(input_t)
    input2D_t = Reshape(target_shape=(int(patchSize[0, 2]), int(patchSize[0, 0]), int(patchSize[0, 1])))(
        input2D_t)
    # use zDimension as number of channels
    twoD_t = Conv2D(16,
                    kernel_size=(7, 7),
                    padding='same',
                    kernel_initializer='he_normal',
                    kernel_regularizer=l2(l2_reg),
                    strides=(1, 1)
                    )(input2D_t)
    twoD_t = Activation('relu')(twoD_t)

    l_w2_t = fCreateMaxPooling2D(twoD_t, stride=(2, 2))
    l_w3_t = fCreateMaxPooling2D(l_w2_t, stride=(2, 2))
    l_w4_t = fCreateMaxPooling2D(l_w3_t, stride=(2, 2))

    stage1_res1_t = fCreateMNet_Block(twoD_t, 16, kernel_size=(3, 3), forwarding=True, l2_reg=l2_reg)
    stage1_res2_t = fCreateMNet_Block(stage1_res1_t, 32, kernel_size=(3, 3), forwarding=False, l2_reg=l2_reg)

    stage2_inp_t = fCreateMaxPooling2D(stage1_res2_t, stride=(2, 2))
    stage2_inp_t = concatenate([stage2_inp_t, l_w2_t], axis=1)
    stage2_res1_t = fCreateMNet_Block(stage2_inp_t, 32, l2_reg=l2_reg)
    stage2_res2_t = fCreateMNet_Block(stage2_res1_t, 48, forwarding=False)

    stage3_inp_t = fCreateMaxPooling2D(stage2_res2_t, stride=(2, 2))
    stage3_inp_t = concatenate([stage3_inp_t, l_w3_t], axis=1)
    stage3_res1_t = fCreateMNet_Block(stage3_inp_t, 48, l2_reg=l2_reg)
    stage3_res2_t = fCreateMNet_Block(stage3_res1_t, 64, forwarding=False, l2_reg=l2_reg)

    stage4_inp_t = fCreateMaxPooling2D(stage3_res2_t, stride=(2, 2))
    stage4_inp_t = concatenate([stage4_inp_t, l_w4_t], axis=1)
    stage4_res1_t = fCreateMNet_Block(stage4_inp_t, 64, l2_reg=l2_reg)
    stage4_res2_t = fCreateMNet_Block(stage4_res1_t, 128, forwarding=False, l2_reg=l2_reg)

    after_flat_t = Flatten()(stage4_res2_t)

    after_dense_t = Dense(units=2,
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(l2_reg))(after_flat_t)
    output_t = Activation('softmax')(after_dense_t)

    cnn = Model(inputs=[input_t], outputs=[output_t])

    opti, loss = fGetOptimizerAndLoss(optimizer, learningRate=learningRate)
    cnn.compile(optimizer=opti, loss=loss, metrics=['accuracy'])
    sArchiSpecs = '3stages_l2{}'.format(l2_reg) 
Example #18
Source File: tiramisu.py    From neural-road-inspector with MIT License 4 votes
def get_tiramisu(self):
		model = self.model = models.Sequential()
		# cropping
		# model.add(Cropping2D(cropping=((68, 68), (128, 128)), input_shape=(3, 360,480)))

		model.add(Conv2D(48, 
						kernel_size=(3, 3), 
						padding='same', 
						input_shape=(self.img_rows, self.img_cols, self.num_channels),
						kernel_initializer="he_uniform",
						kernel_regularizer = l2(0.0001),
						data_format='channels_last'))

		self.DenseBlock(5,108) # 5*12 = 60 + 48 = 108
		self.TransitionDown(108)
		self.DenseBlock(5,168) # 5*12 = 60 + 108 = 168
		self.TransitionDown(168)
		self.DenseBlock(5,228) # 5*12 = 60 + 168 = 228
		self.TransitionDown(228)
		self.DenseBlock(5,288)# 5*12 = 60 + 228 = 288
		self.TransitionDown(288)
		self.DenseBlock(5,348) # 5*12 = 60 + 288 = 348
		self.TransitionDown(348)

		self.DenseBlock(15,408) # m = 348 + 5*12 = 408

		self.TransitionUp(468, (468, self.img_rows/32, self.img_cols/32), (None, 468, self.img_rows/16, self.img_cols/16))
		self.DenseBlock(5,468)

		self.TransitionUp(408, (408, self.img_rows/16, self.img_cols/16), (None, 408, self.img_rows/8, self.img_cols/8))
		self.DenseBlock(5,408)

		self.TransitionUp(348, (348, self.img_rows/8, self.img_cols/8), (None, 348, self.img_rows/4, self.img_cols/4))
		self.DenseBlock(5,348)

		self.TransitionUp(288, (288, self.img_rows/4, self.img_cols/4), (None, 288, self.img_rows/2, self.img_cols/2))
		self.DenseBlock(5,288)

		self.TransitionUp(228, (228, self.img_rows/2, self.img_cols/2), (None, 228, self.img_rows, self.img_cols))
		self.DenseBlock(5,228)

		model.add(Conv2D(12, 
						kernel_size=(1,1), 
						padding='same',
						kernel_initializer="he_uniform",
						kernel_regularizer = l2(0.0001),
						data_format='channels_last'))
		
		model.add(Reshape((12, self.img_rows * self.img_cols)))
		model.add(Permute((2, 1)))
		model.add(Activation('sigmoid'))
		#model.summary()
		return model 
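
The closing Reshape and Permute((2, 1)) flatten the class-score map into a (pixels, classes) matrix so the sigmoid is applied independently per pixel. A minimal sketch of just that tail, assuming a 224 x 224 image and a channels_first score map with 12 classes:

from keras.models import Sequential
from keras.layers import Reshape, Permute, Activation

img_rows, img_cols = 224, 224  # assumed image size
model = Sequential()
model.add(Reshape((12, img_rows * img_cols), input_shape=(12, img_rows, img_cols)))
model.add(Permute((2, 1)))        # -> (None, 50176, 12): one 12-way score vector per pixel
model.add(Activation('sigmoid'))
print(model.output_shape)         # expected: (None, 50176, 12)
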
Example #19
Source File: Unet.py    From ECG_UNet with MIT License 4 votes
def Unet(nClasses, optimizer=None, input_length=1800, nChannels=1):
    inputs = Input((input_length, nChannels))
    conv1 = Conv1D(16, 32, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
    conv1 = Conv1D(16, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
    pool1 = MaxPooling1D(pool_size=2)(conv1)

    conv2 = Conv1D(32, 32, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv1D(32, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
    pool2 = MaxPooling1D(pool_size=2)(conv2)
    
    conv3 = Conv1D(64, 32, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
    conv3 = Conv1D(64, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
    pool3 = MaxPooling1D(pool_size=2)(conv3)

    conv4 = Conv1D(128, 32, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
    conv4 = Dropout(0.5)(conv4)
    conv4 = Conv1D(128, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)

    up1 = Conv1D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling1D(size=2)(conv4))
    merge1 = concatenate([up1, conv3], axis=-1)
    conv5 = Conv1D(64, 32, activation='relu', padding='same', kernel_initializer='he_normal')(merge1)
    conv5 = Conv1D(64, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
    
    up2 = Conv1D(32, 2, activation='relu', padding='same', kernel_initializer = 'he_normal')(UpSampling1D(size=2)(conv5))
    merge2 = concatenate([up2, conv2], axis=-1)
    conv6 = Conv1D(32, 32, activation='relu', padding='same', kernel_initializer = 'he_normal')(merge2)
    conv6 = Dropout(0.2)(conv6)
    conv6 = Conv1D(32, 32, activation='relu', padding='same')(conv6)
    
    up3 = Conv1D(16, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling1D(size=2)(conv6))
    merge3 = concatenate([up3, conv1], axis=-1)
    conv7 = Conv1D(16, 32, activation='relu', padding='same', kernel_initializer='he_normal')(merge3)
    conv7 = Conv1D(16, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
    
    conv8 = Conv1D(nClasses, 1, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
    conv8 = core.Reshape((nClasses, input_length))(conv8)
    conv8 = core.Permute((2, 1))(conv8)

    conv9 = core.Activation('softmax')(conv8)

    model = Model(inputs=inputs, outputs=conv9)
    if optimizer is not None:
        model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=['accuracy'])

    return model 
Example #20
Source File: __init__.py    From deep_complex_networks with MIT License 4 votes
def get_deep_convnet(window_size=4096, channels=2, output_size=84):
    inputs = Input(shape=(window_size, channels))
    outs = inputs

    outs = (ComplexConv1D(
        16, 6, strides=2, padding='same',
        activation='linear',
        kernel_initializer='complex_independent'))(outs)
    outs = (ComplexBN(axis=-1))(outs)
    outs = (keras.layers.Activation('relu'))(outs)
    outs = (keras.layers.AveragePooling1D(pool_size=2, strides=2))(outs)

    outs = (ComplexConv1D(
        32, 3, strides=2, padding='same',
        activation='linear',
        kernel_initializer='complex_independent'))(outs)
    outs = (ComplexBN(axis=-1))(outs)
    outs = (keras.layers.Activation('relu'))(outs)
    outs = (keras.layers.AveragePooling1D(pool_size=2, strides=2))(outs)
    
    outs = (ComplexConv1D(
        64, 3, strides=1, padding='same',
        activation='linear',
        kernel_initializer='complex_independent'))(outs)
    outs = (ComplexBN(axis=-1))(outs)
    outs = (keras.layers.Activation('relu'))(outs)
    outs = (keras.layers.AveragePooling1D(pool_size=2, strides=2))(outs)

    outs = (ComplexConv1D(
        64, 3, strides=1, padding='same',
        activation='linear',
        kernel_initializer='complex_independent'))(outs)
    outs = (ComplexBN(axis=-1))(outs)
    outs = (keras.layers.Activation('relu'))(outs)
    outs = (keras.layers.AveragePooling1D(pool_size=2, strides=2))(outs)

    outs = (ComplexConv1D(
        128, 3, strides=1, padding='same',
        activation='relu',
        kernel_initializer='complex_independent'))(outs)
    outs = (ComplexConv1D(
        128, 3, strides=1, padding='same',
        activation='linear',
        kernel_initializer='complex_independent'))(outs)
    outs = (ComplexBN(axis=-1))(outs)
    outs = (keras.layers.Activation('relu'))(outs)
    outs = (keras.layers.AveragePooling1D(pool_size=2, strides=2))(outs)

    #outs = (keras.layers.MaxPooling1D(pool_size=2))
    #outs = (Permute([2, 1]))
    outs = (keras.layers.Flatten())(outs)
    outs = (keras.layers.Dense(2048, activation='relu',
                           kernel_initializer='glorot_normal'))(outs)
    predictions = (keras.layers.Dense(output_size, activation='sigmoid',
                                 bias_initializer=keras.initializers.Constant(value=-5)))(outs)

    model = Model(inputs=inputs, outputs=predictions)
    model.compile(optimizer=keras.optimizers.Adam(lr=1e-4),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model 
Example #21
Source File: model.py    From DeepLearn with MIT License 4 votes
def cnn(embedding_matrix, dimx=50, dimy=50, nb_filter = 120, 
        embedding_dim = 50,filter_length = (50,4), vocab_size = 8000, depth = 1):

    print 'Model Uses Basic CNN......'
    
    inpx = Input(shape=(dimx,),dtype='int32',name='inpx')   
    inpy = Input(shape=(dimy,),dtype='int32',name='inpy')
    
    x = word2vec_embedding_layer(embedding_matrix,train=False)(inpx)
    y = word2vec_embedding_layer(embedding_matrix,train=False)(inpy)
    
    x = Permute((2,1))(x)
    y = Permute((2,1))(y)

    conv1 = Reshape((embedding_dim,dimx,1))(x)
    conv2 = Reshape((embedding_dim,dimy,1))(y)   
       
    channel_1, channel_2 = [], []
    
    for dep in range(depth):
        
        #conv1 = ZeroPadding2D((filter_width - 1, 0))(conv1)
        #conv2 = ZeroPadding2D((filter_width - 1, 0))(conv2)
        

        ques = Conv2D(nb_filter=nb_filter, kernel_size = filter_length, activation='relu',
                data_format = 'channels_last',border_mode="valid")(conv1)
        ans = Conv2D(nb_filter, kernel_size = filter_length, activation='relu',
                data_format="channels_last",border_mode="valid")(conv2)
                    
            
        #conv1 = GlobalMaxPooling2D()(ques)
        #conv2 = GlobalMaxPooling2D()(ans)
        #conv1 = MaxPooling2D()(ques)
        #conv2 = MaxPooling2D()(ans)
        
        channel_1.append(GlobalMaxPooling2D()(ques))
        channel_2.append(GlobalMaxPooling2D()(ans))
        
        #channel_1.append(GlobalAveragePooling2D()(ques))
        #channel_2.append(GlobalAveragePooling2D()(ans))
    
    h1 = channel_1.pop(-1)
    if channel_1:
        h1 = merge([h1] + channel_1, mode="concat")

    h2 = channel_2.pop(-1)
    if channel_2:
        h2 = merge([h2] + channel_2, mode="concat")
    
    h =  Merge(mode="concat",name='h')([h1, h2])
    #h = Dropout(0.2)(h)
    #h = Dense(50, kernel_regularizer=regularizers.l2(reg2),activation='relu')(h)
    #wrap = Dropout(0.5)(h)
    #wrap = Dense(64, activation='tanh')(h)   
    
    score = Dense(2,activation='softmax',name='score')(h)
    model = Model([inpx, inpy],[score])
    model.compile( loss='categorical_crossentropy',optimizer='adam')
    
    return model 
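
The Permute((2, 1)) above flips each embedded sentence from (words, embedding_dim) to (embedding_dim, words) before a trailing channel axis is added for the 2D convolutions. A standalone sketch of that front end with assumed toy sizes (a plain Embedding replaces the project's word2vec_embedding_layer):

from keras.models import Model
from keras.layers import Input, Embedding, Permute, Reshape

# Assumed sizes: 8000-word vocabulary, 60-word sentences, 50-dim embeddings.
inp = Input(shape=(60,), dtype='int32')
x = Embedding(input_dim=8000, output_dim=50, input_length=60)(inp)  # -> (None, 60, 50)
x = Permute((2, 1))(x)                                              # -> (None, 50, 60): (emb_dim, words)
x = Reshape((50, 60, 1))(x)                                         # trailing channel for the channels_last Conv2D
model = Model(inputs=inp, outputs=x)
print(model.output_shape)  # expected: (None, 50, 60, 1)
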
Example #22
Source File: model.py    From DeepLearn with MIT License 4 votes
def cnn(embedding_matrix, dimx=50, dimy=50, nb_filter = 120, 
        embedding_dim = 50,filter_length = (50,4), vocab_size = 8000, depth = 1):

    print 'Model Uses Basic CNN......'
    
    inpx = Input(shape=(dimx,),dtype='int32',name='inpx')   
    inpy = Input(shape=(dimy,),dtype='int32',name='inpy')
    
    x = word2vec_embedding_layer(embedding_matrix,train=False)(inpx)
    y = word2vec_embedding_layer(embedding_matrix,train=False)(inpy)
    
    x = Permute((2,1))(x)
    y = Permute((2,1))(y)

    conv1 = Reshape((embedding_dim,dimx,1))(x)
    conv2 = Reshape((embedding_dim,dimy,1))(y)   
       
    channel_1, channel_2 = [], []
    
    for dep in range(depth):
        
        #conv1 = ZeroPadding2D((filter_width - 1, 0))(conv1)
        #conv2 = ZeroPadding2D((filter_width - 1, 0))(conv2)
        

        ques = Conv2D(nb_filter=nb_filter, kernel_size = filter_length, activation='relu',
                data_format = 'channels_last',border_mode="valid")(conv1)
        ans = Conv2D(nb_filter, kernel_size = filter_length, activation='relu',
                data_format="channels_last",border_mode="valid")(conv2)
                    
            
        #conv1 = GlobalMaxPooling2D()(ques)
        #conv2 = GlobalMaxPooling2D()(ans)
        #conv1 = MaxPooling2D()(ques)
        #conv2 = MaxPooling2D()(ans)
        
        channel_1.append(GlobalMaxPooling2D()(ques))
        channel_2.append(GlobalMaxPooling2D()(ans))
        
        #channel_1.append(GlobalAveragePooling2D()(ques))
        #channel_2.append(GlobalAveragePooling2D()(ans))
    
    h1 = channel_1.pop(-1)
    if channel_1:
        h1 = merge([h1] + channel_1, mode="concat")

    h2 = channel_2.pop(-1)
    if channel_2:
        h2 = merge([h2] + channel_2, mode="concat")
    
    h =  Merge(mode="concat",name='h')([h1, h2])
    #h = Dropout(0.2)(h)
    #h = Dense(50, kernel_regularizer=regularizers.l2(reg2),activation='relu')(h)
    #wrap = Dropout(0.5)(h)
    #wrap = Dense(64, activation='tanh')(h)   
    
    score = Dense(2,activation='softmax',name='score')(h)
    model = Model([inpx, inpy],[score])
    model.compile( loss='categorical_crossentropy',optimizer='adam')
    
    return model