Python keras.layers.Conv2DTranspose() Examples

The following are 27 code examples of keras.layers.Conv2DTranspose(), collected from open-source projects. The source file, project, and license for each example are noted above it. You may also want to check out the other functions and classes available in the keras.layers module.
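Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below; layer sizes are arbitrary illustration values) of what Conv2DTranspose does: with strides=(2, 2) and padding='same' it doubles the spatial dimensions of its input.

from keras.layers import Input, Conv2DTranspose
from keras.models import Model

inp = Input(shape=(16, 16, 8))                  # arbitrary feature map
up = Conv2DTranspose(4, kernel_size=(3, 3),
                     strides=(2, 2), padding='same')(inp)
model = Model(inputs=inp, outputs=up)
print(model.output_shape)                       # (None, 32, 32, 4)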
Example #1
Source File: Network.py    From MBLLEN with Apache License 2.0
def build_mbllen(input_shape):

    def EM(input, kernel_size, channel):
        conv_1 = Conv2D(channel, (3, 3), activation='relu', padding='same', data_format='channels_last')(input)
        conv_2 = Conv2D(channel, (kernel_size, kernel_size), activation='relu', padding='valid', data_format='channels_last')(conv_1)
        conv_3 = Conv2D(channel*2, (kernel_size, kernel_size), activation='relu', padding='valid', data_format='channels_last')(conv_2)
        conv_4 = Conv2D(channel*4, (kernel_size, kernel_size), activation='relu', padding='valid', data_format='channels_last')(conv_3)
        conv_5 = Conv2DTranspose(channel*2, (kernel_size, kernel_size), activation='relu', padding='valid', data_format='channels_last')(conv_4)
        conv_6 = Conv2DTranspose(channel, (kernel_size, kernel_size), activation='relu', padding='valid', data_format='channels_last')(conv_5)
        res = Conv2DTranspose(3, (kernel_size, kernel_size), activation='relu', padding='valid', data_format='channels_last')(conv_6)
        return res

    inputs = Input(shape=input_shape)
    FEM = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_last')(inputs)
    EM_com = EM(FEM, 5, 8)

    for j in range(3):
        for i in range(0, 3):
            FEM = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_last')(FEM)
            EM1 = EM(FEM, 5, 8)
            EM_com = Concatenate(axis=3)([EM_com, EM1])

    outputs = Conv2D(3, (1, 1), activation='relu', padding='same', data_format='channels_last')(EM_com)
    return Model(inputs, outputs) 
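The EM block above relies on padding='valid' shape arithmetic: each k x k Conv2D removes k-1 pixels per spatial dimension, and each k x k Conv2DTranspose (stride 1) adds them back, so the block's output has the same height and width as its input. A standalone check of that arithmetic (written for this page, not part of the MBLLEN code):

from keras.layers import Input, Conv2D, Conv2DTranspose
from keras.models import Model

inp = Input(shape=(64, 64, 8))                              # arbitrary size
down = Conv2D(16, (5, 5), padding='valid')(inp)             # -> (None, 60, 60, 16)
up = Conv2DTranspose(8, (5, 5), padding='valid')(down)      # -> (None, 64, 64, 8)
print(Model(inp, up).output_shape)                          # (None, 64, 64, 8)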
Example #2
Source File: cnn_architecture.py    From Pix2Depth with GNU General Public License v3.0
def model_3():

    input_layer = Input(shape=(224,224,3))
    from keras.layers import Conv2DTranspose as DeConv
    resnet = ResNet50(include_top=False, weights="imagenet")
    resnet.trainable = False

    res_features = resnet(input_layer)

    conv = DeConv(1024, padding="valid", activation="relu", kernel_size=3)(res_features)
    conv = UpSampling2D((2,2))(conv)
    conv = DeConv(512, padding="valid", activation="relu", kernel_size=5)(conv)
    conv = UpSampling2D((2,2))(conv)
    conv = DeConv(128, padding="valid", activation="relu", kernel_size=5)(conv)
    conv = UpSampling2D((2,2))(conv)
    conv = DeConv(32, padding="valid", activation="relu", kernel_size=5)(conv)
    conv = UpSampling2D((2,2))(conv)
    conv = DeConv(8, padding="valid", activation="relu", kernel_size=5)(conv)
    conv = UpSampling2D((2,2))(conv)
    conv = DeConv(4, padding="valid", activation="relu", kernel_size=5)(conv)
    conv = DeConv(1, padding="valid", activation="sigmoid", kernel_size=5)(conv)

    model = Model(inputs=input_layer, outputs=conv)
    return model 
Example #3
Source File: model.py    From n2n-watermark-remove with MIT License
def get_unet_model(input_channel_num=3, out_ch=3, start_ch=64, depth=4, inc_rate=2., activation='relu',
         dropout=0.5, batchnorm=False, maxpool=True, upconv=True, residual=False):
    def _conv_block(m, dim, acti, bn, res, do=0):
        n = Conv2D(dim, 3, activation=acti, padding='same')(m)
        n = BatchNormalization()(n) if bn else n
        n = Dropout(do)(n) if do else n
        n = Conv2D(dim, 3, activation=acti, padding='same')(n)
        n = BatchNormalization()(n) if bn else n

        return Concatenate()([m, n]) if res else n

    def _level_block(m, dim, depth, inc, acti, do, bn, mp, up, res):
        if depth > 0:
            n = _conv_block(m, dim, acti, bn, res)
            m = MaxPooling2D()(n) if mp else Conv2D(dim, 3, strides=2, padding='same')(n)
            m = _level_block(m, int(inc * dim), depth - 1, inc, acti, do, bn, mp, up, res)
            if up:
                m = UpSampling2D()(m)
                m = Conv2D(dim, 2, activation=acti, padding='same')(m)
            else:
                m = Conv2DTranspose(dim, 3, strides=2, activation=acti, padding='same')(m)
            n = Concatenate()([n, m])
            m = _conv_block(n, dim, acti, bn, res)
        else:
            m = _conv_block(m, dim, acti, bn, res, do)

        return m

    i = Input(shape=(None, None, input_channel_num))
    o = _level_block(i, start_ch, depth, inc_rate, activation, dropout, batchnorm, maxpool, upconv, residual)
    o = Conv2D(out_ch, 1)(o)
    model = Model(inputs=i, outputs=o)

    return model 
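A usage sketch for the builder above (not from the repository; it assumes the module's own Keras imports, Input/Conv2D/MaxPooling2D/UpSampling2D/Conv2DTranspose/Concatenate/BatchNormalization/Dropout/Model, are in scope). Passing upconv=False selects the Conv2DTranspose upsampling path; with depth=4 the spatial size is halved four times, so input height and width should be multiples of 16.

# upconv=False -> decoder upsampling via Conv2DTranspose instead of UpSampling2D + Conv2D
model = get_unet_model(input_channel_num=3, out_ch=3, depth=4, upconv=False)
model.compile(optimizer='adam', loss='mse')
print(model.output_shape)   # (None, None, None, 3) -- fully convolutional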
Example #4
Source File: blocks.py    From SpaceNet_Off_Nadir_Solutions with Apache License 2.0
def Transpose2D_block(filters, stage, kernel_size=(3,3), upsample_rate=(2,2),
                      transpose_kernel_size=(4,4), use_batchnorm=False, skip=None):

    def layer(input_tensor):

        conv_name, bn_name, relu_name, up_name = handle_block_names(stage)

        x = Conv2DTranspose(filters, transpose_kernel_size, strides=upsample_rate,
                            padding='same', name=up_name, use_bias=not(use_batchnorm))(input_tensor)
        if use_batchnorm:
            x = BatchNormalization(name=bn_name+'1')(x)
        x = Activation('relu', name=relu_name+'1')(x)

        if skip is not None:
            x = Concatenate()([x, skip])

        x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '2', bn_name=bn_name + '2', relu_name=relu_name + '2')(x)

        return x
    return layer 
Example #5
Source File: blocks.py    From SpaceNet_Off_Nadir_Solutions with Apache License 2.0
def Conv2DTranspose(filters,
                    upsample_rate,
                    kernel_size=(4,4),
                    up_name='up',
                    **kwargs):

    #if not tuple(upsample_rate) == (2,2):
    #    raise NotImplementedError(
    #        f'Conv2DTranspose support only upsample_rate=(2, 2), got {upsample_rate}')

    def layer(input_tensor):
        x = Transpose(filters,
                      kernel_size=kernel_size,
                      strides=upsample_rate,
                      padding='same',
                      name=up_name)(input_tensor)
        return x
    return layer 
Example #6
Source File: UNIT.py    From GAN-MRI with GNU General Public License v3.0
def modelGenerator(self, name):
        inputImg = Input(shape=self.latent_dim)
        # Layer 1: 1 res block
        x = self.resblk(inputImg, 256)
        # Layer 2: 2 res block
        x = self.resblk(x, 256)
        # Layer 3: 3 res block
        x = self.resblk(x, 256)
        # Layer 4:
        x = Conv2DTranspose(128, kernel_size=3, strides=2, padding='same')(x)
        x = LeakyReLU(alpha=0.01)(x)
        # Layer 5:
        x = Conv2DTranspose(64, kernel_size=3, strides=2, padding='same')(x)
        x = LeakyReLU(alpha=0.01)(x)
        # Layer 6
        x = Conv2DTranspose(self.channels, kernel_size=1, strides=1, padding='valid')(x)
        z = Activation("tanh")(x)

        return Model(inputs=inputImg, outputs=z, name=name) 
Example #7
Source File: blocks.py    From dfc2019 with MIT License
def Transpose2D_block(filters, stage, kernel_size=(3,3), upsample_rate=(2,2),
                      transpose_kernel_size=(4,4), use_batchnorm=False, skip=None):

    def layer(input_tensor):

        conv_name, bn_name, relu_name, up_name = handle_block_names(stage)

        x = Conv2DTranspose(filters, transpose_kernel_size, strides=upsample_rate,
                            padding='same', name=up_name, use_bias=not(use_batchnorm))(input_tensor)
        if use_batchnorm:
            x = BatchNormalization(name=bn_name+'1')(x)
        x = Activation('relu', name=relu_name+'1')(x)

        if skip is not None:
            x = Concatenate()([x, skip])

        x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '2', bn_name=bn_name + '2', relu_name=relu_name + '2')(x)

        return x
    return layer 
Example #8
Source File: model.py    From sfcn-opi with MIT License
def classification_branch_wrapper(self, input, softmax_trainable=False):
        x = self.res_block(input, filter=128, stages=9, block=4)
        # all layers before OPI
        x = Conv2D(filters=5, kernel_size=(1, 1), padding='same', name='conv2d_after_fourth_resblock',
                   kernel_regularizer=keras.regularizers.l2(self.l2r))(x)
        x = BatchNormalization(name='bn_after_fourth_resblock')(x)
        x = Activation('relu',name='relu_after_fourth_resblock')(x)
        x = Conv2DTranspose(filters=5, kernel_size=(3, 3),
                            strides=(2, 2), padding='same',
                            kernel_regularizer=keras.regularizers.l2(self.l2r),
                            name='secondlast_deconv_before_cls')(x)
        x = BatchNormalization(name='secondlast_bn_before_cls')(x)
        x = Activation('relu', name='last_relu_before_cls')(x)
        x = Conv2DTranspose(filters=5, kernel_size=(3, 3),
                            strides=(2, 2), padding='same',
                            kernel_regularizer=keras.regularizers.l2(self.l2r),
                            name='last_deconv_before_cls')(x)
        x_output = BatchNormalization(name='last_bn_before_cls')(x)
        if softmax_trainable == True:
            x_output = Activation('softmax', name='Classification_output')(x_output)
        return x_output 
Example #9
Source File: blocks.py    From dfc2019 with MIT License
def Conv2DTranspose(filters,
                    upsample_rate,
                    kernel_size=(4,4),
                    up_name='up',
                    **kwargs):

    if not tuple(upsample_rate) == (2,2):
        raise NotImplementedError(
            f'Conv2DTranspose support only upsample_rate=(2, 2), got {upsample_rate}')

    def layer(input_tensor):
        x = Transpose(filters,
                      kernel_size=kernel_size,
                      strides=upsample_rate,
                      padding='same',
                      name=up_name)(input_tensor)
        return x
    return layer 
Example #10
Source File: models.py    From srcnn with MIT License
def fsrcnn(x, d=56, s=12, m=4, scale=3):
    """Build an FSRCNN model.

    See https://arxiv.org/abs/1608.00367
    """
    model = Sequential()
    model.add(InputLayer(input_shape=x.shape[-3:]))
    c = x.shape[-1]
    f = [5, 1] + [3] * m + [1]
    n = [d, s] + [s] * m + [d]
    for ni, fi in zip(n, f):
        model.add(Conv2D(ni, fi, padding='same',
                         kernel_initializer='he_normal', activation='relu'))
    model.add(Conv2DTranspose(c, 9, strides=scale, padding='same',
                              kernel_initializer='he_normal'))
    return model 
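A usage sketch for the FSRCNN builder above (not from the repository; it assumes the module's imports, numpy as np plus Sequential/InputLayer/Conv2D/Conv2DTranspose, are in scope). The trailing Conv2DTranspose with strides=scale performs the low-to-high-resolution upsampling.

x = np.zeros((16, 32, 32, 1), dtype='float32')   # batch of low-resolution patches
model = fsrcnn(x, d=56, s=12, m=4, scale=3)
model.compile(optimizer='adam', loss='mse')
print(model.output_shape)                        # (None, 96, 96, 1)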
Example #11
Source File: models.py    From srcnn with MIT License
def nsfsrcnn(x, d=56, s=12, m=4, scale=3, pos=1):
    """Build an FSRCNN model, but change deconv position.

    See https://arxiv.org/abs/1608.00367
    """
    model = Sequential()
    model.add(InputLayer(input_shape=x.shape[-3:]))
    c = x.shape[-1]
    f1 = [5, 1] + [3] * pos
    n1 = [d, s] + [s] * pos
    f2 = [3] * (m - pos - 1) + [1]
    n2 = [s] * (m - pos - 1) + [d]
    f3 = 9
    n3 = c
    for ni, fi in zip(n1, f1):
        model.add(Conv2D(ni, fi, padding='same',
                         kernel_initializer='he_normal', activation='relu'))
    model.add(Conv2DTranspose(s, 3, strides=scale, padding='same',
                              kernel_initializer='he_normal'))
    for ni, fi in zip(n2, f2):
        model.add(Conv2D(ni, fi, padding='same',
                         kernel_initializer='he_normal', activation='relu'))
    model.add(Conv2D(n3, f3, padding='same',
                         kernel_initializer='he_normal'))
    return model 
Example #12
Source File: test_topology.py    From DeepLearning_Wavelet-LSTM with MIT License
def convert_weights(layer, weights):
    if layer.__class__.__name__ == 'GRU':
        W = [np.split(w, 3, axis=-1) for w in weights]
        return sum(map(list, zip(*W)), [])
    elif layer.__class__.__name__ in ('LSTM', 'ConvLSTM2D'):
        W = [np.split(w, 4, axis=-1) for w in weights]
        for w in W:
            w[2], w[1] = w[1], w[2]
        return sum(map(list, zip(*W)), [])
    elif layer.__class__.__name__ == 'Conv2DTranspose':
        return [np.transpose(weights[0], (2, 3, 0, 1)), weights[1]]
    return weights 
Example #13
Source File: models.py    From fast-neural-style-keras with MIT License
def get_evaluate_model(width, height):
    input_o = layers.Input(shape=(height, width, 3), dtype='float32', name='input_o')

    c1 = layers.Conv2D(32, (9, 9), strides=1, padding='same', name='conv_1')(input_o)
    c1 = layers.BatchNormalization(name='normal_1')(c1)
    c1 = layers.Activation('relu', name='relu_1')(c1)

    c2 = layers.Conv2D(64, (3, 3), strides=2, padding='same', name='conv_2')(c1)
    c2 = layers.BatchNormalization(name='normal_2')(c2)
    c2 = layers.Activation('relu', name='relu_2')(c2)

    c3 = layers.Conv2D(128, (3, 3), strides=2, padding='same', name='conv_3')(c2)
    c3 = layers.BatchNormalization(name='normal_3')(c3)
    c3 = layers.Activation('relu', name='relu_3')(c3)

    r1 = residual_block(c3, 1)
    r2 = residual_block(r1, 2)
    r3 = residual_block(r2, 3)
    r4 = residual_block(r3, 4)
    r5 = residual_block(r4, 5)

    d1 = layers.Conv2DTranspose(64, (3, 3), strides=2, padding='same', name='conv_4')(r5)
    d1 = layers.BatchNormalization(name='normal_4')(d1)
    d1 = layers.Activation('relu', name='relu_4')(d1)

    d2 = layers.Conv2DTranspose(32, (3, 3), strides=2, padding='same', name='conv_5')(d1)
    d2 = layers.BatchNormalization(name='normal_5')(d2)
    d2 = layers.Activation('relu', name='relu_5')(d2)

    c4 = layers.Conv2D(3, (9, 9), strides=1, padding='same', name='conv_6')(d2)
    c4 = layers.BatchNormalization(name='normal_6')(c4)
    c4 = layers.Activation('tanh', name='tanh_1')(c4)
    c4 = OutputScale(name='output')(c4)

    model = Model([input_o], c4)
    print("evaluate model built successfully!")
    return model 
Example #14
Source File: gan.py    From GAN-Sandbox with MIT License
def generator_network(x):
    def add_common_layers(y):
        y = layers.advanced_activations.LeakyReLU()(y)
        y = layers.Dropout(0.25)(y)
        return y

    x = layers.Dense(1024)(x)
    x = add_common_layers(x)

    #
    # input dimensions to the first de-conv layer in the generator
    #

    height_dim = 7
    width_dim = 7
    assert img_height % height_dim == 0 and img_width % width_dim == 0, \
        'Generator network must be able to transform `x` into a tensor of shape (img_height, img_width, img_channels).'

    x = layers.Dense(height_dim * width_dim * 128)(x)
    x = add_common_layers(x)

    x = layers.Reshape((height_dim, width_dim, -1))(x)

    x = layers.Conv2DTranspose(64, kernel_size, **conv_layer_keyword_args)(x)
    x = add_common_layers(x)

    # number of feature maps => number of image channels
    return layers.Conv2DTranspose(img_channels, 1, strides=2, padding='same', activation='tanh')(x) 
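A usage sketch for the generator above, assuming it runs in the same module as generator_network (where `layers` is keras.layers). The values for img_height, img_width, img_channels, kernel_size and conv_layer_keyword_args are illustrative only; the real GAN-Sandbox module defines its own at file scope.

from keras.models import Model

# Illustrative module-level settings (hypothetical values, not the project's):
img_height, img_width, img_channels = 28, 28, 1
kernel_size = 5
conv_layer_keyword_args = {'strides': 2, 'padding': 'same'}

z = layers.Input(shape=(100,))              # latent vector
generator = Model(z, generator_network(z))
print(generator.output_shape)               # (None, 28, 28, 1)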
Example #15
Source File: test_tf_keras_layers.py    From tf-coreml with Apache License 2.0
def test_tiny_deconv_random(self):
      np.random.seed(1988)
      input_dim = 13
      input_shape = (input_dim, input_dim, 5)
      num_kernels = 16
      kernel_height = 3
      kernel_width = 3
      # Define a model
      model = Sequential()
      model.add(Conv2DTranspose(filters=num_kernels, kernel_size=(kernel_height, kernel_width),
                                input_shape=input_shape, padding='valid', strides=(2, 2)))
      # Set some random weights
      model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
      # Test the keras model
      self._test_keras_model(model) 
Example #16
Source File: test_tf_keras_layers.py    From tf-coreml with Apache License 2.0
def test_tiny_deconv_random_same_padding(self):
      np.random.seed(1988)
      input_dim = 14
      input_shape = (input_dim, input_dim, 3)
      num_kernels = 16
      kernel_height = 3
      kernel_width = 3
      # Define a model
      model = Sequential()
      model.add(Conv2DTranspose(filters=num_kernels, kernel_size=(kernel_height, kernel_width),
                                input_shape=input_shape, padding='same', strides=(2, 2)))
      # Set some random weights
      model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
      # Test the keras model
      self._test_keras_model(model) 
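For reference (a small helper written for this page, not part of Keras or the test file), the output size produced by a strided Conv2DTranspose follows a simple rule, which is why the first test maps a 13x13 input to 27x27 while the second maps 14x14 to 28x28:

def deconv_output_size(in_size, kernel, stride, padding):
    # Size rule Keras applies for Conv2DTranspose with the default output_padding.
    if padding == 'valid':
        return in_size * stride + max(kernel - stride, 0)
    return in_size * stride      # 'same'

print(deconv_output_size(13, 3, 2, 'valid'))   # 27
print(deconv_output_size(14, 3, 2, 'same'))    # 28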
Example #17
Source File: model.py    From noise2noise with MIT License
def get_unet_model(input_channel_num=3, out_ch=3, start_ch=64, depth=4, inc_rate=2., activation='relu',
         dropout=0.5, batchnorm=False, maxpool=True, upconv=True, residual=False):
    def _conv_block(m, dim, acti, bn, res, do=0):
        n = Conv2D(dim, 3, activation=acti, padding='same')(m)
        n = BatchNormalization()(n) if bn else n
        n = Dropout(do)(n) if do else n
        n = Conv2D(dim, 3, activation=acti, padding='same')(n)
        n = BatchNormalization()(n) if bn else n

        return Concatenate()([m, n]) if res else n

    def _level_block(m, dim, depth, inc, acti, do, bn, mp, up, res):
        if depth > 0:
            n = _conv_block(m, dim, acti, bn, res)
            m = MaxPooling2D()(n) if mp else Conv2D(dim, 3, strides=2, padding='same')(n)
            m = _level_block(m, int(inc * dim), depth - 1, inc, acti, do, bn, mp, up, res)
            if up:
                m = UpSampling2D()(m)
                m = Conv2D(dim, 2, activation=acti, padding='same')(m)
            else:
                m = Conv2DTranspose(dim, 3, strides=2, activation=acti, padding='same')(m)
            n = Concatenate()([n, m])
            m = _conv_block(n, dim, acti, bn, res)
        else:
            m = _conv_block(m, dim, acti, bn, res, do)

        return m

    i = Input(shape=(None, None, input_channel_num))
    o = _level_block(i, start_ch, depth, inc_rate, activation, dropout, batchnorm, maxpool, upconv, residual)
    o = Conv2D(out_ch, 1)(o)
    model = Model(inputs=i, outputs=o)

    return model 
Example #18
Source File: model.py    From sfcn-opi with MIT License
def detection_branch_wrapper(self, input_one, input_two, trainable=True, softmax_trainable=False):
        x_divergent_one = Conv2D(filters=2, kernel_size=(1, 1), padding='same',
                                 name='conv2D_diverge_one',
                                 trainable=trainable)(input_one)
        x_divergent_one = BatchNormalization(name='bn_diverge_one',
                                             trainable=trainable)(x_divergent_one)
        x_divergent_one = Activation('relu', trainable=trainable)(x_divergent_one)

        x_divergent_two = Conv2D(filters=2, kernel_size=(1, 1), padding='same',
                                 kernel_regularizer=keras.regularizers.l2(self.l2r),
                                 name='conv_diverge_two',
                                 trainable=trainable)(input_two)
        x_divergent_two = BatchNormalization(name='bn_diverge_two',
                                             trainable=trainable)(x_divergent_two)
        x_divergent_two = Activation('relu',
                                     trainable=trainable)(x_divergent_two)

        x_divergent_two = Conv2DTranspose(filters=2, kernel_size=(3, 3), strides=(2, 2), padding='same',
                                          kernel_regularizer=keras.regularizers.l2(self.l2r),
                                          name='deconv_before_summation',
                                          trainable=trainable)(x_divergent_two)
        x_divergent_two = BatchNormalization(name='bn_deconv_diverge_two',
                                             trainable=trainable)(x_divergent_two)
        x_divergent_two = Activation('relu', name='last_detection_act',
                                     trainable=trainable)(x_divergent_two)

        x_merge = Add(name='merge_two_divergence',
                      trainable=trainable)([x_divergent_one, x_divergent_two])
        x_detection = Conv2DTranspose(filters=2, kernel_size=(3, 3), strides=(2, 2), padding='same',
                                      kernel_regularizer=keras.regularizers.l2(self.l2r),
                                      name='Deconv_detection_final_layer',
                                      trainable=trainable)(x_merge)
        x_detection = BatchNormalization(name='last_detection_bn',
                                         trainable=trainable)(x_detection)
        # The detection output
        if softmax_trainable == True:
            x_detection = Activation('softmax', name='Detection_output',
                                     trainable=trainable)(x_detection)
        return x_detection 
Example #19
Source File: layers.py    From deephar with MIT License
def conv2dtranspose(x, filters, kernel_size, strides=(1, 1), padding='same',
        name=None):
    """Conv2DTranspose possibly wrapped by a TimeDistributed layer."""
    f = Conv2DTranspose(filters, kernel_size, strides=strides, padding=padding,
            use_bias=False, name=name)

    return TimeDistributed(f, name=name)(x) if K.ndim(x) == 5 else f(x) 
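A usage sketch for the wrapper above (not from the deephar code; it assumes the sketch runs in the same module as conv2dtranspose, where K is the Keras backend). The same call handles a single feature map (4D tensor) and a sequence of feature maps (5D tensor, routed through TimeDistributed):

from keras.layers import Input

single = Input(shape=(32, 32, 17))             # one feature map (arbitrary sizes)
frames = Input(shape=(8, 32, 32, 17))          # a sequence of 8 feature maps

up_img = conv2dtranspose(single, 17, (4, 4), strides=(2, 2))
up_seq = conv2dtranspose(frames, 17, (4, 4), strides=(2, 2))
print(K.int_shape(up_img))   # (None, 64, 64, 17)
print(K.int_shape(up_seq))   # (None, 8, 64, 64, 17)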
Example #20
Source File: layers.py    From deephar with MIT License
def deconv(x, filters, size, strides=(1, 1), padding='same', name=None):
    x = Conv2DTranspose(filters, size, strides=strides, padding=padding,
            data_format=K.image_data_format(), use_bias=False, name=name)(x)
    return x 
Example #21
Source File: CycleGAN.py    From GAN-MRI with GNU General Public License v3.0
def uk(self, x, k):
        # (up sampling followed by 1x1 convolution <=> fractional-strided 1/2)
        if self.use_resize_convolution:
            x = UpSampling2D(size=(2, 2))(x)  # Nearest neighbor upsampling
            x = ReflectionPadding2D((1, 1))(x)
            x = Conv2D(filters=k, kernel_size=3, strides=1, padding='valid')(x)
        else:
            x = Conv2DTranspose(filters=k, kernel_size=3, strides=2, padding='same')(x)  # this matches a fractionally-strided convolution with stride 1/2
        x = self.normalization(axis=3, center=True, epsilon=1e-5)(x, training=True)
        x = Activation('relu')(x)
        return x

#===============================================================================
# Models 
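Both branches of uk() above double the spatial resolution. A standalone sketch of that equivalence (not from the CycleGAN code; ReflectionPadding2D is a custom layer there, so the resize branch is approximated with ZeroPadding2D here):

from keras.layers import Input, UpSampling2D, ZeroPadding2D, Conv2D, Conv2DTranspose
from keras.models import Model

inp = Input(shape=(32, 32, 64))

# Option A: nearest-neighbour resize, then a stride-1 convolution.
a = UpSampling2D(size=(2, 2))(inp)
a = ZeroPadding2D((1, 1))(a)
a = Conv2D(32, kernel_size=3, strides=1, padding='valid')(a)

# Option B: a single fractionally-strided (transposed) convolution.
b = Conv2DTranspose(32, kernel_size=3, strides=2, padding='same')(inp)

print(Model(inp, a).output_shape)   # (None, 64, 64, 32)
print(Model(inp, b).output_shape)   # (None, 64, 64, 32)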
Example #22
Source File: DeepVOG_model.py    From DeepVOG with GNU General Public License v3.0
def decoding_block(X, filter_size, filters_num, layer_num, block_type, stage, s = 1, X_jump = 0, up_sampling = True):
    
    # defining name basis
    conv_name_base = 'conv_' + block_type + str(stage) + '_'
    bn_name_base = 'bn_' + block_type + str(stage)  + '_'
    

    # Joining X_jump from encoding side with X_uped
    if X_jump == 0:
        X_joined_input = X
    else:
    # X_joined_input = Add()([X,X_jump])
        X_joined_input = Concatenate(axis = 3)([X,X_jump])
    
    ##### MAIN PATH #####
    for i in np.arange(layer_num)+1:
        # First component of main path 
        X_joined_input = Conv2D(filters_num, filter_size , strides = (s,s), padding = 'same',
                                name = conv_name_base + 'main_' + str(i), kernel_initializer = glorot_uniform())(X_joined_input)
        X_joined_input = BatchNormalization(axis = 3, name = bn_name_base + 'main_' + str(i))(X_joined_input)
        if i != layer_num:
            X_joined_input = Activation('relu')(X_joined_input)

    X_joined_input = Activation('relu')(X_joined_input)
    
    # Up-sampling layer. At the output layer, up-sampling is disabled and its role is handled manually elsewhere
    if up_sampling == True:
        X_uped = Conv2DTranspose(filters_num, (2, 2), strides = (2,2), padding = 'valid',
                                 name = conv_name_base + 'up', kernel_initializer = glorot_uniform())(X_joined_input)
        X_uped = BatchNormalization(axis = 3, name = bn_name_base + 'up')(X_uped)
        X_uped = Activation('relu')(X_uped)
        return X_uped
    else:
        return X_joined_input
    
# FullVnet
# Output layers have 3 channels. The first two channels represent two one-hot vectors (pupil and non-pupil)
# The third layer contains all zeros in all cases (trivial) 
Example #23
Source File: encoders_decoders.py    From AnomalyDetectionTransformations with MIT License
def conv_decoder(output_side=32, n_channels=3, representation_dim=256, activation='relu'):
    nf = 64

    rep_in = Input(shape=(representation_dim,))

    g = Dense(nf * 4 * 4 * 4)(rep_in)
    g = BatchNormalization(axis=-1)(g)
    g = Activation(activation)(g)

    conv_shape = (nf * 4, 4, 4) if get_channels_axis() == 1 else (4, 4, nf * 4)
    g = Reshape(conv_shape)(g)

    # upsample x2
    g = Conv2DTranspose(nf * 2, kernel_size=(3, 3), strides=(2, 2), padding='same')(g)
    g = BatchNormalization(axis=get_channels_axis())(g)
    g = Activation(activation)(g)

    # upsample x2
    g = Conv2DTranspose(nf, kernel_size=(3, 3), strides=(2, 2), padding='same')(g)
    g = BatchNormalization(axis=get_channels_axis())(g)
    g = Activation(activation)(g)

    if output_side == 64:
        # upsample x2
        g = Conv2DTranspose(nf, kernel_size=(3, 3), strides=(2, 2), padding='same')(g)
        g = BatchNormalization(axis=get_channels_axis())(g)
        g = Activation(activation)(g)

    # upsample x2
    g = Conv2DTranspose(n_channels, kernel_size=(3, 3), strides=(2, 2), padding='same')(g)
    g = Activation('tanh')(g)

    return Model(rep_in, g) 
Example #24
Source File: enhancer.py    From ImageEnhancer with MIT License
def deconv(self, layer, filters, expand=False):
        """ simplify the de-convolutional layer with kernal size as (3, 3), and padding as same;
            there is no pooling layer and is replaced by convolution layer with stride (2, 2);
            each layer follows by a batch normalization layer and an activation layer
            :param layer: the input layer
            :param filters: number of filters
            :param expand: whether expand the size of image or not, default False
            :return: a new layer after de-convoluation
        """
        layer = BatchNormalization()(layer)
        layer = self.activate(layer)
        layer = Conv2DTranspose(filters, (3, 3), padding='same', strides=((2, 2) if expand else (1, 1)))(layer)
        return layer 
Example #25
Source File: test_bench.py    From Keras-inference-time-optimizer with MIT License
def get_Conv2DTranspose_model():
    from keras.layers import Input, Conv2D, Conv2DTranspose, BatchNormalization, Activation
    from keras.models import Model
    inp = Input((28, 28, 4))
    x = Conv2DTranspose(8, (3, 3), padding='same', kernel_initializer='random_uniform')(inp)
    x = BatchNormalization()(x)
    x = Conv2DTranspose(4, (3, 3), strides=(4, 4), padding='same', kernel_initializer='random_uniform')(x)
    x = BatchNormalization()(x)
    out = Activation('relu')(x)
    model = Model(inputs=inp, outputs=out)
    return model 
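The test model above is self-contained, so its output shape can be checked directly (a note added for this page, not part of the original test): the second Conv2DTranspose uses strides=(4, 4) with padding='same', so the 28x28 input grows to 112x112.

model = get_Conv2DTranspose_model()
print(model.output_shape)   # (None, 112, 112, 4)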
Example #26
Source File: layers_export.py    From Fabrik with GNU General Public License v3.0
def deconvolution(layer, layer_in, layerId, tensor=True):
    out = {}
    padding = get_padding(layer)
    k_h, k_w = layer['params']['kernel_h'], layer['params']['kernel_w']
    s_h, s_w = layer['params']['stride_h'], layer['params']['stride_w']
    d_h, d_w = layer['params']['dilation_h'], layer['params']['dilation_w']
    if (layer['params']['weight_filler'] in fillerMap):
        kernel_initializer = fillerMap[layer['params']['weight_filler']]
    else:
        kernel_initializer = layer['params']['weight_filler']
    if (layer['params']['bias_filler'] in fillerMap):
        bias_initializer = fillerMap[layer['params']['bias_filler']]
    else:
        bias_initializer = layer['params']['bias_filler']
    filters = layer['params']['num_output']
    if (padding == 'custom'):
        p_h, p_w = layer['params']['pad_h'], layer['params']['pad_w']
        out[layerId + 'Pad'] = ZeroPadding2D(padding=(p_h, p_w))(*layer_in)
        padding = 'valid'
        layer_in = [out[layerId + 'Pad']]
    kernel_regularizer = regularizerMap[layer['params']['kernel_regularizer']]
    bias_regularizer = regularizerMap[layer['params']['bias_regularizer']]
    activity_regularizer = regularizerMap[layer['params']
                                          ['activity_regularizer']]
    kernel_constraint = constraintMap[layer['params']['kernel_constraint']]
    bias_constraint = constraintMap[layer['params']['bias_constraint']]
    use_bias = layer['params']['use_bias']
    out[layerId] = Conv2DTranspose(filters, [k_h, k_w], strides=(s_h, s_w), padding=padding,
                                   dilation_rate=(
                                       d_h, d_w), kernel_initializer=kernel_initializer,
                                   bias_initializer=bias_initializer,
                                   kernel_regularizer=kernel_regularizer,
                                   bias_regularizer=bias_regularizer,
                                   activity_regularizer=activity_regularizer, use_bias=use_bias,
                                   bias_constraint=bias_constraint,
                                   kernel_constraint=kernel_constraint)
    if tensor:
        out[layerId] = out[layerId](*layer_in)
    return out 
Example #27
Source File: utilModelREDNet.py    From document-image-binarization with GNU General Public License v3.0
def build_REDNet(nb_layers, input_size, nb_filters=32, k_size=3, dropout=0, strides=1, every=1):
    # -> CONV/FC -> BatchNorm -> ReLu(or other activation) -> Dropout -> CONV/FC ->  # https://arxiv.org/pdf/1502.03167.pdf
    input_img = Input(shape=(input_size, input_size, 1))
    x = input_img

    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    encoderLayers = [None] * nb_layers

    for i in range(nb_layers):
        x = Conv2D(nb_filters, kernel_size=k_size, strides=strides, padding='same')(x)
        x = BatchNormalization(axis=bn_axis)(x)
        x = Activation('relu')(x)
        if dropout > 0:
            x = Dropout(dropout)(x)
        encoderLayers[i] = x

    encoded = x

    for i in range(nb_layers):
        ind = nb_layers - i - 1
        x = layers.add([x, encoderLayers[ind]])

        x = Conv2DTranspose(nb_filters, kernel_size=k_size, strides=strides, padding='same')(x)
        x = BatchNormalization(axis=bn_axis)(x)
        x = Activation('relu')(x)
        if dropout > 0:
            x = Dropout(dropout)(x)

    decoded = Conv2D(1, kernel_size=k_size, strides=1, padding='same', activation='sigmoid')(x)

    autoencoder = Model(input_img, decoded)

    return autoencoder, encoded, decoded
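A usage sketch for the builder above (not from the repository; it assumes the module's own Keras imports, including layers, Input, Conv2D, Conv2DTranspose, BatchNormalization, Activation, Dropout, Model and the backend K, are in scope). With the default strides=1 and padding='same', every layer preserves the spatial size, so the output resolution matches the input:

autoencoder, encoded, decoded = build_REDNet(nb_layers=5, input_size=256,
                                             nb_filters=32, k_size=3, dropout=0.2)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
print(autoencoder.output_shape)   # (None, 256, 256, 1)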