Python keras.layers.convolutional.Conv2DTranspose() Examples
The following are 30 code examples of keras.layers.convolutional.Conv2DTranspose(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.layers.convolutional, or try the search function.
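
Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) showing the layer's basic shape behavior: with padding='same' and strides=(2, 2), a transposed convolution doubles the spatial dimensions.

from keras.models import Sequential
from keras.layers.convolutional import Conv2DTranspose

model = Sequential()
model.add(Conv2DTranspose(filters=4, kernel_size=(3, 3), strides=(2, 2),
                          padding='same', input_shape=(16, 16, 8)))
print(model.output_shape)  # (None, 32, 32, 4) -- spatial dims doubled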
Example #1
Source File: densenet.py From SSR-Net with Apache License 2.0 | 6 votes |
def __transition_up_block(ip, nb_filters, type='deconv', weight_decay=1E-4):
    '''SubpixelConvolutional Upscaling (factor = 2)
    Args:
        ip: keras tensor
        nb_filters: number of layers
        type: can be 'upsampling', 'subpixel', 'deconv'. Determines
            type of upsampling performed
        weight_decay: weight decay factor
    Returns: keras tensor, after applying upsampling operation.
    '''
    if type == 'upsampling':
        x = UpSampling2D()(ip)
    elif type == 'subpixel':
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same',
                   kernel_regularizer=l2(weight_decay), use_bias=False,
                   kernel_initializer='he_normal')(ip)
        x = SubPixelUpscaling(scale_factor=2)(x)
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same',
                   kernel_regularizer=l2(weight_decay), use_bias=False,
                   kernel_initializer='he_normal')(x)
    else:
        x = Conv2DTranspose(nb_filters, (3, 3), activation='relu', padding='same',
                            strides=(2, 2), kernel_initializer='he_normal',
                            kernel_regularizer=l2(weight_decay))(ip)
    return x
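
A hypothetical usage sketch of the block above (the Input/Model wiring is illustrative, not part of the project, and assumes the file's own imports): with type='deconv', the strided Conv2DTranspose doubles both spatial dimensions.

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(16, 16, 64))                          # hypothetical shape
out = __transition_up_block(inp, nb_filters=32, type='deconv')
Model(inp, out).summary()                                # (16, 16, 64) -> (32, 32, 32)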
Example #2
Source File: densenet_1.py From keras-onnx with MIT License | 6 votes |
def __transition_up_block(ip, nb_filters, type='deconv', weight_decay=1E-4):
    '''SubpixelConvolutional Upscaling (factor = 2)
    Args:
        ip: keras tensor
        nb_filters: number of layers
        type: can be 'upsampling', 'subpixel', 'deconv'. Determines
            type of upsampling performed
        weight_decay: weight decay factor
    Returns: keras tensor, after applying upsampling operation.
    '''
    if type == 'upsampling':
        x = UpSampling2D()(ip)
    elif type == 'subpixel':
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same',
                   kernel_regularizer=l2(weight_decay), use_bias=False,
                   kernel_initializer='he_normal')(ip)
        x = SubPixelUpscaling(scale_factor=2)(x)
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same',
                   kernel_regularizer=l2(weight_decay), use_bias=False,
                   kernel_initializer='he_normal')(x)
    else:
        x = Conv2DTranspose(nb_filters, (3, 3), activation='relu', padding='same',
                            strides=(2, 2), kernel_initializer='he_normal',
                            kernel_regularizer=l2(weight_decay))(ip)
    return x
Example #3
Source File: densenet.py From Model-Playgrounds with MIT License | 6 votes |
def __transition_up_block(ip, nb_filters, type='deconv', weight_decay=1E-4):
    '''SubpixelConvolutional Upscaling (factor = 2)
    Args:
        ip: keras tensor
        nb_filters: number of layers
        type: can be 'upsampling', 'subpixel', 'deconv'. Determines
            type of upsampling performed
        weight_decay: weight decay factor
    Returns: keras tensor, after applying upsampling operation.
    '''
    if type == 'upsampling':
        x = UpSampling2D()(ip)
    elif type == 'subpixel':
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same',
                   kernel_regularizer=l2(weight_decay), use_bias=False,
                   kernel_initializer='he_normal')(ip)
        x = SubPixelUpscaling(scale_factor=2)(x)
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same',
                   kernel_regularizer=l2(weight_decay), use_bias=False,
                   kernel_initializer='he_normal')(x)
    else:
        x = Conv2DTranspose(nb_filters, (3, 3), activation='relu', padding='same',
                            strides=(2, 2), kernel_initializer='he_normal',
                            kernel_regularizer=l2(weight_decay))(ip)
    return x
Example #4
Source File: densenet.py From semantic-embeddings with MIT License | 6 votes |
def __transition_up_block(ip, nb_filters, type='deconv', weight_decay=1E-4):
    '''SubpixelConvolutional Upscaling (factor = 2)
    Args:
        ip: keras tensor
        nb_filters: number of layers
        type: can be 'upsampling', 'subpixel', 'deconv'. Determines
            type of upsampling performed
        weight_decay: weight decay factor
    Returns: keras tensor, after applying upsampling operation.
    '''
    if type == 'upsampling':
        x = UpSampling2D()(ip)
    elif type == 'subpixel':
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same',
                   kernel_regularizer=l2(weight_decay), use_bias=False,
                   kernel_initializer='he_normal')(ip)
        x = SubPixelUpscaling(scale_factor=2)(x)
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same',
                   kernel_regularizer=l2(weight_decay), use_bias=False,
                   kernel_initializer='he_normal')(x)
    else:
        x = Conv2DTranspose(nb_filters, (3, 3), activation='relu', padding='same',
                            strides=(2, 2), kernel_initializer='he_normal',
                            kernel_regularizer=l2(weight_decay))(ip)
    return x
Example #5
Source File: model.py From Vocal-Melody-Extraction with MIT License | 6 votes |
def transpose_conv_block(input_tensor, channel, kernel_size,
                         strides=(2, 2), dropout_rate=0.4):
    skip = input_tensor

    input_tensor = BatchNormalization()(Activation("relu")(input_tensor))
    input_tensor = Dropout(dropout_rate)(input_tensor)
    input_tensor = Conv2D(channel, kernel_size, strides=(1, 1), padding="same")(input_tensor)

    input_tensor = BatchNormalization()(Activation("relu")(input_tensor))
    input_tensor = Dropout(dropout_rate)(input_tensor)
    input_tensor = Conv2DTranspose(channel, kernel_size, strides=strides, padding="same")(input_tensor)

    if strides != (1, 1):
        skip = Conv2DTranspose(channel, (1, 1), strides=strides, padding="same")(skip)
    input_tensor = add([input_tensor, skip])

    return input_tensor
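
A hypothetical usage sketch (the wiring below is illustrative, not from the project): because the main branch is upsampled by the strided Conv2DTranspose, the skip branch goes through a 1x1 strided Conv2DTranspose so both shapes match before add.

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(32, 32, 64))                          # hypothetical shape
out = transpose_conv_block(inp, channel=32, kernel_size=(3, 3))
Model(inp, out).summary()                                # (32, 32, 64) -> (64, 64, 32)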
Example #6
Source File: fcDensenet.py From PyTorch-Luna16 with Apache License 2.0 | 6 votes |
def __transition_up_block(ip, nb_filters, type='deconv', weight_decay=1E-4):
    '''SubpixelConvolutional Upscaling (factor = 2)
    Args:
        ip: keras tensor
        nb_filters: number of layers
        type: can be 'upsampling', 'subpixel', 'deconv'. Determines
            type of upsampling performed
        weight_decay: weight decay factor
    Returns: keras tensor, after applying upsampling operation.
    '''
    if type == 'upsampling':
        x = UpSampling2D()(ip)
    elif type == 'subpixel':
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same',
                   kernel_regularizer=l2(weight_decay), use_bias=False,
                   kernel_initializer='he_normal')(ip)
        x = SubPixelUpscaling(scale_factor=2)(x)
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same',
                   kernel_regularizer=l2(weight_decay), use_bias=False,
                   kernel_initializer='he_normal')(x)
    else:
        x = Conv2DTranspose(nb_filters, (3, 3), activation='relu', padding='same',
                            strides=(2, 2), kernel_initializer='he_normal',
                            kernel_regularizer=l2(weight_decay))(ip)
    return x
Example #7
Source File: resnet.py From Hands-On-Generative-Adversarial-Networks-with-Keras with MIT License | 6 votes |
def resnet_block_generator(input, n_blocks, n_filters, kernel_size=(3, 3), stride=2):
    output = input
    for i in range(n_blocks):
        output = BatchNormalization()(output)
        output = Activation('relu')(output)
        output = Conv2DTranspose(filters=n_filters, kernel_size=kernel_size,
                                 strides=stride, padding='same',
                                 kernel_initializer=weight_init)(output)
        output = BatchNormalization()(output)
        output = Activation('relu')(output)
        output = Conv2D(filters=n_filters, kernel_size=kernel_size, strides=1,
                        padding='same', kernel_initializer=weight_init)(output)

        if input.shape[1:] != output.shape[1:]:
            # Upsample input to match output dimension
            input = UpsampleConv(input, n_filters)
            print("resnet: adding layer to match residual input to output")

        # Residual Connection
        output = Add()([input, output])
    return output
Example #8
Source File: model.py From Music-Transcription-with-Semantic-Segmentation with GNU General Public License v3.0 | 6 votes |
def transpose_conv_block(input_tensor, channel, kernel_size,
                         strides=(2, 2), dropout_rate=0.4):
    skip = input_tensor

    input_tensor = BatchNormalization()(Activation("relu")(input_tensor))
    input_tensor = Dropout(dropout_rate)(input_tensor)
    input_tensor = Conv2D(channel, kernel_size, strides=(1, 1), padding="same")(input_tensor)

    input_tensor = BatchNormalization()(Activation("relu")(input_tensor))
    input_tensor = Dropout(dropout_rate)(input_tensor)
    input_tensor = Conv2DTranspose(channel, kernel_size, strides=strides, padding="same")(input_tensor)

    if strides != (1, 1):
        skip = Conv2DTranspose(channel, (1, 1), strides=strides, padding="same")(skip)
    input_tensor = add([input_tensor, skip])

    return input_tensor
Example #9
Source File: model.py From Music-Transcription-with-Semantic-Segmentation with GNU General Public License v3.0 | 6 votes |
def transpose_conv_block(input_tensor, channel, kernel_size,
                         strides=(2, 2), dropout_rate=0.4):
    skip = input_tensor

    input_tensor = BatchNormalization()(Activation("relu")(input_tensor))
    input_tensor = Dropout(dropout_rate)(input_tensor)
    input_tensor = Conv2D(channel, kernel_size, strides=(1, 1), padding="same")(input_tensor)

    input_tensor = BatchNormalization()(Activation("relu")(input_tensor))
    input_tensor = Dropout(dropout_rate)(input_tensor)
    input_tensor = Conv2DTranspose(channel, kernel_size, strides=strides, padding="same")(input_tensor)

    if strides != (1, 1):
        skip = Conv2DTranspose(channel, (1, 1), strides=strides, padding="same")(skip)
    input_tensor = add([input_tensor, skip])

    return input_tensor
Example #10
Source File: model_attn.py From Music-Transcription-with-Semantic-Segmentation with GNU General Public License v3.0 | 6 votes |
def transpose_conv_block(input_tensor, channel, kernel_size,
                         strides=(2, 2), dropout_rate=0.4):
    skip = input_tensor

    input_tensor = BatchNormalization()(Activation("relu")(input_tensor))
    input_tensor = Dropout(dropout_rate)(input_tensor)
    input_tensor = Conv2D(channel, kernel_size, strides=(1, 1), padding="same")(input_tensor)

    input_tensor = BatchNormalization()(Activation("relu")(input_tensor))
    input_tensor = Dropout(dropout_rate)(input_tensor)
    input_tensor = Conv2DTranspose(channel, kernel_size, strides=strides, padding="same")(input_tensor)

    if strides != (1, 1):
        skip = Conv2DTranspose(channel, (1, 1), strides=strides, padding="same")(skip)
    input_tensor = add([input_tensor, skip])

    return input_tensor
Example #11
Source File: mnist_acgan.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes |
def build_generator(latent_size):
    # we will map a pair of (z, L), where z is a latent vector and L is a
    # label drawn from P_c, to image space (..., 28, 28, 1)
    cnn = Sequential()

    cnn.add(Dense(3 * 3 * 384, input_dim=latent_size, activation='relu'))
    cnn.add(Reshape((3, 3, 384)))

    # upsample to (7, 7, ...)
    cnn.add(Conv2DTranspose(192, 5, strides=1, padding='valid',
                            activation='relu',
                            kernel_initializer='glorot_normal'))
    cnn.add(BatchNormalization())

    # upsample to (14, 14, ...)
    cnn.add(Conv2DTranspose(96, 5, strides=2, padding='same',
                            activation='relu',
                            kernel_initializer='glorot_normal'))
    cnn.add(BatchNormalization())

    # upsample to (28, 28, ...)
    cnn.add(Conv2DTranspose(1, 5, strides=2, padding='same',
                            activation='tanh',
                            kernel_initializer='glorot_normal'))

    # this is the z space commonly referred to in GAN papers
    latent = Input(shape=(latent_size, ))

    # this will be our label
    image_class = Input(shape=(1,), dtype='int32')

    cls = Flatten()(Embedding(num_classes, latent_size,
                              embeddings_initializer='glorot_normal')(image_class))

    # hadamard product between z-space and a class conditional embedding
    h = layers.multiply([latent, cls])

    fake_image = cnn(h)

    return Model([latent, image_class], fake_image)
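
The shape comments in the generator can be checked with the standard transposed-convolution output-size arithmetic (this helper is a side note, not part of the example): out = (in - 1) * stride + kernel for padding='valid', and out = in * stride for padding='same'.

def deconv_out(size, kernel, stride, padding):
    # Standard Conv2DTranspose output-size formula.
    if padding == 'valid':
        return (size - 1) * stride + kernel
    return size * stride  # 'same'

assert deconv_out(3, 5, 1, 'valid') == 7    # (3, 3)   -> (7, 7)
assert deconv_out(7, 5, 2, 'same') == 14    # (7, 7)   -> (14, 14)
assert deconv_out(14, 5, 2, 'same') == 28   # (14, 14) -> (28, 28)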
Example #12
Source File: convolutional_test.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes |
def test_conv2d_transpose():
    num_samples = 2
    filters = 2
    stack_size = 3
    num_row = 5
    num_col = 6

    for padding in _convolution_paddings:
        for strides in [(1, 1), (2, 2)]:
            if padding == 'same' and strides != (1, 1):
                continue
            layer_test(convolutional.Deconvolution2D,
                       kwargs={'filters': filters,
                               'kernel_size': 3,
                               'padding': padding,
                               'strides': strides,
                               'data_format': 'channels_last'},
                       input_shape=(num_samples, num_row, num_col, stack_size),
                       fixed_batch_size=True)

    layer_test(convolutional.Deconvolution2D,
               kwargs={'filters': filters,
                       'kernel_size': 3,
                       'padding': padding,
                       'data_format': 'channels_first',
                       'activation': None,
                       'kernel_regularizer': 'l2',
                       'bias_regularizer': 'l2',
                       'activity_regularizer': 'l2',
                       'kernel_constraint': 'max_norm',
                       'bias_constraint': 'max_norm',
                       'strides': strides},
               input_shape=(num_samples, stack_size, num_row, num_col),
               fixed_batch_size=True)

    # Test invalid use case
    with pytest.raises(ValueError):
        model = Sequential([convolutional.Conv2DTranspose(filters=filters,
                                                          kernel_size=3,
                                                          padding=padding,
                                                          batch_input_shape=(None, None, 5, None))])
Example #13
Source File: deblur_gan.py From deep_learning with MIT License | 5 votes |
def generator_model():
    """Generator model."""
    inputs = Input(Config.input_shape_generator)
    x = ReflectionPadding2D((3, 3))(inputs)
    print(x.shape)
    x = Conv2D(filters=Config.ngf, kernel_size=(7, 7), padding="valid")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    n_downsampling = 2
    for i in range(n_downsampling):
        mult = 2**i
        x = Conv2D(filters=Config.ngf*mult*2, kernel_size=(3, 3), strides=2, padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)

    mult = 2**n_downsampling
    for i in range(Config.n_blocks_gen):
        x = res_block(x, Config.ngf*mult, use_dropout=True)

    for i in range(n_downsampling):
        mult = 2**(n_downsampling-i)
        x = Conv2DTranspose(filters=int(Config.ngf*mult/2), kernel_size=(3, 3), strides=2, padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)

    x = ReflectionPadding2D(padding=(3, 3))(x)
    x = Conv2D(filters=Config.output_nc, kernel_size=(7, 7), padding="valid")(x)
    x = Activation("tanh")(x)

    # output
    outputs = Add()([inputs, x])
    outputs = Lambda(lambda z: z/2)(outputs)
    print("generator : ", outputs.shape)
    model = Model(inputs=inputs, outputs=outputs, name="Generator")
    return model
Example #14
Source File: model.py From Vocal-Melody-Extraction with MIT License | 5 votes |
def adapter(input_tensor, channel, kernel_size=(1, 9), strides=(1, 3), dropout_rate=0.2):
    input_tensor = BatchNormalization()(Activation("relu")(input_tensor))
    input_tensor = Dropout(dropout_rate)(input_tensor)
    input_tensor = Conv2DTranspose(channel, kernel_size, strides=strides, padding="same")(input_tensor)
    return input_tensor
Example #15
Source File: model.py From Music-Transcription-with-Semantic-Segmentation with GNU General Public License v3.0 | 5 votes |
def adapter(input_tensor, channel, kernel_size=(1, 9), strides=(1, 3), dropout_rate=0.2):
    input_tensor = BatchNormalization()(Activation("relu")(input_tensor))
    input_tensor = Dropout(dropout_rate)(input_tensor)
    input_tensor = Conv2DTranspose(channel, kernel_size, strides=strides, padding="same")(input_tensor)
    return input_tensor
Example #16
Source File: model.py From Music-Transcription-with-Semantic-Segmentation with GNU General Public License v3.0 | 5 votes |
def adapter(input_tensor, channel, kernel_size=(1, 9), strides=(1, 3), dropout_rate=0.2):
    input_tensor = BatchNormalization()(Activation("relu")(input_tensor))
    input_tensor = Dropout(dropout_rate)(input_tensor)
    input_tensor = Conv2DTranspose(channel, kernel_size, strides=strides, padding="same")(input_tensor)
    return input_tensor
Example #17
Source File: layers.py From Keras-GAN-Animeface-Character with MIT License | 5 votes |
def bilinear2x(x, nfilters):
    '''
    Ugh, I don't like making layers.
    My credit goes to: https://kivantium.net/keras-bilinear
    '''
    return Conv2DTranspose(nfilters, (4, 4),
                           strides=(2, 2),
                           padding='same',
                           kernel_initializer=Constant(bilinear_upsample_weights(2, nfilters)))(x)
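
The helper bilinear_upsample_weights is not shown in the snippet. A plausible sketch, following the common FCN-style construction linked above (the project's actual version may differ):

import numpy as np

def bilinear_upsample_weights(factor, nfilters):
    # Assumed reconstruction of the missing helper: build a
    # (k, k, nfilters, nfilters) kernel whose (i, i) channel pairs hold
    # the separable bilinear interpolation filter.
    ksize = 2 * factor - factor % 2                 # 4 when factor == 2
    center = factor - 1 if ksize % 2 == 1 else factor - 0.5
    og = np.ogrid[:ksize, :ksize]
    kernel = ((1 - abs(og[0] - center) / factor) *
              (1 - abs(og[1] - center) / factor))
    weights = np.zeros((ksize, ksize, nfilters, nfilters), dtype=np.float32)
    for i in range(nfilters):
        weights[:, :, i, i] = kernel
    return weights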
Example #18
Source File: models_pix2pixhd.py From Hands-On-Generative-Adversarial-Networks-with-Keras with MIT License | 5 votes |
def build_encoder(img_shape=(2048, 1024, 3), instance_shape=(2048, 1024, 1),
                  n_out_channels=3, ngf=32, n_downsampling=4):
    img = Input(shape=img_shape)
    inst = Input(shape=instance_shape)
    inputs = Concatenate(axis=-1)([img, inst])

    x = ReflectionPadding2D(3)(inputs)
    x = Conv2D(ngf, kernel_size=7, strides=1, padding='valid')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # downsample
    for i in range(n_downsampling):
        mult = 2**i
        x = Conv2D(ngf * mult * 2, kernel_size=3, strides=2, padding='same')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

    # upsample
    for i in range(n_downsampling):
        mult = 2**(n_downsampling - i)
        x = Conv2DTranspose(filters=int(ngf * mult / 2), kernel_size=3,
                            strides=2, padding='same')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

    # final convolution
    x = ReflectionPadding2D(3)(x)
    x = Conv2D(n_out_channels, kernel_size=7, strides=1, padding='valid')(x)
    x = Activation('tanh')(x)
    # x = InstanceWiseAveragePooling()([x, K.cast(inst, np.int32)])

    # create model graph
    model = Model(inputs=[img, inst], outputs=x, name='Encoder')
    print("\nEncoder")
    model.summary()
    return model
Example #19
Source File: decoder.py From enet-keras with MIT License | 5 votes |
def bottleneck(encoder, output, upsample=False, reverse_module=False):
    internal = output // 4

    x = Conv2D(internal, (1, 1), use_bias=False)(encoder)
    x = BatchNormalization(momentum=0.1)(x)
    x = Activation('relu')(x)
    if not upsample:
        x = Conv2D(internal, (3, 3), padding='same', use_bias=True)(x)
    else:
        x = Conv2DTranspose(filters=internal, kernel_size=(3, 3),
                            strides=(2, 2), padding='same')(x)
    x = BatchNormalization(momentum=0.1)(x)
    x = Activation('relu')(x)

    x = Conv2D(output, (1, 1), padding='same', use_bias=False)(x)

    other = encoder
    if encoder.get_shape()[-1] != output or upsample:
        other = Conv2D(output, (1, 1), padding='same', use_bias=False)(other)
        other = BatchNormalization(momentum=0.1)(other)
        if upsample and reverse_module is not False:
            other = UpSampling2D(size=(2, 2))(other)

    if upsample and reverse_module is False:
        decoder = x
    else:
        x = BatchNormalization(momentum=0.1)(x)
        decoder = add([x, other])
        decoder = Activation('relu')(decoder)

    return decoder
Example #20
Source File: decoder.py From enet-keras with MIT License | 5 votes |
def build(encoder, nc):
    enet = bottleneck(encoder, 64, upsample=True, reverse_module=True)  # bottleneck 4.0
    enet = bottleneck(enet, 64)  # bottleneck 4.1
    enet = bottleneck(enet, 64)  # bottleneck 4.2
    enet = bottleneck(enet, 16, upsample=True, reverse_module=True)  # bottleneck 5.0
    enet = bottleneck(enet, 16)  # bottleneck 5.1

    enet = Conv2DTranspose(filters=nc, kernel_size=(2, 2), strides=(2, 2), padding='same')(enet)
    return enet
Example #21
Source File: decoder.py From enet-keras with MIT License | 5 votes |
def bottleneck(encoder, output, upsample=False, reverse_module=False):
    internal = output // 4

    x = Conv2D(internal, (1, 1), use_bias=False)(encoder)
    x = BatchNormalization(momentum=0.1)(x)
    # x = Activation('relu')(x)
    x = PReLU(shared_axes=[1, 2])(x)
    if not upsample:
        x = Conv2D(internal, (3, 3), padding='same', use_bias=True)(x)
    else:
        x = Conv2DTranspose(filters=internal, kernel_size=(3, 3),
                            strides=(2, 2), padding='same')(x)
    x = BatchNormalization(momentum=0.1)(x)
    # x = Activation('relu')(x)
    x = PReLU(shared_axes=[1, 2])(x)

    x = Conv2D(output, (1, 1), padding='same', use_bias=False)(x)

    other = encoder
    if encoder.get_shape()[-1] != output or upsample:
        other = Conv2D(output, (1, 1), padding='same', use_bias=False)(other)
        other = BatchNormalization(momentum=0.1)(other)
        if upsample and reverse_module is not False:
            other = MaxUnpooling2D()([other, reverse_module])

    if upsample and reverse_module is False:
        decoder = x
    else:
        x = BatchNormalization(momentum=0.1)(x)
        decoder = add([x, other])
        # decoder = Activation('relu')(decoder)
        decoder = PReLU(shared_axes=[1, 2])(decoder)

    return decoder
Example #22
Source File: decoder.py From enet-keras with MIT License | 5 votes |
def build(encoder, nc):
    network, index_stack = encoder
    enet = bottleneck(network, 64, upsample=True, reverse_module=index_stack.pop())  # bottleneck 4.0
    enet = bottleneck(enet, 64)  # bottleneck 4.1
    enet = bottleneck(enet, 64)  # bottleneck 4.2
    enet = bottleneck(enet, 16, upsample=True, reverse_module=index_stack.pop())  # bottleneck 5.0
    enet = bottleneck(enet, 16)  # bottleneck 5.1

    enet = Conv2DTranspose(filters=nc, kernel_size=(2, 2), strides=(2, 2), padding='same')(enet)
    return enet
Example #23
Source File: mnist_acgan.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes |
def build_generator(latent_size):
    # we will map a pair of (z, L), where z is a latent vector and L is a
    # label drawn from P_c, to image space (..., 28, 28, 1)
    cnn = Sequential()

    cnn.add(Dense(3 * 3 * 384, input_dim=latent_size, activation='relu'))
    cnn.add(Reshape((3, 3, 384)))

    # upsample to (7, 7, ...)
    cnn.add(Conv2DTranspose(192, 5, strides=1, padding='valid',
                            activation='relu',
                            kernel_initializer='glorot_normal'))
    cnn.add(BatchNormalization())

    # upsample to (14, 14, ...)
    cnn.add(Conv2DTranspose(96, 5, strides=2, padding='same',
                            activation='relu',
                            kernel_initializer='glorot_normal'))
    cnn.add(BatchNormalization())

    # upsample to (28, 28, ...)
    cnn.add(Conv2DTranspose(1, 5, strides=2, padding='same',
                            activation='tanh',
                            kernel_initializer='glorot_normal'))

    # this is the z space commonly referred to in GAN papers
    latent = Input(shape=(latent_size, ))

    # this will be our label
    image_class = Input(shape=(1,), dtype='int32')

    cls = Flatten()(Embedding(num_classes, latent_size,
                              embeddings_initializer='glorot_normal')(image_class))

    # hadamard product between z-space and a class conditional embedding
    h = layers.multiply([latent, cls])

    fake_image = cnn(h)

    return Model([latent, image_class], fake_image)
Example #24
Source File: classifier.py From abnormal-spatiotemporal-ae with GNU General Public License v3.0 | 5 votes |
def get_model(t):
    from keras.models import Model
    from keras.layers.convolutional import Conv2D, Conv2DTranspose
    from keras.layers.convolutional_recurrent import ConvLSTM2D
    from keras.layers.normalization import BatchNormalization
    from keras.layers.wrappers import TimeDistributed
    from keras.layers.core import Activation
    from keras.layers import Input

    input_tensor = Input(shape=(t, 224, 224, 1))

    conv1 = TimeDistributed(Conv2D(128, kernel_size=(11, 11), padding='same', strides=(4, 4), name='conv1'),
                            input_shape=(t, 224, 224, 1))(input_tensor)
    conv1 = TimeDistributed(BatchNormalization())(conv1)
    conv1 = TimeDistributed(Activation('relu'))(conv1)

    conv2 = TimeDistributed(Conv2D(64, kernel_size=(5, 5), padding='same', strides=(2, 2), name='conv2'))(conv1)
    conv2 = TimeDistributed(BatchNormalization())(conv2)
    conv2 = TimeDistributed(Activation('relu'))(conv2)

    convlstm1 = ConvLSTM2D(64, kernel_size=(3, 3), padding='same', return_sequences=True, name='convlstm1')(conv2)
    convlstm2 = ConvLSTM2D(32, kernel_size=(3, 3), padding='same', return_sequences=True, name='convlstm2')(convlstm1)
    convlstm3 = ConvLSTM2D(64, kernel_size=(3, 3), padding='same', return_sequences=True, name='convlstm3')(convlstm2)

    deconv1 = TimeDistributed(Conv2DTranspose(128, kernel_size=(5, 5), padding='same', strides=(2, 2), name='deconv1'))(convlstm3)
    deconv1 = TimeDistributed(BatchNormalization())(deconv1)
    deconv1 = TimeDistributed(Activation('relu'))(deconv1)

    decoded = TimeDistributed(Conv2DTranspose(1, kernel_size=(11, 11), padding='same', strides=(4, 4), name='deconv2'))(deconv1)

    return Model(inputs=input_tensor, outputs=decoded)
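
The decoder mirrors the encoder's strides, so each frame is reconstructed at the input resolution. A quick shape trace with 'same' padding (my check, not part of the original file):

sizes = [224]
for s in (4, 2):              # encoder conv strides
    sizes.append(sizes[-1] // s)
for s in (2, 4):              # decoder Conv2DTranspose strides
    sizes.append(sizes[-1] * s)
print(sizes)                  # [224, 56, 28, 56, 224]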
Example #25
Source File: improved_wgan.py From keras-contrib with MIT License | 5 votes |
def make_generator():
    """Creates a generator model that takes a 100-dimensional noise vector as a
    "seed", and outputs images of size 28x28x1."""
    model = Sequential()
    model.add(Dense(1024, input_dim=100))
    model.add(LeakyReLU())
    model.add(Dense(128 * 7 * 7))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    if K.image_data_format() == 'channels_first':
        model.add(Reshape((128, 7, 7), input_shape=(128 * 7 * 7,)))
        bn_axis = 1
    else:
        model.add(Reshape((7, 7, 128), input_shape=(128 * 7 * 7,)))
        bn_axis = -1
    model.add(Conv2DTranspose(128, (5, 5), strides=2, padding='same'))
    model.add(BatchNormalization(axis=bn_axis))
    model.add(LeakyReLU())
    model.add(Convolution2D(64, (5, 5), padding='same'))
    model.add(BatchNormalization(axis=bn_axis))
    model.add(LeakyReLU())
    model.add(Conv2DTranspose(64, (5, 5), strides=2, padding='same'))
    model.add(BatchNormalization(axis=bn_axis))
    model.add(LeakyReLU())
    # Because we normalized training inputs to lie in the range [-1, 1],
    # the tanh function should be used for the output of the generator to ensure
    # its output also lies in this range.
    model.add(Convolution2D(1, (5, 5), padding='same', activation='tanh'))
    return model
Example #26
Source File: captcha_gan.py From Intelligent-Projects-Using-Python with MIT License | 5 votes |
def generator(input_dim, alpha=0.2):
    model = Sequential()
    model.add(Dense(input_dim=input_dim, output_dim=4*4*512))
    model.add(Reshape(target_shape=(4, 4, 512)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha))
    model.add(Conv2DTranspose(256, kernel_size=5, strides=2, padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha))
    model.add(Conv2DTranspose(128, kernel_size=5, strides=2, padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha))
    model.add(Conv2DTranspose(3, kernel_size=5, strides=2, padding='same'))
    model.add(Activation('tanh'))
    return model

# Define the Discriminator Network
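
Each strides=2, padding='same' Conv2DTranspose doubles the spatial size, so the generator grows 4 -> 8 -> 16 -> 32 and emits a (32, 32, 3) image. A quick check (hypothetical call, assuming a 100-dimensional latent):

g = generator(input_dim=100)
print(g.output_shape)  # expected: (None, 32, 32, 3)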
Example #27
Source File: mnist_acgan.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes |
def build_generator(latent_size):
    # we will map a pair of (z, L), where z is a latent vector and L is a
    # label drawn from P_c, to image space (..., 28, 28, 1)
    cnn = Sequential()

    cnn.add(Dense(3 * 3 * 384, input_dim=latent_size, activation='relu'))
    cnn.add(Reshape((3, 3, 384)))

    # upsample to (7, 7, ...)
    cnn.add(Conv2DTranspose(192, 5, strides=1, padding='valid',
                            activation='relu',
                            kernel_initializer='glorot_normal'))
    cnn.add(BatchNormalization())

    # upsample to (14, 14, ...)
    cnn.add(Conv2DTranspose(96, 5, strides=2, padding='same',
                            activation='relu',
                            kernel_initializer='glorot_normal'))
    cnn.add(BatchNormalization())

    # upsample to (28, 28, ...)
    cnn.add(Conv2DTranspose(1, 5, strides=2, padding='same',
                            activation='tanh',
                            kernel_initializer='glorot_normal'))

    # this is the z space commonly referred to in GAN papers
    latent = Input(shape=(latent_size, ))

    # this will be our label
    image_class = Input(shape=(1,), dtype='int32')

    cls = Flatten()(Embedding(num_classes, latent_size,
                              embeddings_initializer='glorot_normal')(image_class))

    # hadamard product between z-space and a class conditional embedding
    h = layers.multiply([latent, cls])

    fake_image = cnn(h)

    return Model([latent, image_class], fake_image)
Example #28
Source File: convolutional_test.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes |
def test_conv2d_transpose():
    num_samples = 2
    filters = 2
    stack_size = 3
    num_row = 5
    num_col = 6

    for padding in _convolution_paddings:
        for strides in [(1, 1), (2, 2)]:
            if padding == 'same' and strides != (1, 1):
                continue
            layer_test(convolutional.Deconvolution2D,
                       kwargs={'filters': filters,
                               'kernel_size': 3,
                               'padding': padding,
                               'strides': strides,
                               'data_format': 'channels_last'},
                       input_shape=(num_samples, num_row, num_col, stack_size),
                       fixed_batch_size=True)

    layer_test(convolutional.Deconvolution2D,
               kwargs={'filters': filters,
                       'kernel_size': 3,
                       'padding': padding,
                       'data_format': 'channels_first',
                       'activation': None,
                       'kernel_regularizer': 'l2',
                       'bias_regularizer': 'l2',
                       'activity_regularizer': 'l2',
                       'kernel_constraint': 'max_norm',
                       'bias_constraint': 'max_norm',
                       'strides': strides},
               input_shape=(num_samples, stack_size, num_row, num_col),
               fixed_batch_size=True)

    # Test invalid use case
    with pytest.raises(ValueError):
        model = Sequential([convolutional.Conv2DTranspose(filters=filters,
                                                          kernel_size=3,
                                                          padding=padding,
                                                          batch_input_shape=(None, None, 5, None))])
Example #29
Source File: convolutional_test.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes |
def test_conv2d_transpose():
    num_samples = 2
    filters = 2
    stack_size = 3
    num_row = 5
    num_col = 6

    for padding in _convolution_paddings:
        for strides in [(1, 1), (2, 2)]:
            if padding == 'same' and strides != (1, 1):
                continue
            layer_test(convolutional.Deconvolution2D,
                       kwargs={'filters': filters,
                               'kernel_size': 3,
                               'padding': padding,
                               'strides': strides,
                               'data_format': 'channels_last'},
                       input_shape=(num_samples, num_row, num_col, stack_size),
                       fixed_batch_size=True)

    layer_test(convolutional.Deconvolution2D,
               kwargs={'filters': filters,
                       'kernel_size': 3,
                       'padding': padding,
                       'data_format': 'channels_first',
                       'activation': None,
                       'kernel_regularizer': 'l2',
                       'bias_regularizer': 'l2',
                       'activity_regularizer': 'l2',
                       'kernel_constraint': 'max_norm',
                       'bias_constraint': 'max_norm',
                       'strides': strides},
               input_shape=(num_samples, stack_size, num_row, num_col),
               fixed_batch_size=True)

    # Test invalid use case
    with pytest.raises(ValueError):
        model = Sequential([convolutional.Conv2DTranspose(filters=filters,
                                                          kernel_size=3,
                                                          padding=padding,
                                                          batch_input_shape=(None, None, 5, None))])
Example #30
Source File: mnist_acgan.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes |
def build_generator(latent_size):
    # we will map a pair of (z, L), where z is a latent vector and L is a
    # label drawn from P_c, to image space (..., 28, 28, 1)
    cnn = Sequential()

    cnn.add(Dense(3 * 3 * 384, input_dim=latent_size, activation='relu'))
    cnn.add(Reshape((3, 3, 384)))

    # upsample to (7, 7, ...)
    cnn.add(Conv2DTranspose(192, 5, strides=1, padding='valid',
                            activation='relu',
                            kernel_initializer='glorot_normal'))
    cnn.add(BatchNormalization())

    # upsample to (14, 14, ...)
    cnn.add(Conv2DTranspose(96, 5, strides=2, padding='same',
                            activation='relu',
                            kernel_initializer='glorot_normal'))
    cnn.add(BatchNormalization())

    # upsample to (28, 28, ...)
    cnn.add(Conv2DTranspose(1, 5, strides=2, padding='same',
                            activation='tanh',
                            kernel_initializer='glorot_normal'))

    # this is the z space commonly referred to in GAN papers
    latent = Input(shape=(latent_size, ))

    # this will be our label
    image_class = Input(shape=(1,), dtype='int32')

    cls = Flatten()(Embedding(num_classes, latent_size,
                              embeddings_initializer='glorot_normal')(image_class))

    # hadamard product between z-space and a class conditional embedding
    h = layers.multiply([latent, cls])

    fake_image = cnn(h)

    return Model([latent, image_class], fake_image)