Python keras.layers.convolutional.UpSampling2D() Examples
The following are 30 code examples of keras.layers.convolutional.UpSampling2D(), drawn from open-source projects. Each example lists its source file, the project it comes from, and that project's license. You may also want to check out all other available functions and classes of the keras.layers.convolutional module.
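Before the project examples, here is a minimal self-contained sketch (not taken from any of the projects below) of what the layer itself does: with its default size=(2, 2), UpSampling2D repeats each row and column, doubling the spatial dimensions of a channels_last tensor. The 4x4 single-channel input is an arbitrary choice for illustration and assumes a Keras 2.x install with the default channels_last image format.

import numpy as np
from keras.models import Sequential
from keras.layers.convolutional import UpSampling2D

# One-layer model that upsamples a 4x4 single-channel feature map to 8x8.
model = Sequential()
model.add(UpSampling2D(size=(2, 2), input_shape=(4, 4, 1)))

x = np.arange(16, dtype="float32").reshape(1, 4, 4, 1)  # (batch, rows, cols, channels)
y = model.predict(x)
print(y.shape)  # (1, 8, 8, 1): every row and column of the input is repeated twice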
Example #1
Source File: sgan.py From Keras-GAN with MIT License
def build_generator(self):

    model = Sequential()

    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(1, kernel_size=3, padding="same"))
    model.add(Activation("tanh"))

    model.summary()

    noise = Input(shape=(self.latent_dim,))
    img = model(noise)

    return Model(noise, img)
Example #2
Source File: base.py From keras-rtst with MIT License
def create_res_texture_net(input_rows, input_cols, num_res_filters=128,
        res_out_activation='linear', activation='relu', num_res_blocks=5):
    net = Graph()
    net.add_input('x', input_shape=(3, input_rows, input_cols))
    add_conv_block(net, 'in0', 'x', num_res_filters // 4, 9, activation=activation)
    add_conv_block(net, 'in1', 'in0', num_res_filters // 2, 3, subsample=(2, 2), activation=activation)
    add_conv_block(net, 'in2', 'in1', num_res_filters, 3, subsample=(2, 2), activation=activation)
    last_block_name = 'in2'
    for res_i in range(num_res_blocks):
        block_name = 'res_{}'.format(res_i)
        add_conv_block(net, block_name + '_in0', last_block_name, num_res_filters, 3, activation=activation)
        add_conv_block(net, block_name + '_in1', block_name + '_in0', num_res_filters, 3, activation='linear')
        net.add_node(Activation(res_out_activation), block_name, merge_mode='sum',
                     inputs=[block_name + '_in1', last_block_name])
        last_block_name = block_name
    # theano doesn't seem to support fractionally-strided convolutions at the moment
    net.add_node(UpSampling2D(), 'out_up0', last_block_name)
    add_conv_block(net, 'out_0', 'out_up0', num_res_filters // 2, 3, activation=activation)
    net.add_node(UpSampling2D(), 'out_up1', 'out_0')
    add_conv_block(net, 'out_1', 'out_up1', num_res_filters // 4, 3, activation=activation)
    add_conv_block(net, 'out_2', 'out_1', 3, 9, activation='linear')
    net.add_node(Activation('linear'), 'texture_rgb', 'out_2', create_output=True)
    return net
Example #3
Source File: dcgan.py From Keras-DCGAN-killmebaby with MIT License
def build_generator(self):

    noise_shape = (self.z_dim,)

    model = Sequential()

    model.add(Dense(128 * 32 * 32, activation="relu", input_shape=noise_shape))
    model.add(Reshape((32, 32, 128)))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(3, kernel_size=3, padding="same"))
    model.add(Activation("tanh"))

    model.summary()

    noise = Input(shape=noise_shape)
    img = model(noise)

    return Model(noise, img)
Example #4
Source File: wgan_gp.py From Keras-GAN with MIT License
def build_generator(self):

    model = Sequential()

    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=4, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=4, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(Conv2D(self.channels, kernel_size=4, padding="same"))
    model.add(Activation("tanh"))

    model.summary()

    noise = Input(shape=(self.latent_dim,))
    img = model(noise)

    return Model(noise, img)
Example #5
Source File: wgan.py From Keras-GAN with MIT License
def build_generator(self):

    model = Sequential()

    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=4, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=4, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(Conv2D(self.channels, kernel_size=4, padding="same"))
    model.add(Activation("tanh"))

    model.summary()

    noise = Input(shape=(self.latent_dim,))
    img = model(noise)

    return Model(noise, img)
Example #6
Source File: Chapter_3_wgan.py From Hands-On-Deep-Learning-for-Games with MIT License
def build_generator(self):

    model = Sequential()

    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=4, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=4, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(Conv2D(self.channels, kernel_size=4, padding="same"))
    model.add(Activation("tanh"))

    model.summary()

    noise = Input(shape=(self.latent_dim,))
    img = model(noise)

    return Model(noise, img)
Example #7
Source File: Chapter_3_2.py From Hands-On-Deep-Learning-for-Games with MIT License
def build_generator(self):

    model = Sequential()

    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
    model.add(Activation("tanh"))

    model.summary()

    noise = Input(shape=(self.latent_dim,))
    img = model(noise)

    return Model(noise, img)
Example #8
Source File: Chapter_3_3.py From Hands-On-Deep-Learning-for-Games with MIT License
def build_generator(self):

    model = Sequential()

    model.add(Dense(128 * 8 * 8, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((8, 8, 128)))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=4, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=4, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(Conv2D(self.channels, kernel_size=4, padding="same"))
    model.add(Activation("tanh"))

    model.summary()

    noise = Input(shape=(self.latent_dim,))
    img = model(noise)

    return Model(noise, img)
Example #9
Source File: densenet.py From SSR-Net with Apache License 2.0
def __transition_up_block(ip, nb_filters, type='deconv', weight_decay=1E-4):
    ''' SubpixelConvolutional Upscaling (factor = 2)
    Args:
        ip: keras tensor
        nb_filters: number of layers
        type: can be 'upsampling', 'subpixel', 'deconv'. Determines type of upsampling performed
        weight_decay: weight decay factor
    Returns: keras tensor, after applying upsampling operation.
    '''

    if type == 'upsampling':
        x = UpSampling2D()(ip)
    elif type == 'subpixel':
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(weight_decay),
                   use_bias=False, kernel_initializer='he_normal')(ip)
        x = SubPixelUpscaling(scale_factor=2)(x)
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(weight_decay),
                   use_bias=False, kernel_initializer='he_normal')(x)
    else:
        x = Conv2DTranspose(nb_filters, (3, 3), activation='relu', padding='same', strides=(2, 2),
                            kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(ip)

    return x
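The docstring above names three upsampling modes ('upsampling', 'subpixel', 'deconv'). Below is a hedged usage sketch, assuming a channels_last backend and an arbitrary 16x16x64 input tensor (both assumptions for illustration, not taken from the project). The 'subpixel' branch additionally requires a custom SubPixelUpscaling layer, so it is omitted here.

from keras.layers import Input

# Hypothetical shapes, chosen only for illustration (channels_last assumed).
ip = Input(shape=(16, 16, 64))
x_near = __transition_up_block(ip, nb_filters=64, type='upsampling')  # -> (None, 32, 32, 64)
x_deconv = __transition_up_block(ip, nb_filters=64, type='deconv')    # -> (None, 32, 32, 64)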
Example #10
Source File: cifar_10_gan.py From Deep-Learning-Quick-Reference with MIT License
def build_generator(noise_shape=(100,)):
    input = Input(noise_shape)
    x = Dense(128 * 8 * 8, activation="relu")(input)
    x = Reshape((8, 8, 128))(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = UpSampling2D()(x)
    x = Conv2D(128, kernel_size=3, padding="same")(x)
    x = Activation("relu")(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = UpSampling2D()(x)
    x = Conv2D(64, kernel_size=3, padding="same")(x)
    x = Activation("relu")(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = Conv2D(3, kernel_size=3, padding="same")(x)
    out = Activation("tanh")(x)

    model = Model(input, out)
    print("-- Generator -- ")
    model.summary()
    return model
Example #11
Source File: densenet.py From Model-Playgrounds with MIT License
def __transition_up_block(ip, nb_filters, type='deconv', weight_decay=1E-4):
    ''' SubpixelConvolutional Upscaling (factor = 2)
    Args:
        ip: keras tensor
        nb_filters: number of layers
        type: can be 'upsampling', 'subpixel', 'deconv'. Determines type of upsampling performed
        weight_decay: weight decay factor
    Returns: keras tensor, after applying upsampling operation.
    '''

    if type == 'upsampling':
        x = UpSampling2D()(ip)
    elif type == 'subpixel':
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(weight_decay),
                   use_bias=False, kernel_initializer='he_normal')(ip)
        x = SubPixelUpscaling(scale_factor=2)(x)
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(weight_decay),
                   use_bias=False, kernel_initializer='he_normal')(x)
    else:
        x = Conv2DTranspose(nb_filters, (3, 3), activation='relu', padding='same', strides=(2, 2),
                            kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(ip)

    return x
Example #12
Source File: fcDensenet.py From PyTorch-Luna16 with Apache License 2.0
def __transition_up_block(ip, nb_filters, type='deconv', weight_decay=1E-4):
    ''' SubpixelConvolutional Upscaling (factor = 2)
    Args:
        ip: keras tensor
        nb_filters: number of layers
        type: can be 'upsampling', 'subpixel', 'deconv'. Determines type of upsampling performed
        weight_decay: weight decay factor
    Returns: keras tensor, after applying upsampling operation.
    '''

    if type == 'upsampling':
        x = UpSampling2D()(ip)
    elif type == 'subpixel':
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(weight_decay),
                   use_bias=False, kernel_initializer='he_normal')(ip)
        x = SubPixelUpscaling(scale_factor=2)(x)
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(weight_decay),
                   use_bias=False, kernel_initializer='he_normal')(x)
    else:
        x = Conv2DTranspose(nb_filters, (3, 3), activation='relu', padding='same', strides=(2, 2),
                            kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(ip)

    return x
Example #13
Source File: example_gan_convolutional.py From keras-adversarial with MIT License
def model_generator():
    nch = 256
    g_input = Input(shape=[100])
    H = Dense(nch * 14 * 14)(g_input)
    H = BatchNormalization(mode=2)(H)
    H = Activation('relu')(H)
    H = dim_ordering_reshape(nch, 14)(H)
    H = UpSampling2D(size=(2, 2))(H)
    H = Convolution2D(int(nch / 2), 3, 3, border_mode='same')(H)
    H = BatchNormalization(mode=2, axis=1)(H)
    H = Activation('relu')(H)
    H = Convolution2D(int(nch / 4), 3, 3, border_mode='same')(H)
    H = BatchNormalization(mode=2, axis=1)(H)
    H = Activation('relu')(H)
    H = Convolution2D(1, 1, 1, border_mode='same')(H)
    g_V = Activation('sigmoid')(H)
    return Model(g_input, g_V)
Example #14
Source File: dcgan_mnist.py From keras-examples with MIT License
def generator_model():
    model = Sequential()
    model.add(Dense(input_dim=100, output_dim=1024))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dense(7 * 7 * 128))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    # with the TensorFlow image ordering, channels come last!
    model.add(Reshape((7, 7, 128), input_shape=(7 * 7 * 128,)))
    model.add(UpSampling2D((2, 2)))  # doubles the image size: 14x14
    model.add(Convolution2D(64, 5, 5, border_mode='same'))
    model.add(Activation('tanh'))
    model.add(UpSampling2D(size=(2, 2)))  # 28x28
    model.add(Convolution2D(1, 5, 5, border_mode='same'))  # output is 28x28x1
    model.add(Activation('tanh'))
    return model
Example #15
Source File: example_aae_cifar10.py From keras-adversarial with MIT License
def model_generator(latent_dim, units=512, dropout=0.5, reg=lambda: l1l2(l1=1e-7, l2=1e-7)):
    model = Sequential(name="decoder")
    h = 5
    model.add(Dense(units * 4 * 4, input_dim=latent_dim, W_regularizer=reg()))
    model.add(Reshape(dim_ordering_shape((units, 4, 4))))
    # model.add(SpatialDropout2D(dropout))
    model.add(LeakyReLU(0.2))
    model.add(Convolution2D(units / 2, h, h, border_mode='same', W_regularizer=reg()))
    # model.add(SpatialDropout2D(dropout))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(units / 2, h, h, border_mode='same', W_regularizer=reg()))
    # model.add(SpatialDropout2D(dropout))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(units / 4, h, h, border_mode='same', W_regularizer=reg()))
    # model.add(SpatialDropout2D(dropout))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(3, h, h, border_mode='same', W_regularizer=reg()))
    model.add(Activation('sigmoid'))
    return model
Example #16
Source File: example_gan_cifar10.py From keras-adversarial with MIT License
def model_generator():
    model = Sequential()
    nch = 256
    reg = lambda: l1l2(l1=1e-7, l2=1e-7)
    h = 5
    model.add(Dense(nch * 4 * 4, input_dim=100, W_regularizer=reg()))
    model.add(BatchNormalization(mode=0))
    model.add(Reshape(dim_ordering_shape((nch, 4, 4))))
    model.add(Convolution2D(int(nch / 2), h, h, border_mode='same', W_regularizer=reg()))
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(int(nch / 2), h, h, border_mode='same', W_regularizer=reg()))
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(int(nch / 4), h, h, border_mode='same', W_regularizer=reg()))
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(3, h, h, border_mode='same', W_regularizer=reg()))
    model.add(Activation('sigmoid'))
    return model
Example #17
Source File: run.py From Generative-Adversarial-Networks-Projects with MIT License
def build_generator():
    gen_model = Sequential()

    gen_model.add(Dense(input_dim=100, output_dim=2048))
    gen_model.add(ReLU())

    gen_model.add(Dense(256 * 8 * 8))
    gen_model.add(BatchNormalization())
    gen_model.add(ReLU())

    gen_model.add(Reshape((8, 8, 256), input_shape=(256 * 8 * 8,)))
    gen_model.add(UpSampling2D(size=(2, 2)))

    gen_model.add(Conv2D(128, (5, 5), padding='same'))
    gen_model.add(ReLU())

    gen_model.add(UpSampling2D(size=(2, 2)))

    gen_model.add(Conv2D(64, (5, 5), padding='same'))
    gen_model.add(ReLU())

    gen_model.add(UpSampling2D(size=(2, 2)))

    gen_model.add(Conv2D(3, (5, 5), padding='same'))
    gen_model.add(Activation('tanh'))
    return gen_model
Example #18
Source File: base.py From keras-rtst with MIT License
def create_sequential_texture_net(input_rows, input_cols, num_res_filters=128,
        res_out_activation='linear', activation='relu', num_inner_blocks=5):
    net = Sequential()
    add_seq_conv_block(net, num_res_filters // 4, 9, input_shape=(3, input_rows, input_cols), activation=activation)
    add_seq_conv_block(net, num_res_filters // 2, 3, subsample=(2, 2), activation=activation)
    add_seq_conv_block(net, num_res_filters, 3, subsample=(2, 2), activation=activation)
    for i in range(num_inner_blocks):
        add_seq_conv_block(net, num_res_filters, 3, activation=activation)
        add_seq_conv_block(net, num_res_filters, 3, activation=activation)
    # theano doesn't seem to support fractionally-strided convolutions at the moment
    net.add(UpSampling2D())
    add_seq_conv_block(net, num_res_filters // 2, 3, activation=activation)
    net.add(UpSampling2D())
    add_seq_conv_block(net, num_res_filters // 4, 3, activation=activation)
    add_seq_conv_block(net, 3, 9, activation='linear')
    return net
Example #19
Source File: densenet_1.py From keras-onnx with MIT License
def __transition_up_block(ip, nb_filters, type='deconv', weight_decay=1E-4):
    ''' SubpixelConvolutional Upscaling (factor = 2)
    Args:
        ip: keras tensor
        nb_filters: number of layers
        type: can be 'upsampling', 'subpixel', 'deconv'. Determines type of upsampling performed
        weight_decay: weight decay factor
    Returns: keras tensor, after applying upsampling operation.
    '''

    if type == 'upsampling':
        x = UpSampling2D()(ip)
    elif type == 'subpixel':
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(weight_decay),
                   use_bias=False, kernel_initializer='he_normal')(ip)
        x = SubPixelUpscaling(scale_factor=2)(x)
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(weight_decay),
                   use_bias=False, kernel_initializer='he_normal')(x)
    else:
        x = Conv2DTranspose(nb_filters, (3, 3), activation='relu', padding='same', strides=(2, 2),
                            kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(ip)

    return x
Example #20
Source File: densenet.py From semantic-embeddings with MIT License
def __transition_up_block(ip, nb_filters, type='deconv', weight_decay=1E-4):
    ''' SubpixelConvolutional Upscaling (factor = 2)
    Args:
        ip: keras tensor
        nb_filters: number of layers
        type: can be 'upsampling', 'subpixel', 'deconv'. Determines type of upsampling performed
        weight_decay: weight decay factor
    Returns: keras tensor, after applying upsampling operation.
    '''

    if type == 'upsampling':
        x = UpSampling2D()(ip)
    elif type == 'subpixel':
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(weight_decay),
                   use_bias=False, kernel_initializer='he_normal')(ip)
        x = SubPixelUpscaling(scale_factor=2)(x)
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same', kernel_regularizer=l2(weight_decay),
                   use_bias=False, kernel_initializer='he_normal')(x)
    else:
        x = Conv2DTranspose(nb_filters, (3, 3), activation='relu', padding='same', strides=(2, 2),
                            kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(ip)

    return x
Example #21
Source File: mnist_gan.py From Deep-Learning-Quick-Reference with MIT License
def build_generator(noise_shape=(100,)):
    input = Input(noise_shape)
    x = Dense(128 * 7 * 7, activation="relu")(input)
    x = Reshape((7, 7, 128))(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = UpSampling2D()(x)
    x = Conv2D(128, kernel_size=3, padding="same")(x)
    x = Activation("relu")(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = UpSampling2D()(x)
    x = Conv2D(64, kernel_size=3, padding="same")(x)
    x = Activation("relu")(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = Conv2D(1, kernel_size=3, padding="same")(x)
    out = Activation("tanh")(x)

    model = Model(input, out)
    print("-- Generator -- ")
    model.summary()
    return model
Example #22
Source File: models.py From DeepLearningImplementations with MIT License
def up_conv_block_unet(x, x2, f, name, bn_mode, bn_axis, bn=True, dropout=False):

    x = Activation("relu")(x)
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(f, (3, 3), name=name, padding="same")(x)
    if bn:
        x = BatchNormalization(axis=bn_axis)(x)
    if dropout:
        x = Dropout(0.5)(x)
    x = Concatenate(axis=bn_axis)([x, x2])

    return x
Example #23
Source File: models.py From DeepLearningImplementations with MIT License
def lambda_output(input_shape):
    return input_shape[:2]


# def conv_block_unet(x, f, name, bn_mode, bn_axis, bn=True, dropout=False, strides=(2,2)):
#     x = Conv2D(f, (3, 3), strides=strides, name=name, padding="same")(x)
#     if bn:
#         x = BatchNormalization(axis=bn_axis)(x)
#     x = LeakyReLU(0.2)(x)
#     if dropout:
#         x = Dropout(0.5)(x)
#     return x


# def up_conv_block_unet(x1, x2, f, name, bn_mode, bn_axis, bn=True, dropout=False):
#     x1 = UpSampling2D(size=(2, 2))(x1)
#     x = merge([x1, x2], mode="concat", concat_axis=bn_axis)
#     x = Conv2D(f, (3, 3), name=name, padding="same")(x)
#     if bn:
#         x = BatchNormalization(axis=bn_axis)(x)
#     x = Activation("relu")(x)
#     if dropout:
#         x = Dropout(0.5)(x)
#     return x
Example #24
Source File: model_fcn.py From Keras-FCN with MIT License
def testnet_fcn(n_classes):
    stride = 32
    input_tensor = Input(shape=(None, None, 3))
    x = Convolution2D(4, 5, 5, name='conv',
                      activation='relu', border_mode='same', subsample=(stride, stride))(input_tensor)
    x = Softmax4D(axis=-1)(x)
    x = UpSampling2D(size=(stride, stride))(x)
    x = Convolution2D(n_classes, 3, 3, name='pred_up', border_mode='same')(x)
    model = Model(input=input_tensor, output=x)
    return model, stride
Example #25
Source File: models.py From Hands-On-Generative-Adversarial-Networks-with-Keras with MIT License
def decoding_block(x, skip_input, n_filters, kernel_size=4):
    x = UpSampling2D(size=2)(x)
    x = Conv2D(n_filters, kernel_size=kernel_size, strides=1, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dropout(0.5)(x)
    x = Concatenate()([x, skip_input])
    return x
Example #26
Source File: girthy.py From keras-rtst with MIT License
def create_res_texture_net(input_rows, input_cols, num_res_filters=128,
        res_out_activation='linear', activation='relu', num_res_blocks=5, depth=3):
    '''Adds a series of residual blocks at each resolution scale, rather than just
    the minimum one.
    '''
    net = Graph()
    net.add_input('x', input_shape=(3, input_rows, input_cols))
    add_conv_block(net, 'in0', 'x', num_res_filters // 4, 9, activation=activation)
    last_name = 'in0'
    # scale down input to max depth with a series of strided convolutions
    for scale_i in range(depth):
        num_scale_filters = num_res_filters - scale_i * 8  # // (2 ** scale_i)  # (depth - scale_i - 1))
        scale_name = 'down_{}'.format(scale_i)
        add_conv_block(net, scale_name, last_name, num_scale_filters, 3, subsample=(2, 2), activation=activation)
        last_name = scale_name
    # add a series of residual blocks at each scale, from smallest to largest
    for scale_i in reversed(range(depth)):
        num_scale_filters = num_res_filters - scale_i * 8  # // (2 ** scale_i)  # (depth - scale_i - 1))
        last_scale_name = last_name
        for res_i in range(num_res_blocks):
            block_name = 'res_{}_{}'.format(scale_i, res_i)
            add_conv_block(net, block_name + '_b0', last_name, num_res_filters, 3, activation=activation)
            add_conv_block(net, block_name + '_b1', block_name + '_b0', num_res_filters, 1, activation='linear')
            if last_name == last_scale_name:
                # transform residual connection to same number of filters
                add_conv_block(net, block_name + '_res', last_name, num_res_filters, 1, activation='linear')
            else:
                # no transform needed when the last node was part of the current residual block
                net.add_node(Layer(), block_name + '_res', last_name)
            net.add_node(Activation(res_out_activation), block_name, merge_mode='sum',
                         inputs=[block_name + '_b1', block_name + '_res'])
            last_name = block_name
        # theano doesn't seem to support fractionally-strided convolutions at the moment
        up_name = 'up_{}'.format(scale_i)
        net.add_node(UpSampling2D(), up_name, last_name)
        last_name = up_name
        last_scale_name = up_name
    # final output
    add_conv_block(net, 'out', last_name, 3, 9, activation='linear')
    net.add_node(Activation('linear'), 'texture_rgb', 'out', create_output=True)
    return net
Example #27
Source File: SeGAN.py From DeepLearning-SeGAN-Segmentation with MIT License
def upsampl_block(x, nb_filters, kernel, stride, size):
    x = UpSampling2D(size=size)(x)
    x = conv_l1(x, nb_filters, kernel, stride)
    x = bnorm(x)
    return relu(x)
Example #28
Source File: SeGAN.py From DeepLearning-SeGAN-Segmentation with MIT License
def upsampl_conv(x, nb_filters, kernel, stride, size):
    x = UpSampling2D(size=size)(x)
    return conv_l1(x, nb_filters, kernel, stride)
Example #29
Source File: models.py From kaggle-carvana-2017 with MIT License
def get_unet_resnet(input_shape):
    resnet_base = ResNet50(input_shape=input_shape, include_top=False)

    if args.show_summary:
        resnet_base.summary()

    for l in resnet_base.layers:
        l.trainable = True
    conv1 = resnet_base.get_layer("activation_1").output
    conv2 = resnet_base.get_layer("activation_10").output
    conv3 = resnet_base.get_layer("activation_22").output
    conv4 = resnet_base.get_layer("activation_40").output
    conv5 = resnet_base.get_layer("activation_49").output

    up6 = concatenate([UpSampling2D()(conv5), conv4], axis=-1)
    conv6 = conv_block_simple(up6, 256, "conv6_1")
    conv6 = conv_block_simple(conv6, 256, "conv6_2")

    up7 = concatenate([UpSampling2D()(conv6), conv3], axis=-1)
    conv7 = conv_block_simple(up7, 192, "conv7_1")
    conv7 = conv_block_simple(conv7, 192, "conv7_2")

    up8 = concatenate([UpSampling2D()(conv7), conv2], axis=-1)
    conv8 = conv_block_simple(up8, 128, "conv8_1")
    conv8 = conv_block_simple(conv8, 128, "conv8_2")

    up9 = concatenate([UpSampling2D()(conv8), conv1], axis=-1)
    conv9 = conv_block_simple(up9, 64, "conv9_1")
    conv9 = conv_block_simple(conv9, 64, "conv9_2")

    vgg = VGG16(input_shape=input_shape, input_tensor=resnet_base.input, include_top=False)
    for l in vgg.layers:
        l.trainable = False
    vgg_first_conv = vgg.get_layer("block1_conv2").output
    up10 = concatenate([UpSampling2D()(conv9), resnet_base.input, vgg_first_conv], axis=-1)
    conv10 = conv_block_simple(up10, 32, "conv10_1")
    conv10 = conv_block_simple(conv10, 32, "conv10_2")
    conv10 = SpatialDropout2D(0.2)(conv10)
    x = Conv2D(1, (1, 1), activation="sigmoid", name="prediction")(conv10)
    model = Model(resnet_base.input, x)
    return model
Example #30
Source File: models.py From Hands-On-Generative-Adversarial-Networks-with-Keras with MIT License
def build_generator(input_shape=(256, 256, 3), ngf=64, kernel_size=4, strides=2):
    """U-Net Generator"""
    image_input = Input(shape=input_shape)
    n_channels = input_shape[-1]

    # encoding blocks
    e1 = Conv2D(ngf, kernel_size=kernel_size, strides=2, padding='same')(image_input)
    e1 = LeakyReLU(alpha=0.2)(e1)
    e2 = encoding_block(e1, ngf*2)
    e3 = encoding_block(e2, ngf*4)
    e4 = encoding_block(e3, ngf*8)
    e5 = encoding_block(e4, ngf*8)
    e6 = encoding_block(e5, ngf*8)
    x = encoding_block(e6, ngf*8)

    # decoding blocks
    x = decoding_block(x, e6, ngf*8)
    x = decoding_block(x, e5, ngf*8)
    x = decoding_block(x, e4, ngf*8)
    x = decoding_block(x, e3, ngf*4)
    x = decoding_block(x, e2, ngf*2)
    x = decoding_block(x, e1, ngf)

    x = UpSampling2D(size=2)(x)
    x = Conv2D(n_channels, kernel_size=4, strides=1, padding='same')(x)
    x = Activation('tanh')(x)

    # create model graph
    model = Model(inputs=image_input, outputs=x, name='Generator')

    print("\nGenerator")
    model.summary()
    return model