Python keras.layers.core.SpatialDropout2D() Examples
The following are 10 code examples of keras.layers.core.SpatialDropout2D(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.layers.core, or try the search function.
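SpatialDropout2D differs from ordinary Dropout in that it drops entire feature maps (channels) rather than individual activations, which helps when adjacent pixels within a feature map are strongly correlated. Before the project examples, here is a minimal, hypothetical sketch of the layer in a small model; it is not taken from any of the projects below, and the layer sizes, dropout rate, and data are arbitrary (depending on the Keras version, the layer can also be imported directly from keras.layers, as done here).

import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, SpatialDropout2D, Flatten, Dense

# Minimal sketch: SpatialDropout2D zeroes whole channels during training.
model = Sequential()
model.add(Conv2D(16, (3, 3), padding='same', activation='relu',
                 input_shape=(32, 32, 3)))
model.add(SpatialDropout2D(0.3))   # each 32x32 feature map is dropped with probability 0.3
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy')

# Dropout is only active while training; at inference the layer passes inputs through.
x = np.random.rand(4, 32, 32, 3).astype('float32')
y = np.eye(10)[np.random.randint(0, 10, 4)]
model.fit(x, y, epochs=1, verbose=0)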
Example #1
Source File: zf_unet_224_model.py From ZF_UNET_224_Pretrained_Model with GNU General Public License v3.0 | 6 votes |
def double_conv_layer(x, size, dropout=0.0, batch_norm=True):
    if K.image_dim_ordering() == 'th':
        axis = 1
    else:
        axis = 3
    conv = Conv2D(size, (3, 3), padding='same')(x)
    if batch_norm is True:
        conv = BatchNormalization(axis=axis)(conv)
    conv = Activation('relu')(conv)
    conv = Conv2D(size, (3, 3), padding='same')(conv)
    if batch_norm is True:
        conv = BatchNormalization(axis=axis)(conv)
    conv = Activation('relu')(conv)
    if dropout > 0:
        conv = SpatialDropout2D(dropout)(conv)
    return conv
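This helper is typically chained with pooling layers to build the encoder half of the U-Net. The usage sketch below is hypothetical: the input shape, filter sizes, and dropout rate are assumptions, and it presumes the imports used inside double_conv_layer (K, Conv2D, BatchNormalization, Activation, SpatialDropout2D) are already in scope in that module.

from keras.layers import Input, MaxPooling2D
from keras.models import Model

# Hypothetical composition of double_conv_layer blocks (shapes/rates are assumptions).
inputs = Input((224, 224, 3))
block1 = double_conv_layer(inputs, 32)               # two 3x3 convs + BN + ReLU, no dropout
pool1 = MaxPooling2D(pool_size=(2, 2))(block1)
block2 = double_conv_layer(pool1, 64, dropout=0.2)   # SpatialDropout2D(0.2) applied at the end
encoder = Model(inputs, block2)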
Example #2
Source File: models.py From kaggle-carvana-2017 with MIT License | 5 votes |
def get_unet_resnet(input_shape):
    resnet_base = ResNet50(input_shape=input_shape, include_top=False)

    if args.show_summary:
        resnet_base.summary()

    # ResNet50 encoder: keep it trainable and tap intermediate activations as skip connections
    for l in resnet_base.layers:
        l.trainable = True
    conv1 = resnet_base.get_layer("activation_1").output
    conv2 = resnet_base.get_layer("activation_10").output
    conv3 = resnet_base.get_layer("activation_22").output
    conv4 = resnet_base.get_layer("activation_40").output
    conv5 = resnet_base.get_layer("activation_49").output

    # decoder: upsample and concatenate with the corresponding encoder features
    up6 = concatenate([UpSampling2D()(conv5), conv4], axis=-1)
    conv6 = conv_block_simple(up6, 256, "conv6_1")
    conv6 = conv_block_simple(conv6, 256, "conv6_2")

    up7 = concatenate([UpSampling2D()(conv6), conv3], axis=-1)
    conv7 = conv_block_simple(up7, 192, "conv7_1")
    conv7 = conv_block_simple(conv7, 192, "conv7_2")

    up8 = concatenate([UpSampling2D()(conv7), conv2], axis=-1)
    conv8 = conv_block_simple(up8, 128, "conv8_1")
    conv8 = conv_block_simple(conv8, 128, "conv8_2")

    up9 = concatenate([UpSampling2D()(conv8), conv1], axis=-1)
    conv9 = conv_block_simple(up9, 64, "conv9_1")
    conv9 = conv_block_simple(conv9, 64, "conv9_2")

    # frozen VGG16 branch provides an extra low-level feature map at full resolution
    vgg = VGG16(input_shape=input_shape, input_tensor=resnet_base.input, include_top=False)
    for l in vgg.layers:
        l.trainable = False
    vgg_first_conv = vgg.get_layer("block1_conv2").output
    up10 = concatenate([UpSampling2D()(conv9), resnet_base.input, vgg_first_conv], axis=-1)
    conv10 = conv_block_simple(up10, 32, "conv10_1")
    conv10 = conv_block_simple(conv10, 32, "conv10_2")
    conv10 = SpatialDropout2D(0.2)(conv10)
    x = Conv2D(1, (1, 1), activation="sigmoid", name="prediction")(conv10)
    model = Model(resnet_base.input, x)
    return model
Example #3
Source File: models.py From kaggle-carvana-2017 with MIT License | 5 votes |
def get_simple_unet(input_shape):
    img_input = Input(input_shape)

    # encoder: conv blocks followed by 2x2 max pooling
    conv1 = conv_block_simple(img_input, 32, "conv1_1")
    conv1 = conv_block_simple(conv1, 32, "conv1_2")
    pool1 = MaxPooling2D((2, 2), strides=(2, 2), padding="same", name="pool1")(conv1)

    conv2 = conv_block_simple(pool1, 64, "conv2_1")
    conv2 = conv_block_simple(conv2, 64, "conv2_2")
    pool2 = MaxPooling2D((2, 2), strides=(2, 2), padding="same", name="pool2")(conv2)

    conv3 = conv_block_simple(pool2, 128, "conv3_1")
    conv3 = conv_block_simple(conv3, 128, "conv3_2")
    pool3 = MaxPooling2D((2, 2), strides=(2, 2), padding="same", name="pool3")(conv3)

    conv4 = conv_block_simple(pool3, 256, "conv4_1")
    conv4 = conv_block_simple(conv4, 256, "conv4_2")
    conv4 = conv_block_simple(conv4, 256, "conv4_3")

    # decoder: upsample and concatenate with encoder features
    up5 = concatenate([UpSampling2D()(conv4), conv3], axis=-1)
    conv5 = conv_block_simple(up5, 128, "conv5_1")
    conv5 = conv_block_simple(conv5, 128, "conv5_2")

    up6 = concatenate([UpSampling2D()(conv5), conv2], axis=-1)
    conv6 = conv_block_simple(up6, 64, "conv6_1")
    conv6 = conv_block_simple(conv6, 64, "conv6_2")

    up7 = concatenate([UpSampling2D()(conv6), conv1], axis=-1)
    conv7 = conv_block_simple(up7, 32, "conv7_1")
    conv7 = conv_block_simple(conv7, 32, "conv7_2")

    conv7 = SpatialDropout2D(0.2)(conv7)
    prediction = Conv2D(1, (1, 1), activation="sigmoid", name="prediction")(conv7)
    model = Model(img_input, prediction)
    return model
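A hedged sketch of how a model like this might be compiled and trained follows; the optimizer, loss, and data shapes are assumptions and are not taken from the kaggle-carvana-2017 code, and it assumes conv_block_simple and the relevant Keras imports are available in the module.

import numpy as np

# Hypothetical training call for get_simple_unet (optimizer/loss/shapes are assumptions).
model = get_simple_unet((128, 128, 3))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

images = np.random.rand(2, 128, 128, 3).astype('float32')
masks = np.random.randint(0, 2, size=(2, 128, 128, 1)).astype('float32')
model.fit(images, masks, batch_size=1, epochs=1, verbose=0)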
Example #4
Source File: models.py From kaggle-carvana-2017 with MIT License | 5 votes |
def get_unet_mobilenet(input_shape):
    base_model = MobileNet(include_top=False, input_shape=input_shape)

    conv1 = base_model.get_layer('conv_pw_1_relu').output
    conv2 = base_model.get_layer('conv_pw_3_relu').output
    conv3 = base_model.get_layer('conv_pw_5_relu').output
    conv4 = base_model.get_layer('conv_pw_11_relu').output
    conv5 = base_model.get_layer('conv_pw_13_relu').output

    up6 = concatenate([UpSampling2D()(conv5), conv4], axis=-1)
    conv6 = conv_block_simple(up6, 256, "conv6_1")
    conv6 = conv_block_simple(conv6, 256, "conv6_2")

    up7 = concatenate([UpSampling2D()(conv6), conv3], axis=-1)
    conv7 = conv_block_simple(up7, 256, "conv7_1")
    conv7 = conv_block_simple(conv7, 256, "conv7_2")

    up8 = concatenate([UpSampling2D()(conv7), conv2], axis=-1)
    conv8 = conv_block_simple(up8, 192, "conv8_1")
    conv8 = conv_block_simple(conv8, 128, "conv8_2")

    up9 = concatenate([UpSampling2D()(conv8), conv1], axis=-1)
    conv9 = conv_block_simple(up9, 96, "conv9_1")
    conv9 = conv_block_simple(conv9, 64, "conv9_2")

    up10 = concatenate([UpSampling2D()(conv9), base_model.input], axis=-1)
    conv10 = conv_block_simple(up10, 48, "conv10_1")
    conv10 = conv_block_simple(conv10, 32, "conv10_2")
    conv10 = SpatialDropout2D(0.2)(conv10)
    x = Conv2D(1, (1, 1), activation="sigmoid", name="prediction")(conv10)
    model = Model(base_model.input, x)
    return model
Example #5
Source File: models.py From kaggle-carvana-2017 with MIT License | 5 votes |
def get_unet_inception_resnet_v2(input_shape):
    base_model = InceptionResNetV2(include_top=False, input_shape=input_shape)

    conv1 = base_model.get_layer('activation_3').output
    conv2 = base_model.get_layer('activation_5').output
    conv3 = base_model.get_layer('block35_10_ac').output
    conv4 = base_model.get_layer('block17_20_ac').output
    conv5 = base_model.get_layer('conv_7b_ac').output

    up6 = concatenate([UpSampling2D()(conv5), conv4], axis=-1)
    conv6 = conv_block_simple(up6, 256, "conv6_1")
    conv6 = conv_block_simple(conv6, 256, "conv6_2")

    up7 = concatenate([UpSampling2D()(conv6), conv3], axis=-1)
    conv7 = conv_block_simple(up7, 256, "conv7_1")
    conv7 = conv_block_simple(conv7, 256, "conv7_2")

    up8 = concatenate([UpSampling2D()(conv7), conv2], axis=-1)
    conv8 = conv_block_simple(up8, 128, "conv8_1")
    conv8 = conv_block_simple(conv8, 128, "conv8_2")

    up9 = concatenate([UpSampling2D()(conv8), conv1], axis=-1)
    conv9 = conv_block_simple(up9, 64, "conv9_1")
    conv9 = conv_block_simple(conv9, 64, "conv9_2")

    up10 = concatenate([UpSampling2D()(conv9), base_model.input], axis=-1)
    conv10 = conv_block_simple(up10, 48, "conv10_1")
    conv10 = conv_block_simple(conv10, 32, "conv10_2")
    conv10 = SpatialDropout2D(0.4)(conv10)
    x = Conv2D(1, (1, 1), activation="sigmoid", name="prediction")(conv10)
    model = Model(base_model.input, x)
    return model
Example #6
Source File: test_keras2_numeric.py From coremltools with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_tiny_conv_dropout_random(self):
    np.random.seed(1988)
    num_samples = 1
    input_dim = 8
    input_shape = (input_dim, input_dim, 3)
    num_kernels = 2
    kernel_height = 5
    kernel_width = 5
    hidden_dim = 4

    # Define a model
    model = Sequential()
    model.add(
        Conv2D(
            input_shape=input_shape,
            filters=num_kernels,
            kernel_size=(kernel_height, kernel_width),
        )
    )
    model.add(SpatialDropout2D(0.5))
    model.add(Flatten())
    model.add(Dense(hidden_dim))

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Get the coreml model
    self._test_model(model)
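For context, this test suite converts the Keras model to Core ML and compares the two numerically. A hedged sketch of the conversion step is below; the exact converter entry point depends on the coremltools version (the Keras converter shown here applies to coremltools 3.x and earlier), and the file name is an arbitrary example.

# Hedged sketch of a Keras-to-Core-ML conversion (API availability depends on
# the coremltools version; this form applies to coremltools 3.x and earlier).
import coremltools

mlmodel = coremltools.converters.keras.convert(model)
mlmodel.save('tiny_conv_dropout.mlmodel')   # arbitrary example file name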
Example #7
Source File: a02_zf_unet_model.py From Urban3d with MIT License | 5 votes |
def multi_conv_layer(x, layers, size, dropout, batch_norm):
    from keras.layers import Conv2D
    from keras.layers.normalization import BatchNormalization
    from keras.layers.core import SpatialDropout2D, Activation

    for i in range(layers):
        x = Conv2D(size, (3, 3), padding='same')(x)
        if batch_norm is True:
            x = BatchNormalization(axis=1)(x)
        x = Activation('relu')(x)
    if dropout > 0:
        x = SpatialDropout2D(dropout)(x)
    return x
Example #8
Source File: unet.py From robin with MIT License | 5 votes |
def double_conv_layer(inputs, filter):
    conv = Conv2D(filter, (3, 3), padding='same', kernel_initializer='he_normal')(inputs)
    conv = BatchNormalization(axis=3)(conv)
    conv = Activation('relu')(conv)
    conv = Conv2D(filter, (3, 3), padding='same', kernel_initializer='he_normal')(conv)
    conv = BatchNormalization(axis=3)(conv)
    conv = Activation('relu')(conv)
    conv = SpatialDropout2D(0.1)(conv)
    return conv
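As an aside, the channel-wise behaviour of SpatialDropout2D can be observed directly by running the layer with the learning phase enabled. The sketch below is hypothetical and is not part of the robin project; it relies on the backend learning-phase mechanism of older standalone Keras with a TensorFlow 1.x backend.

import numpy as np
from keras import backend as K
from keras.layers import Input, SpatialDropout2D

# Hypothetical demonstration (older standalone Keras / TF1 backend assumed):
# with the learning phase set to 1, whole channels are zeroed, not single pixels.
inp = Input((4, 4, 8))
out = SpatialDropout2D(0.5)(inp)
dropout_fn = K.function([inp, K.learning_phase()], [out])

x = np.ones((1, 4, 4, 8), dtype='float32')
y = dropout_fn([x, 1])[0]
print(y.reshape(-1, 8).sum(axis=0))   # each channel is either all zeros or uniformly scaled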
Example #9
Source File: encoder.py From enet-keras with MIT License | 4 votes |
def bottleneck(inp, output, internal_scale=4, asymmetric=0, dilated=0, downsample=False, dropout_rate=0.1):
    # main branch
    internal = output // internal_scale
    encoder = inp

    # 1x1
    input_stride = 2 if downsample else 1  # the 1st 1x1 projection is replaced with a 2x2 convolution when downsampling
    encoder = Conv2D(internal, (input_stride, input_stride),
                     # padding='same',
                     strides=(input_stride, input_stride), use_bias=False)(encoder)
    # Batch normalization + PReLU
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # conv
    if not asymmetric and not dilated:
        encoder = Conv2D(internal, (3, 3), padding='same')(encoder)
    elif asymmetric:
        encoder = Conv2D(internal, (1, asymmetric), padding='same', use_bias=False)(encoder)
        encoder = Conv2D(internal, (asymmetric, 1), padding='same')(encoder)
    elif dilated:
        encoder = Conv2D(internal, (3, 3), dilation_rate=(dilated, dilated), padding='same')(encoder)
    else:
        raise(Exception('You shouldn\'t be here'))

    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # 1x1
    encoder = Conv2D(output, (1, 1), use_bias=False)(encoder)
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = SpatialDropout2D(dropout_rate)(encoder)

    other = inp
    # other branch
    if downsample:
        other = MaxPooling2D()(other)

        other = Permute((1, 3, 2))(other)
        pad_feature_maps = output - inp.get_shape().as_list()[3]
        tb_pad = (0, 0)
        lr_pad = (0, pad_feature_maps)
        other = ZeroPadding2D(padding=(tb_pad, lr_pad))(other)
        other = Permute((1, 3, 2))(other)

    encoder = add([encoder, other])
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    return encoder
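This bottleneck is the basic building block of the ENet encoder. The following is a hedged sketch of how such blocks might be stacked into one encoder stage; the input shape, filter count, and repeat count are assumptions, and the imports used inside bottleneck are assumed to be in scope in its module.

from keras.layers import Input
from keras.models import Model

# Hypothetical ENet-style encoder stage built from the bottleneck block above.
inp = Input((256, 256, 16))
x = bottleneck(inp, 64, downsample=True, dropout_rate=0.01)   # downsampling bottleneck
for _ in range(4):
    x = bottleneck(x, 64, dropout_rate=0.01)                  # regular bottlenecks
stage = Model(inp, x)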
Example #10
Source File: encoder.py From enet-keras with MIT License | 4 votes |
def bottleneck(inp, output, internal_scale=4, asymmetric=0, dilated=0, downsample=False, dropout_rate=0.1):
    # main branch
    internal = output // internal_scale
    encoder = inp

    # 1x1
    input_stride = 2 if downsample else 1  # the 1st 1x1 projection is replaced with a 2x2 convolution when downsampling
    encoder = Conv2D(internal, (input_stride, input_stride),
                     # padding='same',
                     strides=(input_stride, input_stride), use_bias=False)(encoder)
    # Batch normalization + PReLU
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # conv
    if not asymmetric and not dilated:
        encoder = Conv2D(internal, (3, 3), padding='same')(encoder)
    elif asymmetric:
        encoder = Conv2D(internal, (1, asymmetric), padding='same', use_bias=False)(encoder)
        encoder = Conv2D(internal, (asymmetric, 1), padding='same')(encoder)
    elif dilated:
        encoder = Conv2D(internal, (3, 3), dilation_rate=(dilated, dilated), padding='same')(encoder)
    else:
        raise(Exception('You shouldn\'t be here'))

    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # 1x1
    encoder = Conv2D(output, (1, 1), use_bias=False)(encoder)
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    encoder = SpatialDropout2D(dropout_rate)(encoder)

    other = inp
    # other branch
    if downsample:
        other, indices = MaxPoolingWithArgmax2D()(other)

        other = Permute((1, 3, 2))(other)
        pad_feature_maps = output - inp.get_shape().as_list()[3]
        tb_pad = (0, 0)
        lr_pad = (0, pad_feature_maps)
        other = ZeroPadding2D(padding=(tb_pad, lr_pad))(other)
        other = Permute((1, 3, 2))(other)

    encoder = add([encoder, other])
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    if downsample:
        return encoder, indices
    else:
        return encoder