Python keras.layers.ZeroPadding2D() Examples
The following are 30 code examples of keras.layers.ZeroPadding2D(), drawn from open-source projects; the source file, project, and license are listed above each example. You may also want to check out all available functions and classes of the keras.layers module.
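Before the examples, here is a minimal sketch (assuming standalone Keras with a TensorFlow backend; shapes are illustrative) of the three padding formats ZeroPadding2D accepts: a single int, a (height, width) pair, or a pair of (top, bottom)/(left, right) pairs.

from keras.layers import Input, ZeroPadding2D
from keras.models import Model

inp = Input(shape=(32, 32, 3))                               # channels_last by default
pad_all = ZeroPadding2D(padding=1)(inp)                      # 1 row/col on every side
pad_hw = ZeroPadding2D(padding=(2, 3))(inp)                  # 2 rows top+bottom, 3 cols left+right
pad_top_left = ZeroPadding2D(padding=((1, 0), (1, 0)))(inp)  # pad only top and left

print(Model(inp, [pad_all, pad_hw, pad_top_left]).output_shape)
# [(None, 34, 34, 3), (None, 36, 38, 3), (None, 33, 33, 3)]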
Example #1
Source File: yolo.py From ImageAI with MIT License | 7 votes |
def _conv_block(inp, convs, do_skip=True):
    x = inp
    count = 0
    for conv in convs:
        if count == (len(convs) - 2) and do_skip:
            skip_connection = x
        count += 1
        if conv['stride'] > 1:
            x = ZeroPadding2D(((1, 0), (1, 0)))(x)  # unlike tensorflow, darknet prefers left and top padding
        x = Conv2D(conv['filter'],
                   conv['kernel'],
                   strides=conv['stride'],
                   padding='valid' if conv['stride'] > 1 else 'same',  # unlike tensorflow, darknet prefers left and top padding
                   name='conv_' + str(conv['layer_idx']),
                   use_bias=False if conv['bnorm'] else True)(x)
        if conv['bnorm']:
            x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
        if conv['leaky']:
            x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)
    return add([skip_connection, x]) if do_skip else x
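For context, the asymmetric ((1, 0), (1, 0)) padding above pads only the top and left edges. A hedged sketch (standalone Keras assumed; the 416x416 input and filter count are illustrative) of how it combines with a stride-2 'valid' convolution to halve the spatial size exactly, matching the Darknet convention the comments refer to:

from keras.layers import Input, ZeroPadding2D, Conv2D
from keras.models import Model

inp = Input(shape=(416, 416, 3))
x = ZeroPadding2D(((1, 0), (1, 0)))(inp)          # 416 -> 417 on each spatial dim
x = Conv2D(32, 3, strides=2, padding='valid')(x)  # (417 - 3)//2 + 1 = 208
print(Model(inp, x).output_shape)                 # (None, 208, 208, 32)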
Example #2
Source File: yolov3_weights_to_keras.py From ai-platform with MIT License | 7 votes |
def _conv_block(inp, convs, skip=True):
    x = inp
    count = 0
    len_convs = len(convs)
    for conv in convs:
        if count == (len_convs - 2) and skip:
            skip_connection = x
        count += 1
        if conv['stride'] > 1:
            x = ZeroPadding2D(((1, 0), (1, 0)))(x)  # peculiar padding, as darknet prefers left and top
        x = Conv2D(conv['filter'],
                   conv['kernel'],
                   strides=conv['stride'],
                   padding='valid' if conv['stride'] > 1 else 'same',  # peculiar padding, as darknet prefers left and top
                   name='conv_' + str(conv['layer_idx']),
                   use_bias=False if conv['bnorm'] else True)(x)
        if conv['bnorm']:
            x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
        if conv['leaky']:
            x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)
    return add([skip_connection, x]) if skip else x

# The SPP block uses three pooling layers of sizes [5, 9, 13] with stride one; all outputs,
# together with the input, are concatenated and fed to the FC block.
Example #3
Source File: hourglass.py From keras-centernet with MIT License | 6 votes |
def residual(_x, out_dim, name, stride=1):
    shortcut = _x
    num_channels = K.int_shape(shortcut)[-1]
    _x = ZeroPadding2D(padding=1, name=name + '.pad1')(_x)
    _x = Conv2D(out_dim, 3, strides=stride, use_bias=False, name=name + '.conv1')(_x)
    _x = BatchNormalization(epsilon=1e-5, name=name + '.bn1')(_x)
    _x = Activation('relu', name=name + '.relu1')(_x)
    _x = Conv2D(out_dim, 3, padding='same', use_bias=False, name=name + '.conv2')(_x)
    _x = BatchNormalization(epsilon=1e-5, name=name + '.bn2')(_x)
    if num_channels != out_dim or stride != 1:
        shortcut = Conv2D(out_dim, 1, strides=stride, use_bias=False, name=name + '.shortcut.0')(shortcut)
        shortcut = BatchNormalization(epsilon=1e-5, name=name + '.shortcut.1')(shortcut)
    _x = Add(name=name + '.add')([_x, shortcut])
    _x = Activation('relu', name=name + '.relu')(_x)
    return _x
Example #4
Source File: inception_blocks_v2.py From Facial-Recognition-using-Facenet with MIT License | 6 votes |
def inception_block_2b(X):
    # inception4e
    X_3x3 = fr_utils.conv2d_bn(X,
                               layer='inception_4e_3x3',
                               cv1_out=160,
                               cv1_filter=(1, 1),
                               cv2_out=256,
                               cv2_filter=(3, 3),
                               cv2_strides=(2, 2),
                               padding=(1, 1))
    X_5x5 = fr_utils.conv2d_bn(X,
                               layer='inception_4e_5x5',
                               cv1_out=64,
                               cv1_filter=(1, 1),
                               cv2_out=128,
                               cv2_filter=(5, 5),
                               cv2_strides=(2, 2),
                               padding=(2, 2))
    X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)
    X_pool = ZeroPadding2D(padding=((0, 1), (0, 1)), data_format='channels_first')(X_pool)
    inception = concatenate([X_3x3, X_5x5, X_pool], axis=1)
    return inception
Example #5
Source File: inception_blocks_v2.py From Facial-Recognition-using-Facenet with MIT License | 6 votes |
def inception_block_1c(X):
    X_3x3 = fr_utils.conv2d_bn(X,
                               layer='inception_3c_3x3',
                               cv1_out=128,
                               cv1_filter=(1, 1),
                               cv2_out=256,
                               cv2_filter=(3, 3),
                               cv2_strides=(2, 2),
                               padding=(1, 1))
    X_5x5 = fr_utils.conv2d_bn(X,
                               layer='inception_3c_5x5',
                               cv1_out=32,
                               cv1_filter=(1, 1),
                               cv2_out=64,
                               cv2_filter=(5, 5),
                               cv2_strides=(2, 2),
                               padding=(2, 2))
    X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)
    X_pool = ZeroPadding2D(padding=((0, 1), (0, 1)), data_format='channels_first')(X_pool)
    inception = concatenate([X_3x3, X_5x5, X_pool], axis=1)
    return inception
Example #6
Source File: fr_utils.py From Facial-Recognition-using-Facenet with MIT License | 6 votes |
def conv2d_bn(x, layer=None, cv1_out=None, cv1_filter=(1, 1), cv1_strides=(1, 1),
              cv2_out=None, cv2_filter=(3, 3), cv2_strides=(1, 1), padding=None):
    num = '' if cv2_out is None else '1'
    tensor = Conv2D(cv1_out, cv1_filter, strides=cv1_strides, data_format='channels_first',
                    name=layer + '_conv' + num)(x)
    tensor = BatchNormalization(axis=1, epsilon=0.00001, name=layer + '_bn' + num)(tensor)
    tensor = Activation('relu')(tensor)
    if padding is None:
        return tensor
    tensor = ZeroPadding2D(padding=padding, data_format='channels_first')(tensor)
    if cv2_out is None:
        return tensor
    tensor = Conv2D(cv2_out, cv2_filter, strides=cv2_strides, data_format='channels_first',
                    name=layer + '_conv' + '2')(tensor)
    tensor = BatchNormalization(axis=1, epsilon=0.00001, name=layer + '_bn' + '2')(tensor)
    tensor = Activation('relu')(tensor)
    return tensor
Example #7
Source File: Xception.py From Semantic-Segmentation with MIT License | 6 votes |
def _conv2d_same(x, filters, prefix, stride=1, kernel_size=3, rate=1):
    # Compute how much padding is needed and whether the height/width should shrink
    if stride == 1:
        return Conv2D(filters, (kernel_size, kernel_size),
                      strides=(stride, stride),
                      padding='same',
                      use_bias=False,
                      dilation_rate=(rate, rate),
                      name=prefix)(x)
    else:
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        x = ZeroPadding2D((pad_beg, pad_end))(x)
        return Conv2D(filters, (kernel_size, kernel_size),
                      strides=(stride, stride),
                      padding='valid',
                      use_bias=False,
                      dilation_rate=(rate, rate),
                      name=prefix)(x)
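The stride > 1 branch computes an explicit, possibly asymmetric pad from the dilated (effective) kernel size. A small numeric sketch of just that arithmetic (plain Python, no Keras required; the helper name is illustrative):

def same_padding(kernel_size, rate):
    # Effective kernel grows with dilation; split the total pad so that an odd
    # total puts the extra pixel at the end (bottom/right).
    kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    return pad_beg, pad_end

print(same_padding(3, 1))  # (1, 1)
print(same_padding(3, 2))  # (2, 2)  effective kernel 5
print(same_padding(4, 1))  # (1, 2)  even kernel -> asymmetric pad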
Example #8
Source File: model.py From raster-deep-learning with Apache License 2.0 | 5 votes |
def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
    """Build a ResNet graph.
    architecture: Can be resnet50 or resnet101
    stage5: Boolean. If False, stage5 of the network is not created
    train_bn: Boolean. Train or freeze Batch Norm layers
    """
    assert architecture in ["resnet50", "resnet101"]
    # Stage 1
    x = KL.ZeroPadding2D((3, 3))(input_image)
    x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
    x = BatchNorm(name='bn_conv1')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
    # Stage 2
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)
    C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)
    # Stage 3
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)
    C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)
    # Stage 4
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)
    block_count = {"resnet50": 5, "resnet101": 22}[architecture]
    for i in range(block_count):
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)
    C4 = x
    # Stage 5
    if stage5:
        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)
        C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)
    else:
        C5 = None
    return [C1, C2, C3, C4, C5]


############################################################
#  Proposal Layer
############################################################
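As an aside, the Stage 1 stem in the resnet_graph example above reproduces the standard ResNet entry flow. A hedged sketch (standalone Keras assumed; the 224x224 input is chosen only for illustration) of the resulting shapes:

import keras.layers as KL
from keras.models import Model

inp = KL.Input(shape=(224, 224, 3))
x = KL.ZeroPadding2D((3, 3))(inp)                                # 224 -> 230
x = KL.Conv2D(64, (7, 7), strides=(2, 2), use_bias=True)(x)      # (230 - 7)//2 + 1 = 112
x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)   # ceil(112/2) = 56
print(Model(inp, x).output_shape)                                # (None, 56, 56, 64)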
Example #9
Source File: hourglass.py From keras-centernet with MIT License | 5 votes |
def convolution(_x, k, out_dim, name, stride=1):
    padding = (k - 1) // 2
    _x = ZeroPadding2D(padding=padding, name=name + '.pad')(_x)
    _x = Conv2D(out_dim, k, strides=stride, use_bias=False, name=name + '.conv')(_x)
    _x = BatchNormalization(epsilon=1e-5, name=name + '.bn')(_x)
    _x = Activation('relu', name=name + '.relu')(_x)
    return _x
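Note that padding by (k - 1)//2 and then convolving with padding='valid' preserves the spatial size for any odd kernel at stride 1, i.e. it is an explicit form of 'same' padding. A small sketch (standalone Keras assumed; the sizes are illustrative):

from keras.layers import Input, ZeroPadding2D, Conv2D
from keras.models import Model

k = 7
inp = Input(shape=(128, 128, 256))
x = ZeroPadding2D(padding=(k - 1) // 2)(inp)        # 128 -> 134
x = Conv2D(256, k, strides=1, padding='valid')(x)   # 134 - 7 + 1 = 128
print(Model(inp, x).output_shape)                   # (None, 128, 128, 256)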
Example #10
Source File: model.py From keras-YOLOv3-mobilenet with MIT License | 5 votes |
def resblock_body(x, num_filters, num_blocks):
    '''A series of resblocks starting with a downsampling Convolution2D'''
    # Darknet uses left and top padding instead of 'same' mode
    x = ZeroPadding2D(((1, 0), (1, 0)))(x)
    x = DarknetConv2D_BN_Leaky(num_filters, (3, 3), strides=(2, 2))(x)
    for i in range(num_blocks):
        y = compose(
            DarknetConv2D_BN_Leaky(num_filters // 2, (1, 1)),
            DarknetConv2D_BN_Leaky(num_filters, (3, 3)))(x)
        x = Add()([x, y])
    return x
Example #11
Source File: models.py From pOSAL with MIT License | 5 votes |
def _conv2d_same(x, filters, prefix, stride=1, kernel_size=3, rate=1):
    """Implements right 'same' padding for even kernel sizes.
    Without this there is a 1 pixel drift when stride = 2.
    Args:
        x: input tensor
        filters: num of filters in pointwise convolution
        prefix: prefix before name
        stride: stride at depthwise conv
        kernel_size: kernel size for depthwise convolution
        rate: atrous rate for depthwise convolution
    """
    if stride == 1:
        return Conv2D(filters, (kernel_size, kernel_size),
                      strides=(stride, stride),
                      padding='same',
                      use_bias=False,
                      dilation_rate=(rate, rate),
                      name=prefix)(x)
    else:
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        x = ZeroPadding2D((pad_beg, pad_end))(x)
        return Conv2D(filters, (kernel_size, kernel_size),
                      strides=(stride, stride),
                      padding='valid',
                      use_bias=False,
                      dilation_rate=(rate, rate),
                      name=prefix)(x)
Example #12
Source File: models.py From pOSAL with MIT License | 5 votes |
def SepConv_BN(x, filters, prefix, stride=1, kernel_size=3, rate=1,
               depth_activation=False, epsilon=1e-3):
    """SepConv with BN between depthwise & pointwise. Optionally add activation after BN.
    Implements right "same" padding for even kernel sizes.
    Args:
        x: input tensor
        filters: num of filters in pointwise convolution
        prefix: prefix before name
        stride: stride at depthwise conv
        kernel_size: kernel size for depthwise convolution
        rate: atrous rate for depthwise convolution
        depth_activation: flag to use activation between depthwise & pointwise convs
        epsilon: epsilon to use in BN layer
    """
    if stride == 1:
        depth_padding = 'same'
    else:
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        x = ZeroPadding2D((pad_beg, pad_end))(x)
        depth_padding = 'valid'

    if not depth_activation:
        x = Activation('relu')(x)
    x = DepthwiseConv2D((kernel_size, kernel_size),
                        strides=(stride, stride),
                        dilation_rate=(rate, rate),
                        padding=depth_padding,
                        use_bias=False,
                        name=prefix + '_depthwise')(x)
    x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)
    x = Conv2D(filters, (1, 1),
               padding='same',
               use_bias=False,
               name=prefix + '_pointwise')(x)
    x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)
    return x
Example #13
Source File: inception_blocks_v2.py From Facial-Recognition-using-Facenet with MIT License | 5 votes |
def inception_block_1b(X):
    X_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name='inception_3b_3x3_conv1')(X)
    X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3b_3x3_bn1')(X_3x3)
    X_3x3 = Activation('relu')(X_3x3)
    X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)
    X_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3b_3x3_conv2')(X_3x3)
    X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3b_3x3_bn2')(X_3x3)
    X_3x3 = Activation('relu')(X_3x3)

    X_5x5 = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3b_5x5_conv1')(X)
    X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3b_5x5_bn1')(X_5x5)
    X_5x5 = Activation('relu')(X_5x5)
    X_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)
    X_5x5 = Conv2D(64, (5, 5), data_format='channels_first', name='inception_3b_5x5_conv2')(X_5x5)
    X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3b_5x5_bn2')(X_5x5)
    X_5x5 = Activation('relu')(X_5x5)

    X_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3), data_format='channels_first')(X)
    X_pool = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3b_pool_conv')(X_pool)
    X_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3b_pool_bn')(X_pool)
    X_pool = Activation('relu')(X_pool)
    X_pool = ZeroPadding2D(padding=(4, 4), data_format='channels_first')(X_pool)

    X_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3b_1x1_conv')(X)
    X_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3b_1x1_bn')(X_1x1)
    X_1x1 = Activation('relu')(X_1x1)

    inception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)
    return inception
Example #14
Source File: inception_blocks_v2.py From Facial-Recognition-using-Facenet with MIT License | 5 votes |
def inception_block_1a(X):
    """Implementation of an inception block"""
    X_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name='inception_3a_3x3_conv1')(X)
    X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn1')(X_3x3)
    X_3x3 = Activation('relu')(X_3x3)
    X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)
    X_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3a_3x3_conv2')(X_3x3)
    X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn2')(X_3x3)
    X_3x3 = Activation('relu')(X_3x3)

    X_5x5 = Conv2D(16, (1, 1), data_format='channels_first', name='inception_3a_5x5_conv1')(X)
    X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn1')(X_5x5)
    X_5x5 = Activation('relu')(X_5x5)
    X_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)
    X_5x5 = Conv2D(32, (5, 5), data_format='channels_first', name='inception_3a_5x5_conv2')(X_5x5)
    X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn2')(X_5x5)
    X_5x5 = Activation('relu')(X_5x5)

    X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)
    X_pool = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3a_pool_conv')(X_pool)
    X_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_pool_bn')(X_pool)
    X_pool = Activation('relu')(X_pool)
    X_pool = ZeroPadding2D(padding=((3, 4), (3, 4)), data_format='channels_first')(X_pool)

    X_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3a_1x1_conv')(X)
    X_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_1x1_bn')(X_1x1)
    X_1x1 = Activation('relu')(X_1x1)

    # CONCAT
    inception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)
    return inception
Example #15
Source File: model.py From deep-learning-explorer with Apache License 2.0 | 5 votes |
def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
    """Build a ResNet graph.
    architecture: Can be resnet50 or resnet101
    stage5: Boolean. If False, stage5 of the network is not created
    train_bn: Boolean. Train or freeze Batch Norm layers
    """
    assert architecture in ["resnet50", "resnet101"]
    # Stage 1
    x = KL.ZeroPadding2D((3, 3))(input_image)
    x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
    x = BatchNorm(name='bn_conv1')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
    # Stage 2
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)
    C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)
    # Stage 3
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)
    C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)
    # Stage 4
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)
    block_count = {"resnet50": 5, "resnet101": 22}[architecture]
    for i in range(block_count):
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)
    C4 = x
    # Stage 5
    if stage5:
        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)
        C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)
    else:
        C5 = None
    return [C1, C2, C3, C4, C5]


############################################################
#  Proposal Layer
############################################################
Example #16
Source File: models.py From ImageAI with MIT License | 5 votes |
def residual_block(input, channels, num_blocks):
    network = ZeroPadding2D(((1, 0), (1, 0)))(input)
    network = NetworkConv2D_BN_Leaky(input=network, channels=channels, kernel_size=(3, 3),
                                     strides=(2, 2), padding="valid")
    for blocks in range(num_blocks):
        network_1 = NetworkConv2D_BN_Leaky(input=network, channels=channels // 2, kernel_size=(1, 1))
        network_1 = NetworkConv2D_BN_Leaky(input=network_1, channels=channels, kernel_size=(3, 3))
        network = Add()([network, network_1])
    return network
Example #17
Source File: model.py From DeepTL-Lane-Change-Classification with MIT License | 5 votes |
def resnet_graph(input_image, architecture, stage5=False):
    assert architecture in ["resnet50", "resnet101"]
    # Stage 1
    x = KL.ZeroPadding2D((3, 3))(input_image)
    x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
    x = BatchNorm(axis=3, name='bn_conv1')(x)
    x = KL.Activation('relu')(x)
    C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
    # Stage 2
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
    # Stage 3
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
    # Stage 4
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    block_count = {"resnet50": 5, "resnet101": 22}[architecture]
    for i in range(block_count):
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i))
    C4 = x
    # Stage 5
    if stage5:
        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
        C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
    else:
        C5 = None
    return [C1, C2, C3, C4, C5]


############################################################
#  Proposal Layer
############################################################
Example #18
Source File: test_keras2_numeric.py From coremltools with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_zeropad_simple(self):
    input_shape = (48, 48, 3)
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=input_shape))

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Get the coreml model
    self._test_model(model)
Example #19
Source File: model.py From segmentation_training_pipeline with MIT License | 5 votes |
def _conv2d_same(x, filters, prefix, stride=1, kernel_size=3, rate=1):
    """Implements right 'same' padding for even kernel sizes.
    Without this there is a 1 pixel drift when stride = 2.
    Args:
        x: input tensor
        filters: num of filters in pointwise convolution
        prefix: prefix before name
        stride: stride at depthwise conv
        kernel_size: kernel size for depthwise convolution
        rate: atrous rate for depthwise convolution
    """
    if stride == 1:
        return Conv2D(filters, (kernel_size, kernel_size),
                      strides=(stride, stride),
                      padding='same',
                      use_bias=False,
                      dilation_rate=(rate, rate),
                      name=prefix)(x)
    else:
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        x = ZeroPadding2D((pad_beg, pad_end))(x)
        return Conv2D(filters, (kernel_size, kernel_size),
                      strides=(stride, stride),
                      padding='valid',
                      use_bias=False,
                      dilation_rate=(rate, rate),
                      name=prefix)(x)
Example #20
Source File: model.py From Mask-RCNN-Pedestrian-Detection with MIT License | 5 votes |
def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
    """Build a ResNet graph.
    architecture: Can be resnet50 or resnet101
    stage5: Boolean. If False, stage5 of the network is not created
    train_bn: Boolean. Train or freeze Batch Norm layers
    """
    assert architecture in ["resnet50", "resnet101"]
    # Stage 1
    x = KL.ZeroPadding2D((3, 3))(input_image)
    x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
    x = BatchNorm(name='bn_conv1')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
    # Stage 2
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)
    C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)
    # Stage 3
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)
    C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)
    # Stage 4
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)
    block_count = {"resnet50": 5, "resnet101": 22}[architecture]
    for i in range(block_count):
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)
    C4 = x
    # Stage 5
    if stage5:
        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)
        C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)
    else:
        C5 = None
    return [C1, C2, C3, C4, C5]


############################################################
#  Proposal Layer
############################################################
Example #21
Source File: model.py From Vehicle-Detection-and-Tracking-Usig-YOLO-and-Deep-Sort-with-Keras-and-Tensorflow with MIT License | 5 votes |
def resblock_body(x, num_filters, num_blocks):
    '''A series of resblocks starting with a downsampling Convolution2D'''
    # Darknet uses left and top padding instead of 'same' mode
    x = ZeroPadding2D(((1, 0), (1, 0)))(x)
    x = DarknetConv2D_BN_Leaky(num_filters, (3, 3), strides=(2, 2))(x)
    for i in range(num_blocks):
        y = compose(
            DarknetConv2D_BN_Leaky(num_filters // 2, (1, 1)),
            DarknetConv2D_BN_Leaky(num_filters, (3, 3)))(x)
        x = Add()([x, y])
    return x
Example #22
Source File: model.py From keras-yolov3-KF-objectTracking with MIT License | 5 votes |
def resblock_body(x, num_filters, num_blocks):
    '''A series of resblocks starting with a downsampling Convolution2D'''
    # Darknet uses left and top padding instead of 'same' mode
    x = ZeroPadding2D(((1, 0), (1, 0)))(x)
    x = DarknetConv2D_BN_Leaky(num_filters, (3, 3), strides=(2, 2))(x)
    for i in range(num_blocks):
        y = compose(
            DarknetConv2D_BN_Leaky(num_filters // 2, (1, 1)),
            DarknetConv2D_BN_Leaky(num_filters, (3, 3)))(x)
        x = Add()([x, y])
    return x
Example #23
Source File: StarGAN.py From StarGAN-Keras with MIT License | 5 votes |
def build_discriminator(self):
    """Discriminator network with PatchGAN."""
    inp_img = Input(shape=(self.image_size, self.image_size, 3))
    x = ZeroPadding2D(padding=1)(inp_img)
    x = Conv2D(filters=self.d_conv_dim, kernel_size=4, strides=2, padding='valid', use_bias=False)(x)
    x = LeakyReLU(0.01)(x)

    curr_dim = self.d_conv_dim
    for i in range(1, self.d_repeat_num):
        x = ZeroPadding2D(padding=1)(x)
        x = Conv2D(filters=curr_dim * 2, kernel_size=4, strides=2, padding='valid')(x)
        x = LeakyReLU(0.01)(x)
        curr_dim = curr_dim * 2

    kernel_size = int(self.image_size / np.power(2, self.d_repeat_num))

    out_src = ZeroPadding2D(padding=1)(x)
    out_src = Conv2D(filters=1, kernel_size=3, strides=1, padding='valid', use_bias=False)(out_src)

    out_cls = Conv2D(filters=self.c_dim, kernel_size=kernel_size, strides=1, padding='valid', use_bias=False)(x)
    out_cls = Reshape((self.c_dim,))(out_cls)

    return Model(inp_img, [out_src, out_cls])
Example #24
Source File: StarGAN.py From StarGAN-Keras with MIT License | 5 votes |
def ResidualBlock(self, inp, dim_out):
    """Residual Block with instance normalization."""
    x = ZeroPadding2D(padding=1)(inp)
    x = Conv2D(filters=dim_out, kernel_size=3, strides=1, padding='valid', use_bias=False)(x)
    x = InstanceNormalization(axis=-1)(x)
    x = ReLU()(x)
    x = ZeroPadding2D(padding=1)(x)
    x = Conv2D(filters=dim_out, kernel_size=3, strides=1, padding='valid', use_bias=False)(x)
    x = InstanceNormalization(axis=-1)(x)
    return Add()([inp, x])
Example #25
Source File: sgan.py From Keras-GAN with MIT License | 5 votes |
def build_discriminator(self):

    model = Sequential()

    model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
    model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(Flatten())

    model.summary()

    img = Input(shape=self.img_shape)
    features = model(img)

    valid = Dense(1, activation="sigmoid")(features)
    label = Dense(self.num_classes + 1, activation="softmax")(features)

    return Model(img, [valid, label])
Example #26
Source File: model.py From segmentation-unet-maskrcnn with MIT License | 5 votes |
def resnet_graph(input_image, architecture, stage5=False):
    assert architecture in ["resnet50", "resnet101"]
    # Stage 1
    x = KL.ZeroPadding2D((3, 3))(input_image)
    x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
    x = BatchNorm(axis=3, name='bn_conv1')(x)
    x = KL.Activation('relu')(x)
    C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
    # Stage 2
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
    # Stage 3
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
    # Stage 4
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    block_count = {"resnet50": 5, "resnet101": 22}[architecture]
    for i in range(block_count):
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i))
    C4 = x
    # Stage 5
    if stage5:
        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
        C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
    else:
        C5 = None
    return [C1, C2, C3, C4, C5]


############################################################
#  Proposal Layer
############################################################
Example #27
Source File: model.py From perceptron-benchmark with Apache License 2.0 | 5 votes |
def resblock_body(x, num_filters, num_blocks):
    '''A series of resblocks starting with a downsampling Convolution2D'''
    # Darknet uses left and top padding instead of 'same' mode
    x = ZeroPadding2D(((1, 0), (1, 0)))(x)
    x = DarknetConv2D_BN_Leaky(num_filters, (3, 3), strides=(2, 2))(x)
    for i in range(num_blocks):
        y = compose(
            DarknetConv2D_BN_Leaky(num_filters // 2, (1, 1)),
            DarknetConv2D_BN_Leaky(num_filters, (3, 3)))(x)
        x = Add()([x, y])
    return x
Example #28
Source File: darknet53.py From yolo3-keras with MIT License | 5 votes |
def resblock_body(x, num_filters, num_blocks):
    x = ZeroPadding2D(((1, 0), (1, 0)))(x)
    x = DarknetConv2D_BN_Leaky(num_filters, (3, 3), strides=(2, 2))(x)
    for i in range(num_blocks):
        y = DarknetConv2D_BN_Leaky(num_filters // 2, (1, 1))(x)
        y = DarknetConv2D_BN_Leaky(num_filters, (3, 3))(y)
        x = Add()([x, y])
    return x

#---------------------------------------------------#
#   The main body of darknet53
#---------------------------------------------------#
Example #29
Source File: model.py From keras-yolo3-master with MIT License | 5 votes |
def resblock_body(x, num_filters, num_blocks):
    '''A series of resblocks starting with a downsampling Convolution2D'''
    # Darknet uses left and top padding instead of 'same' mode
    x = ZeroPadding2D(((1, 0), (1, 0)))(x)
    x = DarknetConv2D_BN_Leaky(num_filters, (3, 3), strides=(2, 2))(x)
    for i in range(num_blocks):
        y = compose(
            DarknetConv2D_BN_Leaky(num_filters // 2, (1, 1)),
            DarknetConv2D_BN_Leaky(num_filters, (3, 3)))(x)
        x = Add()([x, y])
    return x
Example #30
Source File: layers_builder.py From PSPNet-Keras-tensorflow with MIT License | 5 votes |
def residual_conv(prev, level, pad=1, lvl=1, sub_lvl=1, modify_stride=False):
    lvl = str(lvl)
    sub_lvl = str(sub_lvl)
    names = ["conv" + lvl + "_" + sub_lvl + "_1x1_reduce",
             "conv" + lvl + "_" + sub_lvl + "_1x1_reduce_bn",
             "conv" + lvl + "_" + sub_lvl + "_3x3",
             "conv" + lvl + "_" + sub_lvl + "_3x3_bn",
             "conv" + lvl + "_" + sub_lvl + "_1x1_increase",
             "conv" + lvl + "_" + sub_lvl + "_1x1_increase_bn"]
    if modify_stride is False:
        prev = Conv2D(64 * level, (1, 1), strides=(1, 1), name=names[0], use_bias=False)(prev)
    elif modify_stride is True:
        prev = Conv2D(64 * level, (1, 1), strides=(2, 2), name=names[0], use_bias=False)(prev)

    prev = BN(name=names[1])(prev)
    prev = Activation('relu')(prev)

    prev = ZeroPadding2D(padding=(pad, pad))(prev)
    prev = Conv2D(64 * level, (3, 3), strides=(1, 1), dilation_rate=pad, name=names[2], use_bias=False)(prev)

    prev = BN(name=names[3])(prev)
    prev = Activation('relu')(prev)
    prev = Conv2D(256 * level, (1, 1), strides=(1, 1), name=names[4], use_bias=False)(prev)
    prev = BN(name=names[5])(prev)
    return prev
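Here the ZeroPadding2D amount matches the dilation rate of the 3x3 convolution, so the dilated conv preserves the spatial size. A hedged sketch (standalone Keras assumed; the 60x60x128 shape is illustrative):

from keras.layers import Input, ZeroPadding2D, Conv2D
from keras.models import Model

pad = 2
inp = Input(shape=(60, 60, 128))
x = ZeroPadding2D(padding=(pad, pad))(inp)                       # 60 -> 64
x = Conv2D(128, (3, 3), dilation_rate=pad, padding='valid')(x)   # 64 - (2*pad + 1) + 1 = 60
print(Model(inp, x).output_shape)                                # (None, 60, 60, 128)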