Python keras.layers.convolutional.ZeroPadding2D() Examples
The following are 30 code examples of keras.layers.convolutional.ZeroPadding2D().
You can vote up the ones you like or vote down the ones you don't like,
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module
keras.layers.convolutional, or try the search function.
Example #1
Source File: example.py From residual_block_keras with GNU General Public License v3.0 | 7 votes |
def get_residual_model(is_mnist=True, img_channels=1, img_rows=28, img_cols=28):
    """Assemble the full classifier: entry conv -> residual stage -> softmax head."""
    model = keras.models.Sequential()
    first_layer_channel = 128
    if is_mnist:
        # Pad (28, 28) MNIST images up to (32, 32) before the first convolution.
        model.add(ZeroPadding2D((2, 2), input_shape=(img_channels, img_rows, img_cols)))
        model.add(Convolution2D(first_layer_channel, 3, 3, border_mode='same'))
    else:
        model.add(Convolution2D(first_layer_channel, 3, 3, border_mode='same',
                                input_shape=(img_channels, img_rows, img_cols)))
    model.add(Activation('relu'))
    # Residual convolutional stage, built as a separate Sequential container.
    model.add(design_for_residual_blocks(num_channel_input=first_layer_channel))
    model.add(BatchNormalization(axis=1))
    model.add(Activation('relu'))
    # Classification head.
    model.add(Flatten())
    model.add(Dense(nb_classes))  # NOTE(review): nb_classes is a module-level global
    model.add(Activation('softmax'))
    return model
Example #2
Source File: wresnet.py From Theano-MPI with Educational Community License v2.0 | 7 votes |
def build_model(self):
    """Build a wide residual network for 32x32 inputs and store it on self.model.

    Relies on module-level globals (img_channels, img_rows, img_cols, n, k,
    nb_classes) and the sibling bottleneck() helper.
    """
    img_input = Input(shape=(img_channels, img_rows, img_cols))
    # Entry convolution (spatial size: 32x32).
    net = ZeroPadding2D((1, 1))(img_input)
    net = Convolution2D(16, nb_row=3, nb_col=3)(net)
    # Three bottleneck stages: 32x32 -> 16x16 -> 8x8.
    net = bottleneck(net, n, 16, 16 * k, dropout=0.3, subsample=(1, 1))
    net = bottleneck(net, n, 16 * k, 32 * k, dropout=0.3, subsample=(2, 2))
    net = bottleneck(net, n, 32 * k, 64 * k, dropout=0.3, subsample=(2, 2))
    # Final BN/ReLU, global 8x8 average pooling, softmax classifier.
    net = BatchNormalization(mode=0, axis=1)(net)
    net = Activation('relu')(net)
    net = AveragePooling2D((8, 8), strides=(1, 1))(net)
    net = Flatten()(net)
    preds = Dense(nb_classes, activation='softmax')(net)
    self.model = Model(input=img_input, output=preds)
    self.keras_get_params()
Example #3
Source File: densenet.py From chinese_ocr with MIT License | 6 votes |
def transition_block(input, nb_filter, dropout_rate=None, pooltype=1, weight_decay=1e-4):
    """DenseNet transition: BN -> ReLU -> 1x1 conv (-> dropout) -> pooling.

    pooltype 1 zero-pads the width then pools with stride (2, 1); 2 halves
    both spatial axes; 3 pools with stride (2, 1) without padding.
    Returns (tensor, nb_filter).
    """
    out = BatchNormalization(axis=-1, epsilon=1.1e-5)(input)
    out = Activation('relu')(out)
    out = Conv2D(nb_filter, (1, 1), kernel_initializer='he_normal', padding='same',
                 use_bias=False, kernel_regularizer=l2(weight_decay))(out)
    if dropout_rate:
        out = Dropout(dropout_rate)(out)
    if pooltype == 2:
        out = AveragePooling2D((2, 2), strides=(2, 2))(out)
    elif pooltype == 1:
        out = ZeroPadding2D(padding=(0, 1))(out)
        out = AveragePooling2D((2, 2), strides=(2, 1))(out)
    elif pooltype == 3:
        out = AveragePooling2D((2, 2), strides=(2, 1))(out)
    return out, nb_filter
Example #4
Source File: densenet.py From chinese_ocr with MIT License | 6 votes |
def transition_block(input, nb_filter, dropout_rate=None, pooltype=1, weight_decay=1e-4):
    """Transition layer between dense blocks.

    Applies BN, ReLU and a weight-decayed 1x1 convolution, optionally dropout,
    then average-pools according to pooltype (1: pad width + stride (2, 1);
    2: stride (2, 2); 3: stride (2, 1)). Returns (tensor, nb_filter).
    """
    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(input)
    x = Activation('relu')(x)
    x = Conv2D(nb_filter, (1, 1),
               kernel_initializer='he_normal',
               padding='same',
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    if pooltype == 2:
        x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    elif pooltype == 1:
        x = ZeroPadding2D(padding=(0, 1))(x)
        x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    elif pooltype == 3:
        x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    return x, nb_filter
Example #5
Source File: densenet.py From chinese_ocr with Apache License 2.0 | 6 votes |
def transition_block(input, nb_filter, dropout_rate=None, pooltype=1, weight_decay=1e-4):
    """DenseNet transition block: BN/ReLU/1x1-conv, optional dropout, pooling.

    Pooling by pooltype -- 1: zero-pad width then stride-(2, 1) average pool;
    2: stride-(2, 2) average pool; 3: stride-(2, 1) average pool.
    Returns the transformed tensor together with nb_filter.
    """
    out = BatchNormalization(axis=-1, epsilon=1.1e-5)(input)
    out = Activation('relu')(out)
    out = Conv2D(nb_filter, (1, 1), kernel_initializer='he_normal', padding='same',
                 use_bias=False, kernel_regularizer=l2(weight_decay))(out)
    if dropout_rate:
        out = Dropout(dropout_rate)(out)
    if pooltype == 2:
        out = AveragePooling2D((2, 2), strides=(2, 2))(out)
    elif pooltype == 1:
        out = ZeroPadding2D(padding=(0, 1))(out)
        out = AveragePooling2D((2, 2), strides=(2, 1))(out)
    elif pooltype == 3:
        out = AveragePooling2D((2, 2), strides=(2, 1))(out)
    return out, nb_filter
Example #6
Source File: densenet.py From chinese_ocr with Apache License 2.0 | 6 votes |
def transition_block(input, nb_filter, dropout_rate=None, pooltype=1, weight_decay=1e-4):
    """Compress features between dense blocks (BN -> ReLU -> 1x1 conv -> pool).

    pooltype selects the pooling: 2 halves both axes; 1 pads the width and
    pools with stride (2, 1); 3 pools with stride (2, 1) without padding.
    Returns (tensor, nb_filter).
    """
    features = BatchNormalization(axis=-1, epsilon=1.1e-5)(input)
    features = Activation('relu')(features)
    features = Conv2D(nb_filter, (1, 1), kernel_initializer='he_normal',
                      padding='same', use_bias=False,
                      kernel_regularizer=l2(weight_decay))(features)
    if dropout_rate:
        features = Dropout(dropout_rate)(features)
    if pooltype == 2:
        features = AveragePooling2D((2, 2), strides=(2, 2))(features)
    elif pooltype == 1:
        features = ZeroPadding2D(padding=(0, 1))(features)
        features = AveragePooling2D((2, 2), strides=(2, 1))(features)
    elif pooltype == 3:
        features = AveragePooling2D((2, 2), strides=(2, 1))(features)
    return features, nb_filter
Example #7
Source File: densenet.py From deep_learning with MIT License | 6 votes |
def transition_block(input, nb_filter, dropout_rate=None, pooltype=1, weight_decay=1e-4):
    """Transition block: normalize, activate, project with a 1x1 conv, pool.

    pooltype 1 pads the width and average-pools with stride (2, 1);
    pooltype 2 average-pools with stride (2, 2);
    pooltype 3 average-pools with stride (2, 1). Returns (tensor, nb_filter).
    """
    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(input)
    x = Activation('relu')(x)
    x = Conv2D(nb_filter, (1, 1), kernel_initializer='he_normal', padding='same',
               use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    if pooltype == 2:
        x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    elif pooltype == 1:
        x = ZeroPadding2D(padding=(0, 1))(x)
        x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    elif pooltype == 3:
        x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    return x, nb_filter
Example #8
Source File: visual_model_zoo.py From visual_turing_test-tutorial with MIT License | 5 votes |
def create(self):
    """Build a minimal single-conv visual encoder ending in a 4096-d ReLU layer.

    Sets self._model_output_dim as a side effect and optionally loads weights
    from self._weights_path.
    """
    net = Sequential()
    net.add(ZeroPadding2D((1,1), input_shape=self._visual_dim))
    net.add(Convolution2D(64, 3, 3, activation='relu'))
    net.add(Flatten())
    self._model_output_dim = 4096
    net.add(Dense(self._model_output_dim, activation='relu'))
    net.add(Dropout(0.5))
    if self._weights_path:
        net.load_weights(self._weights_path)
    return net
Example #9
Source File: MaskRCNN.py From PyTorch-Luna16 with Apache License 2.0 | 5 votes |
def resnet_graph(input_image, architecture, stage5=False):
    """Build the ResNet backbone and return the stage outputs [C1..C5].

    architecture must be "resnet50" or "resnet101" (they differ only in the
    number of stage-4 identity blocks). C5 is None unless stage5 is True.
    """
    assert architecture in ["resnet50", "resnet101"]
    # Stage 1
    x = ZeroPadding2D((3, 3))(input_image)
    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
    x = BatchNorm(axis=3, name='bn_conv1')(x)
    x = Activation('relu')(x)
    C1 = x = MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
    # Stage 2
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
    # Stage 3
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    for block_id in ('b', 'c'):
        x = identity_block(x, 3, [128, 128, 512], stage=3, block=block_id)
    C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
    # Stage 4: 5 identity blocks for resnet50, 22 for resnet101; block ids
    # are consecutive letters starting at 'b' (chr(98)).
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    block_count = {"resnet50": 5, "resnet101": 22}[architecture]
    for i in range(block_count):
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i))
    C4 = x
    # Stage 5 (optional)
    if stage5:
        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
        C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
    else:
        C5 = None
    return [C1, C2, C3, C4, C5]


############################################################
#  Proposal Layer
############################################################
Example #10
Source File: fish_detection_example.py From Deep-Learning-By-Example with MIT License | 5 votes |
def create_cnn_model_arch():
    """Build and compile a small VGG-style CNN for 8-way fish classification."""
    pool_size = 2      # 2x2 pooling throughout
    conv_depth_1 = 32  # kernels per conv layer in the first stack
    conv_depth_2 = 64  # kernels per conv layer in the second stack
    kernel_size = 3    # 3x3 kernels throughout
    drop_prob = 0.5    # dropout probability in the FC layers
    hidden_size = 32   # neurons per fully-connected layer
    num_classes = 8    # there are 8 fish types

    cnn_model = Sequential()
    # Conv [32] -> Conv [32] -> Pool
    cnn_model.add(ZeroPadding2D((1, 1), input_shape=(3, 32, 32), dim_ordering='th'))
    cnn_model.add(Convolution2D(conv_depth_1, kernel_size, kernel_size,
                                activation='relu', dim_ordering='th'))
    cnn_model.add(ZeroPadding2D((1, 1), dim_ordering='th'))
    cnn_model.add(Convolution2D(conv_depth_1, kernel_size, kernel_size,
                                activation='relu', dim_ordering='th'))
    cnn_model.add(MaxPooling2D(pool_size=(pool_size, pool_size), strides=(2, 2),
                               dim_ordering='th'))
    # Conv [64] -> Conv [64] -> Pool
    cnn_model.add(ZeroPadding2D((1, 1), dim_ordering='th'))
    cnn_model.add(Convolution2D(conv_depth_2, kernel_size, kernel_size,
                                activation='relu', dim_ordering='th'))
    cnn_model.add(ZeroPadding2D((1, 1), dim_ordering='th'))
    cnn_model.add(Convolution2D(conv_depth_2, kernel_size, kernel_size,
                                activation='relu', dim_ordering='th'))
    cnn_model.add(MaxPooling2D(pool_size=(pool_size, pool_size), strides=(2, 2),
                               dim_ordering='th'))
    # Flatten to 1D, FC + ReLU with dropout (twice), then softmax output.
    cnn_model.add(Flatten())
    cnn_model.add(Dense(hidden_size, activation='relu'))
    cnn_model.add(Dropout(drop_prob))
    cnn_model.add(Dense(hidden_size, activation='relu'))
    cnn_model.add(Dropout(drop_prob))
    cnn_model.add(Dense(num_classes, activation='softmax'))
    # SGD with Nesterov momentum; cross-entropy loss.
    stochastic_gradient_descent = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
    cnn_model.compile(optimizer=stochastic_gradient_descent,
                      loss='categorical_crossentropy')
    return cnn_model
Example #11
Source File: pool_net.py From mltools with MIT License | 5 votes |
def _small_model(self):
    '''
    Alternative model architecture with fewer layers for computationally
    expensive training datasets.
    '''
    print('Compiling Small Net...')  # fix: print() works on both Py2 and Py3
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=self.input_shape))
    model.add(Convolution2D(64, self.kernel_size, self.kernel_size,
                            activation='relu', input_shape=self.input_shape))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, self.kernel_size, self.kernel_size, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, self.kernel_size, self.kernel_size, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, self.kernel_size, self.kernel_size, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(Flatten())
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(self.nb_classes, activation='softmax'))
    # BUG FIX: an SGD instance configured with self.lr/decay/momentum was
    # constructed but the *string* 'sgd' was passed to compile(), silently
    # training with Keras defaults. Pass the instance so the configured
    # hyperparameters actually apply.
    sgd = SGD(lr=self.lr, decay=0.01, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy')
    return model
Example #12
Source File: simple_cnn.py From OCR-Handwritten-Text with Apache License 2.0 | 5 votes |
def ConvBlock(layers, model, filters):
    """Append a VGG-style block to `model`: `layers` repetitions of
    (zero-pad + 3x3 ReLU conv), followed by 2x2 max-pooling and dropout.

    NOTE(review): indentation was lost in the source listing; the pad/conv
    pair is assumed to repeat inside the loop with pooling and dropout applied
    once afterwards, matching the standard VGG ConvBlock pattern -- confirm
    against the upstream repository.
    """
    for _ in range(layers):
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(filters, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))
    model.add(Dropout(0.25))
Example #13
Source File: alexnet.py From keras-deepcv with MIT License | 4 votes |
def alexnet_model(img_shape=(224, 224, 3), n_classes=10, l2_reg=0., weights=None):
    """Build an AlexNet-style classifier; optionally load pretrained weights.

    Only the first conv layer carries L2 regularization (via l2_reg); every
    layer is followed by batch normalization before its activation.
    """
    alexnet = Sequential()
    # Layer 1: 11x11 conv with L2 regularization.
    alexnet.add(Conv2D(96, (11, 11), input_shape=img_shape,
                       padding='same', kernel_regularizer=l2(l2_reg)))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    alexnet.add(MaxPooling2D(pool_size=(2, 2)))
    # Layer 2: 5x5 conv.
    alexnet.add(Conv2D(256, (5, 5), padding='same'))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    alexnet.add(MaxPooling2D(pool_size=(2, 2)))
    # Layer 3: pad + 3x3 conv + pool.
    alexnet.add(ZeroPadding2D((1, 1)))
    alexnet.add(Conv2D(512, (3, 3), padding='same'))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    alexnet.add(MaxPooling2D(pool_size=(2, 2)))
    # Layer 4: pad + 3x3 conv (no pooling).
    alexnet.add(ZeroPadding2D((1, 1)))
    alexnet.add(Conv2D(1024, (3, 3), padding='same'))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    # Layer 5: pad + 3x3 conv + pool.
    alexnet.add(ZeroPadding2D((1, 1)))
    alexnet.add(Conv2D(1024, (3, 3), padding='same'))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    alexnet.add(MaxPooling2D(pool_size=(2, 2)))
    # Layer 6: first fully-connected layer.
    alexnet.add(Flatten())
    alexnet.add(Dense(3072))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    alexnet.add(Dropout(0.5))
    # Layer 7: second fully-connected layer.
    alexnet.add(Dense(4096))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('relu'))
    alexnet.add(Dropout(0.5))
    # Layer 8: classifier head.
    alexnet.add(Dense(n_classes))
    alexnet.add(BatchNormalization())
    alexnet.add(Activation('softmax'))
    if weights is not None:
        alexnet.load_weights(weights)
    return alexnet
Example #14
Source File: visual_model_zoo.py From visual_turing_test-tutorial with MIT License | 4 votes |
def create(self):
    """Build a VGG-16-style visual encoder ending in a 4096-d ReLU layer.

    The conv stacks are generated from a config table; the resulting layer
    sequence is identical to writing the adds out by hand. Sets
    self._model_output_dim and optionally loads weights.
    """
    # (filters, number of conv layers) per stack; each stack ends in a pool.
    vgg_cfg = [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)]
    model = Sequential()
    first_layer = True
    for nb_filters, nb_convs in vgg_cfg:
        for _ in range(nb_convs):
            if first_layer:
                # Only the very first layer declares the input shape.
                model.add(ZeroPadding2D((1,1), input_shape=self._visual_dim))
                first_layer = False
            else:
                model.add(ZeroPadding2D((1,1)))
            model.add(Convolution2D(nb_filters, 3, 3, activation='relu'))
        model.add(MaxPooling2D((2,2), strides=(2,2)))
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    self._model_output_dim = 4096
    model.add(Dense(self._model_output_dim, activation='relu'))
    model.add(Dropout(0.5))
    #model.add(Dense(1000, activation='softmax'))
    if self._weights_path:
        model.load_weights(self._weights_path)
    return model
Example #15
Source File: example.py From residual_block_keras with GNU General Public License v3.0 | 4 votes |
def design_for_residual_blocks(num_channel_input=1):
    """Stack three residual "big blocks", each ending in padding + subsampling.

    Returns a Sequential *container* of residual blocks (not a full model);
    spatial size shrinks by the pool size per stage while channels grow per
    the n_features_next table.
    """
    model = Sequential()  # it's a CONTAINER, not MODEL
    num_big_blocks = 3
    image_patch_sizes = [[3, 3]] * num_big_blocks
    pool_sizes = [(2, 2)] * num_big_blocks
    n_features = [128, 256, 512, 512, 1024]
    n_features_next = [256, 512, 512, 512, 1024]
    height_input = 32
    width_input = 32
    for conv_idx in range(num_big_blocks):
        n_feat_here = n_features[conv_idx]
        # Opening residual block of this stage (dead "extra block" branch
        # guarded by `if False:` in the original has been dropped).
        model.add(residual_blocks.building_residual_block(
            (num_channel_input, height_input, width_input),
            n_feat_here,
            kernel_sizes=image_patch_sizes[conv_idx]))
        # Closing block: zero-pad, then subsample while growing the channels.
        pad_height = compute_padding_length(height_input, pool_sizes[conv_idx][0],
                                            image_patch_sizes[conv_idx][0])
        pad_width = compute_padding_length(width_input, pool_sizes[conv_idx][1],
                                           image_patch_sizes[conv_idx][1])
        model.add(ZeroPadding2D(padding=(pad_height, pad_width)))
        height_input += 2 * pad_height
        width_input += 2 * pad_width
        n_feat_next = n_features_next[conv_idx]
        model.add(residual_blocks.building_residual_block(
            (n_feat_here, height_input, width_input),
            n_feat_next,
            kernel_sizes=image_patch_sizes[conv_idx],
            is_subsample=True,
            subsample=pool_sizes[conv_idx]))
        height_input, width_input = model.output_shape[2:]
        num_channel_input = n_feat_next
    # Global average pooling down to (1, 1) at the end.
    print('Average pooling, from (%d,%d) to (1,1)' % (height_input, width_input))
    model.add(AveragePooling2D(pool_size=(height_input, width_input)))
    return model
Example #16
Source File: BMM_attention_model.py From BMM_attentional_CNN with GNU General Public License v3.0 | 4 votes |
def minst_attention(inc_noise=False, attention=True):
    """Build the MNIST attention CNN.

    Layer objects are created once and deliberately reused (weight sharing)
    for both the first pass and the attended second pass.
    """
    # --- shared layers ---
    inputs = Input(shape=(1, image_size, image_size), name='input')
    conv_1a = Convolution2D(32, 3, 3, activation='relu', name='conv_1')
    maxp_1a = MaxPooling2D((3, 3), strides=(2, 2), name='convmax_1')
    norm_1a = crosschannelnormalization(name="convpool_1")
    zero_1a = ZeroPadding2D((2, 2), name='convzero_1')
    conv_2a = Convolution2D(32, 3, 3, activation='relu', name='conv_2')
    maxp_2a = MaxPooling2D((3, 3), strides=(2, 2), name='convmax_2')
    norm_2a = crosschannelnormalization(name="convpool_2")
    zero_2a = ZeroPadding2D((2, 2), name='convzero_2')
    dense_1a = Lambda(global_average_pooling,
                      output_shape=global_average_pooling_shape, name='dense_1')
    dense_2a = Dense(10, activation='softmax', init='uniform', name='dense_2')
    # --- first pass ---
    if inc_noise:
        inputs_noise = noise.GaussianNoise(2.5)(inputs)
        input_pad = ZeroPadding2D((1, 1), input_shape=(1, image_size, image_size),
                                  name='input_pad')(inputs_noise)
    else:
        input_pad = ZeroPadding2D((1, 1), input_shape=(1, image_size, image_size),
                                  name='input_pad')(inputs)
    conv_1 = conv_1a(input_pad)
    conv_1 = maxp_1a(conv_1)
    conv_1 = norm_1a(conv_1)
    conv_1 = zero_1a(conv_1)
    conv_2_x = conv_2a(conv_1)
    conv_2 = maxp_2a(conv_2_x)
    conv_2 = norm_2a(conv_2)
    conv_2 = zero_2a(conv_2)
    conv_2 = Dropout(0.5)(conv_2)
    dense_1 = dense_1a(conv_2)
    dense_2 = dense_2a(dense_1)
    # --- attention map over the pre-pooling conv_2 activations ---
    conv_shape1 = Lambda(change_shape1, output_shape=(32,), name='chg_shape')(conv_2_x)
    find_att = dense_2a(conv_shape1)
    if attention:
        find_att = Lambda(attention_control, output_shape=att_shape,
                          name='att_con')([find_att, dense_2])
    else:
        find_att = Lambda(no_attention_control, output_shape=att_shape,
                          name='att_con')([find_att, dense_2])
    # --- second (attended) pass through the shared layers ---
    zero_3a = ZeroPadding2D((1, 1), name='convzero_3')(find_att)
    apply_attention = Merge(mode='mul', name='attend')([zero_3a, conv_1])
    conv_3 = conv_2a(apply_attention)
    conv_3 = maxp_2a(conv_3)
    conv_3 = norm_2a(conv_3)
    conv_3 = zero_2a(conv_3)
    dense_3 = dense_1a(conv_3)
    dense_4 = dense_2a(dense_3)
    return Model(input=inputs, output=dense_4)
Example #17
Source File: convnets.py From convnets-keras with MIT License | 4 votes |
def VGG_16(weights_path=None, heatmap=False):
    """VGG-16 with named conv layers; heatmap mode makes the head fully
    convolutional so the network accepts arbitrary spatial sizes.
    """
    model = Sequential()
    if heatmap:
        model.add(ZeroPadding2D((1, 1), input_shape=(3, None, None)))
    else:
        model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
    # (stack index, filters, conv layers per stack); layer names follow the
    # convN_M convention and match the hand-written originals exactly.
    conv_stacks = [(1, 64, 2), (2, 128, 2), (3, 256, 3), (4, 512, 3), (5, 512, 3)]
    first_conv = True
    for stack, filters, reps in conv_stacks:
        for layer in range(1, reps + 1):
            if not first_conv:
                model.add(ZeroPadding2D((1, 1)))
            first_conv = False
            model.add(Convolution2D(filters, 3, 3, activation='relu',
                                    name='conv%d_%d' % (stack, layer)))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    if heatmap:
        # Convolutional equivalents of the dense head.
        model.add(Convolution2D(4096, 7, 7, activation='relu', name='dense_1'))
        model.add(Convolution2D(4096, 1, 1, activation='relu', name='dense_2'))
        model.add(Convolution2D(1000, 1, 1, name='dense_3'))
        model.add(Softmax4D(axis=1, name='softmax'))
    else:
        model.add(Flatten(name='flatten'))
        model.add(Dense(4096, activation='relu', name='dense_1'))
        model.add(Dropout(0.5))
        model.add(Dense(4096, activation='relu', name='dense_2'))
        model.add(Dropout(0.5))
        model.add(Dense(1000, name='dense_3'))
        model.add(Activation('softmax', name='softmax'))
    if weights_path:
        model.load_weights(weights_path)
    return model
Example #18
Source File: wresnet.py From Theano-MPI with Educational Community License v2.0 | 4 votes |
def wide_basic(incoming, nb_in_filters, nb_out_filters, dropout=None, subsample=(2, 2)):
    """Wide-ResNet basic unit: two 3x3 convs merged with a residual shortcut.

    When channel counts match, the raw input is the shortcut; otherwise a
    shared BN+ReLU feeds both paths and a 1x1 conv projects the shortcut.
    """
    nb_bottleneck_filter = nb_out_filters
    if nb_in_filters == nb_out_filters:
        # First conv3x3 (possibly subsampled).
        y = BatchNormalization(mode=0, axis=1)(incoming)
        y = Activation('relu')(y)
        y = ZeroPadding2D((1, 1))(y)
        y = Convolution2D(nb_bottleneck_filter, nb_row=3, nb_col=3,
                          subsample=subsample, init='he_normal',
                          border_mode='valid')(y)
        # Second conv3x3.
        y = BatchNormalization(mode=0, axis=1)(y)
        y = Activation('relu')(y)
        if dropout is not None:
            y = Dropout(dropout)(y)
        y = ZeroPadding2D((1, 1))(y)
        y = Convolution2D(nb_bottleneck_filter, nb_row=3, nb_col=3,
                          subsample=(1, 1), init='he_normal',
                          border_mode='valid')(y)
        return merge([incoming, y], mode='sum')
    # Residual unit that increases dimensions: common BN + ReLU.
    shortcut = BatchNormalization(mode=0, axis=1)(incoming)
    shortcut = Activation('relu')(shortcut)
    # First conv3x3 (possibly subsampled).
    y = ZeroPadding2D((1, 1))(shortcut)
    y = Convolution2D(nb_bottleneck_filter, nb_row=3, nb_col=3,
                      subsample=subsample, init='he_normal',
                      border_mode='valid')(y)
    # Second conv3x3.
    y = BatchNormalization(mode=0, axis=1)(y)
    y = Activation('relu')(y)
    if dropout is not None:
        y = Dropout(dropout)(y)
    y = ZeroPadding2D((1, 1))(y)
    y = Convolution2D(nb_out_filters, nb_row=3, nb_col=3,
                      subsample=(1, 1), init='he_normal',
                      border_mode='valid')(y)
    # 1x1 projection so the shortcut matches the new channel count.
    shortcut = Convolution2D(nb_out_filters, nb_row=1, nb_col=1,
                             subsample=subsample, init='he_normal',
                             border_mode='same')(shortcut)
    return merge([shortcut, y], mode='sum')
Example #19
Source File: VGG_deconv.py From DeepLearningImplementations with MIT License | 4 votes |
def VGG_16(weights_path=None):
    """VGG Model Keras specification.

    args: weights_path (str) trained weights file path
    returns model (Keras model)

    The five conv stacks are emitted from a config table; the layer sequence
    is identical to writing each add() out by hand.
    """
    stack_cfg = [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)]
    model = Sequential()
    is_first = True
    for nb_filters, nb_layers in stack_cfg:
        for _ in range(nb_layers):
            if is_first:
                # Only the very first layer declares the input shape.
                model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
                is_first = False
            else:
                model.add(ZeroPadding2D((1, 1)))
            model.add(Conv2D(nb_filters, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=2))
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))
    if weights_path:
        print("Loading weights...")
        model.load_weights(weights_path)
    return model
Example #20
Source File: VGG.py From essence with GNU General Public License v3.0 | 4 votes |
def VGG_16(weights_path = None):
    """VGG-16 truncated to its second 4096-d FC layer for feature extraction."""
    cfg = [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)]
    model = Sequential()
    first_layer = True
    for nb_filters, nb_convs in cfg:
        for _ in range(nb_convs):
            if first_layer:
                model.add(ZeroPadding2D((1,1), input_shape=(3,224,224)))
                first_layer = False
            else:
                model.add(ZeroPadding2D((1,1)))
            model.add(Convolution2D(nb_filters, 3, 3, activation='relu'))
        model.add(MaxPooling2D((2,2), strides=(2,2)))
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))
    if weights_path:
        model.load_weights(weights_path)
    #Remove the last two layers to get the 4096D activations
    # NOTE(review): the source listing lost indentation; the two pop() calls
    # are rendered unconditionally here -- confirm upstream that they were not
    # nested under `if weights_path:`.
    model = pop(model)
    model = pop(model)
    return model
Example #21
Source File: extract.py From essence with GNU General Public License v3.0 | 4 votes |
def VGG_16(weights_path = None):
    """Standard VGG-16 classifier (1000-way softmax); optionally load weights."""
    blocks = [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)]
    model = Sequential()
    need_input_shape = True
    for filters, depth in blocks:
        for _ in range(depth):
            if need_input_shape:
                # First layer carries the (channels, height, width) input spec.
                model.add(ZeroPadding2D((1,1), input_shape=(3,224,224)))
                need_input_shape = False
            else:
                model.add(ZeroPadding2D((1,1)))
            model.add(Convolution2D(filters, 3, 3, activation='relu'))
        model.add(MaxPooling2D((2,2), strides=(2,2)))
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))
    if weights_path:
        model.load_weights(weights_path)
    return model
Example #22
Source File: encoder.py From enet-keras with MIT License | 4 votes |
def bottleneck(inp, output, internal_scale=4, asymmetric=0, dilated=0, downsample=False, dropout_rate=0.1):
    """ENet encoder bottleneck (unpooling variant).

    Main branch: 1x1 projection (2x2 stride-2 conv when downsampling), a core
    conv (plain, asymmetric 1xN/Nx1 pair, or dilated), then 1x1 expansion with
    spatial dropout. Shortcut: identity, or argmax max-pooling plus channel
    zero-padding when downsampling. Returns (tensor, pool_indices) when
    downsampling, else the tensor alone.
    """
    internal = output // internal_scale
    encoder = inp
    # 1x1 projection; the first projection becomes a 2x2 stride-2 conv when
    # downsampling.
    input_stride = 2 if downsample else 1
    encoder = Conv2D(internal, (input_stride, input_stride),
                     strides=(input_stride, input_stride),
                     use_bias=False)(encoder)
    # enet_unpooling uses BN momentum of 0.1; the keras default is 0.99.
    encoder = BatchNormalization(momentum=0.1)(encoder)
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    # Core convolution.
    if not asymmetric and not dilated:
        encoder = Conv2D(internal, (3, 3), padding='same')(encoder)
    elif asymmetric:
        encoder = Conv2D(internal, (1, asymmetric), padding='same',
                         use_bias=False)(encoder)
        encoder = Conv2D(internal, (asymmetric, 1), padding='same')(encoder)
    elif dilated:
        encoder = Conv2D(internal, (3, 3), dilation_rate=(dilated, dilated),
                         padding='same')(encoder)
    else:
        raise(Exception('You shouldn\'t be here'))
    encoder = BatchNormalization(momentum=0.1)(encoder)
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    # 1x1 expansion + regularization.
    encoder = Conv2D(output, (1, 1), use_bias=False)(encoder)
    encoder = BatchNormalization(momentum=0.1)(encoder)
    encoder = SpatialDropout2D(dropout_rate)(encoder)
    # Shortcut branch: pool (keeping argmax indices) and zero-pad the channel
    # axis -- the Permute sandwich lets ZeroPadding2D pad channels.
    other = inp
    if downsample:
        other, indices = MaxPoolingWithArgmax2D()(other)
        other = Permute((1, 3, 2))(other)
        pad_feature_maps = output - inp.get_shape().as_list()[3]
        tb_pad = (0, 0)
        lr_pad = (0, pad_feature_maps)
        other = ZeroPadding2D(padding=(tb_pad, lr_pad))(other)
        other = Permute((1, 3, 2))(other)
    encoder = add([encoder, other])
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    if downsample:
        return encoder, indices
    return encoder
Example #23
Source File: encoder.py From enet-keras with MIT License | 4 votes |
def bottleneck(inp, output, internal_scale=4, asymmetric=0, dilated=0, downsample=False, dropout_rate=0.1):
    """ENet encoder bottleneck block (plain max-pooling variant).

    Two-branch residual unit:

    * main branch: 1x1 projection -> 3x3 conv (regular, asymmetric 1xk/kx1,
      or dilated, selected by the flags) -> 1x1 expansion, each stage
      followed by BatchNormalization + PReLU, then SpatialDropout2D.
    * skip branch: the input, optionally max-pooled and zero-padded along
      the channel axis to match `output` channels.

    Args:
        inp: input Keras tensor.
            # assumes channels_last layout — channel count is read from
            # shape index 3 below; TODO confirm against the caller.
        output: number of output feature maps of the block.
        internal_scale: reduction factor for the internal projection width.
        asymmetric: if non-zero, kernel size k of the factorized 1xk / kx1 conv.
        dilated: if non-zero, dilation rate for the 3x3 conv.
        downsample: if True, halve the spatial resolution (strided 2x2
            projection on the main branch, MaxPooling2D on the skip branch).
        dropout_rate: SpatialDropout2D rate applied to the main branch.

    Returns:
        The block output tensor.
    """
    # main branch
    internal = output // internal_scale
    encoder = inp

    # 1x1
    input_stride = 2 if downsample else 1  # the 1st 1x1 projection is replaced with a 2x2 convolution when downsampling
    encoder = Conv2D(internal, (input_stride, input_stride),
                     # padding='same',
                     strides=(input_stride, input_stride), use_bias=False)(encoder)
    # Batch normalization + PReLU
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # conv: choose the middle convolution flavor from the flags
    if not asymmetric and not dilated:
        encoder = Conv2D(internal, (3, 3), padding='same')(encoder)
    elif asymmetric:
        # factorized kernel: 1xk followed by kx1
        encoder = Conv2D(internal, (1, asymmetric), padding='same', use_bias=False)(encoder)
        encoder = Conv2D(internal, (asymmetric, 1), padding='same')(encoder)
    elif dilated:
        encoder = Conv2D(internal, (3, 3), dilation_rate=(dilated, dilated), padding='same')(encoder)
    else:
        raise(Exception('You shouldn\'t be here'))

    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # 1x1 expansion back to `output` channels
    encoder = Conv2D(output, (1, 1), use_bias=False)(encoder)

    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = SpatialDropout2D(dropout_rate)(encoder)

    other = inp
    # other branch
    if downsample:
        other = MaxPooling2D()(other)

        # Pad the channel axis with zeros so the skip branch matches
        # `output` channels: Permute swaps channels into a spatial slot so
        # ZeroPadding2D can pad them, then the second Permute swaps back.
        other = Permute((1, 3, 2))(other)
        pad_feature_maps = output - inp.get_shape().as_list()[3]
        tb_pad = (0, 0)
        lr_pad = (0, pad_feature_maps)
        other = ZeroPadding2D(padding=(tb_pad, lr_pad))(other)
        other = Permute((1, 3, 2))(other)

    encoder = add([encoder, other])
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    return encoder
Example #24
Source File: convnets.py From convnets-keras with MIT License | 4 votes |
def AlexNet(weights_path=None, heatmap=False):
    """Build AlexNet (channels-first) with two-tower grouped convolutions.

    Args:
        weights_path: optional path to a weights file loaded via
            ``model.load_weights``.
        heatmap: if True, build a fully-convolutional variant that accepts
            arbitrary spatial input sizes and replaces the dense head with
            6x6 / 1x1 convolutions plus a spatial (4D) softmax, producing a
            per-location class heatmap.

    Returns:
        A Keras ``Model`` mapping the image input to class predictions.
    """
    if heatmap:
        # Fully-convolutional mode: spatial dimensions left unspecified.
        inputs = Input(shape=(3, None, None))
    else:
        inputs = Input(shape=(3, 227, 227))

    conv_1 = Convolution2D(96, 11, 11, subsample=(4, 4), activation='relu',
                           name='conv_1')(inputs)

    conv_2 = MaxPooling2D((3, 3), strides=(2, 2))(conv_1)
    # presumably local response normalization as in the AlexNet paper;
    # `crosschannelnormalization` is a project helper defined elsewhere.
    conv_2 = crosschannelnormalization(name='convpool_1')(conv_2)
    conv_2 = ZeroPadding2D((2, 2))(conv_2)
    # Grouped convolution: split channels in two halves, convolve each half
    # separately, then concatenate along the channel axis (mirrors the
    # original two-GPU towers).
    conv_2 = merge([
        Convolution2D(128, 5, 5, activation='relu', name='conv_2_' + str(i + 1))(
            splittensor(ratio_split=2, id_split=i)(conv_2)
        ) for i in range(2)], mode='concat', concat_axis=1, name='conv_2')

    conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
    conv_3 = crosschannelnormalization()(conv_3)
    conv_3 = ZeroPadding2D((1, 1))(conv_3)
    conv_3 = Convolution2D(384, 3, 3, activation='relu', name='conv_3')(conv_3)

    conv_4 = ZeroPadding2D((1, 1))(conv_3)
    # Grouped convolution (two towers of 192 filters).
    conv_4 = merge([
        Convolution2D(192, 3, 3, activation='relu', name='conv_4_' + str(i + 1))(
            splittensor(ratio_split=2, id_split=i)(conv_4)
        ) for i in range(2)], mode='concat', concat_axis=1, name='conv_4')

    conv_5 = ZeroPadding2D((1, 1))(conv_4)
    # Grouped convolution (two towers of 128 filters).
    conv_5 = merge([
        Convolution2D(128, 3, 3, activation='relu', name='conv_5_' + str(i + 1))(
            splittensor(ratio_split=2, id_split=i)(conv_5)
        ) for i in range(2)], mode='concat', concat_axis=1, name='conv_5')

    dense_1 = MaxPooling2D((3, 3), strides=(2, 2), name='convpool_5')(conv_5)

    if heatmap:
        # Convolutional classifier head: dense layers recast as convs so the
        # model can slide over larger inputs and emit a spatial heatmap.
        dense_1 = Convolution2D(4096, 6, 6, activation='relu', name='dense_1')(dense_1)
        dense_2 = Convolution2D(4096, 1, 1, activation='relu', name='dense_2')(dense_1)
        dense_3 = Convolution2D(1000, 1, 1, name='dense_3')(dense_2)
        prediction = Softmax4D(axis=1, name='softmax')(dense_3)
    else:
        # Standard classifier head: two 4096-unit dense layers with dropout,
        # then a 1000-way softmax.
        dense_1 = Flatten(name='flatten')(dense_1)
        dense_1 = Dense(4096, activation='relu', name='dense_1')(dense_1)
        dense_2 = Dropout(0.5)(dense_1)
        dense_2 = Dense(4096, activation='relu', name='dense_2')(dense_2)
        dense_3 = Dropout(0.5)(dense_2)
        dense_3 = Dense(1000, name='dense_3')(dense_3)
        prediction = Activation('softmax',
                                name='softmax')(dense_3)

    model = Model(input=inputs, output=prediction)

    if weights_path:
        model.load_weights(weights_path)

    return model
Example #25
Source File: convolutional_test.py From DeepLearning_Wavelet-LSTM with MIT License | 4 votes |
def test_zero_padding_2d():
    """Check ZeroPadding2D output: zero borders, untouched interior.

    Fixes two defects in the original test:
    * ``inputs`` was assigned twice unconditionally, so the overwriting
      channels_first-shaped array was used even on the channels_last
      iteration of the loop.
    * the channels_first correctness assertion indexed the interior with
      channels_last axes (``[:, 2:-2, 2:-2, :]`` instead of
      ``[:, :, 2:-2, 2:-2]``).
    """
    num_samples = 2
    stack_size = 2
    input_num_row = 4
    input_num_col = 5
    for data_format in ['channels_first', 'channels_last']:
        # Build the input whose layout matches the format under test.
        if data_format == 'channels_last':
            inputs = np.ones((num_samples, input_num_row, input_num_col, stack_size))
        else:
            inputs = np.ones((num_samples, stack_size, input_num_row, input_num_col))

        # basic test: symmetric and per-side padding specs build and run
        layer_test(convolutional.ZeroPadding2D,
                   kwargs={'padding': (2, 2), 'data_format': data_format},
                   input_shape=inputs.shape)
        layer_test(convolutional.ZeroPadding2D,
                   kwargs={'padding': ((1, 2), (3, 4)), 'data_format': data_format},
                   input_shape=inputs.shape)

        # correctness test: symmetric (2, 2) padding
        layer = convolutional.ZeroPadding2D(padding=(2, 2), data_format=data_format)
        layer.build(inputs.shape)
        outputs = layer(K.variable(inputs))
        np_output = K.eval(outputs)
        if data_format == 'channels_last':
            for offset in [0, 1, -1, -2]:
                assert_allclose(np_output[:, offset, :, :], 0.)
                assert_allclose(np_output[:, :, offset, :], 0.)
            assert_allclose(np_output[:, 2:-2, 2:-2, :], 1.)
        elif data_format == 'channels_first':
            for offset in [0, 1, -1, -2]:
                assert_allclose(np_output[:, :, offset, :], 0.)
                assert_allclose(np_output[:, :, :, offset], 0.)
            # interior slice uses channels_first axes (rows/cols are last)
            assert_allclose(np_output[:, :, 2:-2, 2:-2], 1.)

        # correctness test: asymmetric ((1, 2), (3, 4)) padding
        layer = convolutional.ZeroPadding2D(padding=((1, 2), (3, 4)),
                                            data_format=data_format)
        layer.build(inputs.shape)
        outputs = layer(K.variable(inputs))
        np_output = K.eval(outputs)
        if data_format == 'channels_last':
            for top_offset in [0]:
                assert_allclose(np_output[:, top_offset, :, :], 0.)
            for bottom_offset in [-1, -2]:
                assert_allclose(np_output[:, bottom_offset, :, :], 0.)
            for left_offset in [0, 1, 2]:
                assert_allclose(np_output[:, :, left_offset, :], 0.)
            for right_offset in [-1, -2, -3, -4]:
                assert_allclose(np_output[:, :, right_offset, :], 0.)
            assert_allclose(np_output[:, 1:-2, 3:-4, :], 1.)
        elif data_format == 'channels_first':
            for top_offset in [0]:
                assert_allclose(np_output[:, :, top_offset, :], 0.)
            for bottom_offset in [-1, -2]:
                assert_allclose(np_output[:, :, bottom_offset, :], 0.)
            for left_offset in [0, 1, 2]:
                assert_allclose(np_output[:, :, :, left_offset], 0.)
            for right_offset in [-1, -2, -3, -4]:
                assert_allclose(np_output[:, :, :, right_offset], 0.)
            assert_allclose(np_output[:, :, 1:-2, 3:-4], 1.)
Example #26
Source File: model.py From DeepClassificationBot with MIT License | 4 votes |
def get_deep_anime_model(n_outputs=1000, input_size=128):
    """The deep neural network used for deep anime bot.

    Args:
        n_outputs: number of classes in the final softmax layer.
        input_size: spatial size of the square, 3-channel (channels-first)
            input image.

    Returns:
        A ``Sequential`` model (uncompiled); its summary is printed.
    """
    model = Sequential()

    # Stage 1: two 64-filter 3x3 convs, 2x2 max-pool, batch norm.
    model.add(Convolution2D(64, 3, 3, activation='relu',
                            input_shape=(3, input_size, input_size)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(BatchNormalization())

    # Stage 2: a 128-filter 3x3 conv plus a 1x1 conv.
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(Convolution2D(128, 1, 1, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(BatchNormalization())

    # Stage 3: two 256-filter 3x3 convs plus a 1x1 conv.
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(Convolution2D(256, 1, 1, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(BatchNormalization())

    # Stage 4: two 512-filter 3x3 convs plus a 1x1 conv, then an 8x8
    # average pool (stride 2) instead of a max pool.
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(Convolution2D(512, 1, 1, activation='relu'))
    model.add(AveragePooling2D((8, 8), strides=(2, 2)))
    model.add(BatchNormalization())

    # Classifier head: two 2048-unit dense layers with heavy dropout,
    # then the n_outputs-way softmax.
    model.add(Flatten())
    model.add(Dropout(0.5))
    for _ in range(2):
        model.add(Dense(2048))
        model.add(BatchNormalization())
        model.add(Dropout(0.7))
    model.add(Dense(n_outputs))
    model.add(Activation('softmax'))

    print(model.summary())
    return model
Example #27
Source File: features.py From detection-2016-nipsws with MIT License | 4 votes |
def vgg_16(weights_path=None):
    """Build the VGG-16 architecture (channels-first, 3x224x224 input).

    Args:
        weights_path: optional path to a weights file loaded via
            ``model.load_weights``.

    Returns:
        A ``Sequential`` model ending in a 1000-way softmax.
    """
    model = Sequential()

    # First conv layer carries the input shape.
    model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))

    # Remaining layers follow the VGG-16 plan: each integer adds a
    # (ZeroPadding2D, 3x3 ReLU conv) pair; None adds a 2x2 max-pool.
    plan = [64, None,
            128, 128, None,
            256, 256, 256, None,
            512, 512, 512, None,
            512, 512, 512, None]
    for n_filters in plan:
        if n_filters is None:
            model.add(MaxPooling2D((2, 2), strides=(2, 2)))
        else:
            model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(n_filters, 3, 3, activation='relu'))

    # Classifier head: two 4096-unit dense layers with dropout.
    model.add(Flatten())
    for _ in range(2):
        model.add(Dense(4096, activation='relu'))
        model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)

    return model
Example #28
Source File: convolutional_test.py From DeepLearning_Wavelet-LSTM with MIT License | 4 votes |
def test_zero_padding_2d():
    """Check ZeroPadding2D output: zero borders, untouched interior.

    Fixes two defects in the original test:
    * ``inputs`` was assigned twice unconditionally, so the overwriting
      channels_first-shaped array was used even on the channels_last
      iteration of the loop.
    * the channels_first correctness assertion indexed the interior with
      channels_last axes (``[:, 2:-2, 2:-2, :]`` instead of
      ``[:, :, 2:-2, 2:-2]``).
    """
    num_samples = 2
    stack_size = 2
    input_num_row = 4
    input_num_col = 5
    for data_format in ['channels_first', 'channels_last']:
        # Build the input whose layout matches the format under test.
        if data_format == 'channels_last':
            inputs = np.ones((num_samples, input_num_row, input_num_col, stack_size))
        else:
            inputs = np.ones((num_samples, stack_size, input_num_row, input_num_col))

        # basic test: symmetric and per-side padding specs build and run
        layer_test(convolutional.ZeroPadding2D,
                   kwargs={'padding': (2, 2), 'data_format': data_format},
                   input_shape=inputs.shape)
        layer_test(convolutional.ZeroPadding2D,
                   kwargs={'padding': ((1, 2), (3, 4)), 'data_format': data_format},
                   input_shape=inputs.shape)

        # correctness test: symmetric (2, 2) padding
        layer = convolutional.ZeroPadding2D(padding=(2, 2), data_format=data_format)
        layer.build(inputs.shape)
        outputs = layer(K.variable(inputs))
        np_output = K.eval(outputs)
        if data_format == 'channels_last':
            for offset in [0, 1, -1, -2]:
                assert_allclose(np_output[:, offset, :, :], 0.)
                assert_allclose(np_output[:, :, offset, :], 0.)
            assert_allclose(np_output[:, 2:-2, 2:-2, :], 1.)
        elif data_format == 'channels_first':
            for offset in [0, 1, -1, -2]:
                assert_allclose(np_output[:, :, offset, :], 0.)
                assert_allclose(np_output[:, :, :, offset], 0.)
            # interior slice uses channels_first axes (rows/cols are last)
            assert_allclose(np_output[:, :, 2:-2, 2:-2], 1.)

        # correctness test: asymmetric ((1, 2), (3, 4)) padding
        layer = convolutional.ZeroPadding2D(padding=((1, 2), (3, 4)),
                                            data_format=data_format)
        layer.build(inputs.shape)
        outputs = layer(K.variable(inputs))
        np_output = K.eval(outputs)
        if data_format == 'channels_last':
            for top_offset in [0]:
                assert_allclose(np_output[:, top_offset, :, :], 0.)
            for bottom_offset in [-1, -2]:
                assert_allclose(np_output[:, bottom_offset, :, :], 0.)
            for left_offset in [0, 1, 2]:
                assert_allclose(np_output[:, :, left_offset, :], 0.)
            for right_offset in [-1, -2, -3, -4]:
                assert_allclose(np_output[:, :, right_offset, :], 0.)
            assert_allclose(np_output[:, 1:-2, 3:-4, :], 1.)
        elif data_format == 'channels_first':
            for top_offset in [0]:
                assert_allclose(np_output[:, :, top_offset, :], 0.)
            for bottom_offset in [-1, -2]:
                assert_allclose(np_output[:, :, bottom_offset, :], 0.)
            for left_offset in [0, 1, 2]:
                assert_allclose(np_output[:, :, :, left_offset], 0.)
            for right_offset in [-1, -2, -3, -4]:
                assert_allclose(np_output[:, :, :, right_offset], 0.)
            assert_allclose(np_output[:, :, 1:-2, 3:-4], 1.)
Example #29
Source File: myVGG.py From Real-Time-Facial-Expression-Recognition-with-DeepLearning with MIT License | 4 votes |
def VGG_16(weights_path=None, shape=(48, 48)):
    """Build and compile a small VGG-style CNN for expression recognition.

    Fix: the ``shape`` argument was previously accepted but ignored — the
    input was hard-coded to (1, 48, 48). It now drives the input shape;
    the default value preserves the original behavior exactly.

    Args:
        weights_path: optional path to a weights file loaded via
            ``model.load_weights``.
        shape: (rows, cols) of the single-channel (grayscale) input image.

    Returns:
        A ``Sequential`` model compiled with Adam / categorical
        cross-entropy, ending in a 6-way softmax.
    """
    model = Sequential()

    # Block 1: two 32-filter 3x3 convs.
    model.add(ZeroPadding2D((1, 1), input_shape=(1,) + tuple(shape)))
    model.add(Convolution2D(32, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(32, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Block 2: two 64-filter 3x3 convs.
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Block 3: three 128-filter 3x3 convs.
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Classifier head (6 expression classes).
    model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(6, activation='softmax'))
    print ("Create model successfully")

    if weights_path:
        model.load_weights(weights_path)

    model.compile(optimizer='adam', loss='categorical_crossentropy', \
                  metrics=['accuracy'])
    return model
Example #30
Source File: convolutional_test.py From DeepLearning_Wavelet-LSTM with MIT License | 4 votes |
def test_zero_padding_2d():
    """Check ZeroPadding2D output: zero borders, untouched interior.

    Fixes two defects in the original test:
    * ``inputs`` was assigned twice unconditionally, so the overwriting
      channels_first-shaped array was used even on the channels_last
      iteration of the loop.
    * the channels_first correctness assertion indexed the interior with
      channels_last axes (``[:, 2:-2, 2:-2, :]`` instead of
      ``[:, :, 2:-2, 2:-2]``).
    """
    num_samples = 2
    stack_size = 2
    input_num_row = 4
    input_num_col = 5
    for data_format in ['channels_first', 'channels_last']:
        # Build the input whose layout matches the format under test.
        if data_format == 'channels_last':
            inputs = np.ones((num_samples, input_num_row, input_num_col, stack_size))
        else:
            inputs = np.ones((num_samples, stack_size, input_num_row, input_num_col))

        # basic test: symmetric and per-side padding specs build and run
        layer_test(convolutional.ZeroPadding2D,
                   kwargs={'padding': (2, 2), 'data_format': data_format},
                   input_shape=inputs.shape)
        layer_test(convolutional.ZeroPadding2D,
                   kwargs={'padding': ((1, 2), (3, 4)), 'data_format': data_format},
                   input_shape=inputs.shape)

        # correctness test: symmetric (2, 2) padding
        layer = convolutional.ZeroPadding2D(padding=(2, 2), data_format=data_format)
        layer.build(inputs.shape)
        outputs = layer(K.variable(inputs))
        np_output = K.eval(outputs)
        if data_format == 'channels_last':
            for offset in [0, 1, -1, -2]:
                assert_allclose(np_output[:, offset, :, :], 0.)
                assert_allclose(np_output[:, :, offset, :], 0.)
            assert_allclose(np_output[:, 2:-2, 2:-2, :], 1.)
        elif data_format == 'channels_first':
            for offset in [0, 1, -1, -2]:
                assert_allclose(np_output[:, :, offset, :], 0.)
                assert_allclose(np_output[:, :, :, offset], 0.)
            # interior slice uses channels_first axes (rows/cols are last)
            assert_allclose(np_output[:, :, 2:-2, 2:-2], 1.)

        # correctness test: asymmetric ((1, 2), (3, 4)) padding
        layer = convolutional.ZeroPadding2D(padding=((1, 2), (3, 4)),
                                            data_format=data_format)
        layer.build(inputs.shape)
        outputs = layer(K.variable(inputs))
        np_output = K.eval(outputs)
        if data_format == 'channels_last':
            for top_offset in [0]:
                assert_allclose(np_output[:, top_offset, :, :], 0.)
            for bottom_offset in [-1, -2]:
                assert_allclose(np_output[:, bottom_offset, :, :], 0.)
            for left_offset in [0, 1, 2]:
                assert_allclose(np_output[:, :, left_offset, :], 0.)
            for right_offset in [-1, -2, -3, -4]:
                assert_allclose(np_output[:, :, right_offset, :], 0.)
            assert_allclose(np_output[:, 1:-2, 3:-4, :], 1.)
        elif data_format == 'channels_first':
            for top_offset in [0]:
                assert_allclose(np_output[:, :, top_offset, :], 0.)
            for bottom_offset in [-1, -2]:
                assert_allclose(np_output[:, :, bottom_offset, :], 0.)
            for left_offset in [0, 1, 2]:
                assert_allclose(np_output[:, :, :, left_offset], 0.)
            for right_offset in [-1, -2, -3, -4]:
                assert_allclose(np_output[:, :, :, right_offset], 0.)
            assert_allclose(np_output[:, :, 1:-2, 3:-4], 1.)