Python keras.layers.AveragePooling2D() Examples
The following are 30 code examples of keras.layers.AveragePooling2D().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module keras.layers, or try the search function.
Example #1
Source File: models.py From Carla-RL with MIT License | 7 votes |
def model_base_test_CNN(input_shape):
    """Small test CNN backbone.

    Four Conv2D + ReLU stages, each followed by an overlapping average
    pooling, finished with a Flatten.

    Returns the (input tensor, output tensor) pair of the built model.
    """
    net = Sequential()
    net.add(Conv2D(32, (3, 3), input_shape=input_shape, padding='same'))
    net.add(Activation('relu'))
    net.add(AveragePooling2D(pool_size=(5, 5), strides=(3, 3), padding='same'))
    # Two identical 64-filter stages.
    for _ in range(2):
        net.add(Conv2D(64, (3, 3), padding='same'))
        net.add(Activation('relu'))
        net.add(AveragePooling2D(pool_size=(5, 5), strides=(3, 3), padding='same'))
    net.add(Conv2D(128, (3, 3), padding='same'))
    net.add(Activation('relu'))
    net.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2), padding='same'))
    net.add(Flatten())
    return net.input, net.output


# 64x3 model
Example #2
Source File: cnn_model.py From deepQuest with BSD 3-Clause "New" or "Revised" License | 6 votes |
def add_One_vs_One_Inception_v2(self, input, input_shape, id_branch, nOutput=2, activation='softmax'): """ Builds a simple One_vs_One_Inception_v2 network with 2 inception layers on the top of the current model (useful for ECOC_loss models). """ # Inception Ea out_Ea = self.__addInception('inceptionEa_' + str(id_branch), input, 16, 8, 32, 8, 8, 8) # Inception Eb out_Eb = self.__addInception('inceptionEb_' + str(id_branch), out_Ea, 8, 8, 16, 8, 4, 4) # Average Pooling pool_size=(7,7) self.model.add_node(AveragePooling2D(pool_size=input_shape[1:], strides=(1, 1)), name='ave_pool/ECOC_' + str(id_branch), input=out_Eb) # Softmax self.model.add_node(Flatten(), name='fc_OnevsOne_' + str(id_branch) + '/flatten', input='ave_pool/ECOC_' + str(id_branch)) self.model.add_node(Dropout(0.5), name='fc_OnevsOne_' + str(id_branch) + '/drop', input='fc_OnevsOne_' + str(id_branch) + '/flatten') output_name = 'fc_OnevsOne_' + str(id_branch) self.model.add_node(Dense(nOutput, activation=activation), name=output_name, input='fc_OnevsOne_' + str(id_branch) + '/drop') return output_name
Example #3
Source File: keras_ops.py From deep_architect with MIT License | 6 votes |
def avg_pool2d(h_kernel_size, h_stride):
    """Searchable 2D average-pooling module for deep_architect.

    `h_kernel_size` and `h_stride` are hyperparameters; the compiled layer
    uses 'same' padding and a square stride.
    """
    def compile_fn(di, dh):
        stride_pair = (dh['stride'], dh['stride'])
        pool = layers.AveragePooling2D(pool_size=dh['kernel_size'],
                                       strides=stride_pair,
                                       padding='same')

        def fn(di):
            return {'out': pool(di['in'])}

        return fn

    return siso_keras_module('AvgPool', compile_fn,
                             {'kernel_size': h_kernel_size,
                              'stride': h_stride})
Example #4
Source File: keras_ops.py From deep_architect with MIT License | 6 votes |
def avg_pool2d(h_kernel_size, h_stride):
    """Searchable 2D average-pooling module (registered as 'AvgPool2D').

    Kernel size and stride are search hyperparameters; padding is 'same'.
    """
    def compile_fn(di, dh):
        pool_layer = layers.AveragePooling2D(
            pool_size=dh['kernel_size'],
            strides=(dh['stride'], dh['stride']),
            padding='same')

        def fn(di):
            return {'out': pool_layer(di['in'])}

        return fn

    hyperparams = {'kernel_size': h_kernel_size, 'stride': h_stride}
    return siso_keras_module('AvgPool2D', compile_fn, hyperparams)
Example #5
Source File: transfer.py From Transfer-Learning with MIT License | 6 votes |
def add_new_last_layer(base_model, nb_classes):
    """Add last layer to the convnet

    Args:
        base_model: keras model excluding top
        nb_classes: # of classes

    Returns:
        new keras model with last layer
    """
    x = base_model.output
    # NOTE(review): border_mode / Model(input=..., output=...) are Keras 1
    # argument names — confirm the pinned Keras version before modernizing.
    x = AveragePooling2D((8, 8), border_mode='valid', name='avg_pool')(x)
    x = Dropout(0.4)(x)
    x = Flatten()(x)
    # Bug fix: the original hard-coded Dense(2) and silently ignored the
    # nb_classes parameter; use the parameter so the head matches the task.
    predictions = Dense(nb_classes, activation='softmax')(x)
    model = Model(input=base_model.input, output=predictions)
    return model
Example #6
Source File: densenet_gray.py From kaggle-rsna18 with MIT License | 6 votes |
def transition_block(x, reduction, name):
    """A transition block.

    # Arguments
        x: input tensor.
        reduction: float, compression rate at transition layers.
        name: string, block label.

    # Returns
        output tensor for the block.
    """
    channel_axis = 3 if K.image_data_format() == 'channels_last' else 1
    x = BatchNormalization(axis=channel_axis, epsilon=1.001e-5,
                           name=name + '_bn')(x)
    x = Activation('relu', name=name + '_relu')(x)
    # Compress the channel count by `reduction` with a 1x1 conv.
    compressed = int(K.int_shape(x)[channel_axis] * reduction)
    x = Conv2D(compressed, 1, use_bias=False, name=name + '_conv')(x)
    # Halve the spatial resolution.
    x = AveragePooling2D(2, strides=2, name=name + '_pool')(x)
    return x
Example #7
Source File: densely_connected_cnn_blocks.py From CNNArt with Apache License 2.0 | 6 votes |
def transition_layer(input_tensor, numFilters, compressionFactor=1.0):
    """DenseNet-style transition: BN -> ReLU -> 1x1 compression conv -> 2x2 avg pool.

    Args:
        input_tensor: 4D feature-map tensor to downsample.
        numFilters: number of filters entering the transition.
        compressionFactor: fraction of filters kept by the 1x1 convolution.

    Returns:
        Tuple of (downsampled tensor, number of output filters).
    """
    numOutPutFilters = int(numFilters * compressionFactor)

    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1

    x = BatchNormalization(axis=bn_axis)(input_tensor)
    x = Activation('relu')(x)

    x = Conv2D(numOutPutFilters, (1, 1), strides=(1, 1), padding='same',
               kernel_initializer='he_normal')(x)

    # downsampling
    # Bug fix: the original passed name='' here, giving every transition the
    # same (empty) layer name, which makes Keras raise a duplicate-name error
    # as soon as the block is used more than once in a model. Omitting `name`
    # lets Keras auto-generate a unique one.
    x = AveragePooling2D((2, 2), strides=(2, 2), padding='valid',
                         data_format='channels_last')(x)

    return x, numOutPutFilters
Example #8
Source File: densely_connected_cnn_blocks.py From CNNArt with Apache License 2.0 | 6 votes |
def transition_SE_layer(input_tensor, numFilters, compressionFactor=1.0, se_ratio=16):
    """Transition layer with a squeeze-and-excitation block before downsampling.

    BN -> ReLU -> 1x1 compression conv -> SE block -> 2x2 avg pool.

    Args:
        input_tensor: 4D feature-map tensor to downsample.
        numFilters: number of filters entering the transition.
        compressionFactor: fraction of filters kept by the 1x1 convolution.
        se_ratio: reduction ratio passed to the squeeze_excitation_block.

    Returns:
        Tuple of (downsampled tensor, number of output filters).
    """
    numOutPutFilters = int(numFilters * compressionFactor)

    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1

    x = BatchNormalization(axis=bn_axis)(input_tensor)
    x = Activation('relu')(x)

    x = Conv2D(numOutPutFilters, (1, 1), strides=(1, 1), padding='same',
               kernel_initializer='he_normal')(x)

    # SE Block
    x = squeeze_excitation_block(x, ratio=se_ratio)

    # downsampling
    # Bug fix: the original passed name='' here, giving every transition the
    # same (empty) layer name, which makes Keras raise a duplicate-name error
    # as soon as the block is used more than once in a model. Omitting `name`
    # lets Keras auto-generate a unique one.
    x = AveragePooling2D((2, 2), strides=(2, 2), padding='valid',
                         data_format='channels_last')(x)

    return x, numOutPutFilters
Example #9
Source File: densely_connected_cnn_blocks.py From CNNArt with Apache License 2.0 | 6 votes |
def transition_layer(input_tensor, numFilters, compressionFactor=1.0):
    """DenseNet-style transition: BN -> ReLU -> 1x1 compression conv -> 2x2 avg pool.

    Args:
        input_tensor: 4D feature-map tensor to downsample.
        numFilters: number of filters entering the transition.
        compressionFactor: fraction of filters kept by the 1x1 convolution.

    Returns:
        Tuple of (downsampled tensor, number of output filters).
    """
    numOutPutFilters = int(numFilters * compressionFactor)

    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1

    x = BatchNormalization(axis=bn_axis)(input_tensor)
    x = Activation('relu')(x)

    x = Conv2D(numOutPutFilters, (1, 1), strides=(1, 1), padding='same',
               kernel_initializer='he_normal')(x)

    # downsampling
    # Bug fix: the original passed name='' here, giving every transition the
    # same (empty) layer name, which makes Keras raise a duplicate-name error
    # as soon as the block is used more than once in a model. Omitting `name`
    # lets Keras auto-generate a unique one.
    x = AveragePooling2D((2, 2), strides=(2, 2), padding='valid',
                         data_format='channels_last')(x)

    return x, numOutPutFilters
Example #10
Source File: densely_connected_cnn_blocks.py From CNNArt with Apache License 2.0 | 6 votes |
def transition_SE_layer(input_tensor, numFilters, compressionFactor=1.0, se_ratio=16):
    """Transition layer with a squeeze-and-excitation block before downsampling.

    BN -> ReLU -> 1x1 compression conv -> SE block -> 2x2 avg pool.

    Args:
        input_tensor: 4D feature-map tensor to downsample.
        numFilters: number of filters entering the transition.
        compressionFactor: fraction of filters kept by the 1x1 convolution.
        se_ratio: reduction ratio passed to the squeeze_excitation_block.

    Returns:
        Tuple of (downsampled tensor, number of output filters).
    """
    numOutPutFilters = int(numFilters * compressionFactor)

    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1

    x = BatchNormalization(axis=bn_axis)(input_tensor)
    x = Activation('relu')(x)

    x = Conv2D(numOutPutFilters, (1, 1), strides=(1, 1), padding='same',
               kernel_initializer='he_normal')(x)

    # SE Block
    x = squeeze_excitation_block(x, ratio=se_ratio)

    # downsampling
    # Bug fix: the original passed name='' here, giving every transition the
    # same (empty) layer name, which makes Keras raise a duplicate-name error
    # as soon as the block is used more than once in a model. Omitting `name`
    # lets Keras auto-generate a unique one.
    x = AveragePooling2D((2, 2), strides=(2, 2), padding='valid',
                         data_format='channels_last')(x)

    return x, numOutPutFilters
Example #11
Source File: models.py From Carla-RL with MIT License | 6 votes |
def model_base_4_CNN(input_shape):
    """Four-stage CNN backbone: Conv2D + ReLU + average pooling per stage,
    with growing filter counts (64, 64, 128, 256), flattened at the end.

    Returns the (input tensor, output tensor) pair of the built model.
    """
    net = Sequential()
    net.add(Conv2D(64, (5, 5), input_shape=input_shape, padding='same'))
    net.add(Activation('relu'))
    net.add(AveragePooling2D(pool_size=(5, 5), strides=(3, 3), padding='same'))
    # (filters, conv kernel, pool size, pool stride) per remaining stage.
    stages = (
        (64, (5, 5), (5, 5), (3, 3)),
        (128, (5, 5), (3, 3), (2, 2)),
        (256, (3, 3), (3, 3), (2, 2)),
    )
    for filters, kernel, pool, stride in stages:
        net.add(Conv2D(filters, kernel, padding='same'))
        net.add(Activation('relu'))
        net.add(AveragePooling2D(pool_size=pool, strides=stride, padding='same'))
    net.add(Flatten())
    return net.input, net.output


# 5 CNN layer with residual connections model
Example #12
Source File: cnn_model-predictor.py From deepQuest with BSD 3-Clause "New" or "Revised" License | 6 votes |
def One_vs_One_Inception(self, nOutput=2, input=[224, 224, 3]):
    """
    Builds a simple One_vs_One_Inception network with 2 inception layers
    (useful for ECOC models).
    """
    # NOTE(review): mutable default argument `input` — safe only because it is
    # never mutated here; verify callers do not mutate it either.
    if len(input) == 3:
        # Reorder [rows, cols, channels] into channels-first (ch, rows, cols).
        input_shape = tuple([input[2]] + input[0:2])
    else:
        input_shape = tuple(input)

    # Legacy Keras Graph API: nodes are wired together by string names.
    self.model = Graph()
    # Input
    self.model.add_input(name='input', input_shape=input_shape)
    # Inception Ea
    out_Ea = self.__addInception('inceptionEa', 'input', 4, 2, 8, 2, 2, 2)
    # Inception Eb
    out_Eb = self.__addInception('inceptionEb', out_Ea, 2, 2, 4, 2, 1, 1)
    # Average Pooling pool_size=(7,7)
    # Pool covers the whole spatial map (input_shape[1:]) — global average pooling.
    self.model.add_node(AveragePooling2D(pool_size=input_shape[1:], strides=(1, 1)),
                        name='ave_pool/ECOC', input=out_Eb)
    # Softmax classifier head.
    self.model.add_node(Flatten(), name='loss_OnevsOne/classifier_flatten',
                        input='ave_pool/ECOC')
    self.model.add_node(Dropout(0.5), name='loss_OnevsOne/drop',
                        input='loss_OnevsOne/classifier_flatten')
    self.model.add_node(Dense(nOutput, activation='softmax'), name='loss_OnevsOne',
                        input='loss_OnevsOne/drop')
    # Output
    self.model.add_output(name='loss_OnevsOne/output', input='loss_OnevsOne')
Example #13
Source File: cnn_model-predictor.py From deepQuest with BSD 3-Clause "New" or "Revised" License | 6 votes |
def add_One_vs_One_Inception(self, input, input_shape, id_branch, nOutput=2, activation='softmax'): """ Builds a simple One_vs_One_Inception network with 2 inception layers on the top of the current model (useful for ECOC_loss models). """ # Inception Ea out_Ea = self.__addInception('inceptionEa_' + str(id_branch), input, 4, 2, 8, 2, 2, 2) # Inception Eb out_Eb = self.__addInception('inceptionEb_' + str(id_branch), out_Ea, 2, 2, 4, 2, 1, 1) # Average Pooling pool_size=(7,7) self.model.add_node(AveragePooling2D(pool_size=input_shape[1:], strides=(1, 1)), name='ave_pool/ECOC_' + str(id_branch), input=out_Eb) # Softmax self.model.add_node(Flatten(), name='fc_OnevsOne_' + str(id_branch) + '/flatten', input='ave_pool/ECOC_' + str(id_branch)) self.model.add_node(Dropout(0.5), name='fc_OnevsOne_' + str(id_branch) + '/drop', input='fc_OnevsOne_' + str(id_branch) + '/flatten') output_name = 'fc_OnevsOne_' + str(id_branch) self.model.add_node(Dense(nOutput, activation=activation), name=output_name, input='fc_OnevsOne_' + str(id_branch) + '/drop') return output_name
Example #14
Source File: cnn_model-predictor.py From deepQuest with BSD 3-Clause "New" or "Revised" License | 6 votes |
def add_One_vs_One_Inception_Functional(self, input, input_shape, id_branch, nOutput=2, activation='softmax'):
    """
    Builds a simple One_vs_One_Inception network with 2 inception layers
    on the top of the current model (useful for ECOC_loss models).
    """
    branch = str(id_branch)
    in_node = self.model.get_layer(input).output

    # Two stacked inception modules.
    out_Ea, _ = self.__addInception_Functional('inceptionEa_' + branch, in_node, 4, 2, 8, 2, 2, 2)
    out_Eb, _ = self.__addInception_Functional('inceptionEb_' + branch, out_Ea, 2, 2, 4, 2, 1, 1)

    # Average pooling over the full spatial extent of the feature map.
    x = AveragePooling2D(pool_size=input_shape, strides=(1, 1),
                         name='ave_pool/ECOC_' + branch)(out_Eb)

    # Classifier head: flatten -> dropout -> dense.
    output_name = 'fc_OnevsOne_' + branch
    x = Flatten(name='fc_OnevsOne_' + branch + '/flatten')(x)
    x = Dropout(0.5, name='fc_OnevsOne_' + branch + '/drop')(x)
    out_node = Dense(nOutput, activation=activation, name=output_name)(x)
    return out_node
Example #15
Source File: cnn_model-predictor.py From deepQuest with BSD 3-Clause "New" or "Revised" License | 6 votes |
def One_vs_One_Inception_v2(self, nOutput=2, input=[224, 224, 3]):
    """
    Builds a simple One_vs_One_Inception_v2 network with 2 inception layers
    (useful for ECOC models).
    """
    # NOTE(review): mutable default argument `input` — safe only because it is
    # never mutated here; verify callers do not mutate it either.
    if len(input) == 3:
        # Reorder [rows, cols, channels] into channels-first (ch, rows, cols).
        input_shape = tuple([input[2]] + input[0:2])
    else:
        input_shape = tuple(input)

    # Legacy Keras Graph API: nodes are wired together by string names.
    self.model = Graph()
    # Input
    self.model.add_input(name='input', input_shape=input_shape)
    # Inception Ea
    out_Ea = self.__addInception('inceptionEa', 'input', 16, 8, 32, 8, 8, 8)
    # Inception Eb
    out_Eb = self.__addInception('inceptionEb', out_Ea, 8, 8, 16, 8, 4, 4)
    # Average Pooling pool_size=(7,7)
    # Pool covers the whole spatial map (input_shape[1:]) — global average pooling.
    self.model.add_node(AveragePooling2D(pool_size=input_shape[1:], strides=(1, 1)),
                        name='ave_pool/ECOC', input=out_Eb)
    # Softmax classifier head.
    self.model.add_node(Flatten(), name='loss_OnevsOne/classifier_flatten',
                        input='ave_pool/ECOC')
    self.model.add_node(Dropout(0.5), name='loss_OnevsOne/drop',
                        input='loss_OnevsOne/classifier_flatten')
    self.model.add_node(Dense(nOutput, activation='softmax'), name='loss_OnevsOne',
                        input='loss_OnevsOne/drop')
    # Output
    self.model.add_output(name='loss_OnevsOne/output', input='loss_OnevsOne')
Example #16
Source File: cnn_model.py From deepQuest with BSD 3-Clause "New" or "Revised" License | 6 votes |
def One_vs_One_Inception(self, nOutput=2, input=[224, 224, 3]):
    """
    Builds a simple One_vs_One_Inception network with 2 inception layers
    (useful for ECOC models).
    """
    # NOTE(review): mutable default argument `input` — safe only because it is
    # never mutated here; verify callers do not mutate it either.
    if len(input) == 3:
        # Reorder [rows, cols, channels] into channels-first (ch, rows, cols).
        input_shape = tuple([input[2]] + input[0:2])
    else:
        input_shape = tuple(input)

    # Legacy Keras Graph API: nodes are wired together by string names.
    self.model = Graph()
    # Input
    self.model.add_input(name='input', input_shape=input_shape)
    # Inception Ea
    out_Ea = self.__addInception('inceptionEa', 'input', 4, 2, 8, 2, 2, 2)
    # Inception Eb
    out_Eb = self.__addInception('inceptionEb', out_Ea, 2, 2, 4, 2, 1, 1)
    # Average Pooling pool_size=(7,7)
    # Pool covers the whole spatial map (input_shape[1:]) — global average pooling.
    self.model.add_node(AveragePooling2D(pool_size=input_shape[1:], strides=(1, 1)),
                        name='ave_pool/ECOC', input=out_Eb)
    # Softmax classifier head.
    self.model.add_node(Flatten(), name='loss_OnevsOne/classifier_flatten',
                        input='ave_pool/ECOC')
    self.model.add_node(Dropout(0.5), name='loss_OnevsOne/drop',
                        input='loss_OnevsOne/classifier_flatten')
    self.model.add_node(Dense(nOutput, activation='softmax'), name='loss_OnevsOne',
                        input='loss_OnevsOne/drop')
    # Output
    self.model.add_output(name='loss_OnevsOne/output', input='loss_OnevsOne')
Example #17
Source File: models.py From Carla-RL with MIT License | 6 votes |
def model_base_64x3_CNN(input_shape):
    """Three-stage CNN backbone with 64 filters per stage
    (Conv2D + ReLU + overlapping average pooling), flattened at the end.

    Returns the (input tensor, output tensor) pair of the built model.
    """
    net = Sequential()
    net.add(Conv2D(64, (3, 3), input_shape=input_shape, padding='same'))
    net.add(Activation('relu'))
    net.add(AveragePooling2D(pool_size=(5, 5), strides=(3, 3), padding='same'))
    # Two more identical 64-filter stages.
    for _ in range(2):
        net.add(Conv2D(64, (3, 3), padding='same'))
        net.add(Activation('relu'))
        net.add(AveragePooling2D(pool_size=(5, 5), strides=(3, 3), padding='same'))
    net.add(Flatten())
    return net.input, net.output


# 4 CNN layer model
Example #18
Source File: cnn_model.py From deepQuest with BSD 3-Clause "New" or "Revised" License | 6 votes |
def add_One_vs_One_Inception(self, input, input_shape, id_branch, nOutput=2, activation='softmax'): """ Builds a simple One_vs_One_Inception network with 2 inception layers on the top of the current model (useful for ECOC_loss models). """ # Inception Ea out_Ea = self.__addInception('inceptionEa_' + str(id_branch), input, 4, 2, 8, 2, 2, 2) # Inception Eb out_Eb = self.__addInception('inceptionEb_' + str(id_branch), out_Ea, 2, 2, 4, 2, 1, 1) # Average Pooling pool_size=(7,7) self.model.add_node(AveragePooling2D(pool_size=input_shape[1:], strides=(1, 1)), name='ave_pool/ECOC_' + str(id_branch), input=out_Eb) # Softmax self.model.add_node(Flatten(), name='fc_OnevsOne_' + str(id_branch) + '/flatten', input='ave_pool/ECOC_' + str(id_branch)) self.model.add_node(Dropout(0.5), name='fc_OnevsOne_' + str(id_branch) + '/drop', input='fc_OnevsOne_' + str(id_branch) + '/flatten') output_name = 'fc_OnevsOne_' + str(id_branch) self.model.add_node(Dense(nOutput, activation=activation), name=output_name, input='fc_OnevsOne_' + str(id_branch) + '/drop') return output_name
Example #19
Source File: cnn_model.py From deepQuest with BSD 3-Clause "New" or "Revised" License | 6 votes |
def add_One_vs_One_Inception_Functional(self, input, input_shape, id_branch, nOutput=2, activation='softmax'):
    """
    Builds a simple One_vs_One_Inception network with 2 inception layers
    on the top of the current model (useful for ECOC_loss models).
    """
    branch = str(id_branch)
    in_node = self.model.get_layer(input).output

    # Two stacked inception modules.
    out_Ea, _ = self.__addInception_Functional('inceptionEa_' + branch, in_node, 4, 2, 8, 2, 2, 2)
    out_Eb, _ = self.__addInception_Functional('inceptionEb_' + branch, out_Ea, 2, 2, 4, 2, 1, 1)

    # Average pooling over the full spatial extent of the feature map.
    x = AveragePooling2D(pool_size=input_shape, strides=(1, 1),
                         name='ave_pool/ECOC_' + branch)(out_Eb)

    # Classifier head: flatten -> dropout -> dense.
    output_name = 'fc_OnevsOne_' + branch
    x = Flatten(name='fc_OnevsOne_' + branch + '/flatten')(x)
    x = Dropout(0.5, name='fc_OnevsOne_' + branch + '/drop')(x)
    out_node = Dense(nOutput, activation=activation, name=output_name)(x)
    return out_node
Example #20
Source File: bigan.py From Keras-BiGAN with MIT License | 6 votes |
def d_block(inp, fil, p = True):
    """Discriminator residual block.

    A 1x1 shortcut conv is added to a 3x3-3x3-1x1 conv stack; the sum is
    passed through LeakyReLU and, when `p` is true, average-pooled.
    """
    shortcut = Conv2D(fil, 1, padding='same', kernel_initializer='he_normal')(inp)

    x = Conv2D(filters=fil, kernel_size=3, padding='same',
               kernel_initializer='he_normal')(inp)
    x = LeakyReLU(0.2)(x)
    x = Conv2D(filters=fil, kernel_size=3, padding='same',
               kernel_initializer='he_normal')(x)
    x = LeakyReLU(0.2)(x)
    x = Conv2D(fil, 1, padding='same', kernel_initializer='he_normal')(x)

    x = add([x, shortcut])
    x = LeakyReLU(0.2)(x)

    if p:
        x = AveragePooling2D()(x)

    return x
Example #21
Source File: models.py From Carla-RL with MIT License | 5 votes |
def model_base_5_wide_CNN_noact(input_shape):
    """Wide CNN backbone without per-branch activations.

    After an initial 7x7 conv + ReLU, three 'wide' stages each run two convs
    of different kernel sizes plus an average pooling in parallel and
    concatenate the branches (no activations inside the stages — hence
    'noact'). A final 512-filter conv feeds a global average pooling.

    Returns the (input tensor, output tensor) pair.
    """
    net_in = Input(shape=input_shape)
    x = Conv2D(64, (7, 7), strides=(3, 3), padding='same')(net_in)
    x = Activation('relu')(x)

    def wide_stage(tensor, filters, big_kernel, stride, pool):
        # Parallel branches: big-kernel conv, 3x3 conv, average pooling.
        branch_a = Conv2D(filters, big_kernel, strides=stride, padding='same')(tensor)
        branch_b = Conv2D(filters, (3, 3), strides=stride, padding='same')(tensor)
        branch_c = AveragePooling2D(pool_size=pool, strides=stride, padding='same')(tensor)
        return Concatenate()([branch_a, branch_b, branch_c])

    x = wide_stage(x, 64, (5, 5), (3, 3), (3, 3))
    x = wide_stage(x, 128, (5, 5), (2, 2), (2, 2))
    x = wide_stage(x, 256, (5, 5), (2, 2), (2, 2))

    x = Conv2D(512, (3, 3), strides=(2, 2), padding='same')(x)
    out = GlobalAveragePooling2D()(x)
    return net_in, out


# ---

# Model heads
Example #22
Source File: cnn_model-predictor.py From deepQuest with BSD 3-Clause "New" or "Revised" License | 5 votes |
def add_One_vs_One_3x3_Functional(self, input, input_shape, id_branch, nkernels, nOutput=2, activation='softmax'):
    """Adds a small 3x3-conv branch with a global-pool classifier head
    (useful for ECOC_loss models). Returns the branch's output tensor.
    """
    branch = str(id_branch)

    # 3x3 convolution (legacy Convolution2D signature: filters, rows, cols).
    conv_out = Convolution2D(nkernels, 3, 3, name='3x3/ecoc_' + branch,
                             activation='relu')(input)

    # Average pooling over the whole feature map.
    x = AveragePooling2D(pool_size=input_shape, strides=(1, 1),
                         name='ave_pool/ecoc_' + branch)(conv_out)

    # Classifier head: flatten -> dropout -> dense.
    output_name = 'fc_OnevsOne_' + branch + '/out'
    x = Flatten(name='fc_OnevsOne_' + branch + '/flatten')(x)
    x = Dropout(0.5, name='fc_OnevsOne_' + branch + '/drop')(x)
    out_node = Dense(nOutput, activation=activation, name=output_name)(x)
    return out_node
Example #23
Source File: models.py From Carla-RL with MIT License | 5 votes |
def model_base_5_residual_CNN(input_shape):
    """Five-stage CNN with concat-style skip connections.

    Each of the first four stages concatenates its conv+ReLU output with the
    stage input before average pooling; the fifth stage has no skip.
    Returns the (input tensor, flattened output tensor) pair.
    """
    net_in = Input(shape=input_shape)

    def residual_stage(tensor, filters, kernel, pool, stride):
        # Conv + ReLU, concatenated with the stage input, then pooled.
        conv = Conv2D(filters, kernel, padding='same')(tensor)
        act = Activation('relu')(conv)
        merged = Concatenate()([act, tensor])
        return AveragePooling2D(pool_size=pool, strides=stride, padding='same')(merged)

    x = residual_stage(net_in, 64, (7, 7), (5, 5), (3, 3))
    x = residual_stage(x, 64, (5, 5), (5, 5), (3, 3))
    x = residual_stage(x, 128, (5, 5), (5, 5), (2, 2))
    x = residual_stage(x, 256, (5, 5), (5, 5), (2, 2))

    # Final stage: no skip connection.
    x = Conv2D(512, (3, 3), padding='same')(x)
    x = Activation('relu')(x)
    x = AveragePooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)
    flat = Flatten()(x)
    return net_in, flat


# 5 CNN layer with residual connections and no activations model
Example #24
Source File: layers_builder.py From PSPNet-Keras-tensorflow with MIT License | 5 votes |
def interp_block(prev_layer, level, feature_map_shape, input_shape):
    """PSPNet pyramid-pooling branch.

    Average-pools at the given pyramid `level`, projects to 512 channels with
    a 1x1 conv + BN + ReLU, then resizes back to `feature_map_shape` with the
    Interp layer.

    Args:
        prev_layer: input feature-map tensor.
        level: pyramid level (1, 2, 3 or 6) selecting the pooling kernel.
        feature_map_shape: spatial shape to interpolate back to.
        input_shape: network input resolution; only (473, 473) and
            (713, 713) have pooling parameters defined.

    Raises:
        ValueError: if `input_shape` has no defined pooling parameters.
    """
    if input_shape == (473, 473):
        kernel_strides_map = {1: 60, 2: 30, 3: 20, 6: 10}
    elif input_shape == (713, 713):
        kernel_strides_map = {1: 90, 2: 45, 3: 30, 6: 15}
    else:
        # Bug fix: the original printed a message and called exit(1), which
        # kills the whole process; raising lets callers handle the error.
        raise ValueError(
            "Pooling parameters for input shape {} are not defined.".format(input_shape))

    names = [
        "conv5_3_pool" + str(level) + "_conv",
        "conv5_3_pool" + str(level) + "_conv_bn"
    ]
    kernel = (kernel_strides_map[level], kernel_strides_map[level])
    strides = (kernel_strides_map[level], kernel_strides_map[level])
    prev_layer = AveragePooling2D(kernel, strides=strides)(prev_layer)
    prev_layer = Conv2D(512, (1, 1), strides=(1, 1), name=names[0],
                        use_bias=False)(prev_layer)
    prev_layer = BN(name=names[1])(prev_layer)
    prev_layer = Activation('relu')(prev_layer)
    prev_layer = Interp(feature_map_shape)(prev_layer)
    return prev_layer
Example #25
Source File: _pspnet_2.py From image-segmentation-keras with MIT License | 5 votes |
def interp_block(prev_layer, level, feature_map_shape, input_shape):
    """PSPNet pyramid-pooling branch.

    Average-pools at the given pyramid `level`, projects to 512 channels with
    a 1x1 conv + BN + ReLU, then resizes back to `feature_map_shape` with the
    Interp layer.

    Args:
        prev_layer: input feature-map tensor.
        level: pyramid level (1, 2, 3 or 6) selecting the pooling kernel.
        feature_map_shape: spatial shape to interpolate back to.
        input_shape: network input resolution; only (473, 473) and
            (713, 713) have pooling parameters defined.

    Raises:
        ValueError: if `input_shape` has no defined pooling parameters.
    """
    if input_shape == (473, 473):
        kernel_strides_map = {1: 60, 2: 30, 3: 20, 6: 10}
    elif input_shape == (713, 713):
        kernel_strides_map = {1: 90, 2: 45, 3: 30, 6: 15}
    else:
        # Bug fix: the original printed a message and called exit(1), which
        # kills the whole process; raising lets callers handle the error.
        raise ValueError(
            "Pooling parameters for input shape {} are not defined.".format(input_shape))

    names = [
        "conv5_3_pool" + str(level) + "_conv",
        "conv5_3_pool" + str(level) + "_conv_bn"
    ]
    kernel = (kernel_strides_map[level], kernel_strides_map[level])
    strides = (kernel_strides_map[level], kernel_strides_map[level])
    prev_layer = AveragePooling2D(kernel, strides=strides)(prev_layer)
    prev_layer = Conv2D(512, (1, 1), strides=(1, 1), name=names[0],
                        use_bias=False)(prev_layer)
    prev_layer = BN(name=names[1])(prev_layer)
    prev_layer = Activation('relu')(prev_layer)
    prev_layer = Interp(feature_map_shape)(prev_layer)
    return prev_layer
Example #26
Source File: DenseNet_CIFAR10.py From hacktoberfest2018 with GNU General Public License v3.0 | 5 votes |
def add_transition(input, num_filter = 12, dropout_rate = 0.2):
    """DenseNet transition: BN -> ReLU -> compressed 1x1 conv (-> dropout) -> 2x2 avg pool.

    Uses the module-level `compression` and `weight_decay` globals.
    Returns (pooled tensor, compressed filter count).
    """
    global weight_decay
    compressed = int(num_filter * compression)

    x = BatchNormalization()(input)
    x = Activation('relu')(x)
    x = Conv2D(compressed, (1, 1), use_bias=False, padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate > 0:
        x = Dropout(dropout_rate)(x)
    x = AveragePooling2D(pool_size=(2, 2))(x)
    return x, compressed
Example #27
Source File: dlight.py From faceswap with GNU General Public License v3.0 | 5 votes |
def encoder(self):
    """ DeLight Encoder Network

    Five identical downscale stages: each stage concatenates a conv branch
    (`self.blocks.conv`) with an average-pooled + LeakyReLU branch of the
    previous output, with the filter count doubling each stage. The result
    is pushed through a dense bottleneck with light dropout and reshaped
    back to a 4x4x1024 feature map.
    """
    input_ = Input(shape=self.input_shape)
    var_x = input_

    # Refactor: the original repeated this stage five times verbatim; a loop
    # over the filter counts (f/2, f, 2f, 4f, 8f) builds the exact same graph.
    for filters in (self.encoder_filters // 2,
                    self.encoder_filters,
                    self.encoder_filters * 2,
                    self.encoder_filters * 4,
                    self.encoder_filters * 8):
        conv_branch = self.blocks.conv(var_x, filters)
        pool_branch = AveragePooling2D()(var_x)
        pool_branch = LeakyReLU(0.1)(pool_branch)
        var_x = Concatenate()([conv_branch, pool_branch])

    # Dense bottleneck, then reshape back to a spatial tensor.
    var_x = Dense(self.encoder_dim)(Flatten()(var_x))
    var_x = Dropout(0.05)(var_x)
    var_x = Dense(4 * 4 * 1024)(var_x)
    var_x = Dropout(0.05)(var_x)
    var_x = Reshape((4, 4, 1024))(var_x)
    return KerasModel(input_, var_x)
Example #28
Source File: stylegan.py From StyleGAN-Keras with MIT License | 5 votes |
def d_block(inp, fil, p = True):
    """Discriminator block: two 3x3 convs with LeakyReLU activations,
    with an optional average-pool downscale between them."""
    x = Conv2D(filters=fil, kernel_size=3, padding='same',
               kernel_initializer='he_normal')(inp)
    x = LeakyReLU(0.01)(x)

    if p:
        x = AveragePooling2D()(x)

    x = Conv2D(filters=fil, kernel_size=3, padding='same',
               kernel_initializer='he_normal')(x)
    out = LeakyReLU(0.01)(x)

    return out


#This object holds the models
Example #29
Source File: mixed-stylegan.py From StyleGAN-Keras with MIT License | 5 votes |
def d_block(inp, fil, p = True):
    """Discriminator block: two 3x3 convs (he_normal weights, zero biases)
    with LeakyReLU activations and an optional average-pool downscale."""
    x = Conv2D(filters=fil, kernel_size=3, padding='same',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(inp)
    x = LeakyReLU(0.01)(x)

    if p:
        x = AveragePooling2D()(x)

    x = Conv2D(filters=fil, kernel_size=3, padding='same',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(x)
    out = LeakyReLU(0.01)(x)

    return out


#This object holds the models
Example #30
Source File: nasnet.py From neural-image-assessment with MIT License | 5 votes |
def _add_auxiliary_head(x, classes, weight_decay):
    '''Adds an auxiliary head for training the model

    From section A.7 "Training of ImageNet models" of the paper, all NASNet models are
    trained using an auxiliary classifier around 2/3 of the depth of the network, with
    a loss weight of 0.4

    # Arguments
        x: input tensor
        classes: number of output classes
        weight_decay: l2 regularization weight

    # Returns
        a keras Tensor
    '''
    # Indices of the spatial dims in _keras_shape, depending on data format.
    img_height = 1 if K.image_data_format() == 'channels_last' else 2
    img_width = 2 if K.image_data_format() == 'channels_last' else 3
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    with K.name_scope('auxiliary_branch'):
        auxiliary_x = Activation('relu')(x)
        auxiliary_x = AveragePooling2D((5, 5), strides=(3, 3), padding='valid',
                                       name='aux_pool')(auxiliary_x)
        # 1x1 projection to 128 channels + BN + ReLU.
        auxiliary_x = Conv2D(128, (1, 1), padding='same', use_bias=False,
                             name='aux_conv_projection',
                             kernel_initializer='he_normal',
                             kernel_regularizer=l2(weight_decay))(auxiliary_x)
        auxiliary_x = BatchNormalization(axis=channel_axis, momentum=_BN_DECAY,
                                         epsilon=_BN_EPSILON,
                                         name='aux_bn_projection')(auxiliary_x)
        auxiliary_x = Activation('relu')(auxiliary_x)
        # Reduction conv whose kernel spans the full remaining spatial extent
        # (read from _keras_shape, a legacy Keras 1/early-2 attribute), so the
        # output collapses to 1x1 spatially.
        auxiliary_x = Conv2D(768, (auxiliary_x._keras_shape[img_height],
                                   auxiliary_x._keras_shape[img_width]),
                             padding='valid', use_bias=False,
                             kernel_initializer='he_normal',
                             kernel_regularizer=l2(weight_decay),
                             name='aux_conv_reduction')(auxiliary_x)
        auxiliary_x = BatchNormalization(axis=channel_axis, momentum=_BN_DECAY,
                                         epsilon=_BN_EPSILON,
                                         name='aux_bn_reduction')(auxiliary_x)
        auxiliary_x = Activation('relu')(auxiliary_x)
        auxiliary_x = GlobalAveragePooling2D()(auxiliary_x)
        # Softmax classifier for the auxiliary loss.
        auxiliary_x = Dense(classes, activation='softmax',
                            kernel_regularizer=l2(weight_decay),
                            name='aux_predictions')(auxiliary_x)
    return auxiliary_x