Python keras.layers.GlobalAveragePooling2D() Examples
The following are 23 code examples of keras.layers.GlobalAveragePooling2D(), collected from open-source projects. Each example notes the project and source file it comes from, so you can follow it back to its original context. You may also want to check out the other available functions and classes of the keras.layers module.
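Before the project snippets, here is a minimal, self-contained sketch (not taken from any project below) of what the layer computes: GlobalAveragePooling2D averages each feature map over its spatial dimensions, collapsing a (batch, height, width, channels) tensor to (batch, channels) under the default channels_last data format.

import numpy as np
from keras.layers import Input, GlobalAveragePooling2D
from keras.models import Model

# (batch, 16, 16, 3) -> (batch, 3): one mean value per channel
inp = Input(shape=(16, 16, 3))
out = GlobalAveragePooling2D()(inp)
model = Model(inputs=inp, outputs=out)

print(model.predict(np.ones((1, 16, 16, 3))).shape)  # (1, 3)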
Example #1
Source File: mobilenet_base.py From MobileNetV3 with MIT License

def _squeeze(self, inputs):
    """Squeeze and Excitation.
    This function defines a squeeze structure.

    # Arguments
        inputs: Tensor, input tensor of conv layer.
    """
    input_channels = int(inputs.shape[-1])

    x = GlobalAveragePooling2D()(inputs)
    x = Dense(input_channels, activation='relu')(x)
    x = Dense(input_channels, activation='hard_sigmoid')(x)
    x = Reshape((1, 1, input_channels))(x)
    x = Multiply()([inputs, x])

    return x

Example #2
Source File: TransferLearning.py From Intelligent-Projects-Using-Python with MIT License

def resnet_pseudo(self, dim=224, freeze_layers=10, full_freeze='N'):
    model = ResNet50(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(5, activation='softmax')(x)
    model_final = Model(inputs=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final

# VGG16 model for transfer learning

Example #3
Source File: TransferLearning_reg.py From Intelligent-Projects-Using-Python with MIT License

def resnet_pseudo(self, dim=224, freeze_layers=10, full_freeze='N'):
    model = ResNet50(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(1)(x)
    model_final = Model(inputs=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final

# VGG16 model for transfer learning

Example #4
Source File: xception.py From Keras-FasterRCNN with MIT License

def classifier_layers(x, input_shape, trainable=False):
    # compile times on theano tend to be very high, so we use smaller ROI
    # pooling regions to work around it (hence a smaller stride in the
    # region that follows the ROI pool)
    x = TimeDistributed(SeparableConv2D(1536, (3, 3), padding='same', use_bias=False),
                        name='block14_sepconv1')(x)
    x = TimeDistributed(BatchNormalization(), name='block14_sepconv1_bn')(x)
    x = Activation('relu', name='block14_sepconv1_act')(x)

    x = TimeDistributed(SeparableConv2D(2048, (3, 3), padding='same', use_bias=False),
                        name='block14_sepconv2')(x)
    x = TimeDistributed(BatchNormalization(), name='block14_sepconv2_bn')(x)
    x = Activation('relu', name='block14_sepconv2_act')(x)

    # assign the pooled output so the pooling is not a no-op
    x = TimeDistributed(GlobalAveragePooling2D(), name='avg_pool')(x)
    return x

Example #5
Source File: TransferLearning.py From Intelligent-Projects-Using-Python with MIT License

def inception_pseudo(self, dim=224, freeze_layers=30, full_freeze='N'):
    model = InceptionV3(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(5, activation='softmax')(x)
    model_final = Model(inputs=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final

# ResNet50 model for transfer learning

Example #6
Source File: TransferLearning_reg.py From Intelligent-Projects-Using-Python with MIT License

def inception_pseudo(self, dim=224, freeze_layers=30, full_freeze='N'):
    model = InceptionV3(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(1)(x)
    model_final = Model(inputs=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final

# ResNet50 model for transfer learning

Example #7
Source File: TransferLearning_ffd.py From Intelligent-Projects-Using-Python with MIT License

def inception_pseudo(self, dim=224, freeze_layers=10, full_freeze='N'):
    model = InceptionV3(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(5, activation='softmax')(x)
    model_final = Model(inputs=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final

# ResNet50 model for transfer learning

Example #8
Source File: xception.py From pygta5 with GNU General Public License v3.0

def get_model(session):
    # create the base pre-trained model
    base_model = Xception(weights=None, include_top=False, input_shape=(270, 480, 3))

    # add a global spatial average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # add a fully-connected layer
    x = Dense(1024, activation='relu')(x)
    # output layer
    predictions = Dense(session.training_dataset_info['number_of_labels'],
                        activation='softmax')(x)

    # model
    model = Model(inputs=base_model.input, outputs=predictions)

    learning_rate = 0.001
    opt = keras.optimizers.Adam(lr=learning_rate, decay=1e-5)
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    return model

Example #9
Source File: TransferLearning_ffd.py From Intelligent-Projects-Using-Python with MIT License

def resnet_pseudo(self, dim=224, freeze_layers=10, full_freeze='N'):
    model = ResNet50(weights='imagenet', include_top=False)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(5, activation='softmax')(x)
    model_final = Model(inputs=model.input, outputs=out)
    if full_freeze != 'N':
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final

# VGG16 model for transfer learning

Example #10
Source File: test_image_data_tasks.py From DeepLearning_Wavelet-LSTM with MIT License

def test_image_classification():
    np.random.seed(1337)
    input_shape = (16, 16, 3)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,
                                                         num_test=200,
                                                         input_shape=input_shape,
                                                         classification=True,
                                                         num_classes=4)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    model = Sequential([
        layers.Conv2D(filters=8, kernel_size=3,
                      activation='relu',
                      input_shape=input_shape),
        layers.MaxPooling2D(pool_size=2),
        layers.Conv2D(filters=4, kernel_size=(3, 3),
                      activation='relu', padding='same'),
        layers.GlobalAveragePooling2D(),
        layers.Dense(y_test.shape[-1], activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    model.summary()

    history = model.fit(x_train, y_train, epochs=10, batch_size=16,
                        validation_data=(x_test, y_test), verbose=0)
    assert history.history['val_acc'][-1] > 0.75

    config = model.get_config()
    model = Sequential.from_config(config)

Example #11
Source File: mobilenet_transfer_pseudo_cifar.py From Pseudo-Label-Keras with MIT License

def create_cnn():
    net = MobileNet(input_shape=(128, 128, 3), include_top=False)
    # train from conv_pw_6 onward (freeze the first 41 layers)
    for i in range(41):
        net.layers[i].trainable = False

    # upsample 32 -> 128
    input = Input((32, 32, 3))
    x = UpSampling2D(4)(input)
    x = net(x)
    x = GlobalAveragePooling2D()(x)
    x = Dense(10, activation="softmax")(x)

    model = Model(input, x)
    model.summary()
    return model

Example #12
Source File: squeeze_excitation_block.py From CNNArt with Apache License 2.0

def squeeze_excitation_block(inputSE, ratio=16):
    '''
    Creates a squeeze-and-excitation block
    :param inputSE: input tensor
    :param ratio: reduction ratio r for the bottleneck formed by the two FC layers
    :return: keras tensor
    '''
    if backend.image_data_format() == 'channels_first':
        channels = 1
    else:
        channels = -1

    # number of input filters/channels
    inputSE_shape = backend.int_shape(inputSE)
    numChannels = inputSE_shape[channels]

    # squeeze operation
    output = GlobalAveragePooling2D(data_format=backend.image_data_format())(inputSE)

    # excitation operation
    output = Dense(numChannels // ratio, activation='relu',
                   use_bias=True, kernel_initializer='he_normal')(output)
    output = Dense(numChannels, activation='sigmoid',
                   use_bias=True, kernel_initializer='he_normal')(output)

    # scale operation
    output = multiply([inputSE, output])
    return output

Example #13
Source File: pseudo_cifar.py From Pseudo-Label-Keras with MIT License

def create_cnn():
    input = Input(shape=(32, 32, 3))
    x = basic_conv_block(input, 64, 3)
    x = AveragePooling2D(2)(x)
    x = basic_conv_block(x, 128, 3)
    x = AveragePooling2D(2)(x)
    x = basic_conv_block(x, 256, 3)
    x = GlobalAveragePooling2D()(x)
    x = Dense(10, activation="softmax")(x)

    model = Model(input, x)
    return model

Example #14
Source File: mobilenet_transfer_cifar.py From Pseudo-Label-Keras with MIT License

def create_cnn():
    net = MobileNet(input_shape=(128, 128, 3), include_top=False)
    # train from conv_pw_6 onward (freeze the first 41 layers)
    for i in range(41):
        net.layers[i].trainable = False

    # upsample 32 -> 128
    input = Input((32, 32, 3))
    x = UpSampling2D(4)(input)
    x = net(x)
    x = GlobalAveragePooling2D()(x)
    x = Dense(10, activation="softmax")(x)

    model = Model(input, x)
    model.summary()
    return model

Example #15
Source File: supervised_cifar.py From Pseudo-Label-Keras with MIT License

def create_cnn():
    input = Input(shape=(32, 32, 3))
    x = basic_conv_block(input, 64, 3)
    x = AveragePooling2D(2)(x)
    x = basic_conv_block(x, 128, 3)
    x = AveragePooling2D(2)(x)
    x = basic_conv_block(x, 256, 3)
    x = GlobalAveragePooling2D()(x)
    x = Dense(10, activation="softmax")(x)

    model = Model(input, x)
    return model

Example #16
Source File: pseudo_pretrain_cifar.py From Pseudo-Label-Keras with MIT License

def create_cnn():
    input = Input(shape=(32, 32, 3))
    x = basic_conv_block(input, 64, 3)
    x = AveragePooling2D(2)(x)
    x = basic_conv_block(x, 128, 3)
    x = AveragePooling2D(2)(x)
    x = basic_conv_block(x, 256, 3)
    x = GlobalAveragePooling2D()(x)
    x = Dense(10, activation="softmax")(x)

    model = Model(input, x)
    return model

Example #17
Source File: mobilenet_pseudo_cifar.py From Pseudo-Label-Keras with MIT License

def create_cnn():
    net = MobileNet(input_shape=(128, 128, 3), weights=None, include_top=False)

    # upsample 32 -> 128
    input = Input((32, 32, 3))
    x = UpSampling2D(4)(input)
    x = net(x)
    x = GlobalAveragePooling2D()(x)
    x = Dense(10, activation="softmax")(x)

    model = Model(input, x)
    model.summary()
    return model

Example #18
Source File: inception_v3.py From keras-transfer-learning-for-oxford102 with MIT License

def _create(self):
    base_model = KerasInceptionV3(weights='imagenet', include_top=False,
                                  input_tensor=self.get_input_tensor())
    self.make_net_layers_non_trainable(base_model)

    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(self.noveltyDetectionLayerSize, activation='elu',
              name=self.noveltyDetectionLayerName)(x)
    predictions = Dense(len(config.classes), activation='softmax')(x)

    self.model = Model(inputs=base_model.input, outputs=predictions)

Example #19
Source File: PredictClassifierEnsemble.py From kaggle-rsna18 with MIT License

def get_model(base_model, layer, lr=1e-3, input_shape=(224, 224, 1), classes=2,
              activation="softmax", dropout=None, pooling="avg",
              weights=None, pretrained=None):
    base = base_model(input_shape=input_shape, include_top=False,
                      weights=pretrained, channels="gray")
    if pooling == "avg":
        x = GlobalAveragePooling2D()(base.output)
    elif pooling == "max":
        x = GlobalMaxPooling2D()(base.output)
    elif pooling is None:
        x = Flatten()(base.output)
    if dropout is not None:
        x = Dropout(dropout)(x)
    x = Dense(classes, activation=activation)(x)

    model = Model(inputs=base.input, outputs=x)
    if weights is not None:
        model.load_weights(weights)
    for l in model.layers[:layer]:
        l.trainable = False
    model.compile(loss="binary_crossentropy", metrics=["accuracy"],
                  optimizer=optimizers.Adam(lr))
    return model

##########
## DATA ##
##########

# == PREPROCESSING == #

Example #20
Source File: TrainOneClassifier.py From kaggle-rsna18 with MIT License

def get_model(base_model, layer, lr=1e-3, input_shape=(224, 224, 1), classes=2,
              activation="softmax", dropout=None, pooling="avg",
              weights=None, pretrained="imagenet"):
    base = base_model(input_shape=input_shape, include_top=False,
                      weights=pretrained, channels="gray")
    if pooling == "avg":
        x = GlobalAveragePooling2D()(base.output)
    elif pooling == "max":
        x = GlobalMaxPooling2D()(base.output)
    elif pooling is None:
        x = Flatten()(base.output)
    if dropout is not None:
        x = Dropout(dropout)(x)
    x = Dense(classes, activation=activation)(x)

    model = Model(inputs=base.input, outputs=x)
    if weights is not None:
        model.load_weights(weights)
    for l in model.layers[:layer]:
        l.trainable = False
    model.compile(loss="binary_crossentropy", metrics=["accuracy"],
                  optimizer=optimizers.Adam(lr))
    return model

##########
## DATA ##
##########

# == PREPROCESSING == #

Example #21
Source File: TrainClassifierEnsemble.py From kaggle-rsna18 with MIT License

def get_model(base_model, layer, lr=1e-3, input_shape=(224, 224, 1), classes=2,
              activation="softmax", dropout=None, pooling="avg",
              weights=None, pretrained="imagenet"):
    base = base_model(input_shape=input_shape, include_top=False,
                      weights=pretrained, channels="gray")
    if pooling == "avg":
        x = GlobalAveragePooling2D()(base.output)
    elif pooling == "max":
        x = GlobalMaxPooling2D()(base.output)
    elif pooling is None:
        x = Flatten()(base.output)
    if dropout is not None:
        x = Dropout(dropout)(x)
    x = Dense(classes, activation=activation)(x)

    model = Model(inputs=base.input, outputs=x)
    if weights is not None:
        model.load_weights(weights)
    for l in model.layers[:layer]:
        l.trainable = False
    model.compile(loss="binary_crossentropy", metrics=["accuracy"],
                  optimizer=optimizers.Adam(lr))
    return model

##########
## DATA ##
##########

# == PREPROCESSING == #

Example #22
Source File: attention_module.py From CBAM-keras with MIT License

def channel_attention(input_feature, ratio=8):
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    channel = input_feature._keras_shape[channel_axis]

    shared_layer_one = Dense(channel // ratio,
                             activation='relu',
                             kernel_initializer='he_normal',
                             use_bias=True,
                             bias_initializer='zeros')
    shared_layer_two = Dense(channel,
                             kernel_initializer='he_normal',
                             use_bias=True,
                             bias_initializer='zeros')

    avg_pool = GlobalAveragePooling2D()(input_feature)
    avg_pool = Reshape((1, 1, channel))(avg_pool)
    assert avg_pool._keras_shape[1:] == (1, 1, channel)
    avg_pool = shared_layer_one(avg_pool)
    assert avg_pool._keras_shape[1:] == (1, 1, channel // ratio)
    avg_pool = shared_layer_two(avg_pool)
    assert avg_pool._keras_shape[1:] == (1, 1, channel)

    max_pool = GlobalMaxPooling2D()(input_feature)
    max_pool = Reshape((1, 1, channel))(max_pool)
    assert max_pool._keras_shape[1:] == (1, 1, channel)
    max_pool = shared_layer_one(max_pool)
    assert max_pool._keras_shape[1:] == (1, 1, channel // ratio)
    max_pool = shared_layer_two(max_pool)
    assert max_pool._keras_shape[1:] == (1, 1, channel)

    cbam_feature = Add()([avg_pool, max_pool])
    cbam_feature = Activation('sigmoid')(cbam_feature)

    if K.image_data_format() == "channels_first":
        cbam_feature = Permute((3, 1, 2))(cbam_feature)

    return multiply([input_feature, cbam_feature])

Example #23
Source File: attention_module.py From CBAM-keras with MIT License

def se_block(input_feature, ratio=8):
    """Implementation of the Squeeze-and-Excitation (SE) block,
    as described in https://arxiv.org/abs/1709.01507.
    """
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    channel = input_feature._keras_shape[channel_axis]

    se_feature = GlobalAveragePooling2D()(input_feature)
    se_feature = Reshape((1, 1, channel))(se_feature)
    assert se_feature._keras_shape[1:] == (1, 1, channel)
    se_feature = Dense(channel // ratio,
                       activation='relu',
                       kernel_initializer='he_normal',
                       use_bias=True,
                       bias_initializer='zeros')(se_feature)
    assert se_feature._keras_shape[1:] == (1, 1, channel // ratio)
    se_feature = Dense(channel,
                       activation='sigmoid',
                       kernel_initializer='he_normal',
                       use_bias=True,
                       bias_initializer='zeros')(se_feature)
    assert se_feature._keras_shape[1:] == (1, 1, channel)

    if K.image_data_format() == 'channels_first':
        se_feature = Permute((3, 1, 2))(se_feature)

    se_feature = multiply([input_feature, se_feature])
    return se_feature