Python tensorflow.python.keras.layers.BatchNormalization() Examples
The following are 21 code examples of tensorflow.python.keras.layers.BatchNormalization(). You can go to the original project or source file by following the links above each example, or check out all available functions and classes of the module tensorflow.python.keras.layers.
Example #1
Source File: resnet50.py From ImageAI with MIT License
def resnet_first_block_first_module(input, channel_depth):
    residual_input = input
    stride = 1

    residual_input = Conv2D(channel_depth, kernel_size=1, strides=1, padding="same")(residual_input)
    residual_input = BatchNormalization()(residual_input)

    input = Conv2D(int(channel_depth / 4), kernel_size=1, strides=stride, padding="same")(input)
    input = BatchNormalization()(input)
    input = Activation("relu")(input)

    input = Conv2D(int(channel_depth / 4), kernel_size=3, strides=stride, padding="same")(input)
    input = BatchNormalization()(input)
    input = Activation("relu")(input)

    input = Conv2D(channel_depth, kernel_size=1, strides=stride, padding="same")(input)
    input = BatchNormalization()(input)

    input = add([input, residual_input])
    input = Activation("relu")(input)

    return input
Example #2
Source File: resnet50.py From ImageAI with MIT License
def resnet_module(input, channel_depth, strided_pool=False):
    residual_input = input
    stride = 1

    if strided_pool:
        stride = 2
        residual_input = Conv2D(channel_depth, kernel_size=1, strides=stride, padding="same")(residual_input)
        residual_input = BatchNormalization()(residual_input)

    input = Conv2D(int(channel_depth / 4), kernel_size=1, strides=stride, padding="same")(input)
    input = BatchNormalization()(input)
    input = Activation("relu")(input)

    input = Conv2D(int(channel_depth / 4), kernel_size=3, strides=1, padding="same")(input)
    input = BatchNormalization()(input)
    input = Activation("relu")(input)

    input = Conv2D(channel_depth, kernel_size=1, strides=1, padding="same")(input)
    input = BatchNormalization()(input)

    input = add([input, residual_input])
    input = Activation("relu")(input)

    return input
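Examples #1 and #2 are meant to be chained into a full ResNet backbone. Below is a minimal usage sketch (not taken from resnet50.py itself): the stem, input size, and channel depths are illustrative assumptions, and the standard Keras functional-API imports are assumed.

from tensorflow.python.keras.layers import Input, Conv2D, BatchNormalization, Activation, MaxPooling2D, add
from tensorflow.python.keras.models import Model

inputs = Input(shape=(224, 224, 3))

# Stem: strided convolution plus pooling before the residual stages.
x = Conv2D(64, kernel_size=7, strides=2, padding="same")(inputs)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = MaxPooling2D(pool_size=3, strides=2, padding="same")(x)

# The first stage uses the projection-shortcut block, then identity blocks.
x = resnet_first_block_first_module(x, 256)
x = resnet_module(x, 256)
x = resnet_module(x, 256)

# Later stages downsample by passing strided_pool=True.
x = resnet_module(x, 512, strided_pool=True)

model = Model(inputs=inputs, outputs=x)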
Example #3
Source File: densenet.py From ImageAI with MIT License
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    ''' Apply BatchNorm, ReLU, 1x1 Conv2D (with optional compression) and AveragePooling2D

    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of
            feature maps in the transition block.
        weight_decay: weight decay factor

    Returns: keras tensor after applying batch_norm, relu, conv and average pooling
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_normal',
               padding='same', use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    return x
Example #4
Source File: rnn.py From cxplain with MIT License
def build(self, input_layer):
    last_layer = input_layer
    input_shape = K.int_shape(input_layer)

    if self.with_embedding:
        if input_shape[-1] != 1:
            raise ValueError("Only one feature (the index) can be used with embeddings, "
                             "i.e. the input shape should be (num_samples, length, 1). "
                             "The actual shape was: " + str(input_shape))

        last_layer = Lambda(lambda x: K.squeeze(x, axis=-1),
                            output_shape=K.int_shape(last_layer)[:-1])(last_layer)  # Remove feature dimension.
        last_layer = Embedding(self.embedding_size, self.embedding_dimension,
                               input_length=input_shape[-2])(last_layer)

    for _ in range(self.num_layers):
        last_layer = Dense(self.num_units, activation=self.activation)(last_layer)

        if self.with_bn:
            last_layer = BatchNormalization()(last_layer)

        if not np.isclose(self.p_dropout, 0):
            last_layer = Dropout(self.p_dropout)(last_layer)

    return last_layer
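The embedding branch above first squeezes away the trailing feature axis so the raw indices can feed an Embedding layer. A standalone sketch of that pattern follows; the sequence length, vocabulary size, and layer widths are illustrative assumptions, not cxplain's defaults.

from tensorflow.python.keras import backend as K
from tensorflow.python.keras.layers import Input, Lambda, Embedding, Dense, BatchNormalization

tokens = Input(shape=(50, 1))                                # (num_samples, length, 1): one index per timestep
squeezed = Lambda(lambda t: K.squeeze(t, axis=-1))(tokens)   # -> (num_samples, 50)
embedded = Embedding(10000, 32, input_length=50)(squeezed)   # -> (num_samples, 50, 32)
hidden = Dense(64, activation="relu")(embedded)
hidden = BatchNormalization()(hidden)                        # normalizes over the last axis by default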
Example #5
Source File: ops.py From progressive-neural-architecture-search with MIT License
def __init__(self, filters, kernel, strides):
    ''' Constructs a Spatial Convolution - Batch Normalization - Relu block. '''
    super(Convolution, self).__init__()

    self.conv = Conv2D(filters, kernel, strides=strides, padding='same',
                       kernel_initializer='he_uniform')
    self.bn = BatchNormalization()
Example #6
Source File: ops.py From progressive-neural-architecture-search with MIT License
def __init__(self, filters, kernel, strides):
    ''' Constructs a Separable Convolution - Batch Normalization - Relu block. '''
    super(SeperableConvolution, self).__init__()

    self.conv = SeparableConv2D(filters, kernel, strides=strides, padding='same',
                                kernel_initializer='he_uniform')
    self.bn = BatchNormalization()
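Examples #5 and #6 show only the constructors; the forward pass of these blocks is not part of the snippets. A plausible reconstruction of the full Conv-BN-ReLU pattern as a subclassed model is sketched below. The class name ConvBnRelu and the call method are hypothetical, not the project's verbatim code.

import tensorflow as tf

class ConvBnRelu(tf.keras.Model):
    # Hypothetical reconstruction for illustration only.
    def __init__(self, filters, kernel, strides):
        super(ConvBnRelu, self).__init__()
        self.conv = tf.keras.layers.Conv2D(filters, kernel, strides=strides, padding='same',
                                           kernel_initializer='he_uniform')
        self.bn = tf.keras.layers.BatchNormalization()

    def call(self, inputs, training=False):
        x = self.conv(inputs)
        x = self.bn(x, training=training)  # batch statistics during training, moving averages at inference
        return tf.nn.relu(x)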
Example #7
Source File: RTSNNet.py From alpha-zero-general with MIT License
def __init__(self, game, encoder):
    """
    NNet model, copied from Othello NNet, with reduced fully connected layers fc1 and fc2
    and reduced nnet_args.num_channels

    :param game: game configuration
    :param encoder: Encoder, used to encode game boards
    """
    from rts.src.config_class import CONFIG

    # game params
    self.board_x, self.board_y, num_encoders = game.getBoardSize()
    self.action_size = game.getActionSize()
    """ num_encoders = CONFIG.nnet_args.encoder.num_encoders """
    num_encoders = encoder.num_encoders

    # Neural Net
    self.input_boards = Input(shape=(self.board_x, self.board_y, num_encoders))  # s: batch_size x board_x x board_y x num_encoders

    x_image = Reshape((self.board_x, self.board_y, num_encoders))(self.input_boards)  # batch_size x board_x x board_y x num_encoders
    h_conv1 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels, 3, padding='same', use_bias=False)(x_image)))  # batch_size x board_x x board_y x num_channels
    h_conv2 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels, 3, padding='same', use_bias=False)(h_conv1)))  # batch_size x board_x x board_y x num_channels
    h_conv3 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels, 3, padding='valid', use_bias=False)(h_conv2)))  # batch_size x (board_x-2) x (board_y-2) x num_channels
    h_conv4 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels, 3, padding='valid', use_bias=False)(h_conv3)))  # batch_size x (board_x-4) x (board_y-4) x num_channels
    h_conv4_flat = Flatten()(h_conv4)
    s_fc1 = Dropout(CONFIG.nnet_args.dropout)(Activation('relu')(BatchNormalization(axis=1)(Dense(256, use_bias=False)(h_conv4_flat))))  # batch_size x 256
    s_fc2 = Dropout(CONFIG.nnet_args.dropout)(Activation('relu')(BatchNormalization(axis=1)(Dense(128, use_bias=False)(s_fc1))))  # batch_size x 128
    self.pi = Dense(self.action_size, activation='softmax', name='pi')(s_fc2)  # batch_size x self.action_size
    self.v = Dense(1, activation='tanh', name='v')(s_fc2)  # batch_size x 1

    self.model = Model(inputs=self.input_boards, outputs=[self.pi, self.v])
    self.model.compile(loss=['categorical_crossentropy', 'mean_squared_error'],
                       optimizer=Adam(CONFIG.nnet_args.lr))
Example #8
Source File: resnet.py From camera-trap-classifier with MIT License
def _bn_relu(input):
    """Helper to build a BN -> relu block
    """
    norm = BatchNormalization(axis=CHANNEL_AXIS)(input)
    return Activation("relu")(norm)
Example #9
Source File: mlp.py From cxplain with MIT License
def build(self, input_layer):
    last_layer = input_layer

    for _ in range(self.num_layers):
        last_layer = Dense(self.num_units, activation=self.activation)(last_layer)

        if self.with_bn:
            last_layer = BatchNormalization()(last_layer)

        if not np.isclose(self.p_dropout, 0):
            last_layer = Dropout(self.p_dropout)(last_layer)

    return last_layer
Example #10
Source File: tpu_normalization.py From BigGAN-TPU-TensorFlow with MIT License
def cross_replica_batch_normalization(inputs, training=False, num_distributed_groups=1, **kwargs):
    """Functional interface for the cross replica batch normalization layer.

    For detailed information of arguments and implementation, refer to:
    https://www.tensorflow.org/api_docs/python/tf/keras/layers/BatchNormalization

    Arguments:
        inputs: Tensor input.
        training: Either a Python boolean, or a TensorFlow boolean scalar tensor
            (e.g. a placeholder). Whether to return the output in training mode
            (normalized with statistics of the current batch) or in inference mode
            (normalized with moving statistics). **NOTE**: make sure to set this
            parameter correctly, or else your training/inference will not work
            properly.
        num_distributed_groups: Number of groups to normalize in the distributed
            batch normalization. Replicas will evenly split into groups. For
            example, 1 for global batch norm and -1 or None for per-replica
            batch norm.
        **kwargs: For passing through arguments to BatchNormalization.

    Returns:
        Output tensor.

    Raises:
        ValueError: if eager execution is enabled.
    """
    layer = BatchNormalization(
        cross_replica_average_fn=functools.partial(
            cross_replica_average, num_groups=num_distributed_groups),
        **kwargs)
    return layer.apply(inputs, training=training)
Example #11
Source File: tpu_normalization.py From BigGAN-TPU-TensorFlow with MIT License
def _moments(self, inputs, reduction_axes, keep_dims):
    mean, variance = super(BatchNormalization, self)._moments(
        inputs, reduction_axes, keep_dims=keep_dims)

    if self.cross_replica_average_fn:
        mean = self.cross_replica_average_fn(mean)
        variance = self.cross_replica_average_fn(variance)

    return (mean, variance)
Example #12
Source File: tpu_normalization.py From BigGAN-TPU-TensorFlow with MIT License
def __init__(self, fused=None, cross_replica_average_fn=None, **kwargs):
    super(BatchNormalization, self).__init__(**kwargs)
    self.cross_replica_average_fn = cross_replica_average_fn

    if fused and cross_replica_average_fn is not None:
        raise ValueError('fused must be `False` when specifying'
                         ' cross_replica_average_fn')
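Examples #10-#12 are fragments of a single class: a BatchNormalization subclass whose batch moments are averaged across TPU replicas before normalization. Assembled, the skeleton looks roughly like the following sketch; the base class is assumed here to be the Keras BatchNormalization layer (the exact base in tpu_normalization.py may differ), and cross_replica_average is a helper defined elsewhere in that file.

from tensorflow.python.keras import layers as keras_layers

class BatchNormalization(keras_layers.BatchNormalization):
    # Sketch assembled from the three fragments above.
    def __init__(self, fused=None, cross_replica_average_fn=None, **kwargs):
        super(BatchNormalization, self).__init__(**kwargs)
        self.cross_replica_average_fn = cross_replica_average_fn
        if fused and cross_replica_average_fn is not None:
            raise ValueError('fused must be `False` when specifying'
                             ' cross_replica_average_fn')

    def _moments(self, inputs, reduction_axes, keep_dims):
        # Per-replica moments first, then an average over the replica group.
        mean, variance = super(BatchNormalization, self)._moments(
            inputs, reduction_axes, keep_dims=keep_dims)
        if self.cross_replica_average_fn:
            mean = self.cross_replica_average_fn(mean)
            variance = self.cross_replica_average_fn(variance)
        return (mean, variance)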
Example #13
Source File: densenet.py From ImageAI with MIT License
def __conv_block(ip, nb_filter, bottleneck=False, dropout_rate=None, weight_decay=1e-4):
    ''' Apply BatchNorm, Relu, 3x3 Conv2D, optional bottleneck block and dropout

    Args:
        ip: Input keras tensor
        nb_filter: number of filters
        bottleneck: add bottleneck block
        dropout_rate: dropout rate
        weight_decay: weight decay factor

    Returns: keras tensor with batch_norm, relu and convolution2d added (optional bottleneck)
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)

    if bottleneck:
        inter_channel = nb_filter * 4  # Obtained from https://github.com/liuzhuang13/DenseNet/blob/master/densenet.lua

        x = Conv2D(inter_channel, (1, 1), kernel_initializer='he_normal', padding='same',
                   use_bias=False, kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)

    x = Conv2D(nb_filter, (3, 3), kernel_initializer='he_normal', padding='same', use_bias=False)(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x
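Examples #3 and #13 come from the same densenet.py and alternate in a DenseNet: several __conv_block outputs are concatenated along the channel axis (the dense connectivity), then a __transition_block compresses and downsamples. A minimal sketch of that pattern as it might appear inside densenet.py; the block length, growth rate, and dropout rate are illustrative assumptions.

from tensorflow.python.keras import backend as K
from tensorflow.python.keras.layers import Input, concatenate

inputs = Input(shape=(32, 32, 3))
x = inputs

# A small dense block: each conv block's output is concatenated, not added
# (axis=-1 assumes the channels_last data format).
for _ in range(4):
    cb = __conv_block(x, nb_filter=12, bottleneck=True, dropout_rate=0.2)
    x = concatenate([x, cb], axis=-1)

# Transition: halve the feature maps (compression=0.5) and downsample 2x.
x = __transition_block(x, nb_filter=K.int_shape(x)[-1], compression=0.5)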
Example #14
Source File: resnet_model.py From ml-on-gcp with Apache License 2.0
def identity_block(input_tensor, kernel_size, filters, stage, block):
    """The identity block is the block that has no conv layer at shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names

    # Returns
        Output tensor for the block.
    """
    filters1, filters2, filters3 = filters
    if backend.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = layers.Conv2D(filters1, (1, 1), use_bias=False,
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2a')(input_tensor)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name=bn_name_base + '2a')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters2, kernel_size, padding='same', use_bias=False,
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2b')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name=bn_name_base + '2b')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters3, (1, 1), use_bias=False,
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2c')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name=bn_name_base + '2c')(x)

    x = layers.add([x, input_tensor])
    x = layers.Activation('relu')(x)
    return x
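Because the shortcut is added unchanged, the identity block requires its input to already have filters3 channels. A hedged usage sketch follows; the module-level constants are filled in with typical values from these resnet_model.py variants, assumed here for illustration.

from tensorflow.python.keras import backend, layers, regularizers

# Typical values in these resnet_model.py files (illustrative assumptions).
L2_WEIGHT_DECAY = 1e-4
BATCH_NORM_DECAY = 0.9
BATCH_NORM_EPSILON = 1e-5

inputs = layers.Input(shape=(56, 56, 256))
# The input channel count (256) must equal filters3 so the residual add is shape-compatible.
outputs = identity_block(inputs, kernel_size=3, filters=[64, 64, 256],
                         stage=2, block='b')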
Example #15
Source File: resnet_cifar_model.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def identity_building_block(input_tensor, kernel_size, filters, stage, block, training=None):
    """The identity block is the block that has no conv layer at shortcut.

    Arguments:
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of 2 conv layers at main path
        stage: integer, current stage label, used for generating layer names
        block: current block label, used for generating layer names
        training: Only used if training keras model with Estimator. In other
            scenarios it is handled automatically.

    Returns:
        Output tensor for the block.
    """
    filters1, filters2 = filters
    if backend.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = layers.Conv2D(filters1, kernel_size, padding='same', use_bias=False,
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2a')(input_tensor)
    x = layers.BatchNormalization(
        axis=bn_axis, momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON,
        name=bn_name_base + '2a')(x, training=training)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters2, kernel_size, padding='same', use_bias=False,
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2b')(x)
    x = layers.BatchNormalization(
        axis=bn_axis, momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON,
        name=bn_name_base + '2b')(x, training=training)

    x = layers.add([x, input_tensor])
    x = layers.Activation('relu')(x)
    return x
Example #16
Source File: resnet_model.py From tpu_models with Apache License 2.0
def identity_block(input_tensor, kernel_size, filters, stage, block):
    """The identity block is the block that has no conv layer at shortcut.

    Args:
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names

    Returns:
        Output tensor for the block.
    """
    filters1, filters2, filters3 = filters
    if backend.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = layers.Conv2D(filters1, (1, 1), use_bias=False,
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2a')(input_tensor)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name=bn_name_base + '2a')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters2, kernel_size, use_bias=False, padding='same',
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2b')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name=bn_name_base + '2b')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters3, (1, 1), use_bias=False,
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2c')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name=bn_name_base + '2c')(x)

    x = layers.add([x, input_tensor])
    x = layers.Activation('relu')(x)
    return x
Example #17
Source File: resnet_model.py From g-tensorflow-models with Apache License 2.0
def identity_block(input_tensor, kernel_size, filters, stage, block):
    """The identity block is the block that has no conv layer at shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names

    # Returns
        Output tensor for the block.
    """
    filters1, filters2, filters3 = filters
    if backend.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = layers.Conv2D(filters1, (1, 1), use_bias=False,
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2a')(input_tensor)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name=bn_name_base + '2a')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters2, kernel_size, padding='same', use_bias=False,
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2b')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name=bn_name_base + '2b')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters3, (1, 1), use_bias=False,
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2c')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name=bn_name_base + '2c')(x)

    x = layers.add([x, input_tensor])
    x = layers.Activation('relu')(x)
    return x
Example #18
Source File: resnet_cifar_model.py From models with Apache License 2.0
def identity_building_block(input_tensor, kernel_size, filters, stage, block, training=None):
    """The identity block is the block that has no conv layer at shortcut.

    Arguments:
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of 2 conv layers at main path
        stage: integer, current stage label, used for generating layer names
        block: current block label, used for generating layer names
        training: Only used if training keras model with Estimator. In other
            scenarios it is handled automatically.

    Returns:
        Output tensor for the block.
    """
    filters1, filters2 = filters
    if backend.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = layers.Conv2D(filters1, kernel_size, padding='same', use_bias=False,
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2a')(input_tensor)
    x = layers.BatchNormalization(
        axis=bn_axis, momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON,
        name=bn_name_base + '2a')(x, training=training)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters2, kernel_size, padding='same', use_bias=False,
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2b')(x)
    x = layers.BatchNormalization(
        axis=bn_axis, momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON,
        name=bn_name_base + '2b')(x, training=training)

    x = layers.add([x, input_tensor])
    x = layers.Activation('relu')(x)
    return x
Example #19
Source File: resnet_model.py From training_results_v0.5 with Apache License 2.0
def identity_block(input_tensor, kernel_size, filters, stage, block):
    """The identity block is the block that has no conv layer at shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names

    # Returns
        Output tensor for the block.
    """
    filters1, filters2, filters3 = filters
    if backend.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = layers.Conv2D(filters1, (1, 1),
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      bias_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2a')(input_tensor)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name=bn_name_base + '2a')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters2, kernel_size, padding='same',
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      bias_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2b')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name=bn_name_base + '2b')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters3, (1, 1),
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      bias_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2c')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name=bn_name_base + '2c')(x)

    x = layers.add([x, input_tensor])
    x = layers.Activation('relu')(x)
    return x
Example #20
Source File: resnet_model.py From class-balanced-loss with MIT License
def identity_block(input_tensor, kernel_size, filters, stage, block):
    """The identity block is the block that has no conv layer at shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names

    # Returns
        Output tensor for the block.
    """
    filters1, filters2, filters3 = filters
    if backend.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = layers.Conv2D(filters1, (1, 1),
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      bias_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2a')(input_tensor)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name=bn_name_base + '2a')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters2, kernel_size, padding='same',
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      bias_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2b')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name=bn_name_base + '2b')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters3, (1, 1),
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      bias_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2c')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name=bn_name_base + '2c')(x)

    x = layers.add([x, input_tensor])
    x = layers.Activation('relu')(x)
    return x
Example #21
Source File: inceptionv3.py From ImageAI with MIT License
def conv2d_bn(x, filters, num_row, num_col,
              padding='same', strides=(1, 1), name=None):
    """Utility function to apply conv + BN.

    # Arguments
        x: input tensor.
        filters: filters in `Conv2D`.
        num_row: height of the convolution kernel.
        num_col: width of the convolution kernel.
        padding: padding mode in `Conv2D`.
        strides: strides in `Conv2D`.
        name: name of the ops; will become `name + '_conv'`
            for the convolution and `name + '_bn'` for the
            batch norm layer.

    # Returns
        Output tensor after applying `Conv2D` and `BatchNormalization`.
    """
    if name is not None:
        bn_name = name + '_bn'
        conv_name = name + '_conv'
    else:
        bn_name = None
        conv_name = None
    if K.image_data_format() == 'channels_first':
        bn_axis = 1
    else:
        bn_axis = 3
    x = Conv2D(
        filters, (num_row, num_col),
        strides=strides,
        padding=padding,
        use_bias=False,
        name=conv_name)(x)
    x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
    x = Activation('relu', name=name)(x)
    return x
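A short usage sketch of conv2d_bn (not taken from inceptionv3.py itself; the shapes and the layer name are illustrative). The separate num_row/num_col arguments are what make Inception's factored asymmetric convolutions, e.g. a 1x7 followed by a 7x1, compact to write:

from tensorflow.python.keras.layers import Input

inputs = Input(shape=(299, 299, 3))
x = conv2d_bn(inputs, 32, 3, 3, strides=(2, 2), padding='valid')  # standard strided 3x3 stem conv
x = conv2d_bn(x, 64, 1, 7)                                        # 1x7 ...
x = conv2d_bn(x, 64, 7, 1, name='mixed_example_7x1')              # ... then 7x1: a factored 7x7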