Python keras.layers.normalization.BatchNormalization() Examples
The following are 28 code examples of keras.layers.normalization.BatchNormalization(), collected from open-source projects. Each example notes the project, source file, and license it was taken from.
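For orientation, here is a minimal, self-contained usage sketch. It assumes an older standalone Keras 2.x release, where the layer still lives in keras.layers.normalization; in current Keras and tf.keras the same layer is imported as keras.layers.BatchNormalization.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers.normalization import BatchNormalization

model = Sequential()
model.add(Dense(64, input_shape=(20,)))
model.add(BatchNormalization())  # normalize the Dense outputs before the nonlinearity
model.add(Activation('relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam')

# smoke test on random data
x = np.random.rand(8, 20)
y = np.random.randint(0, 2, size=(8, 1))
model.train_on_batch(x, y)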
Example #1
Source File: train_ann.py From subsync with Apache License 2.0
def ann_model(input_shape):

    inp = Input(shape=input_shape, name='mfcc_in')
    model = inp

    model = Conv1D(filters=12, kernel_size=(3), activation='relu')(model)
    model = Conv1D(filters=12, kernel_size=(3), activation='relu')(model)
    model = Flatten()(model)

    model = Dense(56)(model)
    model = Activation('relu')(model)
    model = BatchNormalization()(model)
    model = Dropout(0.2)(model)

    model = Dense(28)(model)
    model = Activation('relu')(model)
    model = BatchNormalization()(model)

    model = Dense(1)(model)
    model = Activation('sigmoid')(model)

    model = Model(inp, model)
    return model
Example #2
Source File: pspnet.py From keras-image-segmentation with MIT License
def conv_block(input_tensor, filters, strides, d_rates):
    x = Conv2D(filters[0], kernel_size=1, dilation_rate=d_rates[0])(input_tensor)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(filters[1], kernel_size=3, strides=strides, padding='same', dilation_rate=d_rates[1])(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(filters[2], kernel_size=1, dilation_rate=d_rates[2])(x)
    x = BatchNormalization()(x)

    shortcut = Conv2D(filters[2], kernel_size=1, strides=strides)(input_tensor)
    shortcut = BatchNormalization()(shortcut)

    x = add([x, shortcut])
    x = Activation('relu')(x)

    return x
Example #3
Source File: pspnet.py From keras-image-segmentation with MIT License
def identity_block(input_tensor, filters, d_rates):
    x = Conv2D(filters[0], kernel_size=1, dilation_rate=d_rates[0])(input_tensor)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(filters[1], kernel_size=3, padding='same', dilation_rate=d_rates[1])(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(filters[2], kernel_size=1, dilation_rate=d_rates[2])(x)
    x = BatchNormalization()(x)

    x = add([x, input_tensor])
    x = Activation('relu')(x)

    return x
Example #4
Source File: psp_temp.py From keras-image-segmentation with MIT License
def conv_block(input_tensor, filters, strides, d_rates):
    x = Conv2D(filters[0], kernel_size=1, dilation_rate=d_rates[0])(input_tensor)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(filters[1], kernel_size=3, strides=strides, padding='same', dilation_rate=d_rates[1])(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(filters[2], kernel_size=1, dilation_rate=d_rates[2])(x)
    x = BatchNormalization()(x)

    shortcut = Conv2D(filters[2], kernel_size=1, strides=strides)(input_tensor)
    shortcut = BatchNormalization()(shortcut)

    x = add([x, shortcut])
    x = Activation('relu')(x)

    return x
Example #5
Source File: psp_temp.py From keras-image-segmentation with MIT License
def identity_block(input_tensor, filters, d_rates):
    x = Conv2D(filters[0], kernel_size=1, dilation_rate=d_rates[0])(input_tensor)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(filters[1], kernel_size=3, padding='same', dilation_rate=d_rates[1])(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(filters[2], kernel_size=1, dilation_rate=d_rates[2])(x)
    x = BatchNormalization()(x)

    x = add([x, input_tensor])
    x = Activation('relu')(x)

    return x
Example #6
Source File: example.py From residual_block_keras with GNU General Public License v3.0
def get_residual_model(is_mnist=True, img_channels=1, img_rows=28, img_cols=28):
    # Note: uses the Keras 1 API (Convolution2D(filters, rows, cols, border_mode=...));
    # nb_classes and design_for_residual_blocks() are defined elsewhere in the source file.
    model = keras.models.Sequential()
    first_layer_channel = 128
    if is_mnist:  # size to be changed to 32,32
        model.add(ZeroPadding2D((2, 2), input_shape=(img_channels, img_rows, img_cols)))  # resize (28,28)-->(32,32)
        # the first conv
        model.add(Convolution2D(first_layer_channel, 3, 3, border_mode='same'))
    else:
        model.add(Convolution2D(first_layer_channel, 3, 3, border_mode='same', input_shape=(img_channels, img_rows, img_cols)))

    model.add(Activation('relu'))
    # [residual-based Conv layers]
    residual_blocks = design_for_residual_blocks(num_channel_input=first_layer_channel)
    model.add(residual_blocks)
    model.add(BatchNormalization(axis=1))
    model.add(Activation('relu'))
    # [Classifier]
    model.add(Flatten())
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    # [END]
    return model
Example #7
Source File: Build_Model.py From DOVE with GNU General Public License v3.0
def makecnn(learningrate, regular, decay, channel_number):
    # model structure
    model = Sequential()
    model.add(Conv3D(100, kernel_size=(3, 3, 3), strides=(1, 1, 1), input_shape=(20, 20, 20, channel_number),
                     padding='valid', data_format='channels_last', dilation_rate=(1, 1, 1), use_bias=True,
                     kernel_initializer='glorot_normal', bias_initializer='zeros', kernel_regularizer=None,
                     bias_regularizer=None, activity_regularizer=l2(regular), kernel_constraint=None,
                     bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    # model.add(Dropout(0.3))

    model.add(Conv3D(200, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='valid', data_format='channels_last',
                     dilation_rate=(1, 1, 1), use_bias=True, kernel_initializer='glorot_normal',
                     bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None,
                     activity_regularizer=l2(regular), kernel_constraint=None, bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    # model.add(Dropout(0.3))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=None, padding='valid', data_format='channels_last'))
    model.add(BatchNormalization(axis=1, momentum=0.99, epsilon=0.001, center=True, scale=True,
                                 beta_initializer='zeros', gamma_initializer='ones',
                                 moving_mean_initializer='zeros', moving_variance_initializer='ones',
                                 beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
                                 gamma_constraint=None))

    model.add(Conv3D(400, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='valid', data_format='channels_last',
                     dilation_rate=(1, 1, 1), use_bias=True, kernel_initializer='glorot_normal',
                     bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None,
                     activity_regularizer=l2(regular), kernel_constraint=None, bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    # model.add(Dropout(0.3))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=None, padding='valid', data_format='channels_last'))

    model.add(Flatten())
    model.add(Dropout(0.3))
    model.add(Dense(1000, use_bias=True, input_shape=(32000,), kernel_initializer='glorot_normal',
                    bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None,
                    activity_regularizer=l2(regular), kernel_constraint=None, bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    model.add(Dropout(0.3))
    model.add(Dense(100, use_bias=True, kernel_initializer='glorot_normal', bias_initializer='zeros',
                    kernel_regularizer=None, bias_regularizer=None, activity_regularizer=l2(regular),
                    kernel_constraint=None, bias_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    model.add(Dropout(0.3))
    model.add(Dense(1, activation='sigmoid', use_bias=True, kernel_initializer='glorot_normal',
                    bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None,
                    activity_regularizer=l2(regular), kernel_constraint=None, bias_constraint=None))

    nadam = Nadam(lr=learningrate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=decay)
    # f1score, precision and recall are custom metric functions defined elsewhere in the source file
    model.compile(loss='binary_crossentropy', optimizer=nadam, metrics=['accuracy', f1score, precision, recall])
    return model
Example #8
Source File: model.py From Deep-Speckle-Correlation with BSD 3-Clause "New" or "Revised" License
def conv_factory(x, concat_axis, nb_filter, dropout_rate=None, weight_decay=1E-4):
    x = BatchNormalization(axis=concat_axis,
                           gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    x = Conv2D(nb_filter, (5, 5), dilation_rate=(2, 2),
               kernel_initializer="he_uniform", padding="same",
               kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x

# define dense block
Example #9
Source File: captcha_gan.py From Intelligent-Projects-Using-Python with MIT License
def discriminator(img_dim, alpha=0.2):
    model = Sequential()
    model.add(Conv2D(64, kernel_size=5, strides=2,
                     padding='same',
                     input_shape=img_dim))
    model.add(LeakyReLU(alpha))
    model.add(Conv2D(128, kernel_size=5, strides=2, padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha))
    model.add(Conv2D(256, kernel_size=5, strides=2, padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha))
    model.add(Flatten())
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model

# Define a combination of Generator and Discriminator
Example #10
Source File: densenet.py From Model-Playgrounds with MIT License
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    '''Apply BatchNorm, ReLU, 1x1 Conv2D with optional compression, then AveragePooling2D.

    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of
            feature maps in the transition block.
        weight_decay: weight decay factor

    Returns:
        keras tensor, after applying batch_norm, relu-conv, avgpool
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_normal', padding='same',
               use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    return x
Example #11
Source File: gc_densenet.py From keras-global-context-networks with MIT License
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    '''Apply BatchNorm, ReLU, 1x1 Conv2D with optional compression, AveragePooling2D, then a global context block.

    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of
            feature maps in the transition block.
        weight_decay: weight decay factor

    Returns:
        keras tensor, after applying batch_norm, relu-conv, avgpool, global context
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_normal', padding='same',
               use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    # global context block (global_context_block() is provided by the keras-global-context-networks project)
    x = global_context_block(x)

    return x
Example #12
Source File: models.py From keras-image-captioning with MIT License
def _build_image_embedding(self):
    image_model = InceptionV3(include_top=False, weights='imagenet', pooling='avg')
    for layer in image_model.layers:
        layer.trainable = False

    dense_input = BatchNormalization(axis=-1)(image_model.output)
    image_dense = Dense(units=self._embedding_size,
                        kernel_regularizer=self._regularizer,
                        kernel_initializer=self._initializer)(dense_input)

    # Add timestep dimension
    image_embedding = RepeatVector(1)(image_dense)

    image_input = image_model.input
    return image_input, image_embedding
Example #13
Source File: models.py From keras-image-captioning with MIT License
def _build_sequence_model(self, sequence_input):
    RNN = GRU if self._rnn_type == 'gru' else LSTM

    def rnn():
        rnn = RNN(units=self._rnn_output_size,
                  return_sequences=True,
                  dropout=self._dropout_rate,
                  recurrent_dropout=self._dropout_rate,
                  kernel_regularizer=self._regularizer,
                  kernel_initializer=self._initializer,
                  implementation=2)
        rnn = Bidirectional(rnn) if self._bidirectional_rnn else rnn
        return rnn

    input_ = sequence_input
    for _ in range(self._rnn_layers):
        input_ = BatchNormalization(axis=-1)(input_)
        rnn_out = rnn()(input_)
        input_ = rnn_out
    time_dist_dense = TimeDistributed(Dense(units=self._vocab_size))(rnn_out)

    return time_dist_dense
Example #14
Source File: densenet_fast.py From semantic-embeddings with MIT License
def transition_block(ip, nb_filter, dropout_rate=None, weight_decay=1E-4):
    '''Apply 1x1 Conv2D, optional dropout, AveragePooling2D, then BatchNorm.

    Args:
        ip: keras tensor
        nb_filter: number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay factor

    Returns:
        keras tensor, after applying conv, dropout, avgpool, batch_norm
    '''
    concat_axis = 1 if K.image_dim_ordering() == "th" else -1

    x = Convolution2D(nb_filter, 1, 1, init="he_uniform", border_mode="same", bias=False,
                      W_regularizer=l2(weight_decay))(ip)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    x = BatchNormalization(mode=0, axis=concat_axis,
                           gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)

    return x
Example #15
Source File: test_normalization.py From CAPTCHA-breaking with MIT License
def test_weight_init(self):
    """Test weight initialization."""
    # Note: this test targets the old Keras 0.x layer API (mode=, .input, get_output()).
    norm_m1 = normalization.BatchNormalization((10,), mode=1,
                                               weights=[np.ones(10), np.ones(10)])

    for inp in [self.input_1, self.input_2, self.input_3]:
        norm_m1.input = inp
        out = (norm_m1.get_output(train=True) - np.ones(10)) / 1.
        self.assertAlmostEqual(out.mean().eval(), 0.0)
        if inp.std() > 0.:
            self.assertAlmostEqual(out.std().eval(), 1.0, places=2)
        else:
            self.assertAlmostEqual(out.std().eval(), 0.0, places=2)

    assert_allclose(norm_m1.gamma.eval(), np.ones(10))
    assert_allclose(norm_m1.beta.eval(), np.ones(10))

    # Weights must be an iterable of gamma AND beta.
    self.assertRaises(Exception, normalization.BatchNormalization(10,),
                      weights=np.ones(10))
Example #16
Source File: densenet.py From semantic-embeddings with MIT License
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    '''Apply BatchNorm, ReLU, 1x1 Conv2D with optional compression, then AveragePooling2D.

    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of
            feature maps in the transition block.
        weight_decay: weight decay factor

    Returns:
        keras tensor, after applying batch_norm, relu-conv, avgpool
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_normal', padding='same',
               use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    return x
Example #17
Source File: co_lstm_predict_sequence.py From copper_price_forecast with GNU General Public License v3.0
def build_model():
    """Define the model."""
    # Conf.LAYERS holds the project's layer-size configuration
    model = Sequential()
    model.add(LSTM(units=Conf.LAYERS[1], input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]), return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(units=Conf.LAYERS[3]))
    # model.add(BatchNormalization(weights=None, epsilon=1e-06, momentum=0.9))
    model.add(Activation("tanh"))
    # act = PReLU(alpha_initializer='zeros', weights=None)
    # act = LeakyReLU(alpha=0.3)
    # model.add(act)
    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
Example #18
Source File: wide_residual_network.py From semantic-embeddings with MIT License
def conv_block(input, base, k=1, dropout=0.0):
    init = input

    channel_axis = 1 if K.image_data_format() == "channels_first" else -1

    x = BatchNormalization(axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer='uniform')(input)
    x = Activation('relu')(x)
    x = Convolution2D(base * k, (3, 3), padding='same', kernel_initializer='he_normal', use_bias=False)(x)

    if dropout > 0.0:
        x = Dropout(dropout)(x)

    x = BatchNormalization(axis=channel_axis, momentum=0.1, epsilon=1e-5, gamma_initializer='uniform')(x)
    x = Activation('relu')(x)
    x = Convolution2D(base * k, (3, 3), padding='same', kernel_initializer='he_normal', use_bias=False)(x)

    m = Add()([init, x])
    return m
Example #19
Source File: co_lstm_predict_day.py From copper_price_forecast with GNU General Public License v3.0
def build_model():
    """Define the model."""
    # Conf.LAYERS holds the project's layer-size configuration
    model = Sequential()
    model.add(LSTM(units=Conf.LAYERS[1], input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]), return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(units=Conf.LAYERS[3]))
    # model.add(BatchNormalization(weights=None, epsilon=1e-06, momentum=0.9))
    model.add(Activation("tanh"))
    # act = PReLU(alpha_initializer='zeros', weights=None)
    # act = LeakyReLU(alpha=0.3)
    # model.add(act)
    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
Example #20
Source File: ae_model.py From Pix2Pose with MIT License
def DCGAN_discriminator():
    nb_filters = 64
    nb_conv = int(np.floor(np.log(128) / np.log(2)))
    list_filters = [nb_filters * min(8, (2 ** i)) for i in range(nb_conv)]

    input_img = Input(shape=(128, 128, 3))
    x = Conv2D(list_filters[0], (3, 3), strides=(2, 2), name="disc_conv2d_1", padding="same")(input_img)
    x = BatchNormalization(axis=-1)(x)
    x = LeakyReLU(0.2)(x)

    # Next convs
    for i, f in enumerate(list_filters[1:]):
        name = "disc_conv2d_%s" % (i + 2)
        x = Conv2D(f, (3, 3), strides=(2, 2), name=name, padding="same")(x)
        x = BatchNormalization(axis=-1)(x)
        x = LeakyReLU(0.2)(x)

    x_flat = Flatten()(x)
    x_out = Dense(1, activation="sigmoid", name="disc_dense")(x_flat)

    discriminator_model = Model(inputs=input_img, outputs=[x_out])
    return discriminator_model
Example #21
Source File: models.py From DigiX_HuaWei_Population_Age_Attribution_Predict with MIT License
def mlp_v2():
    model = Sequential()
    model.add(Dense(2048, input_shape=(21099,)))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(BatchNormalization())

    # model.add(Dense(1024))
    # model.add(Activation('relu'))
    # model.add(Dropout(0.5))
    # model.add(BatchNormalization())

    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(BatchNormalization())

    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(BatchNormalization())

    model.add(Dense(6))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    return model
Example #22
Source File: model.py From n2n-watermark-remove with MIT License
def get_unet_model(input_channel_num=3, out_ch=3, start_ch=64, depth=4, inc_rate=2., activation='relu',
                   dropout=0.5, batchnorm=False, maxpool=True, upconv=True, residual=False):
    def _conv_block(m, dim, acti, bn, res, do=0):
        n = Conv2D(dim, 3, activation=acti, padding='same')(m)
        n = BatchNormalization()(n) if bn else n
        n = Dropout(do)(n) if do else n
        n = Conv2D(dim, 3, activation=acti, padding='same')(n)
        n = BatchNormalization()(n) if bn else n

        return Concatenate()([m, n]) if res else n

    def _level_block(m, dim, depth, inc, acti, do, bn, mp, up, res):
        if depth > 0:
            n = _conv_block(m, dim, acti, bn, res)
            m = MaxPooling2D()(n) if mp else Conv2D(dim, 3, strides=2, padding='same')(n)
            m = _level_block(m, int(inc * dim), depth - 1, inc, acti, do, bn, mp, up, res)
            if up:
                m = UpSampling2D()(m)
                m = Conv2D(dim, 2, activation=acti, padding='same')(m)
            else:
                m = Conv2DTranspose(dim, 3, strides=2, activation=acti, padding='same')(m)
            n = Concatenate()([n, m])
            m = _conv_block(n, dim, acti, bn, res)
        else:
            m = _conv_block(m, dim, acti, bn, res, do)

        return m

    i = Input(shape=(None, None, input_channel_num))
    o = _level_block(i, start_ch, depth, inc_rate, activation, dropout, batchnorm, maxpool, upconv, residual)
    o = Conv2D(out_ch, 1)(o)
    model = Model(inputs=i, outputs=o)

    return model
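A hypothetical invocation, using only the parameters from the signature above; note that the BatchNormalization layers are inserted only when batchnorm=True:

# Hypothetical usage: a 4-level U-Net with batch normalization enabled
model = get_unet_model(input_channel_num=3, out_ch=3, depth=4,
                       batchnorm=True, residual=False)
model.compile(optimizer='adam', loss='mse')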
Example #23
Source File: act_all_mlp.py From DigiX_HuaWei_Population_Age_Attribution_Predict with MIT License
def mlp_v3():
    model = Sequential()
    model.add(Dense(1024, input_shape=(10102,)))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))

    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    # model.add(BatchNormalization())

    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    # model.add(BatchNormalization())

    model.add(Dense(6))  # restored: commented out in the original source, but the 6-way output layer is needed before softmax
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='Nadam', metrics=['accuracy'])
    return model
Example #24
Source File: act_use_all_mlp.py From DigiX_HuaWei_Population_Age_Attribution_Predict with MIT License
def mlp_v3():
    model = Sequential()
    model.add(Dense(1024, input_shape=(13, 400,)))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))

    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    # model.add(BatchNormalization())

    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    # model.add(BatchNormalization())

    model.add(Dense(6))  # restored: commented out in the original source, but the 6-way output layer is needed before softmax
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='Nadam', metrics=['accuracy'])
    return model
Example #25
Source File: model.py From Vehicle-Detection-and-Tracking-Usig-YOLO-and-Deep-Sort-with-Keras-and-Tensorflow with MIT License
def DarknetConv2D_BN_Leaky(*args, **kwargs):
    """Darknet Convolution2D followed by BatchNormalization and LeakyReLU."""
    no_bias_kwargs = {'use_bias': False}
    no_bias_kwargs.update(kwargs)
    return compose(
        DarknetConv2D(*args, **no_bias_kwargs),
        BatchNormalization(),
        LeakyReLU(alpha=0.1))
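The compose() helper used above is not part of Keras; it comes from the project's own utils module. A minimal sketch of such a left-to-right function-composition helper, written here as an assumption rather than the verbatim source:

from functools import reduce

def compose(*funcs):
    """Compose arbitrarily many functions, evaluated left to right."""
    if funcs:
        return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
    raise ValueError('Composition of empty sequence not supported.')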
Example #26
Source File: darknet53.py From yolo3-keras with MIT License
def DarknetConv2D(*args, **kwargs):
    darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
    darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides') == (2, 2) else 'same'
    darknet_conv_kwargs.update(kwargs)
    return Conv2D(*args, **darknet_conv_kwargs)

#---------------------------------------------------#
#   Convolution block:
#   DarknetConv2D + BatchNormalization + LeakyReLU
#---------------------------------------------------#
Example #27
Source File: model.py From perceptron-benchmark with Apache License 2.0
def DarknetConv2D_BN_Leaky(*args, **kwargs):
    """Darknet Convolution2D followed by BatchNormalization and LeakyReLU."""
    no_bias_kwargs = {'use_bias': False}
    no_bias_kwargs.update(kwargs)
    return compose(
        DarknetConv2D(*args, **no_bias_kwargs),
        BatchNormalization(),
        LeakyReLU(alpha=0.1))
Example #28
Source File: models.py From vess2ret with MIT License
def BatchNorm(mode=2, axis=1, **kwargs):
    """Convenience method for BatchNormalization layers."""
    if KERAS_2:
        # Keras 2 removed the `mode` argument
        return BatchNormalization(axis=axis, **kwargs)
    else:
        # fixed: the original passed the literal mode=2 here, ignoring the parameter
        return BatchNormalization(mode=mode, axis=axis, **kwargs)
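A usage sketch for the wrapper above (hypothetical tensor x; KERAS_2 is a boolean flag defined elsewhere in the project based on the installed Keras version):

# Hypothetical usage inside a model definition
x = BatchNorm(axis=1)(x)  # channels-first batch normalization, under Keras 1 or 2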