Python tflearn.layers.normalization.batch_normalization() Examples
The following are 7 code examples of tflearn.layers.normalization.batch_normalization(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all other available functions/classes of the module tflearn.layers.normalization, or try the search function.
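Before the project examples, here is a minimal, self-contained sketch of the typical usage pattern: convolve without a built-in activation, normalize, then activate. The input shape and layer sizes below are illustrative assumptions, not taken from any of the projects that follow.

import tflearn
from tflearn.layers.core import input_data, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import batch_normalization
from tflearn.activations import relu

# Assumed 28x28 grayscale input; all sizes are illustrative.
net = input_data(shape=[None, 28, 28, 1])
# Convolution with no activation and no bias, normalized before the ReLU:
net = conv_2d(net, 32, 3, activation=None, bias=False)
net = relu(batch_normalization(net))
net = max_pool_2d(net, 2)
net = fully_connected(net, 10, activation='softmax')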
Example #1
Source File: inception_resnet_v2.py (from the FRU project, MIT License)
def block35(net, scale=1.0, activation="relu"):
    tower_conv = relu(batch_normalization(conv_2d(net, 32, 1, bias=False, activation=None, name='Conv2d_1x1')))
    tower_conv1_0 = relu(batch_normalization(conv_2d(net, 32, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 32, 3, bias=False, activation=None, name='Conv2d_0b_3x3')))
    tower_conv2_0 = relu(batch_normalization(conv_2d(net, 32, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
    tower_conv2_1 = relu(batch_normalization(conv_2d(tower_conv2_0, 48, 3, bias=False, activation=None, name='Conv2d_0b_3x3')))
    tower_conv2_2 = relu(batch_normalization(conv_2d(tower_conv2_1, 64, 3, bias=False, activation=None, name='Conv2d_0c_3x3')))
    tower_mixed = merge([tower_conv, tower_conv1_1, tower_conv2_2], mode='concat', axis=3)
    tower_out = relu(batch_normalization(conv_2d(tower_mixed, net.get_shape()[3], 1, bias=False, activation=None, name='Conv2d_1x1')))
    net += scale * tower_out
    if activation:
        if isinstance(activation, str):
            net = activations.get(activation)(net)
        elif hasattr(activation, '__call__'):
            net = activation(net)
        else:
            raise ValueError("Invalid Activation.")
    return net
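Note the recurring pattern in this block: every conv_2d is created with activation=None and bias=False, and batch_normalization() is applied to the raw convolution output before relu() fires. The same conv → batch norm → ReLU ordering is used in the next two residual blocks as well.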
Example #2
Source File: inception_resnet_v2.py (from the FRU project, MIT License)
def block17(net, scale=1.0, activation="relu"):
    tower_conv = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_1x1')))
    tower_conv_1_0 = relu(batch_normalization(conv_2d(net, 128, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
    tower_conv_1_1 = relu(batch_normalization(conv_2d(tower_conv_1_0, 160, [1, 7], bias=False, activation=None, name='Conv2d_0b_1x7')))
    tower_conv_1_2 = relu(batch_normalization(conv_2d(tower_conv_1_1, 192, [7, 1], bias=False, activation=None, name='Conv2d_0c_7x1')))
    tower_mixed = merge([tower_conv, tower_conv_1_2], mode='concat', axis=3)
    tower_out = relu(batch_normalization(conv_2d(tower_mixed, net.get_shape()[3], 1, bias=False, activation=None, name='Conv2d_1x1')))
    net += scale * tower_out
    if activation:
        if isinstance(activation, str):
            net = activations.get(activation)(net)
        elif hasattr(activation, '__call__'):
            net = activation(net)
        else:
            raise ValueError("Invalid Activation.")
    return net
Example #3
Source File: inception_resnet_v2.py (from the FRU project, MIT License)
def block8(net, scale=1.0, activation="relu"):
    tower_conv = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_1x1')))
    tower_conv1_0 = relu(batch_normalization(conv_2d(net, 192, 1, bias=False, activation=None, name='Conv2d_0a_1x1')))
    tower_conv1_1 = relu(batch_normalization(conv_2d(tower_conv1_0, 224, [1, 3], bias=False, activation=None, name='Conv2d_0b_1x3')))
    tower_conv1_2 = relu(batch_normalization(conv_2d(tower_conv1_1, 256, [3, 1], bias=False, name='Conv2d_0c_3x1')))
    tower_mixed = merge([tower_conv, tower_conv1_2], mode='concat', axis=3)
    tower_out = relu(batch_normalization(conv_2d(tower_mixed, net.get_shape()[3], 1, bias=False, activation=None, name='Conv2d_1x1')))
    net += scale * tower_out
    if activation:
        if isinstance(activation, str):
            net = activations.get(activation)(net)
        elif hasattr(activation, '__call__'):
            net = activation(net)
        else:
            raise ValueError("Invalid Activation.")
    return net
Example #4
Source File: layers.py (from the polar-transformer-networks project, MIT License)
def conv_bn_relu(net, nf, fs, scope, padding='same', strides=1, reuse=False,
                 weights_init='variance_scaling', weight_decay=0., activation='relu'):
    if padding == 'wrap':
        padding = 'valid'
        curr = wrap_pad_rows(net, (fs - 1) // 2)
    else:
        curr = net
    netout = conv_2d(curr, nf, fs, activation='linear', padding=padding, scope=scope,
                     reuse=reuse, strides=[1, strides, strides, 1],
                     weights_init=weights_init, regularizer='L2', weight_decay=weight_decay)
    netout = batch_normalization(netout, scope=scope, reuse=reuse)
    netout = getattr(tflearn.activations, activation)(netout)
    return netout
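For context, a hypothetical call to this helper might look like the following; the tensor and argument values are made up for illustration.

# Hypothetical usage (argument values are illustrative):
net = conv_bn_relu(net, nf=64, fs=3, scope='conv1')
# padding='wrap' cyclically pads rows via wrap_pad_rows() before a 'valid' convolution:
net = conv_bn_relu(net, nf=64, fs=5, scope='conv2', padding='wrap')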
Example #5
Source File: inceptionVxOnFire.py (from the fire-detection-cnn project, MIT License)
def construct_inceptionv4onfire(x, y, training=True, enable_batch_norm=True):
    network = input_data(shape=[None, y, x, 3])

    # stem of inceptionV4
    conv1_3_3 = conv_2d(network, 32, 3, strides=2, activation='relu', name='conv1_3_3_s2', padding='valid')
    conv2_3_3 = conv_2d(conv1_3_3, 32, 3, activation='relu', name='conv2_3_3')
    conv3_3_3 = conv_2d(conv2_3_3, 64, 3, activation='relu', name='conv3_3_3')
    b_conv_1_pool = max_pool_2d(conv3_3_3, kernel_size=3, strides=2, padding='valid', name='b_conv_1_pool')
    if enable_batch_norm:
        b_conv_1_pool = batch_normalization(b_conv_1_pool)
    b_conv_1_conv = conv_2d(conv3_3_3, 96, 3, strides=2, padding='valid', activation='relu', name='b_conv_1_conv')
    b_conv_1 = merge([b_conv_1_conv, b_conv_1_pool], mode='concat', axis=3)

    b_conv4_1_1 = conv_2d(b_conv_1, 64, 1, activation='relu', name='conv4_3_3')
    b_conv4_3_3 = conv_2d(b_conv4_1_1, 96, 3, padding='valid', activation='relu', name='conv5_3_3')
    b_conv4_1_1_reduce = conv_2d(b_conv_1, 64, 1, activation='relu', name='b_conv4_1_1_reduce')
    b_conv4_1_7 = conv_2d(b_conv4_1_1_reduce, 64, [1, 7], activation='relu', name='b_conv4_1_7')
    b_conv4_7_1 = conv_2d(b_conv4_1_7, 64, [7, 1], activation='relu', name='b_conv4_7_1')
    b_conv4_3_3_v = conv_2d(b_conv4_7_1, 96, 3, padding='valid', name='b_conv4_3_3_v')
    b_conv_4 = merge([b_conv4_3_3_v, b_conv4_3_3], mode='concat', axis=3)

    b_conv5_3_3 = conv_2d(b_conv_4, 192, 3, padding='valid', activation='relu', name='b_conv5_3_3', strides=2)
    b_pool5_3_3 = max_pool_2d(b_conv_4, kernel_size=3, padding='valid', strides=2, name='b_pool5_3_3')
    if enable_batch_norm:
        b_pool5_3_3 = batch_normalization(b_pool5_3_3)
    b_conv_5 = merge([b_conv5_3_3, b_pool5_3_3], mode='concat', axis=3)
    net = b_conv_5

    # inceptionV4 modules (inception_block_a/b/c are helper functions defined elsewhere in this project)
    net = inception_block_a(net)
    net = inception_block_b(net)
    net = inception_block_c(net)

    pool5_7_7 = global_avg_pool(net)
    if training:
        pool5_7_7 = dropout(pool5_7_7, 0.4)
    loss = fully_connected(pool5_7_7, 2, activation='softmax')

    if training:
        network = regression(loss, optimizer='rmsprop',
                             loss='categorical_crossentropy',
                             learning_rate=0.001)
    else:
        network = loss

    model = tflearn.DNN(network, checkpoint_path='inceptionv4onfire',
                        max_checkpoints=1, tensorboard_verbose=0)
    return model
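Because this builder returns a compiled tflearn.DNN, a hypothetical inference call might look like the sketch below; the input resolution and checkpoint name are assumptions.

# Hypothetical usage (input size and checkpoint name are assumptions):
model = construct_inceptionv4onfire(224, 224, training=False)
model.load('inceptionv4onfire')          # illustrative checkpoint path
# predictions = model.predict(frames)    # frames shaped [N, 224, 224, 3]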
Example #6
Source File: model.py (from the facial-expression-recognition-using-cnn project, GNU General Public License v3.0)
def build_modelB(optimizer=HYPERPARAMS.optimizer, optimizer_param=HYPERPARAMS.optimizer_param,
                 learning_rate=HYPERPARAMS.learning_rate, keep_prob=HYPERPARAMS.keep_prob,
                 learning_rate_decay=HYPERPARAMS.learning_rate_decay, decay_step=HYPERPARAMS.decay_step):
    images_network = input_data(shape=[None, NETWORK.input_size, NETWORK.input_size, 1], name='input1')
    images_network = conv_2d(images_network, 64, 3, activation=NETWORK.activation)
    # images_network = local_response_normalization(images_network)
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = max_pool_2d(images_network, 3, strides=2)
    images_network = conv_2d(images_network, 128, 3, activation=NETWORK.activation)
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = max_pool_2d(images_network, 3, strides=2)
    images_network = conv_2d(images_network, 256, 3, activation=NETWORK.activation)
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = max_pool_2d(images_network, 3, strides=2)
    images_network = dropout(images_network, keep_prob=keep_prob)
    images_network = fully_connected(images_network, 4096, activation=NETWORK.activation)
    images_network = dropout(images_network, keep_prob=keep_prob)
    images_network = fully_connected(images_network, 1024, activation=NETWORK.activation)
    if NETWORK.use_batchnorm_after_fully_connected_layers:
        images_network = batch_normalization(images_network)

    if NETWORK.use_landmarks or NETWORK.use_hog_and_landmarks:
        if NETWORK.use_hog_sliding_window_and_landmarks:
            landmarks_network = input_data(shape=[None, 2728], name='input2')
        elif NETWORK.use_hog_and_landmarks:
            landmarks_network = input_data(shape=[None, 208], name='input2')
        else:
            landmarks_network = input_data(shape=[None, 68, 2], name='input2')
        landmarks_network = fully_connected(landmarks_network, 1024, activation=NETWORK.activation)
        if NETWORK.use_batchnorm_after_fully_connected_layers:
            landmarks_network = batch_normalization(landmarks_network)
        landmarks_network = fully_connected(landmarks_network, 128, activation=NETWORK.activation)
        if NETWORK.use_batchnorm_after_fully_connected_layers:
            landmarks_network = batch_normalization(landmarks_network)
        images_network = fully_connected(images_network, 128, activation=NETWORK.activation)
        network = merge([images_network, landmarks_network], 'concat', axis=1)
    else:
        network = images_network

    network = fully_connected(network, NETWORK.output_size, activation='softmax')

    if optimizer == 'momentum':
        optimizer = Momentum(learning_rate=learning_rate, momentum=optimizer_param,
                             lr_decay=learning_rate_decay, decay_step=decay_step)
    elif optimizer == 'adam':
        optimizer = Adam(learning_rate=learning_rate, beta1=optimizer_param, beta2=learning_rate_decay)
    else:
        print("Unknown optimizer: {}".format(optimizer))
    network = regression(network, optimizer=optimizer, loss=NETWORK.loss,
                         learning_rate=learning_rate, name='output')
    return network
Example #7
Source File: model.py (from the facial-expression-recognition-using-cnn project, GNU General Public License v3.0)
def build_modelA(optimizer=HYPERPARAMS.optimizer, optimizer_param=HYPERPARAMS.optimizer_param,
                 learning_rate=HYPERPARAMS.learning_rate, keep_prob=HYPERPARAMS.keep_prob,
                 learning_rate_decay=HYPERPARAMS.learning_rate_decay, decay_step=HYPERPARAMS.decay_step):
    images_network = input_data(shape=[None, NETWORK.input_size, NETWORK.input_size, 1], name='input1')
    images_network = conv_2d(images_network, 64, 5, activation=NETWORK.activation)
    # images_network = local_response_normalization(images_network)
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = max_pool_2d(images_network, 3, strides=2)
    images_network = conv_2d(images_network, 64, 5, activation=NETWORK.activation)
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = max_pool_2d(images_network, 3, strides=2)
    images_network = conv_2d(images_network, 128, 4, activation=NETWORK.activation)
    if NETWORK.use_batchnorm_after_conv_layers:
        images_network = batch_normalization(images_network)
    images_network = dropout(images_network, keep_prob=keep_prob)
    images_network = fully_connected(images_network, 1024, activation=NETWORK.activation)
    if NETWORK.use_batchnorm_after_fully_connected_layers:
        images_network = batch_normalization(images_network)

    if NETWORK.use_landmarks or NETWORK.use_hog_and_landmarks:
        if NETWORK.use_hog_sliding_window_and_landmarks:
            landmarks_network = input_data(shape=[None, 2728], name='input2')
        elif NETWORK.use_hog_and_landmarks:
            landmarks_network = input_data(shape=[None, 208], name='input2')
        else:
            landmarks_network = input_data(shape=[None, 68, 2], name='input2')
        landmarks_network = fully_connected(landmarks_network, 1024, activation=NETWORK.activation)
        if NETWORK.use_batchnorm_after_fully_connected_layers:
            landmarks_network = batch_normalization(landmarks_network)
        landmarks_network = fully_connected(landmarks_network, 40, activation=NETWORK.activation)
        if NETWORK.use_batchnorm_after_fully_connected_layers:
            landmarks_network = batch_normalization(landmarks_network)
        images_network = fully_connected(images_network, 40, activation=NETWORK.activation)
        network = merge([images_network, landmarks_network], 'concat', axis=1)
    else:
        network = images_network

    network = fully_connected(network, NETWORK.output_size, activation='softmax')

    if optimizer == 'momentum':
        optimizer = Momentum(learning_rate=learning_rate, momentum=optimizer_param,
                             lr_decay=learning_rate_decay, decay_step=decay_step)
    elif optimizer == 'adam':
        optimizer = Adam(learning_rate=learning_rate, beta1=optimizer_param, beta2=learning_rate_decay)
    else:
        print("Unknown optimizer: {}".format(optimizer))
    network = regression(network, optimizer=optimizer, loss=NETWORK.loss,
                         learning_rate=learning_rate, name='output')
    return network
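Unlike Example #5, these two builders return a regression-wrapped network rather than a ready model, so a hypothetical training setup might look like the sketch below; the data arrays and epoch count are assumptions.

# Hypothetical training setup (data arrays and epoch count are assumptions):
network = build_modelA()
model = tflearn.DNN(network, tensorboard_verbose=0)
# With NETWORK.use_landmarks enabled the graph has two named inputs
# ('input1' and 'input2'), so both arrays would need to be supplied:
# model.fit([X_images, X_landmarks], Y, n_epoch=20, validation_set=0.1)
model.fit(X_images, Y, n_epoch=20, validation_set=0.1)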