Python tensorflow.contrib.slim.batch_norm() Examples
The following are 30 code examples of tensorflow.contrib.slim.batch_norm(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.contrib.slim, or try the search function.
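Before the examples, here is a minimal sketch of the usage pattern most of them share: slim.batch_norm is attached to layers as a normalizer_fn through slim.arg_scope, its is_training flag switches between batch statistics and the stored moving averages, and when updates_collections is tf.GraphKeys.UPDATE_OPS the moving-average update ops must be run alongside the train op. The helper name build_small_net and the parameter values below are illustrative assumptions, not taken from any of the projects listed.

import tensorflow as tf
import tensorflow.contrib.slim as slim

def build_small_net(images, is_training):
    # Hypothetical helper showing the common arg_scope + batch_norm pattern.
    batch_norm_params = {
        'decay': 0.997,                                   # moving-average decay
        'epsilon': 1e-5,                                  # added to variance
        'updates_collections': tf.GraphKeys.UPDATE_OPS,   # collect update ops
    }
    with slim.arg_scope([slim.conv2d],
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        with slim.arg_scope([slim.batch_norm], is_training=is_training):
            net = slim.conv2d(images, 32, [3, 3], scope='conv1')
            net = slim.conv2d(net, 64, [3, 3], stride=2, scope='conv2')
    return net

# With updates_collections=tf.GraphKeys.UPDATE_OPS, run the update ops with the train op:
# update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# with tf.control_dependencies(update_ops):
#     train_op = optimizer.minimize(loss)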
Example #1
Source File: netvlad_triplets.py From hierarchical_loc with BSD 3-Clause "New" or "Revised" License | 6 votes |
def tower(image, mode, config):
    image = image_normalization(image)
    if image.shape[-1] == 1:
        image = tf.tile(image, [1, 1, 1, 3])
    with slim.arg_scope(resnet.resnet_arg_scope()):
        training = config['train_backbone'] and (mode == Mode.TRAIN)
        with slim.arg_scope([slim.conv2d, slim.batch_norm], trainable=training):
            _, encoder = resnet.resnet_v1_50(image, is_training=training,
                                             global_pool=False,
                                             scope='resnet_v1_50')
    feature_map = encoder['resnet_v1_50/block3']
    descriptor = vlad(feature_map, config, mode == Mode.TRAIN)
    if config['dimensionality_reduction']:
        descriptor = dimensionality_reduction(descriptor, config)
    return descriptor
Example #2
Source File: delf.py From hierarchical_loc with BSD 3-Clause "New" or "Revised" License | 6 votes |
def tower(image, mode, config):
    image = image_normalization(image)
    if image.shape[-1] == 1:
        image = tf.tile(image, [1, 1, 1, 3])
    with slim.arg_scope(resnet.resnet_arg_scope()):
        is_training = config['train_backbone'] and (mode == Mode.TRAIN)
        with slim.arg_scope([slim.conv2d, slim.batch_norm], trainable=is_training):
            _, encoder = resnet.resnet_v1_50(image, is_training=is_training,
                                             global_pool=False,
                                             scope='resnet_v1_50')
    feature_map = encoder['resnet_v1_50/block3']
    if config['use_attention']:
        descriptor = delf_attention(feature_map, config, mode == Mode.TRAIN,
                                    resnet.resnet_arg_scope())
    else:
        descriptor = tf.reduce_max(feature_map, [1, 2])
    if config['dimensionality_reduction']:
        descriptor = dimensionality_reduction(descriptor, config)
    return descriptor
Example #3
Source File: mobilenet_v2.py From R2CNN_Faster-RCNN_Tensorflow with MIT License | 6 votes |
def mobilenetv2_scope(is_training=True,
                      trainable=True,
                      weight_decay=0.00004,
                      stddev=0.09,
                      dropout_keep_prob=0.8,
                      bn_decay=0.997):
    """Defines the MobileNet training scope.

    By default we do not train batch norm; the scope is rewritten to freeze it.
    """
    batch_norm_params = {
        'is_training': False,
        'trainable': False,
        'decay': bn_decay,
    }
    with slim.arg_scope(training_scope(is_training=is_training, weight_decay=weight_decay)):
        with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.separable_conv2d],
                            trainable=trainable):
            with slim.arg_scope([slim.batch_norm], **batch_norm_params) as sc:
                return sc
Example #4
Source File: dfc_vae.py From TNT with GNU General Public License v3.0 | 6 votes |
def encoder(self, images, is_training):
    activation_fn = leaky_relu  # tf.nn.relu
    weight_decay = 0.0
    with tf.variable_scope('encoder'):
        with slim.arg_scope([slim.batch_norm], is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                weights_regularizer=slim.l2_regularizer(weight_decay),
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.batch_norm_params):
                net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
                net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
                net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
                net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
                net = slim.flatten(net)
                fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
    return fc1, fc2
Example #5
Source File: dfc_vae_large.py From TNT with GNU General Public License v3.0 | 6 votes |
def encoder(self, images, is_training):
    activation_fn = leaky_relu  # tf.nn.relu
    weight_decay = 0.0
    with tf.variable_scope('encoder'):
        with slim.arg_scope([slim.batch_norm], is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                weights_regularizer=slim.l2_regularizer(weight_decay),
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.batch_norm_params):
                net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
                net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
                net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
                net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
                net = slim.conv2d(net, 512, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_5')
                net = slim.flatten(net)
                fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
    return fc1, fc2
Example #6
Source File: inception_resnet_v1.py From TNT with GNU General Public License v3.0 | 6 votes |
def inference(images, keep_probability, phase_train=True,
              bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages ends up in the trainable variables collection
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=slim.initializers.xavier_initializer(),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        return inception_resnet_v1(images, is_training=phase_train,
                                   dropout_keep_prob=keep_probability,
                                   bottleneck_layer_size=bottleneck_layer_size,
                                   reuse=reuse)
Example #7
Source File: dummy.py From TNT with GNU General Public License v3.0 | 6 votes |
def inference(images, keep_probability, phase_train=True,  # @UnusedVariable
              bottleneck_layer_size=128, bottleneck_layer_activation=None,
              weight_decay=0.0, reuse=None):  # @UnusedVariable
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages ends up in the trainable variables collection
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        size = np.prod(images.get_shape()[1:].as_list())
        net = slim.fully_connected(tf.reshape(images, (-1, size)), bottleneck_layer_size,
                                   activation_fn=None, scope='Bottleneck', reuse=False)
    return net, None
Example #8
Source File: nasnet_model.py From benchmarks with Apache License 2.0 | 6 votes |
def _build_aux_head(net, end_points, num_classes, hparams, scope):
    """Auxiliary head used for all models across all datasets."""
    with tf.variable_scope(scope):
        aux_logits = tf.identity(net)
        with tf.variable_scope('aux_logits'):
            aux_logits = slim.avg_pool2d(aux_logits, [5, 5], stride=3, padding='VALID')
            aux_logits = slim.conv2d(aux_logits, 128, [1, 1], scope='proj')
            aux_logits = slim.batch_norm(aux_logits, scope='aux_bn0')
            aux_logits = tf.nn.relu(aux_logits)
            # Shape of feature map before the final layer.
            shape = aux_logits.shape
            if hparams.data_format == 'NHWC':
                shape = shape[1:3]
            else:
                shape = shape[2:4]
            aux_logits = slim.conv2d(aux_logits, 768, shape, padding='VALID')
            aux_logits = slim.batch_norm(aux_logits, scope='aux_bn1')
            aux_logits = tf.nn.relu(aux_logits)
            aux_logits = contrib_layers.flatten(aux_logits)
            aux_logits = slim.fully_connected(aux_logits, num_classes)
            end_points['AuxLogits'] = aux_logits
Example #9
Source File: inception_resnet_v2.py From TNT with GNU General Public License v3.0 | 6 votes |
def inference(images, keep_probability, phase_train=True,
              bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages ends up in the trainable variables collection
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=slim.initializers.xavier_initializer(),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        return inception_resnet_v2(images, is_training=phase_train,
                                   dropout_keep_prob=keep_probability,
                                   bottleneck_layer_size=bottleneck_layer_size,
                                   reuse=reuse)
Example #10
Source File: pyramid_network.py From FastMaskRCNN with Apache License 2.0 | 6 votes |
def _extra_conv_arg_scope_with_bn(weight_decay=0.00001,
                                  activation_fn=None,
                                  batch_norm_decay=0.997,
                                  batch_norm_epsilon=1e-5,
                                  batch_norm_scale=True):
    batch_norm_params = {
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'updates_collections': tf.GraphKeys.UPDATE_OPS,
    }
    with slim.arg_scope(
            [slim.conv2d],
            weights_regularizer=slim.l2_regularizer(weight_decay),
            weights_initializer=slim.variance_scaling_initializer(),
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
                return arg_sc
Example #11
Source File: layers.py From hierarchical_loc with BSD 3-Clause "New" or "Revised" License | 6 votes |
def delf_attention(feature_map, config, is_training, arg_scope=None):
    with tf.variable_scope('attonly/attention/compute'):
        with slim.arg_scope(arg_scope):
            is_training = config['train_attention'] and is_training
            with slim.arg_scope([slim.conv2d, slim.batch_norm], trainable=is_training):
                with slim.arg_scope([slim.batch_norm], is_training=is_training):
                    attention = slim.conv2d(feature_map, 512, config['attention_kernel'],
                                            rate=1, activation_fn=tf.nn.relu, scope='conv1')
                    attention = slim.conv2d(attention, 1, config['attention_kernel'],
                                            rate=1, activation_fn=None,
                                            normalizer_fn=None, scope='conv2')
                    attention = tf.nn.softplus(attention)
    if config['normalize_feature_map']:
        feature_map = tf.nn.l2_normalize(feature_map, -1)
    descriptor = tf.reduce_sum(feature_map * attention, axis=[1, 2])
    if config['normalize_average']:
        descriptor /= tf.reduce_sum(attention, axis=[1, 2])
    return descriptor
Example #12
Source File: nasnet_utils.py From benchmarks with Apache License 2.0 | 6 votes |
def _cell_base(self, net, prev_layer):
    """Runs the beginning of the conv cell before the predicted ops are run."""
    num_filters = self._filter_size
    # Check to be sure prev layer stuff is setup correctly
    prev_layer = self._reduce_prev_layer(prev_layer, net)
    net = tf.nn.relu(net)
    net = slim.conv2d(net, num_filters, 1, scope='1x1')
    net = slim.batch_norm(net, scope='beginning_bn')
    split_axis = get_channel_index()
    net = tf.split(axis=split_axis, num_or_size_splits=1, value=net)
    for split in net:
        assert int(split.shape[split_axis]) == int(
            self._num_conv_filters * self._filter_scaling)
    net.append(prev_layer)
    return net
Example #13
Source File: nasnet_utils.py From benchmarks with Apache License 2.0 | 5 votes |
def factorized_reduction(net, output_filters, stride, data_format=INVALID):
    """Reduces the shape of net without information loss due to striding."""
    assert output_filters % 2 == 0, (
        'Need even number of filters when using this factorized reduction.')
    assert data_format != INVALID
    if stride == 1:
        net = slim.conv2d(net, output_filters, 1, scope='path_conv')
        net = slim.batch_norm(net, scope='path_bn')
        return net
    if data_format == 'NHWC':
        stride_spec = [1, stride, stride, 1]
    else:
        stride_spec = [1, 1, stride, stride]

    # Skip path 1
    path1 = tf.nn.avg_pool(net, [1, 1, 1, 1], stride_spec, 'VALID',
                           data_format=data_format)
    path1 = slim.conv2d(path1, int(output_filters / 2), 1, scope='path1_conv')

    # Skip path 2
    # First pad with 0's on the right and bottom, then shift the filter to
    # include those 0's that were added.
    if data_format == 'NHWC':
        pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]]
        path2 = tf.pad(net, pad_arr)[:, 1:, 1:, :]
        concat_axis = 3
    else:
        pad_arr = [[0, 0], [0, 0], [0, 1], [0, 1]]
        path2 = tf.pad(net, pad_arr)[:, :, 1:, 1:]
        concat_axis = 1

    path2 = tf.nn.avg_pool(path2, [1, 1, 1, 1], stride_spec, 'VALID',
                           data_format=data_format)
    path2 = slim.conv2d(path2, int(output_filters / 2), 1, scope='path2_conv')

    # Concat and apply BN
    final_path = tf.concat(values=[path1, path2], axis=concat_axis)
    final_path = slim.batch_norm(final_path, scope='final_path_bn')
    return final_path
Example #14
Source File: nasnet_model.py From benchmarks with Apache License 2.0 | 5 votes |
def nasnet_large_arg_scope(weight_decay=5e-5,
                           batch_norm_decay=0.9997,
                           batch_norm_epsilon=1e-3):
    """Defines the default arg scope for the NASNet-A Large ImageNet model.

    Args:
      weight_decay: The weight decay to use for regularizing the model.
      batch_norm_decay: Decay for batch norm moving average.
      batch_norm_epsilon: Small float added to variance to avoid dividing by
        zero in batch norm.

    Returns:
      An `arg_scope` to use for the NASNet Large Model.
    """
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': batch_norm_decay,
        # epsilon to prevent 0s in variance.
        'epsilon': batch_norm_epsilon,
        'scale': True,
        'fused': True,
    }
    weights_regularizer = contrib_layers.l2_regularizer(weight_decay)
    weights_initializer = contrib_layers.variance_scaling_initializer(mode='FAN_OUT')
    with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d],
                   weights_regularizer=weights_regularizer,
                   weights_initializer=weights_initializer):
        with arg_scope([slim.fully_connected], activation_fn=None, scope='FC'):
            with arg_scope([slim.conv2d, slim.separable_conv2d],
                           activation_fn=None, biases_initializer=None):
                with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
                    return sc
Example #15
Source File: nasnet_utils.py From benchmarks with Apache License 2.0 | 5 votes |
def _stacked_separable_conv(net, stride, operation, filter_size):
    """Takes in an operations and parses it to the correct sep operation."""
    num_layers, kernel_size = _operation_to_info(operation)
    net_type = net.dtype
    net = tf.cast(net, tf.float32) if net_type == tf.float16 else net
    for layer_num in range(num_layers - 1):
        net = tf.nn.relu(net)
        net = slim.separable_conv2d(
            net,
            filter_size,
            kernel_size,
            depth_multiplier=1,
            scope='separable_{0}x{0}_{1}'.format(kernel_size, layer_num + 1),
            stride=stride)
        net = slim.batch_norm(
            net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, layer_num + 1))
        stride = 1
    net = tf.nn.relu(net)
    net = slim.separable_conv2d(
        net,
        filter_size,
        kernel_size,
        depth_multiplier=1,
        scope='separable_{0}x{0}_{1}'.format(kernel_size, num_layers),
        stride=stride)
    net = slim.batch_norm(
        net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, num_layers))
    net = tf.cast(net, net_type)
    return net
Example #16
Source File: dfc_vae_resnet.py From TNT with GNU General Public License v3.0 | 5 votes |
def encoder(self, images, is_training):
    activation_fn = leaky_relu  # tf.nn.relu
    weight_decay = 0.0
    with tf.variable_scope('encoder'):
        with slim.arg_scope([slim.batch_norm], is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                weights_regularizer=slim.l2_regularizer(weight_decay),
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.batch_norm_params):
                net = images
                net = slim.conv2d(net, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_1b')
                net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_2b')
                net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_3b')
                net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 256, [4, 4], 1, activation_fn=activation_fn, scope='Conv2d_4b')
                net = slim.flatten(net)
                fc1 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                fc2 = slim.fully_connected(net, self.latent_variable_dim, activation_fn=None, normalizer_fn=None, scope='Fc_2')
    return fc1, fc2
Example #17
Source File: network_base.py From tf-pose with Apache License 2.0 | 5 votes |
def convb(self, input, k_h, k_w, c_o, stride, name, relu=True, set_bias=True, set_tanh=False):
    with slim.arg_scope([slim.batch_norm], decay=0.999, fused=common.batchnorm_fused,
                        is_training=self.trainable):
        output = slim.convolution2d(input, c_o, kernel_size=[k_h, k_w],
                                    stride=stride,
                                    normalizer_fn=slim.batch_norm,
                                    weights_regularizer=_l2_regularizer_convb,
                                    weights_initializer=_init_xavier,
                                    # weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                                    biases_initializer=_init_zero if set_bias else None,
                                    trainable=self.trainable,
                                    activation_fn=common.activation_fn if relu else None,
                                    scope=name)
        if set_tanh:
            output = tf.nn.sigmoid(output, name=name + '_extra_acv')
    return output
Example #18
Source File: squeezenet.py From TNT with GNU General Public License v3.0 | 5 votes |
def inference(images, keep_probability, phase_train=True,
              bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages ends up in the trainable variables collection
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        with tf.variable_scope('squeezenet', [images], reuse=reuse):
            with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=phase_train):
                net = slim.conv2d(images, 96, [7, 7], stride=2, scope='conv1')
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='maxpool1')
                net = fire_module(net, 16, 64, scope='fire2')
                net = fire_module(net, 16, 64, scope='fire3')
                net = fire_module(net, 32, 128, scope='fire4')
                net = slim.max_pool2d(net, [2, 2], stride=2, scope='maxpool4')
                net = fire_module(net, 32, 128, scope='fire5')
                net = fire_module(net, 48, 192, scope='fire6')
                net = fire_module(net, 48, 192, scope='fire7')
                net = fire_module(net, 64, 256, scope='fire8')
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='maxpool8')
                net = fire_module(net, 64, 256, scope='fire9')
                net = slim.dropout(net, keep_probability)
                net = slim.conv2d(net, 1000, [1, 1], activation_fn=None, normalizer_fn=None, scope='conv10')
                net = slim.avg_pool2d(net, net.get_shape()[1:3], scope='avgpool10')
                net = tf.squeeze(net, [1, 2], name='logits')
                net = slim.fully_connected(net, bottleneck_layer_size, activation_fn=None,
                                           scope='Bottleneck', reuse=False)
    return net, None
Example #19
Source File: dfc_vae.py From TNT with GNU General Public License v3.0 | 5 votes |
def decoder(self, latent_var, is_training):
    activation_fn = leaky_relu  # tf.nn.relu
    weight_decay = 0.0
    with tf.variable_scope('decoder'):
        with slim.arg_scope([slim.batch_norm], is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                weights_regularizer=slim.l2_regularizer(weight_decay),
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.batch_norm_params):
                net = slim.fully_connected(latent_var, 4096, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                net = tf.reshape(net, [-1, 4, 4, 256], name='Reshape')

                net = tf.image.resize_nearest_neighbor(net, size=(8, 8), name='Upsample_1')
                net = slim.conv2d(net, 128, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_1')

                net = tf.image.resize_nearest_neighbor(net, size=(16, 16), name='Upsample_2')
                net = slim.conv2d(net, 64, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_2')

                net = tf.image.resize_nearest_neighbor(net, size=(32, 32), name='Upsample_3')
                net = slim.conv2d(net, 32, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_3')

                net = tf.image.resize_nearest_neighbor(net, size=(64, 64), name='Upsample_4')
                net = slim.conv2d(net, 3, [3, 3], 1, activation_fn=None, scope='Conv2d_4')
    return net
Example #20
Source File: network_base.py From tf-pose with Apache License 2.0 | 5 votes |
def separable_conv(self, input, k_h, k_w, c_o, stride, name, relu=True, set_bias=True):
    with slim.arg_scope([slim.batch_norm], decay=0.999, fused=common.batchnorm_fused,
                        is_training=self.trainable):
        output = slim.separable_convolution2d(input,
                                              num_outputs=None,
                                              stride=stride,
                                              trainable=self.trainable,
                                              depth_multiplier=1.0,
                                              kernel_size=[k_h, k_w],
                                              # activation_fn=common.activation_fn if relu else None,
                                              activation_fn=None,
                                              # normalizer_fn=slim.batch_norm,
                                              weights_initializer=_init_xavier,
                                              # weights_initializer=_init_norm,
                                              weights_regularizer=_l2_regularizer_00004,
                                              biases_initializer=None,
                                              padding=DEFAULT_PADDING,
                                              scope=name + '_depthwise')

        output = slim.convolution2d(output,
                                    c_o,
                                    stride=1,
                                    kernel_size=[1, 1],
                                    activation_fn=common.activation_fn if relu else None,
                                    weights_initializer=_init_xavier,
                                    # weights_initializer=_init_norm,
                                    biases_initializer=_init_zero if set_bias else None,
                                    normalizer_fn=slim.batch_norm,
                                    trainable=self.trainable,
                                    weights_regularizer=None,
                                    scope=name + '_pointwise')

    return output
Example #21
Source File: dfc_vae_resnet.py From TNT with GNU General Public License v3.0 | 5 votes |
def decoder(self, latent_var, is_training):
    activation_fn = leaky_relu  # tf.nn.relu
    weight_decay = 0.0
    with tf.variable_scope('decoder'):
        with slim.arg_scope([slim.batch_norm], is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                weights_regularizer=slim.l2_regularizer(weight_decay),
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.batch_norm_params):
                net = slim.fully_connected(latent_var, 4096, activation_fn=None, normalizer_fn=None, scope='Fc_1')
                net = tf.reshape(net, [-1, 4, 4, 256], name='Reshape')

                net = tf.image.resize_nearest_neighbor(net, size=(8, 8), name='Upsample_1')
                net = slim.conv2d(net, 128, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_1a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_1b')

                net = tf.image.resize_nearest_neighbor(net, size=(16, 16), name='Upsample_2')
                net = slim.conv2d(net, 64, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_2a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_2b')

                net = tf.image.resize_nearest_neighbor(net, size=(32, 32), name='Upsample_3')
                net = slim.conv2d(net, 32, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_3a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_3b')

                net = tf.image.resize_nearest_neighbor(net, size=(64, 64), name='Upsample_4')
                net = slim.conv2d(net, 3, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_4a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 3, [3, 3], 1, activation_fn=activation_fn, scope='Conv2d_4b')
                net = slim.conv2d(net, 3, [3, 3], 1, activation_fn=None, scope='Conv2d_4c')
    return net
Example #22
Source File: nasnet_model.py From benchmarks with Apache License 2.0 | 5 votes |
def _imagenet_stem(inputs, hparams, stem_cell):
    """Stem used for models trained on ImageNet."""
    num_stem_cells = 2

    # 149 x 149 x 32
    num_stem_filters = int(32 * hparams.stem_multiplier)
    net = slim.conv2d(inputs, num_stem_filters, [3, 3], stride=2, scope='conv0',
                      padding='VALID')
    net = slim.batch_norm(net, scope='conv0_bn')

    # Run the reduction cells
    cell_outputs = [None, net]
    filter_scaling = 1.0 / (hparams.filter_scaling_rate**num_stem_cells)
    for cell_num in range(num_stem_cells):
        net = stem_cell(
            net,
            scope='cell_stem_{}'.format(cell_num),
            filter_scaling=filter_scaling,
            stride=2,
            prev_layer=cell_outputs[-2],
            cell_num=cell_num)
        cell_outputs.append(net)
        filter_scaling *= hparams.filter_scaling_rate
    return net, cell_outputs
Example #23
Source File: layer_utils.py From centernet_tensorflow_wilderface_voc with MIT License | 5 votes |
def se_conv_unit(x):
    with tf.variable_scope(None, 'se_conv_unit'):
        shape = x.get_shape().as_list()
        y = slim.avg_pool2d(x, (shape[1], shape[2]), stride=1)
        y = slim.conv2d(y, shape[-1], 1, 1, activation_fn=None)
        y = slim.batch_norm(y, activation_fn=tf.nn.sigmoid, fused=False)
        x = tf.multiply(x, y)
        return x
Example #24
Source File: nasnet_model.py From benchmarks with Apache License 2.0 | 5 votes |
def nasnet_mobile_arg_scope(weight_decay=4e-5,
                            batch_norm_decay=0.9997,
                            batch_norm_epsilon=1e-3):
    """Defines the default arg scope for the NASNet-A Mobile ImageNet model.

    Args:
      weight_decay: The weight decay to use for regularizing the model.
      batch_norm_decay: Decay for batch norm moving average.
      batch_norm_epsilon: Small float added to variance to avoid dividing by
        zero in batch norm.

    Returns:
      An `arg_scope` to use for the NASNet Mobile Model.
    """
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': batch_norm_decay,
        # epsilon to prevent 0s in variance.
        'epsilon': batch_norm_epsilon,
        'scale': True,
        'fused': True,
    }
    weights_regularizer = contrib_layers.l2_regularizer(weight_decay)
    weights_initializer = contrib_layers.variance_scaling_initializer(mode='FAN_OUT')
    with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d],
                   weights_regularizer=weights_regularizer,
                   weights_initializer=weights_initializer):
        with arg_scope([slim.fully_connected], activation_fn=None, scope='FC'):
            with arg_scope([slim.conv2d, slim.separable_conv2d],
                           activation_fn=None, biases_initializer=None):
                with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
                    return sc
Example #25
Source File: nasnet_model.py From benchmarks with Apache License 2.0 | 5 votes |
def nasnet_cifar_arg_scope(weight_decay=5e-4,
                           batch_norm_decay=0.9,
                           batch_norm_epsilon=1e-5):
    """Defines the default arg scope for the NASNet-A Cifar model.

    Args:
      weight_decay: The weight decay to use for regularizing the model.
      batch_norm_decay: Decay for batch norm moving average.
      batch_norm_epsilon: Small float added to variance to avoid dividing by
        zero in batch norm.

    Returns:
      An `arg_scope` to use for the NASNet Cifar Model.
    """
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': batch_norm_decay,
        # epsilon to prevent 0s in variance.
        'epsilon': batch_norm_epsilon,
        'scale': True,
        'fused': True,
    }
    weights_regularizer = contrib_layers.l2_regularizer(weight_decay)
    weights_initializer = contrib_layers.variance_scaling_initializer(mode='FAN_OUT')
    with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d],
                   weights_regularizer=weights_regularizer,
                   weights_initializer=weights_initializer):
        with arg_scope([slim.fully_connected], activation_fn=None, scope='FC'):
            with arg_scope([slim.conv2d, slim.separable_conv2d],
                           activation_fn=None, biases_initializer=None):
                with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
                    return sc
Example #26
Source File: resnet_bak.py From R2CNN_Faster-RCNN_Tensorflow with MIT License | 5 votes |
def resnet_arg_scope(is_training=True,
                     weight_decay=cfgs.WEIGHT_DECAY,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
    '''
    By default we do not use BN to train ResNet, since the batch size is too small.
    So is_training is False and trainable is False in the batch_norm params.
    '''
    batch_norm_params = {
        'is_training': False,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'trainable': False,
        'updates_collections': tf.GraphKeys.UPDATE_OPS
    }
    with slim.arg_scope(
            [slim.conv2d],
            weights_regularizer=slim.l2_regularizer(weight_decay),
            weights_initializer=slim.variance_scaling_initializer(),
            trainable=is_training,
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
            return arg_sc
Example #27
Source File: resnet.py From R2CNN_Faster-RCNN_Tensorflow with MIT License | 5 votes |
def resnet_arg_scope(is_training=True,
                     weight_decay=cfgs.WEIGHT_DECAY,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
    '''
    By default we do not use BN to train ResNet, since the batch size is too small.
    So is_training is False and trainable is False in the batch_norm params.
    '''
    batch_norm_params = {
        'is_training': False,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'trainable': False,
        'updates_collections': tf.GraphKeys.UPDATE_OPS
    }
    with slim.arg_scope(
            [slim.conv2d],
            weights_regularizer=slim.l2_regularizer(weight_decay),
            weights_initializer=slim.variance_scaling_initializer(),
            trainable=is_training,
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
            return arg_sc
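A note on the design choice in Examples #26 and #27: batch norm is frozen ('is_training': False, 'trainable': False) because these detection models are fine-tuned with very small batch sizes, where updating the moving statistics would be unreliable, while the convolution weights themselves remain trainable. The following is a minimal sketch of how such a returned scope is typically consumed; the placeholder and the demo layer are illustrative assumptions, not part of either source file.

import tensorflow as tf
import tensorflow.contrib.slim as slim

# Assumes the resnet_arg_scope defined above is in scope.
images = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope(resnet_arg_scope(is_training=True)):
    # The conv weights are trainable (trainable=is_training), but the
    # batch_norm params above keep the normalization statistics frozen.
    net = slim.conv2d(images, 64, [3, 3], scope='conv_demo')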
Example #28
Source File: convolution.py From Traffic_sign_detection_YOLO with MIT License | 5 votes |
def forward(self):
    pad = [[self.lay.pad, self.lay.pad]] * 2
    temp = tf.pad(self.inp.out, [[0, 0]] + pad + [[0, 0]])

    temp = tf.nn.conv2d(temp, self.lay.w['kernel'], padding='VALID',
                        name=self.scope, strides=[1] + [self.lay.stride] * 2 + [1])

    if self.lay.batch_norm:
        temp = self.batchnorm(self.lay, temp)
    self.out = tf.nn.bias_add(temp, self.lay.w['biases'])
Example #29
Source File: layer_utils.py From centernet_tensorflow_wilderface_voc with MIT License | 5 votes |
def sa_conv_unit(x):
    with tf.variable_scope(None, 'sa_conv_unit'):
        shape = x.get_shape().as_list()
        y = slim.conv2d(x, shape[-1], kernel_size=1, stride=1, biases_initializer=None, activation_fn=None)
        y = slim.batch_norm(y, activation_fn=None, fused=False)
        y = tf.nn.sigmoid(y)
        x = tf.multiply(x, y)
        return x
Example #30
Source File: yolov3_centernet_V2.py From centernet_tensorflow_wilderface_voc with MIT License | 5 votes |
def _build_model(self):
    with slim.arg_scope([slim.batch_norm], is_training=self.is_training):
        with slim.arg_scope([slim.conv2d],
                            normalizer_fn=slim.batch_norm,
                            biases_initializer=None,
                            activation_fn=lambda x: tf.nn.leaky_relu(x, alpha=0.1)):
            with tf.variable_scope('darknet53_body'):
                route_1, route_2, route_3 = darknet53_body(self.inputs)
            with tf.variable_scope('yolov3_head'):
                inter1, net = yolo_block(route_3, 512)  # 13*13*1024 -> (13*13*512, 13*13*1024)
                inter1 = conv2d(inter1, 256, 1)  # 13*13*512 -> 13*13*256
                inter1 = upsample_layer(inter1, route_2.get_shape().as_list())  # 26*26*256
                concat1 = tf.concat([inter1, route_2], axis=3)  # 26*26*(256+512) = 26*26*768
                inter2, net = yolo_block(concat1, 256)  # 26*26*768 -> (26*26*256, 26*26*512)
                inter2 = conv2d(inter2, 128, 1)  # 26*26*256 -> 26*26*128
                inter2 = upsample_layer(inter2, route_1.get_shape().as_list())  # 26*26*128 -> 52*52*128
                concat2 = tf.concat([inter2, route_1], axis=3)  # 52*52*(128+256) -> 52*52*384
                _, feature_map_3 = yolo_block(concat2, 128)  # 52*52*384 -> (52*52*128, 52*52*256)
            with tf.variable_scope('detector'):
                cls = slim.conv2d(feature_map_3, cfg.feature_channels, 3, 1, padding='same')
                cls = slim.conv2d(cls, cfg.num_classes, 1, 1, padding='same',
                                  normalizer_fn=None, activation_fn=tf.nn.sigmoid,
                                  biases_initializer=tf.zeros_initializer())
                size = slim.conv2d(feature_map_3, cfg.feature_channels, 3, 1, padding='same')
                size = slim.conv2d(size, 2, 1, 1, padding='same',
                                   normalizer_fn=None, activation_fn=None,
                                   biases_initializer=tf.zeros_initializer())
    return cls, size