Python tensorpack.models.BatchNorm() Examples
The following are 8 code examples of tensorpack.models.BatchNorm(), collected from open-source projects.
Each example names the project and source file it was taken from, so you can trace it back to the original code.
You may also want to check out all available functions/classes of the module tensorpack.models.
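For orientation before the examples: in tensorpack, BatchNorm is a registered layer whose first argument is the variable-scope name and whose second is the input tensor; when no explicit training argument is passed, it infers training/inference mode from the surrounding tower context, so it is normally called while building a tensorpack model graph. A minimal sketch under that assumption (toy_block and the 'bn' name are illustrative, not from the examples below):

import tensorflow as tf
from tensorpack.models import BatchNorm

def toy_block(x):
    # 'bn' is the variable scope holding beta/gamma and the moving statistics
    x = BatchNorm('bn', x)
    return tf.nn.relu(x)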
Example #1
Source File: myconv2d.py From ghostnet with Apache License 2.0
def BNNoReLU(x, name=None):
    """ A shorthand of BatchNormalization. """
    if name is None:
        x = BatchNorm('bn', x)
    else:
        x = BatchNorm(name, x)
    return x
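A helper like this is meant to be plugged in wherever a plain activation callable is expected, as a drop-in for tensorpack's built-in BNReLU when the ReLU is unwanted; a hypothetical usage (the Conv2D arguments are illustrative, not from myconv2d.py):

l = Conv2D('conv1', l, 64, 3, activation=BNNoReLU)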
Example #2
Source File: resnet.py From ADL with MIT License
def resnet(input_, option):
    mode = option.mode
    DEPTH = option.depth
    bottleneck = {'se': se_resnet_bottleneck}[mode]
    cfg = {
        50: ([3, 4, 6, 3], bottleneck),
    }
    defs, block_func = cfg[DEPTH]
    group_func = resnet_group
    with argscope(Conv2D, use_bias=False,
                  kernel_initializer=tf.variance_scaling_initializer(scale=2.0, mode='fan_out')), \
            argscope([Conv2D, MaxPooling, GlobalAvgPooling, BatchNorm],
                     data_format='channels_first'):
        l = Conv2D('conv0', input_, 64, 7, strides=2, activation=BNReLU)
        if option.gating_position[0]:
            l = gating_op(l, option)
        l = MaxPooling('pool0', l, 3, strides=2, padding='SAME')
        if option.gating_position[1]:
            l = gating_op(l, option)
        l = group_func('group0', l, block_func, 64, defs[0], 1, option)
        if option.gating_position[2]:
            l = gating_op(l, option)
        l = group_func('group1', l, block_func, 128, defs[1], 2, option)
        if option.gating_position[3]:
            l = gating_op(l, option)
        l = group_func('group2', l, block_func, 256, defs[2], 2, option)
        if option.gating_position[4]:
            l = gating_op(l, option)
        l = group_func('group3', l, block_func, 512, defs[3], 1, option)
        if option.gating_position[5]:
            l = gating_op(l, option)
        p_logits = GlobalAvgPooling('gap', l)
        logits = FullyConnected('linearnew', p_logits, option.number_of_class)
    return logits, l
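Note that only depth 50 is configured in cfg, so any other option.depth raises a KeyError. A hypothetical call, assuming image and option are set up the way this project expects:

logits, feature_map = resnet(image, option)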
Example #3
Source File: resnet.py From ADL with MIT License
def get_bn(zero_init=False):
    if zero_init:
        return lambda x, name=None: BatchNorm(
            'bn', x, gamma_initializer=tf.zeros_initializer())
    else:
        return lambda x, name=None: BatchNorm('bn', x)
Example #4
Source File: resnet_model.py From tensorpack with Apache License 2.0
def get_bn(zero_init=False):
    """
    Zero init gamma is good for resnet. See https://arxiv.org/abs/1706.02677.
    """
    if zero_init:
        return lambda x, name=None: BatchNorm('bn', x, gamma_initializer=tf.zeros_initializer())
    else:
        return lambda x, name=None: BatchNorm('bn', x)


# ----------------- pre-activation resnet ----------------------
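The returned lambda is designed to be passed as a layer's activation, which is how the zero-gamma trick from the linked paper gets applied to the last BatchNorm of a residual branch; a hypothetical usage (the Conv2D call is illustrative, not quoted from resnet_model.py):

l = Conv2D('conv3', l, 256, 1, activation=get_bn(zero_init=True))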
Example #5
Source File: backbone.py From tensorpack with Apache License 2.0
# requires: from contextlib import contextmanager, ExitStack
@contextmanager
def backbone_scope(freeze):
    """
    Args:
        freeze (bool): whether to freeze all the variables under the scope
    """
    def nonlin(x):
        x = get_norm()(x)
        return tf.nn.relu(x)

    with argscope([Conv2D, MaxPooling, BatchNorm], data_format='channels_first'), \
            argscope(Conv2D, use_bias=False, activation=nonlin,
                     kernel_initializer=tf.variance_scaling_initializer(scale=2.0, mode='fan_out')), \
            ExitStack() as stack:
        if cfg.BACKBONE.NORM in ['FreezeBN', 'SyncBN']:
            if freeze or cfg.BACKBONE.NORM == 'FreezeBN':
                stack.enter_context(argscope(BatchNorm, training=False))
            else:
                stack.enter_context(argscope(
                    BatchNorm,
                    sync_statistics='nccl' if cfg.TRAINER == 'replicated' else 'horovod'))
        if freeze:
            stack.enter_context(freeze_variables(stop_gradient=False, skip_collection=True))
        else:
            # the layers are not completely frozen, but we may want to freeze only the affine parameters
            if cfg.BACKBONE.FREEZE_AFFINE:
                stack.enter_context(custom_getter_scope(freeze_affine_getter))
        yield
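Since the function yields once, it is used as a context manager wrapped around the graph-building calls; a minimal hypothetical usage (the Conv2D call stands in for the project's real backbone functions):

with backbone_scope(freeze=True):
    l = Conv2D('conv0', image, 64, 7, strides=2)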
Example #6
Source File: backbone.py From tensorpack with Apache License 2.0
def get_norm(zero_init=False):
    if cfg.BACKBONE.NORM == 'None':
        return lambda x: x
    if cfg.BACKBONE.NORM == 'GN':
        Norm = GroupNorm
        layer_name = 'gn'
    else:
        Norm = BatchNorm
        layer_name = 'bn'
    return lambda x: Norm(layer_name, x,
                          gamma_initializer=tf.zeros_initializer() if zero_init else None)
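This is the helper that nonlin in the previous example calls: depending on cfg.BACKBONE.NORM it returns the identity, a GroupNorm layer, or a BatchNorm layer. A hypothetical usage:

l = get_norm(zero_init=True)(l)  # zero-gamma norm, or identity if NORM == 'None'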
Example #7
Source File: region_norm_ops.py From Regional-Homogeneity with MIT License
def RegionNorm(x, h_group_num, w_group_num,
               gamma_initializer=tf.constant_initializer(1.)):
    # 1. pad so that h % h_group_num == 0, w % w_group_num == 0
    orig_shape = x.get_shape().as_list()
    h, w = orig_shape[1], orig_shape[2]
    new_h = get_pad_num(h, h_group_num)
    new_w = get_pad_num(w, w_group_num)
    x_resized = tf.image.resize_images(x, [new_h, new_w], align_corners=False)

    # 2. split and stack all grid
    assert new_h % h_group_num == 0
    sub_h = new_h // h_group_num
    assert new_w % w_group_num == 0
    sub_w = new_w // w_group_num
    sub_grids = []
    for i in range(0, new_h, sub_h):
        for j in range(0, new_w, sub_w):
            x_sub_grid = x_resized[:, i:i + sub_h, j:j + sub_w, :, None]
            sub_grids.append(x_sub_grid)
    sub_grids = tf.concat(sub_grids, axis=4)
    sub_grids_shape = sub_grids.get_shape().as_list()
    feed2bn = tf.reshape(sub_grids, [-1, sub_grids_shape[1],
                                     sub_grids_shape[2] * sub_grids_shape[3],
                                     sub_grids_shape[4]])

    # 3. normalization
    bn_output = BatchNorm('bn', feed2bn, axis=3,
                          gamma_initializer=gamma_initializer,
                          internal_update=True, sync_statistics='nccl')

    # 4. go back to original shape
    new_sub_grids = tf.reshape(bn_output, [-1, sub_grids_shape[1], sub_grids_shape[2],
                                           sub_grids_shape[3], sub_grids_shape[4]])
    counter = 0
    new_rows = []
    for i in range(0, new_h, sub_h):
        new_row = []
        for j in range(0, new_w, sub_w):
            new_row.append(new_sub_grids[:, :, :, :, counter])
            counter += 1
        new_row = tf.concat(new_row, axis=2)
        new_rows.append(new_row)
    new_x_resized = tf.concat(new_rows, axis=1)

    # 5. resize back
    new_x = tf.image.resize_images(new_x_resized, [h, w], align_corners=False)
    return new_x
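A hypothetical call, assuming x is a 4-D NHWC tensor with static height and width (RegionNorm reads them via get_shape()):

y = RegionNorm(x, h_group_num=2, w_group_num=2)  # each of the 2x2 regions gets its own BN statistics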
Example #8
Source File: vgg.py From ADL with MIT License
def vgg_gap(image, option):
    with argscope(Conv2D, use_bias=True,
                  kernel_initializer=tf.variance_scaling_initializer(scale=2.)), \
            argscope([Conv2D, MaxPooling, BatchNorm, GlobalAvgPooling],
                     data_format='channels_first'):
        l = convnormrelu(image, 'conv1_1', 64)
        if option.gating_position[11]:
            l = gating_op(l, option)
        l = convnormrelu(l, 'conv1_2', 64)
        if option.gating_position[12]:
            l = gating_op(l, option)
        l = MaxPooling('pool1', l, 2)
        if option.gating_position[1]:
            l = gating_op(l, option)

        l = convnormrelu(l, 'conv2_1', 128)
        if option.gating_position[21]:
            l = gating_op(l, option)
        l = convnormrelu(l, 'conv2_2', 128)
        if option.gating_position[22]:
            l = gating_op(l, option)
        l = MaxPooling('pool2', l, 2)
        if option.gating_position[2]:
            l = gating_op(l, option)

        l = convnormrelu(l, 'conv3_1', 256)
        if option.gating_position[31]:
            l = gating_op(l, option)
        l = convnormrelu(l, 'conv3_2', 256)
        if option.gating_position[32]:
            l = gating_op(l, option)
        l = convnormrelu(l, 'conv3_3', 256)
        if option.gating_position[33]:
            l = gating_op(l, option)
        l = MaxPooling('pool3', l, 2)
        if option.gating_position[3]:
            l = gating_op(l, option)

        l = convnormrelu(l, 'conv4_1', 512)
        if option.gating_position[41]:
            l = gating_op(l, option)
        l = convnormrelu(l, 'conv4_2', 512)
        if option.gating_position[42]:
            l = gating_op(l, option)
        l = convnormrelu(l, 'conv4_3', 512)
        if option.gating_position[43]:
            l = gating_op(l, option)
        l = MaxPooling('pool4', l, 2)
        if option.gating_position[4]:
            l = gating_op(l, option)

        l = convnormrelu(l, 'conv5_1', 512)
        if option.gating_position[51]:
            l = gating_op(l, option)
        l = convnormrelu(l, 'conv5_2', 512)
        if option.gating_position[52]:
            l = gating_op(l, option)
        l = convnormrelu(l, 'conv5_3', 512)
        if option.gating_position[53]:
            l = gating_op(l, option)

        convmaps = convnormrelu(l, 'new', 1024)
        if option.gating_position[6]:
            convmaps = gating_op(convmaps, option)

        p_logits = GlobalAvgPooling('gap', convmaps)
        logits = FullyConnected('linear', p_logits, option.number_of_class,
                                kernel_initializer=tf.random_normal_initializer(stddev=0.01))
    return logits, convmaps
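As with the resnet example above, the function returns both the classification logits and the final convolutional maps; a hypothetical call:

logits, convmaps = vgg_gap(image, option)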