Python tensorpack.tfutils.argscope.argscope() Examples

The following are 20 code examples of tensorpack.tfutils.argscope.argscope(), collected from open-source projects. Each example names the project and source file it was taken from. You may also want to check out all available functions/classes of the module tensorpack.tfutils.argscope.
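Before the examples, a quick orientation: argscope is a context manager that installs default keyword arguments for the listed tensorpack layers inside its with-block; arguments passed at the call site still take precedence. A minimal sketch, written for this page rather than taken from any project below:

import tensorflow as tf
from tensorpack import Conv2D, MaxPooling
from tensorpack.tfutils.argscope import argscope

def simple_net(image):
    # Both layer types inherit data_format='channels_first'; Conv2D also
    # inherits kernel_size=3 and a ReLU activation from the second scope.
    with argscope([Conv2D, MaxPooling], data_format='channels_first'), \
            argscope(Conv2D, kernel_size=3, activation=tf.nn.relu):
        l = Conv2D('conv1', image, 64)                        # 3x3 conv + ReLU
        l = MaxPooling('pool1', l, pool_size=2)
        l = Conv2D('conv2', l, 128, activation=tf.identity)   # call-site override
    return l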
Example #1
Source File: basemodel.py    From PReMVOS with MIT License
def pretrained_resnet_conv4(image, num_blocks):
    assert len(num_blocks) == 3
    with argscope([Conv2D, MaxPooling, BatchNorm], data_format='NCHW'), \
            argscope(Conv2D, nl=tf.identity, use_bias=False), \
            argscope(BatchNorm, use_local_stat=False):
        l = tf.pad(image, [[0, 0], [0, 0], [2, 3], [2, 3]])
        l = Conv2D('conv0', l, 64, 7, stride=2, nl=BNReLU, padding='VALID')
        l = tf.pad(l, [[0, 0], [0, 0], [0, 1], [0, 1]])
        l = MaxPooling('pool0', l, shape=3, stride=2, padding='VALID')
        l = resnet_group(l, 'group0', resnet_bottleneck, 64, num_blocks[0], 1)
        # TODO replace var by const to enable folding
        l = tf.stop_gradient(l)
        l = resnet_group(l, 'group1', resnet_bottleneck, 128, num_blocks[1], 2)
        l = resnet_group(l, 'group2', resnet_bottleneck, 256, num_blocks[2], 2)
    # 16x downsampling up to now
    return l 
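Example #1 stacks three argscope contexts in one with-statement. A hedged sketch of how stacked or nested scopes compose, assuming only the behavior visible in these examples: inner scopes override outer defaults for the same layer, and explicit call-site arguments override both:

import tensorflow as tf
from tensorpack import Conv2D
from tensorpack.tfutils.argscope import argscope

def toy(image):
    with argscope(Conv2D, activation=tf.nn.relu, use_bias=False):
        with argscope(Conv2D, kernel_size=3):
            x = Conv2D('c0', image, 32)              # 3x3, ReLU, no bias
            x = Conv2D('c1', x, 32, kernel_size=1)   # call-site wins: 1x1
        x = Conv2D('c2', x, 64, kernel_size=5)       # inner scope has exited: 5x5
    return x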
Example #2
Source File: model_rpn.py    From tensorpack with Apache License 2.0
def rpn_head(featuremap, channel, num_anchors):
    """
    Returns:
        label_logits: fHxfWxNA
        box_logits: fHxfWxNAx4
    """
    with argscope(Conv2D, data_format='channels_first',
                  kernel_initializer=tf.random_normal_initializer(stddev=0.01)):
        hidden = Conv2D('conv0', featuremap, channel, 3, activation=tf.nn.relu)

        label_logits = Conv2D('class', hidden, num_anchors, 1)
        box_logits = Conv2D('box', hidden, 4 * num_anchors, 1)
        # 1, NA(*4), im/16, im/16 (NCHW)

        label_logits = tf.transpose(label_logits, [0, 2, 3, 1])  # 1xfHxfWxNA
        label_logits = tf.squeeze(label_logits, 0)  # fHxfWxNA

        shp = tf.shape(box_logits)  # 1x(NAx4)xfHxfW
        box_logits = tf.transpose(box_logits, [0, 2, 3, 1])  # 1xfHxfWx(NAx4)
        box_logits = tf.reshape(box_logits, tf.stack([shp[2], shp[3], num_anchors, 4]))  # fHxfWxNAx4
    return label_logits, box_logits 
Example #3
Source File: model_mrcnn.py    From tensorpack with Apache License 2.0
def maskrcnn_upXconv_head(feature, num_category, num_convs, norm=None):
    """
    Args:
        feature (N x C x s x s): spatial size s is 7 in C4 models and 14 in FPN models.
        num_category (int): number of mask categories to predict
        num_convs (int): number of convolution layers
        norm (str or None): either None or 'GN'

    Returns:
        mask_logits (N x num_category x 2s x 2s):
    """
    assert norm in [None, 'GN'], norm
    l = feature
    with argscope([Conv2D, Conv2DTranspose], data_format='channels_first',
                  kernel_initializer=tf.variance_scaling_initializer(
                      scale=2.0, mode='fan_out',
                      distribution='untruncated_normal' if get_tf_version_tuple() >= (1, 12) else 'normal')):
        # c2's MSRAFill is fan_out
        for k in range(num_convs):
            l = Conv2D('fcn{}'.format(k), l, cfg.MRCNN.HEAD_DIM, 3, activation=tf.nn.relu)
            if norm is not None:
                l = GroupNorm('gn{}'.format(k), l)
        l = Conv2DTranspose('deconv', l, cfg.MRCNN.HEAD_DIM, 2, strides=2, activation=tf.nn.relu)
        l = Conv2D('conv', l, num_category, 1, kernel_initializer=tf.random_normal_initializer(stddev=0.001))
    return l 
Example #4
Source File: model_frcnn.py    From tensorpack with Apache License 2.0
def fastrcnn_Xconv1fc_head(feature, num_convs, norm=None):
    """
    Args:
        feature (NCHW):
        num_convs (int): number of conv layers
        norm (str or None): either None or 'GN'

    Returns:
        2D head feature
    """
    assert norm in [None, 'GN'], norm
    l = feature
    with argscope(Conv2D, data_format='channels_first',
                  kernel_initializer=tf.variance_scaling_initializer(
                      scale=2.0, mode='fan_out',
                      distribution='untruncated_normal' if get_tf_version_tuple() >= (1, 12) else 'normal')):
        for k in range(num_convs):
            l = Conv2D('conv{}'.format(k), l, cfg.FPN.FRCNN_CONV_HEAD_DIM, 3, activation=tf.nn.relu)
            if norm is not None:
                l = GroupNorm('gn{}'.format(k), l)
        l = FullyConnected('fc', l, cfg.FPN.FRCNN_FC_HEAD_DIM,
                           kernel_initializer=tf.variance_scaling_initializer(), activation=tf.nn.relu)
    return l 
Example #5
Source File: model.py    From PReMVOS with MIT License
def rpn_head(featuremap, channel, num_anchors):
    """
    Returns:
        label_logits: fHxfWxNA
        box_logits: fHxfWxNAx4
    """
    with argscope(Conv2D, data_format='NCHW',
                  W_init=tf.random_normal_initializer(stddev=0.01)):
        hidden = Conv2D('conv0', featuremap, channel, 3, nl=tf.nn.relu)

        label_logits = Conv2D('class', hidden, num_anchors, 1)
        box_logits = Conv2D('box', hidden, 4 * num_anchors, 1)
        # 1, NA(*4), im/16, im/16 (NCHW)

        label_logits = tf.transpose(label_logits, [0, 2, 3, 1])  # 1xfHxfWxNA
        label_logits = tf.squeeze(label_logits, 0)  # fHxfWxNA

        shp = tf.shape(box_logits)  # 1x(NAx4)xfHxfW
        box_logits = tf.transpose(box_logits, [0, 2, 3, 1])  # 1xfHxfWx(NAx4)
        box_logits = tf.reshape(box_logits, tf.stack([shp[2], shp[3], num_anchors, 4]))  # fHxfWxNAx4
    return label_logits, box_logits 
Example #6
Source File: resnet_model.py    From benchmarks with The Unlicense
def resnet_backbone(image, num_blocks, group_func, block_func):
    """
    Sec 5.1: We adopt the initialization of [15] for all convolutional layers.
    TensorFlow does not have the true "MSRA init". We use variance_scaling as an approximation.
    """
    with argscope(Conv2D, use_bias=False,
                  kernel_initializer=tf.variance_scaling_initializer(scale=2.0, mode='fan_out')):
        l = Conv2D('conv0', image, 64, 7, strides=2, activation=BNReLU)
        l = MaxPooling('pool0', l, pool_size=3, strides=2, padding='SAME')
        l = group_func('group0', l, block_func, 64, num_blocks[0], 1)
        l = group_func('group1', l, block_func, 128, num_blocks[1], 2)
        l = group_func('group2', l, block_func, 256, num_blocks[2], 2)
        l = group_func('group3', l, block_func, 512, num_blocks[3], 2)
        l = GlobalAvgPooling('gap', l)
        logits = FullyConnected('linear', l, 1000,
                                kernel_initializer=tf.random_normal_initializer(stddev=0.01))
    """
    Sec 5.1:
    The 1000-way fully-connected layer is initialized by
    drawing weights from a zero-mean Gaussian with standard
    deviation of 0.01.
    """
    return logits 
Example #7
Source File: resnet_model.py    From GroupNorm-reproduce with Apache License 2.0
def resnet_backbone(image, num_blocks, group_func, block_func):
    with argscope(Conv2D, use_bias=False,
                  kernel_initializer=tf.variance_scaling_initializer(
                      scale=2.0, mode='fan_out', distribution='untruncated_normal')):
        logits = (LinearWrap(image)
                  .tf.pad([[0, 0], [0, 0], [3, 3], [3, 3]])
                  .Conv2D('conv0', 64, 7, strides=2, activation=GNReLU, padding='VALID')
                  .tf.pad([[0, 0], [0, 0], [1, 1], [1, 1]])
                  .MaxPooling('pool0', shape=3, stride=2, padding='VALID')
                  .apply(group_func, 'group0', block_func, 64, num_blocks[0], 1)
                  .apply(group_func, 'group1', block_func, 128, num_blocks[1], 2)
                  .apply(group_func, 'group2', block_func, 256, num_blocks[2], 2)
                  .apply(group_func, 'group3', block_func, 512, num_blocks[3], 2)
                  .GlobalAvgPooling('gap')
                  .FullyConnected('linear', 1000,
                                  kernel_initializer=tf.random_normal_initializer(stddev=0.01))())
    return logits 
Example #8
Source File: densenet_model.py    From LQ-Nets with MIT License
def densenet_backbone(image, qw=1):
    with argscope(Conv2DQuant, nl=tf.identity, use_bias=False,
                  W_init=variance_scaling_initializer(mode='FAN_IN'),
                  data_format=get_arg_scope()['Conv2D']['data_format'],
                  nbit=qw,
                  is_quant=True if qw > 0 else False):
        logits = (LinearWrap(image)
                  .Conv2DQuant('conv1', 2 * GROWTH_RATE, 7, stride=2, nl=BNReLU, is_quant=False)
                  .MaxPooling('pool1', shape=3, stride=2, padding='SAME')
                  # 56
                  .apply(add_dense_block, 'block0', 6)
                  # 28
                  .apply(add_dense_block, 'block1', 12)
                  # 14
                  .apply(add_dense_block, 'block2', 24)
                  # 7
                  .apply(add_dense_block, 'block3', 16, last=True)
                  .BNReLU('bnrelu_last')
                  .GlobalAvgPooling('gap')
                  .FullyConnected('linear', out_dim=1000, nl=tf.identity, W_init=variance_scaling_initializer(mode='FAN_IN'))())
    return logits 
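Several LQ-Nets examples (this one and Examples #9, #11, #12, #14) read the active defaults back with get_arg_scope() in order to forward Conv2D's data_format into Conv2DQuant. A minimal sketch, assuming get_arg_scope() returns the current defaults as a dict keyed by layer name:

from tensorpack import Conv2D
from tensorpack.tfutils.argscope import argscope, get_arg_scope

with argscope(Conv2D, data_format='NCHW'):
    # Look up the default that the surrounding scope set for Conv2D,
    # to reuse it when configuring another layer type.
    fmt = get_arg_scope()['Conv2D']['data_format']
    assert fmt == 'NCHW'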
Example #9
Source File: alexnet_model.py    From LQ-Nets with MIT License
def alexnet_backbone(image, qw=1):
    with argscope(Conv2DQuant, nl=tf.identity, use_bias=False,
                  W_init=tf.random_normal_initializer(stddev=0.01),
                  data_format=get_arg_scope()['Conv2D']['data_format'],
                  nbit=qw):
        logits = (LinearWrap(image)
                  .Conv2DQuant('conv1', 96, 11, stride=4, is_quant=False, padding='VALID')
                  .MaxPooling('pool1', shape=3, stride=2, padding='VALID')
                  .BNReLUQuant('bnquant2')
                  .Conv2DQuant('conv2', 256, 5)
                  .MaxPooling('pool2', shape=3, stride=2, padding='VALID')
                  .BNReLUQuant('bnquant3')
                  .Conv2DQuant('conv3', 384, 3, nl=getBNReLUQuant)
                  .Conv2DQuant('conv4', 384, 3, nl=getBNReLUQuant)
                  .Conv2DQuant('conv5', 256, 3)
                  .MaxPooling('pool5', shape=3, stride=2, padding='VALID')
                  .BNReLUQuant('bnquant6')
                  .Conv2DQuant('fc6', 4096, 6, nl=getfcBNReLUQuant, padding='VALID', W_init=tf.random_normal_initializer(stddev=0.005), use_bias=True)
                  .Conv2DQuant('fc7', 4096, 1, nl=getfcBNReLU, padding='VALID', W_init=tf.random_normal_initializer(stddev=0.005), use_bias=True)
                  .FullyConnected('fc8', out_dim=1000, nl=tf.identity, W_init=tf.random_normal_initializer(stddev=0.01))())
    return logits 
Example #10
Source File: resnet.py    From ADL with MIT License
def resnet(input_, option):
    mode = option.mode
    DEPTH = option.depth
    bottleneck = {'se': se_resnet_bottleneck}[mode]

    cfg = {
        50: ([3, 4, 6, 3], bottleneck),
    }
    defs, block_func = cfg[DEPTH]
    group_func = resnet_group

    with argscope(Conv2D, use_bias=False, kernel_initializer= \
            tf.variance_scaling_initializer(scale=2.0, mode='fan_out')), \
         argscope([Conv2D, MaxPooling, GlobalAvgPooling, BatchNorm],
                  data_format='channels_first'):

        l = Conv2D('conv0', input_, 64, 7, strides=2, activation=BNReLU)
        if option.gating_position[0]: l = gating_op(l, option)

        l = MaxPooling('pool0', l, 3, strides=2, padding='SAME')
        if option.gating_position[1]: l = gating_op(l, option)

        l = group_func('group0', l, block_func, 64, defs[0], 1, option)
        if option.gating_position[2]: l = gating_op(l, option)

        l = group_func('group1', l, block_func, 128, defs[1], 2, option)
        if option.gating_position[3]: l = gating_op(l, option)

        l = group_func('group2', l, block_func, 256, defs[2], 2, option)
        if option.gating_position[4]: l = gating_op(l, option)

        l = group_func('group3', l, block_func, 512, defs[3],
                       1, option)
        if option.gating_position[5]: l = gating_op(l, option)

        p_logits = GlobalAvgPooling('gap', l)
        logits = FullyConnected('linearnew', p_logits, option.number_of_class)

    return logits, l 
Example #11
Source File: resnet_model.py    From LQ-Nets with MIT License
def resnet_backbone(image, num_blocks, group_func, block_func, qw=1):
    with argscope(Conv2DQuant, nl=tf.identity, use_bias=False,
                  W_init=variance_scaling_initializer(mode='FAN_OUT'),
                  data_format=get_arg_scope()['Conv2D']['data_format'],
                  nbit=qw):
        logits = (LinearWrap(image)
                  .Conv2DQuant('conv0', 64, 7, stride=2, nl=BNReLU, is_quant=False)
                  .MaxPooling('pool0', shape=3, stride=2, padding='SAME')
                  .apply(group_func, 'group0', block_func, 64, num_blocks[0], 1)
                  .apply(group_func, 'group1', block_func, 128, num_blocks[1], 2)
                  .apply(group_func, 'group2', block_func, 256, num_blocks[2], 2)
                  .apply(group_func, 'group3', block_func, 512, num_blocks[3], 2, is_last=True)
                  .GlobalAvgPooling('gap')
                  .FullyConnected('linear', 1000, nl=tf.identity)())
    return logits 
Example #12
Source File: vgg_model.py    From LQ-Nets with MIT License
def vgg_backbone(image, qw=1):
    with argscope(Conv2DQuant, nl=tf.identity, use_bias=False,
                  W_init=variance_scaling_initializer(mode='FAN_IN'),
                  data_format=get_arg_scope()['Conv2D']['data_format'],
                  nbit=qw):
        logits = (LinearWrap(image)
                  .Conv2DQuant('conv1', 96, 7, stride=2, nl=tf.nn.relu, is_quant=False)
                  .MaxPooling('pool1', shape=2, stride=2, padding='VALID')
                  # 56
                  .BNReLUQuant('bnquant2_0')
                  .Conv2DQuant('conv2_1', 256, 3, nl=getBNReLUQuant)
                  .Conv2DQuant('conv2_2', 256, 3, nl=getBNReLUQuant)
                  .Conv2DQuant('conv2_3', 256, 3)
                  .MaxPooling('pool2', shape=2, stride=2, padding='VALID')
                  # 28
                  .BNReLUQuant('bnquant3_0')
                  .Conv2DQuant('conv3_1', 512, 3, nl=getBNReLUQuant)
                  .Conv2DQuant('conv3_2', 512, 3, nl=getBNReLUQuant)
                  .Conv2DQuant('conv3_3', 512, 3)
                  .MaxPooling('pool3', shape=2, stride=2, padding='VALID')
                  # 14
                  .BNReLUQuant('bnquant4_0')
                  .Conv2DQuant('conv4_1', 512, 3, nl=getBNReLUQuant)
                  .Conv2DQuant('conv4_2', 512, 3, nl=getBNReLUQuant)
                  .Conv2DQuant('conv4_3', 512, 3)
                  .MaxPooling('pool4', shape=2, stride=2, padding='VALID')
                  # 7
                  .BNReLUQuant('bnquant5')
                  .Conv2DQuant('fc5', 4096, 7, nl=getfcBNReLUQuant, padding='VALID', use_bias=True)
                  .Conv2DQuant('fc6', 4096, 1, nl=getfcBNReLU, padding='VALID', use_bias=True)
                  .FullyConnected('fc7', out_dim=1000, nl=tf.identity, W_init=variance_scaling_initializer(mode='FAN_IN'))())
    return logits 
Example #13
Source File: basemodel.py    From PReMVOS with MIT License
def resnet_conv5(image, num_block):
    with argscope([Conv2D, BatchNorm], data_format='NCHW'), \
            argscope(Conv2D, nl=tf.identity, use_bias=False), \
            argscope(BatchNorm, use_local_stat=False):
        # 14x14:
        l = resnet_group(image, 'group3', resnet_bottleneck, 512, num_block, stride=2)
        return l 
Example #14
Source File: googlenet_model.py    From LQ-Nets with MIT License
def googlenet_backbone(image, qw=1):
    with argscope(Conv2DQuant, nl=tf.identity, use_bias=False,
                  W_init=variance_scaling_initializer(mode='FAN_IN'),
                  data_format=get_arg_scope()['Conv2D']['data_format'],
                  nbit=qw,
                  is_quant=True if qw > 0 else False):
        logits = (LinearWrap(image)
                  .Conv2DQuant('conv1', 64, 7, stride=2, is_quant=False)
                  .MaxPooling('pool1', shape=3, stride=2, padding='SAME')
                  .BNReLUQuant('pool1/out')
                  .Conv2DQuant('conv2/3x3_reduce', 192, 1, nl=getBNReLUQuant)
                  .Conv2DQuant('conv2/3x3', 192, 3)
                  .MaxPooling('pool2', shape=3, stride=2, padding='SAME')
                  .BNReLUQuant('pool2/out')
                  .apply(inception_block, 'incpetion_3a', 96, 128, 32)
                  .apply(inception_block, 'incpetion_3b', 192, 192, 96, is_last_block=True)
                  .apply(inception_block, 'incpetion_4a', 256, 208, 48)
                  .apply(inception_block, 'incpetion_4b', 224, 224, 64)
                  .apply(inception_block, 'incpetion_4c', 192, 256, 64)
                  .apply(inception_block, 'incpetion_4d', 176, 288, 64)
                  .apply(inception_block, 'incpetion_4e', 384, 320, 128, is_last_block=True)
                  .apply(inception_block, 'incpetion_5a', 384, 320, 128)
                  .apply(inception_block, 'incpetion_5b', 512, 384, 128, is_last_block=True, is_last=True)
                  .GlobalAvgPooling('pool5')
                  .FullyConnected('linear', out_dim=1000, nl=tf.identity)())
    return logits 
Example #15
Source File: model.py    From PReMVOS with MIT License
def maskrcnn_head(feature, num_class):
    """
    Args:
        feature (NxCx7x7):
        num_class (int): num_category + 1

    Returns:
        mask_logits (N x num_category x 14 x 14):
    """
    with argscope([Conv2D, Deconv2D], data_format='NCHW',
                  W_init=tf.variance_scaling_initializer(
                      scale=2.0, mode='fan_in', distribution='normal')):
        l = Deconv2D('deconv', feature, 256, 2, stride=2, nl=tf.nn.relu)
        l = Conv2D('conv', l, num_class - 1, 1)
    return l 
Example #16
Source File: resnet_model.py    From adanet with MIT License
def resnet_backbone(image, num_blocks, group_func, block_func):
    with argscope(Conv2D, use_bias=False,
                  kernel_initializer=tf.variance_scaling_initializer(scale=2.0, mode='fan_out')):
        # Note that this pads the image by [2, 3] instead of [3, 2].
        # Similar things happen in later stride=2 layers as well.
        l = Conv2D('conv0', image, 64, 7, strides=2, activation=BNReLU)
        l = MaxPooling('pool0', l, pool_size=3, strides=2, padding='SAME')
        l = group_func('group0', l, block_func, 64, num_blocks[0], 1)
        l = group_func('group1', l, block_func, 128, num_blocks[1], 2)
        l = group_func('group2', l, block_func, 256, num_blocks[2], 2)
        l = group_func('group3', l, block_func, 512, num_blocks[3], 2)
        l = GlobalAvgPooling('gap', l)
        logits = FullyConnected('linear', l, 1000,
                                kernel_initializer=tf.random_normal_initializer(stddev=0.01))
    return logits, l 
Example #17
Source File: resnet_model.py    From tensorpack with Apache License 2.0
def resnet_backbone(image, num_blocks, group_func, block_func):
    with argscope(Conv2D, use_bias=False,
                  kernel_initializer=tf.variance_scaling_initializer(scale=2.0, mode='fan_out')):
        # Note that TF pads the image by [2, 3] instead of [3, 2].
        # Similar things happen in later stride=2 layers as well.
        l = Conv2D('conv0', image, 64, 7, strides=2, activation=BNReLU)
        l = MaxPooling('pool0', l, pool_size=3, strides=2, padding='SAME')
        l = group_func('group0', l, block_func, 64, num_blocks[0], 1)
        l = group_func('group1', l, block_func, 128, num_blocks[1], 2)
        l = group_func('group2', l, block_func, 256, num_blocks[2], 2)
        l = group_func('group3', l, block_func, 512, num_blocks[3], 2)
        l = GlobalAvgPooling('gap', l)
        logits = FullyConnected('linear', l, 1000,
                                kernel_initializer=tf.random_normal_initializer(stddev=0.01))
    return logits 
Example #18
Source File: resnet_model.py    From tensorpack with Apache License 2.0
def resnet_backbone(image, num_blocks, group_func, block_func):
    with argscope(Conv2D, use_bias=False,
                  kernel_initializer=tf.variance_scaling_initializer(scale=2.0, mode='fan_out')):
        # Note that TF pads the image by [2, 3] instead of [3, 2].
        # Similar things happen in later stride=2 layers as well.
        l = Conv2D('conv0', image, 64, 7, strides=2, activation=BNReLU)
        l = MaxPooling('pool0', l, pool_size=3, strides=2, padding='SAME')
        l = group_func('group0', l, block_func, 64, num_blocks[0], 1)
        l = group_func('group1', l, block_func, 128, num_blocks[1], 2)
        l = group_func('group2', l, block_func, 256, num_blocks[2], 2)
        l = group_func('group3', l, block_func, 512, num_blocks[3], 2)
        l = GlobalAvgPooling('gap', l)
        logits = FullyConnected('linear', l, 1000,
                                kernel_initializer=tf.random_normal_initializer(stddev=0.01))
    return logits 
Example #19
Source File: resnet_model.py    From webvision-2.0-benchmarks with Apache License 2.0
def resnet_backbone(image, num_blocks, group_func, block_func):
    with argscope(Conv2D, nl=tf.identity, use_bias=False,
                  W_init=tf.variance_scaling_initializer(scale=2.0, mode='fan_out')):
        logits = (LinearWrap(image)
                  .Conv2D('conv0', 64, 7, stride=2, nl=BNReLU)
                  .MaxPooling('pool0', shape=3, stride=2, padding='SAME')
                  .apply(group_func, 'group0', block_func, 64, num_blocks[0], 1)
                  .apply(group_func, 'group1', block_func, 128, num_blocks[1], 2)
                  .apply(group_func, 'group2', block_func, 256, num_blocks[2], 2)
                  .apply(group_func, 'group3', block_func, 512, num_blocks[3], 2)
                  .GlobalAvgPooling('gap')
                  .FullyConnected('linear', NUM_CLASSES, nl=tf.identity)())
    return logits 
Example #20
Source File: vgg.py    From ADL with MIT License
def vgg_gap(image, option):
    with argscope(Conv2D, use_bias=True,
                  kernel_initializer=tf.variance_scaling_initializer(scale=2.)), \
         argscope([Conv2D, MaxPooling, BatchNorm, GlobalAvgPooling],
                  data_format='channels_first'):

        l = convnormrelu(image, 'conv1_1', 64)
        if option.gating_position[11]: l = gating_op(l, option)
        l = convnormrelu(l, 'conv1_2', 64)
        if option.gating_position[12]: l = gating_op(l, option)
        l = MaxPooling('pool1', l, 2)
        if option.gating_position[1]: l = gating_op(l, option)

        l = convnormrelu(l, 'conv2_1', 128)
        if option.gating_position[21]: l = gating_op(l, option)
        l = convnormrelu(l, 'conv2_2', 128)
        if option.gating_position[22]: l = gating_op(l, option)
        l = MaxPooling('pool2', l, 2)
        if option.gating_position[2]: l = gating_op(l, option)

        l = convnormrelu(l, 'conv3_1', 256)
        if option.gating_position[31]: l = gating_op(l, option)
        l = convnormrelu(l, 'conv3_2', 256)
        if option.gating_position[32]: l = gating_op(l, option)
        l = convnormrelu(l, 'conv3_3', 256)
        if option.gating_position[33]: l = gating_op(l, option)
        l = MaxPooling('pool3', l, 2)
        if option.gating_position[3]: l = gating_op(l, option)

        l = convnormrelu(l, 'conv4_1', 512)
        if option.gating_position[41]: l = gating_op(l, option)
        l = convnormrelu(l, 'conv4_2', 512)
        if option.gating_position[42]: l = gating_op(l, option)
        l = convnormrelu(l, 'conv4_3', 512)
        if option.gating_position[43]: l = gating_op(l, option)
        l = MaxPooling('pool4', l, 2)
        if option.gating_position[4]: l = gating_op(l, option)

        l = convnormrelu(l, 'conv5_1', 512)
        if option.gating_position[51]: l = gating_op(l, option)
        l = convnormrelu(l, 'conv5_2', 512)
        if option.gating_position[52]: l = gating_op(l, option)
        l = convnormrelu(l, 'conv5_3', 512)
        if option.gating_position[53]: l = gating_op(l, option)

        convmaps = convnormrelu(l, 'new', 1024)
        if option.gating_position[6]: convmaps = gating_op(convmaps, option)

        p_logits = GlobalAvgPooling('gap', convmaps)
        logits = FullyConnected('linear',
                                p_logits, option.number_of_class,
                                kernel_initializer=tf.random_normal_initializer(
                                    stddev=0.01))

    return logits, convmaps