Python tensorpack.tfutils.argscope.get_arg_scope() Examples
The following are 14 code examples of tensorpack.tfutils.argscope.get_arg_scope().
You can go to the original project or source file by following the links above each example, or check out all available functions/classes of the module tensorpack.tfutils.argscope.
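For context before the examples: get_arg_scope() returns the current argument scope as a dict of dicts (dict[layer_name] = {arg: value}), so code nested inside a with argscope(...) block can read back the defaults that block established. The examples below use it almost exclusively to look up the active data_format of Conv2D. A minimal standalone sketch of that interplay (illustrative only, not taken from any of the projects below):

import tensorflow as tf
from tensorpack.models import Conv2D
from tensorpack.tfutils.argscope import argscope, get_arg_scope

with argscope(Conv2D, data_format='NHWC', use_bias=False):
    # Any code running inside the scope can inspect the defaults:
    print(get_arg_scope()['Conv2D'])  # -> {'data_format': 'NHWC', 'use_bias': False}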
Example #1
Source File: alexnet_model.py From LQ-Nets with MIT License
def alexnet_backbone(image, qw=1):
    with argscope(Conv2DQuant, nl=tf.identity, use_bias=False,
                  W_init=tf.random_normal_initializer(stddev=0.01),
                  data_format=get_arg_scope()['Conv2D']['data_format'],
                  nbit=qw):
        logits = (LinearWrap(image)
                  .Conv2DQuant('conv1', 96, 11, stride=4, is_quant=False, padding='VALID')
                  .MaxPooling('pool1', shape=3, stride=2, padding='VALID')
                  .BNReLUQuant('bnquant2')
                  .Conv2DQuant('conv2', 256, 5)
                  .MaxPooling('pool2', shape=3, stride=2, padding='VALID')
                  .BNReLUQuant('bnquant3')
                  .Conv2DQuant('conv3', 384, 3, nl=getBNReLUQuant)
                  .Conv2DQuant('conv4', 384, 3, nl=getBNReLUQuant)
                  .Conv2DQuant('conv5', 256, 3)
                  .MaxPooling('pool5', shape=3, stride=2, padding='VALID')
                  .BNReLUQuant('bnquant6')
                  .Conv2DQuant('fc6', 4096, 6, nl=getfcBNReLUQuant, padding='VALID',
                               W_init=tf.random_normal_initializer(stddev=0.005), use_bias=True)
                  .Conv2DQuant('fc7', 4096, 1, nl=getfcBNReLU, padding='VALID',
                               W_init=tf.random_normal_initializer(stddev=0.005), use_bias=True)
                  .FullyConnected('fc8', out_dim=1000, nl=tf.identity,
                                  W_init=tf.random_normal_initializer(stddev=0.01))())
    return logits
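A note on the LinearWrap idiom above (it recurs throughout these examples): LinearWrap chains registered tensorpack layers fluently, .apply() splices in an ordinary function, and the trailing () unwraps the final tensor. Roughly, as an illustrative expansion:

# (LinearWrap(image).Conv2DQuant('conv1', ...).MaxPooling('pool1', ...)...())
# behaves like:
#   t = Conv2DQuant('conv1', image, ...)
#   t = MaxPooling('pool1', t, ...)
#   ...
#   logits = t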
Example #2
Source File: densenet_model.py From LQ-Nets with MIT License
def densenet_backbone(image, qw=1):
    with argscope(Conv2DQuant, nl=tf.identity, use_bias=False,
                  W_init=variance_scaling_initializer(mode='FAN_IN'),
                  data_format=get_arg_scope()['Conv2D']['data_format'],
                  nbit=qw,
                  is_quant=True if qw > 0 else False):
        logits = (LinearWrap(image)
                  .Conv2DQuant('conv1', 2 * GROWTH_RATE, 7, stride=2, nl=BNReLU, is_quant=False)
                  .MaxPooling('pool1', shape=3, stride=2, padding='SAME')  # 56
                  .apply(add_dense_block, 'block0', 6)  # 28
                  .apply(add_dense_block, 'block1', 12)  # 14
                  .apply(add_dense_block, 'block2', 24)  # 7
                  .apply(add_dense_block, 'block3', 16, last=True)
                  .BNReLU('bnrelu_last')
                  .GlobalAvgPooling('gap')
                  .FullyConnected('linear', out_dim=1000, nl=tf.identity,
                                  W_init=variance_scaling_initializer(mode='FAN_IN'))())
    return logits
Example #3
Source File: googlenet_model.py From LQ-Nets with MIT License
def inception_block(l, name, ch_1x1, ch_3x3, ch_5x5, is_last_block=False, is_last=False):
    data_format = get_arg_scope()['Conv2DQuant']['data_format']
    with tf.variable_scope(name):
        conv1x1 = Conv2DQuant('1x1', l, ch_1x1, 1,
                              nl=getBNReLUQuant if not is_last_block else tf.identity)
        conv3x3_reduce = Conv2DQuant('3x3_reduce', l, ch_3x3, 1, nl=getBNReLUQuant)
        conv3x3 = Conv2DQuant('3x3', conv3x3_reduce, ch_3x3, 3,
                              nl=getBNReLUQuant if not is_last_block else tf.identity)
        conv5x5_reduce = Conv2DQuant('5x5_reduce', l, ch_5x5, 1, nl=getBNReLUQuant)
        conv5x5 = Conv2DQuant('5x5', conv5x5_reduce, ch_5x5, 5,
                              nl=getBNReLUQuant if not is_last_block else tf.identity)
        if is_last_block and not is_last:
            conv1x1 = MaxPooling('pool_1x1', conv1x1, shape=3, stride=2, padding='SAME')
            conv1x1 = BNReLU('conv1x1_bn', conv1x1)
            conv1x1 = QuantizedActiv('conv1x1_quant', conv1x1)
            conv3x3 = MaxPooling('pool_3x3', conv3x3, shape=3, stride=2, padding='SAME')
            conv3x3 = BNReLU('conv3x3_bn', conv3x3)
            conv3x3 = QuantizedActiv('conv3x3_quant', conv3x3)
            conv5x5 = MaxPooling('pool_5x5', conv5x5, shape=3, stride=2, padding='SAME')
            conv5x5 = BNReLU('conv5x5_bn', conv5x5)
            conv5x5 = QuantizedActiv('conv5x5_quant', conv5x5)
        l = tf.concat([conv1x1, conv3x3, conv5x5],
                      1 if data_format == 'NCHW' else 3, name='concat')
        if is_last:
            l = BNReLU('output_bn', l)
    return l
Example #4
Source File: resnet_model.py From adanet with MIT License
def se_resnet_bottleneck(l, ch_out, stride):
    shortcut = l
    l = Conv2D('conv1', l, ch_out, 1, activation=BNReLU)
    l = Conv2D('conv2', l, ch_out, 3, strides=stride, activation=BNReLU)
    l = Conv2D('conv3', l, ch_out * 4, 1, activation=get_bn(zero_init=True))

    squeeze = GlobalAvgPooling('gap', l)
    squeeze = FullyConnected('fc1', squeeze, ch_out // 4, activation=tf.nn.relu)
    squeeze = FullyConnected('fc2', squeeze, ch_out * 4, activation=tf.nn.sigmoid)
    data_format = get_arg_scope()['Conv2D']['data_format']
    ch_ax = 1 if data_format in ['NCHW', 'channels_first'] else 3
    shape = [-1, 1, 1, 1]
    shape[ch_ax] = ch_out * 4
    l = l * tf.reshape(squeeze, shape)
    out = l + resnet_shortcut(shortcut, ch_out * 4, stride, activation=get_bn(zero_init=False))
    return tf.nn.relu(out)
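Note that get_arg_scope() is consulted here only to find the channel axis, so the squeeze-and-excitation gates can be broadcast over the spatial axes. A hypothetical shape trace, assuming NHWC inputs and ch_out = 64:

# l:       [N, H, W, 256]
# squeeze: [N, 256]            after GlobalAvgPooling and the two FC layers
# ch_ax == 3, so shape == [-1, 1, 1, 256]
# tf.reshape(squeeze, shape) -> [N, 1, 1, 256]
# l * reshaped squeeze       -> one sigmoid gate per channel, broadcast over H and W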
Example #5
Source File: resnet_model.py From tensorpack with Apache License 2.0
def se_bottleneck(l, ch_out, stride):
    shortcut = l
    l = Conv2D('conv1', l, ch_out, 1, activation=BNReLU)
    l = Conv2D('conv2', l, ch_out, 3, strides=stride, activation=BNReLU)
    l = Conv2D('conv3', l, ch_out * 4, 1, activation=get_bn(zero_init=True))

    squeeze = GlobalAvgPooling('gap', l)
    squeeze = FullyConnected('fc1', squeeze, ch_out // 4, activation=tf.nn.relu)
    squeeze = FullyConnected('fc2', squeeze, ch_out * 4, activation=tf.nn.sigmoid)
    data_format = get_arg_scope()['Conv2D']['data_format']
    ch_ax = 1 if data_format in ['NCHW', 'channels_first'] else 3
    shape = [-1, 1, 1, 1]
    shape[ch_ax] = ch_out * 4
    l = l * tf.reshape(squeeze, shape)
    out = l + resnet_shortcut(shortcut, ch_out * 4, stride, activation=get_bn(zero_init=False))
    return tf.nn.relu(out)
Example #6
Source File: resnet_model.py From webvision-2.0-benchmarks with Apache License 2.0
def resnet_shortcut(l, n_out, stride, nl=tf.identity):
    data_format = get_arg_scope()['Conv2D']['data_format']
    n_in = l.get_shape().as_list()[1 if data_format == 'NCHW' else 3]
    if n_in != n_out:  # change dimension when channel is not the same
        return Conv2D('convshortcut', l, n_out, 1, stride=stride, nl=nl)
    else:
        return l
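These helpers assume they run inside an argscope that has set data_format for Conv2D; outside any scope the ['data_format'] lookup fails, since get_arg_scope() then returns an empty mapping. If a fallback were acceptable, a defensive variant (hypothetical, not from the project) could read:

data_format = get_arg_scope()['Conv2D'].get('data_format', 'NHWC')  # assume channels-last when unset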
Example #7
Source File: resnet_model.py From webvision-2.0-benchmarks with Apache License 2.0
def se_resnet_bottleneck(l, ch_out, stride):
    shortcut = l
    l = Conv2D('conv1', l, ch_out, 1, nl=BNReLU)
    l = Conv2D('conv2', l, ch_out, 3, stride=stride, nl=BNReLU)
    l = Conv2D('conv3', l, ch_out * 4, 1, nl=get_bn(zero_init=True))

    squeeze = GlobalAvgPooling('gap', l)
    squeeze = FullyConnected('fc1', squeeze, ch_out // 4, nl=tf.nn.relu)
    squeeze = FullyConnected('fc2', squeeze, ch_out * 4, nl=tf.nn.sigmoid)
    data_format = get_arg_scope()['Conv2D']['data_format']
    ch_ax = 1 if data_format == 'NCHW' else 3
    shape = [-1, 1, 1, 1]
    shape[ch_ax] = ch_out * 4
    l = l * tf.reshape(squeeze, shape)
    return l + resnet_shortcut(shortcut, ch_out * 4, stride, nl=get_bn(zero_init=False))
Example #8
Source File: googlenet_model.py From LQ-Nets with MIT License
def googlenet_backbone(image, qw=1):
    with argscope(Conv2DQuant, nl=tf.identity, use_bias=False,
                  W_init=variance_scaling_initializer(mode='FAN_IN'),
                  data_format=get_arg_scope()['Conv2D']['data_format'],
                  nbit=qw,
                  is_quant=True if qw > 0 else False):
        logits = (LinearWrap(image)
                  .Conv2DQuant('conv1', 64, 7, stride=2, is_quant=False)
                  .MaxPooling('pool1', shape=3, stride=2, padding='SAME')
                  .BNReLUQuant('pool1/out')
                  .Conv2DQuant('conv2/3x3_reduce', 192, 1, nl=getBNReLUQuant)
                  .Conv2DQuant('conv2/3x3', 192, 3)
                  .MaxPooling('pool2', shape=3, stride=2, padding='SAME')
                  .BNReLUQuant('pool2/out')
                  .apply(inception_block, 'incpetion_3a', 96, 128, 32)
                  .apply(inception_block, 'incpetion_3b', 192, 192, 96, is_last_block=True)
                  .apply(inception_block, 'incpetion_4a', 256, 208, 48)
                  .apply(inception_block, 'incpetion_4b', 224, 224, 64)
                  .apply(inception_block, 'incpetion_4c', 192, 256, 64)
                  .apply(inception_block, 'incpetion_4d', 176, 288, 64)
                  .apply(inception_block, 'incpetion_4e', 384, 320, 128, is_last_block=True)
                  .apply(inception_block, 'incpetion_5a', 384, 320, 128)
                  .apply(inception_block, 'incpetion_5b', 512, 384, 128, is_last_block=True, is_last=True)
                  .GlobalAvgPooling('pool5')
                  .FullyConnected('linear', out_dim=1000, nl=tf.identity)())
    return logits
Example #9
Source File: vgg_model.py From LQ-Nets with MIT License
def vgg_backbone(image, qw=1):
    with argscope(Conv2DQuant, nl=tf.identity, use_bias=False,
                  W_init=variance_scaling_initializer(mode='FAN_IN'),
                  data_format=get_arg_scope()['Conv2D']['data_format'],
                  nbit=qw):
        logits = (LinearWrap(image)
                  .Conv2DQuant('conv1', 96, 7, stride=2, nl=tf.nn.relu, is_quant=False)
                  .MaxPooling('pool1', shape=2, stride=2, padding='VALID')  # 56
                  .BNReLUQuant('bnquant2_0')
                  .Conv2DQuant('conv2_1', 256, 3, nl=getBNReLUQuant)
                  .Conv2DQuant('conv2_2', 256, 3, nl=getBNReLUQuant)
                  .Conv2DQuant('conv2_3', 256, 3)
                  .MaxPooling('pool2', shape=2, stride=2, padding='VALID')  # 28
                  .BNReLUQuant('bnquant3_0')
                  .Conv2DQuant('conv3_1', 512, 3, nl=getBNReLUQuant)
                  .Conv2DQuant('conv3_2', 512, 3, nl=getBNReLUQuant)
                  .Conv2DQuant('conv3_3', 512, 3)
                  .MaxPooling('pool3', shape=2, stride=2, padding='VALID')  # 14
                  .BNReLUQuant('bnquant4_0')
                  .Conv2DQuant('conv4_1', 512, 3, nl=getBNReLUQuant)
                  .Conv2DQuant('conv4_2', 512, 3, nl=getBNReLUQuant)
                  .Conv2DQuant('conv4_3', 512, 3)
                  .MaxPooling('pool4', shape=2, stride=2, padding='VALID')  # 7
                  .BNReLUQuant('bnquant5')
                  .Conv2DQuant('fc5', 4096, 7, nl=getfcBNReLUQuant, padding='VALID', use_bias=True)
                  .Conv2DQuant('fc6', 4096, 1, nl=getfcBNReLU, padding='VALID', use_bias=True)
                  .FullyConnected('fc7', out_dim=1000, nl=tf.identity,
                                  W_init=variance_scaling_initializer(mode='FAN_IN'))())
    return logits
Example #10
Source File: resnet_model.py From LQ-Nets with MIT License
def resnet_backbone(image, num_blocks, group_func, block_func, qw=1):
    with argscope(Conv2DQuant, nl=tf.identity, use_bias=False,
                  W_init=variance_scaling_initializer(mode='FAN_OUT'),
                  data_format=get_arg_scope()['Conv2D']['data_format'],
                  nbit=qw):
        logits = (LinearWrap(image)
                  .Conv2DQuant('conv0', 64, 7, stride=2, nl=BNReLU, is_quant=False)
                  .MaxPooling('pool0', shape=3, stride=2, padding='SAME')
                  .apply(group_func, 'group0', block_func, 64, num_blocks[0], 1)
                  .apply(group_func, 'group1', block_func, 128, num_blocks[1], 2)
                  .apply(group_func, 'group2', block_func, 256, num_blocks[2], 2)
                  .apply(group_func, 'group3', block_func, 512, num_blocks[3], 2, is_last=True)
                  .GlobalAvgPooling('gap')
                  .FullyConnected('linear', 1000, nl=tf.identity)())
    return logits
Example #11
Source File: resnet.py From ADL with MIT License
def is_data_format_nchw():
    data_format = get_arg_scope()['Conv2D']['data_format']
    return data_format in ['NCHW', 'channels_first']
Example #12
Source File: basemodel.py From PReMVOS with MIT License
def resnet_shortcut(l, n_out, stride, nl=tf.identity):
    data_format = get_arg_scope()['Conv2D']['data_format']
    n_in = l.get_shape().as_list()[1 if data_format == 'NCHW' else 3]
    if n_in != n_out:  # change dimension when channel is not the same
        if stride == 2:
            l = l[:, :, :-1, :-1]
            return Conv2D('convshortcut', l, n_out, 1, stride=stride, padding='VALID', nl=nl)
        else:
            return Conv2D('convshortcut', l, n_out, 1, stride=stride, nl=nl)
    else:
        return l
Example #13
Source File: resnet_model.py From adanet with MIT License
def resnet_shortcut(l, n_out, stride, activation=tf.identity):
    data_format = get_arg_scope()['Conv2D']['data_format']
    n_in = l.get_shape().as_list()[1 if data_format in ['NCHW', 'channels_first'] else 3]
    if n_in != n_out:  # change dimension when channel is not the same
        return Conv2D('convshortcut', l, n_out, 1, strides=stride, activation=activation)
    else:
        return l
Example #14
Source File: resnet_model.py From tensorpack with Apache License 2.0
def resnet_shortcut(l, n_out, stride, activation=tf.identity):
    data_format = get_arg_scope()['Conv2D']['data_format']
    n_in = l.get_shape().as_list()[1 if data_format in ['NCHW', 'channels_first'] else 3]
    if n_in != n_out:  # change dimension when channel is not the same
        return Conv2D('convshortcut', l, n_out, 1, strides=stride, activation=activation)
    else:
        return l
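Finally, all of the backbones and helpers above expect to be invoked under an argscope that pins data_format (plus any other shared defaults the model needs). A sketch of a typical call site, where resnet_group and resnet_bottleneck stand in for the project's group/block functions (hypothetical names; the actual training scripts do something similar):

from tensorpack.models import Conv2D, GlobalAvgPooling, MaxPooling
from tensorpack.tfutils.argscope import argscope

with argscope([Conv2D, MaxPooling, GlobalAvgPooling], data_format='NCHW'):
    # ResNet-50-style block counts; qw selects the quantization bit width.
    logits = resnet_backbone(image, [3, 4, 6, 3], resnet_group, resnet_bottleneck, qw=2)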