Python tensorpack.models.Conv2D() Examples
The following are 24 code examples of tensorpack.models.Conv2D(), drawn from open-source projects. The source file and project for each are noted above the example. You may also want to check out all available functions and classes of the tensorpack.models module.
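Before the examples, a minimal sketch of the calling convention these snippets rely on: tensorpack layers are registered functions whose first argument is a variable-scope name and second is the input tensor. The input shape and parameters below are illustrative only, not taken from any of the projects.

import tensorflow as tf
from tensorpack.models import Conv2D

# TF1-style graph mode, as used by all examples on this page.
image = tf.placeholder(tf.float32, [None, 224, 224, 3], name='image')
# 'conv0' becomes the variable scope; the remaining kwargs mirror tf.layers.Conv2D.
l = Conv2D('conv0', image, filters=64, kernel_size=7, strides=2,
           activation=tf.nn.relu)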
Example #1
Source File: backbone.py From tensorpack with Apache License 2.0
def resnet_c4_backbone(image, num_blocks):
    assert len(num_blocks) == 3
    freeze_at = cfg.BACKBONE.FREEZE_AT
    with backbone_scope(freeze=freeze_at > 0):
        l = tf.pad(image, [[0, 0], [0, 0],
                           maybe_reverse_pad(2, 3), maybe_reverse_pad(2, 3)])
        l = Conv2D('conv0', l, 64, 7, strides=2, padding='VALID')
        l = tf.pad(l, [[0, 0], [0, 0],
                       maybe_reverse_pad(0, 1), maybe_reverse_pad(0, 1)])
        l = MaxPooling('pool0', l, 3, strides=2, padding='VALID')
    with backbone_scope(freeze=freeze_at > 1):
        c2 = resnet_group('group0', l, resnet_bottleneck, 64, num_blocks[0], 1)
    with backbone_scope(freeze=False):
        c3 = resnet_group('group1', c2, resnet_bottleneck, 128, num_blocks[1], 2)
        c4 = resnet_group('group2', c3, resnet_bottleneck, 256, num_blocks[2], 2)
    # 16x downsampling up to now
    return c4
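The helper maybe_reverse_pad used above is not shown on this page. A sketch consistent with how it is called here, hedged as an assumption about the tensorpack FasterRCNN example (cfg.BACKBONE.TF_PAD_MODE is that example's config flag):

def maybe_reverse_pad(topleft, bottomright):
    # TF's SAME padding puts the extra pixel on the bottom/right; the
    # Caffe-compatible mode reverses the order of the two pad amounts.
    if cfg.BACKBONE.TF_PAD_MODE:
        return [topleft, bottomright]
    return [bottomright, topleft]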
Example #2
Source File: resnet_model.py From tensorpack with Apache License 2.0
def se_bottleneck(l, ch_out, stride):
    shortcut = l
    l = Conv2D('conv1', l, ch_out, 1, activation=BNReLU)
    l = Conv2D('conv2', l, ch_out, 3, strides=stride, activation=BNReLU)
    l = Conv2D('conv3', l, ch_out * 4, 1, activation=get_bn(zero_init=True))

    squeeze = GlobalAvgPooling('gap', l)
    squeeze = FullyConnected('fc1', squeeze, ch_out // 4, activation=tf.nn.relu)
    squeeze = FullyConnected('fc2', squeeze, ch_out * 4, activation=tf.nn.sigmoid)
    data_format = get_arg_scope()['Conv2D']['data_format']
    ch_ax = 1 if data_format in ['NCHW', 'channels_first'] else 3
    shape = [-1, 1, 1, 1]
    shape[ch_ax] = ch_out * 4
    l = l * tf.reshape(squeeze, shape)

    out = l + resnet_shortcut(shortcut, ch_out * 4, stride,
                              activation=get_bn(zero_init=False))
    return tf.nn.relu(out)
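Several of these blocks pass get_bn(...) as the conv activation. That helper is not shown on this page; a sketch consistent with the call sites (an assumption modeled on tensorpack's resnet_model.py) is:

def get_bn(zero_init=False):
    # Zero-initializing gamma makes the residual branch start as an identity
    # mapping, which stabilizes early training of deep ResNets.
    if zero_init:
        return lambda x, name=None: BatchNorm('bn', x, gamma_initializer=tf.zeros_initializer())
    else:
        return lambda x, name=None: BatchNorm('bn', x)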
Example #3
Source File: resnet.py From ADL with MIT License
def se_resnet_bottleneck(option, l, ch_out, stride, adl_index=None):
    shortcut = l
    l = Conv2D('conv1', l, ch_out, 1, activation=BNReLU)
    l = Conv2D('conv2', l, ch_out, 3, strides=stride, activation=BNReLU)
    l = Conv2D('conv3', l, ch_out * 4, 1, activation=get_bn(zero_init=True))

    squeeze = GlobalAvgPooling('gap', l)
    squeeze = FullyConnected('fc1', squeeze, ch_out // 4, activation=tf.nn.relu)
    squeeze = FullyConnected('fc2', squeeze, ch_out * 4, activation=tf.nn.sigmoid)
    ch_ax = 1 if is_data_format_nchw() else 3
    shape = [-1, 1, 1, 1]
    shape[ch_ax] = ch_out * 4
    l = l * tf.reshape(squeeze, shape)

    out = l + resnet_shortcut(shortcut, ch_out * 4, stride, activation=get_bn())
    out = tf.nn.relu(out)
    if option.gating_position[adl_index]:
        out = gating_op(out, option)
    return out
Example #4
Source File: model_frcnn.py From tensorpack with Apache License 2.0
def fastrcnn_Xconv1fc_head(feature, num_convs, norm=None):
    """
    Args:
        feature (NCHW):
        num_convs (int): number of conv layers
        norm (str or None): either None or 'GN'

    Returns:
        2D head feature
    """
    assert norm in [None, 'GN'], norm
    l = feature
    with argscope(Conv2D, data_format='channels_first',
                  kernel_initializer=tf.variance_scaling_initializer(
                      scale=2.0, mode='fan_out',
                      distribution='untruncated_normal'
                      if get_tf_version_tuple() >= (1, 12) else 'normal')):
        for k in range(num_convs):
            l = Conv2D('conv{}'.format(k), l, cfg.FPN.FRCNN_CONV_HEAD_DIM, 3,
                       activation=tf.nn.relu)
            if norm is not None:
                l = GroupNorm('gn{}'.format(k), l)
        l = FullyConnected('fc', l, cfg.FPN.FRCNN_FC_HEAD_DIM,
                           kernel_initializer=tf.variance_scaling_initializer(),
                           activation=tf.nn.relu)
    return l
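argscope, used heavily in these examples, installs default keyword arguments for the wrapped layers inside the with-block. A minimal illustration (the layer names and sizes are hypothetical):

with argscope(Conv2D, use_bias=False, activation=tf.nn.relu):
    l = Conv2D('c1', l, 64, 3)                 # inherits use_bias=False and relu
    l = Conv2D('c2', l, 64, 3, use_bias=True)  # explicit kwargs override the scope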
Example #5
Source File: model_rpn.py From tensorpack with Apache License 2.0
def rpn_head(featuremap, channel, num_anchors):
    """
    Returns:
        label_logits: fHxfWxNA
        box_logits: fHxfWxNAx4
    """
    with argscope(Conv2D, data_format='channels_first',
                  kernel_initializer=tf.random_normal_initializer(stddev=0.01)):
        hidden = Conv2D('conv0', featuremap, channel, 3, activation=tf.nn.relu)

        label_logits = Conv2D('class', hidden, num_anchors, 1)
        box_logits = Conv2D('box', hidden, 4 * num_anchors, 1)
        # 1, NA(*4), im/16, im/16 (NCHW)

        label_logits = tf.transpose(label_logits, [0, 2, 3, 1])  # 1xfHxfWxNA
        label_logits = tf.squeeze(label_logits, 0)  # fHxfWxNA

        shp = tf.shape(box_logits)  # 1x(NAx4)xfHxfW
        box_logits = tf.transpose(box_logits, [0, 2, 3, 1])  # 1xfHxfWx(NAx4)
        box_logits = tf.reshape(box_logits,
                                tf.stack([shp[2], shp[3], num_anchors, 4]))  # fHxfWxNAx4
    return label_logits, box_logits
Example #6
Source File: graph.py From hover_net with MIT License
def encoder(i, freeze):
    """
    Pre-activated ResNet50 Encoder
    """
    d1 = Conv2D('conv0', i, 64, 7, padding='valid', strides=1, activation=BNReLU)
    d1 = res_blk('group0', d1, [64, 64, 256], [1, 3, 1], 3, strides=1, freeze=freeze)

    d2 = res_blk('group1', d1, [128, 128, 512], [1, 3, 1], 4, strides=2, freeze=freeze)
    d2 = tf.stop_gradient(d2) if freeze else d2

    d3 = res_blk('group2', d2, [256, 256, 1024], [1, 3, 1], 6, strides=2, freeze=freeze)
    d3 = tf.stop_gradient(d3) if freeze else d3

    d4 = res_blk('group3', d3, [512, 512, 2048], [1, 3, 1], 3, strides=2, freeze=freeze)
    d4 = tf.stop_gradient(d4) if freeze else d4

    d4 = Conv2D('conv_bot', d4, 1024, 1, padding='same')
    return [d1, d2, d3, d4]
####
Example #7
Source File: graph.py From hover_net with MIT License
def dense_blk(name, l, ch, ksize, count, split=1, padding='valid'):
    with tf.variable_scope(name):
        for i in range(0, count):
            with tf.variable_scope('blk/' + str(i)):
                x = BNReLU('preact_bna', l)
                x = Conv2D('conv1', x, ch[0], ksize[0], padding=padding, activation=BNReLU)
                x = Conv2D('conv2', x, ch[1], ksize[1], padding=padding, split=split)
                ##
                if padding == 'valid':
                    x_shape = x.get_shape().as_list()
                    l_shape = l.get_shape().as_list()
                    l = crop_op(l, (l_shape[2] - x_shape[2],
                                    l_shape[3] - x_shape[3]))
                l = tf.concat([l, x], axis=1)
        l = BNReLU('blk_bna', l)
    return l
####
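crop_op is not defined on this page. A center-crop sketch consistent with how dense_blk calls it, hedged as an assumption about hover_net's helper (it assumes NCHW tensors and strictly positive crop amounts):

def crop_op(x, cropping, data_format='channels_first'):
    # Center-crop the two spatial dims by the given (height, width) amounts.
    crop_t = cropping[0] // 2
    crop_b = cropping[0] - crop_t
    crop_l = cropping[1] // 2
    crop_r = cropping[1] - crop_l
    if data_format == 'channels_first':
        return x[:, :, crop_t:-crop_b, crop_l:-crop_r]
    return x[:, crop_t:-crop_b, crop_l:-crop_r, :]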
Example #8
Source File: graph.py From hover_net with MIT License
def res_blk(name, l, ch, ksize, count, split=1, strides=1, freeze=False):
    ch_in = l.get_shape().as_list()
    with tf.variable_scope(name):
        for i in range(0, count):
            with tf.variable_scope('block' + str(i)):
                x = l if i == 0 else BNReLU('preact', l)
                x = Conv2D('conv1', x, ch[0], ksize[0], activation=BNReLU)
                x = Conv2D('conv2', x, ch[1], ksize[1], split=split,
                           strides=strides if i == 0 else 1, activation=BNReLU)
                x = Conv2D('conv3', x, ch[2], ksize[2], activation=tf.identity)
                if (strides != 1 or ch_in[1] != ch[2]) and i == 0:
                    l = Conv2D('convshortcut', l, ch[2], 1, strides=strides)
                x = tf.stop_gradient(x) if freeze else x
                l = l + x
        # the end of each group needs an extra activation
        l = BNReLU('bnlast', l)
    return l
####
Example #9
Source File: backbone.py From tensorpack with Apache License 2.0
def resnet_bottleneck(l, ch_out, stride):
    shortcut = l
    if cfg.BACKBONE.STRIDE_1X1:
        if stride == 2:
            l = l[:, :, :-1, :-1]
        l = Conv2D('conv1', l, ch_out, 1, strides=stride)
        l = Conv2D('conv2', l, ch_out, 3, strides=1)
    else:
        l = Conv2D('conv1', l, ch_out, 1, strides=1)
        if stride == 2:
            l = tf.pad(l, [[0, 0], [0, 0],
                           maybe_reverse_pad(0, 1), maybe_reverse_pad(0, 1)])
            l = Conv2D('conv2', l, ch_out, 3, strides=2, padding='VALID')
        else:
            l = Conv2D('conv2', l, ch_out, 3, strides=stride)
    if cfg.BACKBONE.NORM != 'None':
        l = Conv2D('conv3', l, ch_out * 4, 1, activation=get_norm(zero_init=True))
    else:
        l = Conv2D('conv3', l, ch_out * 4, 1, activation=tf.identity,
                   kernel_initializer=tf.constant_initializer())
    ret = l + resnet_shortcut(shortcut, ch_out * 4, stride,
                              activation=get_norm(zero_init=False))
    return tf.nn.relu(ret, name='output')
Example #10
Source File: backbone.py From tensorpack with Apache License 2.0
def resnet_shortcut(l, n_out, stride, activation=tf.identity):
    n_in = l.shape[1]
    if n_in != n_out:   # change dimension when channel is not the same
        # TF's SAME mode outputs ceil(x / stride), which is NOT what we want
        # when x is odd and stride is 2. In FPN mode, the images are pre-padded already.
        if not cfg.MODE_FPN and stride == 2:
            l = l[:, :, :-1, :-1]
        return Conv2D('convshortcut', l, n_out, 1,
                      strides=stride, activation=activation)
    else:
        return l
Example #11
Source File: backbone.py From tensorpack with Apache License 2.0
@contextmanager   # used as a context manager: see resnet_c4_backbone above
def backbone_scope(freeze):
    """
    Args:
        freeze (bool): whether to freeze all the variables under the scope
    """
    def nonlin(x):
        x = get_norm()(x)
        return tf.nn.relu(x)

    with argscope([Conv2D, MaxPooling, BatchNorm], data_format='channels_first'), \
            argscope(Conv2D, use_bias=False, activation=nonlin,
                     kernel_initializer=tf.variance_scaling_initializer(
                         scale=2.0, mode='fan_out')), \
            ExitStack() as stack:
        if cfg.BACKBONE.NORM in ['FreezeBN', 'SyncBN']:
            if freeze or cfg.BACKBONE.NORM == 'FreezeBN':
                stack.enter_context(argscope(BatchNorm, training=False))
            else:
                stack.enter_context(argscope(
                    BatchNorm, sync_statistics='nccl'
                    if cfg.TRAINER == 'replicated' else 'horovod'))

        if freeze:
            stack.enter_context(freeze_variables(stop_gradient=False, skip_collection=True))
        else:
            # the layers are not completely frozen, but we may want to freeze only the affine params
            if cfg.BACKBONE.FREEZE_AFFINE:
                stack.enter_context(custom_getter_scope(freeze_affine_getter))
        yield
Example #12
Source File: resnet_model.py From tensorpack with Apache License 2.0
def resnet_backbone(image, num_blocks, group_func, block_func):
    with argscope(Conv2D, use_bias=False,
                  kernel_initializer=tf.variance_scaling_initializer(scale=2.0, mode='fan_out')):
        # Note that TF pads the image by [2, 3] instead of [3, 2].
        # Similar things happen in later stride=2 layers as well.
        l = Conv2D('conv0', image, 64, 7, strides=2, activation=BNReLU)
        l = MaxPooling('pool0', l, pool_size=3, strides=2, padding='SAME')
        l = group_func('group0', l, block_func, 64, num_blocks[0], 1)
        l = group_func('group1', l, block_func, 128, num_blocks[1], 2)
        l = group_func('group2', l, block_func, 256, num_blocks[2], 2)
        l = group_func('group3', l, block_func, 512, num_blocks[3], 2)
        l = GlobalAvgPooling('gap', l)
        logits = FullyConnected('linear', l, 1000,
                                kernel_initializer=tf.random_normal_initializer(stddev=0.01))
    return logits
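group_func is supplied by the caller; resnet_group itself does not appear on this page. A minimal sketch consistent with these call sites (name, input, block function, base width, block count, stride of the first block), hedged as an assumption since the real implementation may differ:

def resnet_group(name, l, block_func, features, count, stride):
    with tf.variable_scope(name):
        for i in range(count):
            with tf.variable_scope('block{}'.format(i)):
                # only the first block of a group downsamples
                l = block_func(l, features, stride if i == 0 else 1)
    return l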
Example #13
Source File: resnet_model.py From tensorpack with Apache License 2.0
def resnext32x4d_bottleneck(l, ch_out, stride):
    shortcut = l
    l = Conv2D('conv1', l, ch_out * 2, 1, strides=1, activation=BNReLU)
    l = Conv2D('conv2', l, ch_out * 2, 3, strides=stride, activation=BNReLU, split=32)
    l = Conv2D('conv3', l, ch_out * 4, 1, activation=get_bn(zero_init=True))
    out = l + resnet_shortcut(shortcut, ch_out * 4, stride,
                              activation=get_bn(zero_init=False))
    return tf.nn.relu(out)
Example #14
Source File: myconv2d.py From ghostnet with Apache License 2.0
def SELayer(x, out_dim, ratio):
    squeeze = utils.spatial_mean(x, keep_dims=True, scope='global_pool')
    excitation = Conv2D('fc1', squeeze, int(out_dim / ratio), 1, strides=1,
                        kernel_initializer=kernel_initializer,
                        data_format='NHWC', activation=None)
    excitation = tf.nn.relu(excitation, name='relu')
    excitation = Conv2D('fc2', excitation, out_dim, 1, strides=1,
                        kernel_initializer=kernel_initializer,
                        data_format='NHWC', activation=None)
    excitation = tf.clip_by_value(excitation, 0, 1, name='hsigmoid')
    scale = x * excitation
    return scale
Example #15
Source File: resnet_model.py From tensorpack with Apache License 2.0
def resnet_bottleneck(l, ch_out, stride, stride_first=False):
    """
    stride_first: the original ResNet puts the stride on the first conv;
    fb.resnet.torch puts it on the second conv.
    """
    shortcut = l
    l = Conv2D('conv1', l, ch_out, 1, strides=stride if stride_first else 1, activation=BNReLU)
    l = Conv2D('conv2', l, ch_out, 3, strides=1 if stride_first else stride, activation=BNReLU)
    l = Conv2D('conv3', l, ch_out * 4, 1, activation=get_bn(zero_init=True))
    out = l + resnet_shortcut(shortcut, ch_out * 4, stride,
                              activation=get_bn(zero_init=False))
    return tf.nn.relu(out)
Example #16
Source File: resnet_model.py From tensorpack with Apache License 2.0
def preact_bottleneck(l, ch_out, stride, preact):
    # stride is applied on the second conv, following fb.resnet.torch
    l, shortcut = apply_preactivation(l, preact)
    l = Conv2D('conv1', l, ch_out, 1, activation=BNReLU)
    l = Conv2D('conv2', l, ch_out, 3, strides=stride, activation=BNReLU)
    l = Conv2D('conv3', l, ch_out * 4, 1)
    return l + resnet_shortcut(shortcut, ch_out * 4, stride)
Example #17
Source File: resnet_model.py From tensorpack with Apache License 2.0
def preact_basicblock(l, ch_out, stride, preact):
    l, shortcut = apply_preactivation(l, preact)
    l = Conv2D('conv1', l, ch_out, 3, strides=stride, activation=BNReLU)
    l = Conv2D('conv2', l, ch_out, 3)
    return l + resnet_shortcut(shortcut, ch_out, stride)
Example #18
Source File: resnet_model.py From tensorpack with Apache License 2.0
def resnet_shortcut(l, n_out, stride, activation=tf.identity):
    data_format = get_arg_scope()['Conv2D']['data_format']
    n_in = l.get_shape().as_list()[1 if data_format in ['NCHW', 'channels_first'] else 3]
    if n_in != n_out:   # change dimension when channel is not the same
        return Conv2D('convshortcut', l, n_out, 1, strides=stride, activation=activation)
    else:
        return l
Example #19
Source File: resnet_model.py From tensorpack with Apache License 2.0
def resnet_basicblock(l, ch_out, stride):
    shortcut = l
    l = Conv2D('conv1', l, ch_out, 3, strides=stride, activation=BNReLU)
    l = Conv2D('conv2', l, ch_out, 3, activation=get_bn(zero_init=True))
    out = l + resnet_shortcut(shortcut, ch_out, stride,
                              activation=get_bn(zero_init=False))
    return tf.nn.relu(out)
Example #20
Source File: resnet.py From ADL with MIT License
def resnet_shortcut(l, n_out, stride, activation=tf.identity):
    n_in = l.get_shape().as_list()[1 if is_data_format_nchw() else 3]
    if n_in != n_out:
        # 'stride' is the legacy tensorpack kwarg name; recent versions map it to 'strides'
        return Conv2D('convshortcut', l, n_out, 1, stride=stride, activation=activation)
    else:
        return l
Example #21
Source File: resnet.py From ADL with MIT License
def resnet(input_, option):
    mode = option.mode
    DEPTH = option.depth
    bottleneck = {'se': se_resnet_bottleneck}[mode]
    cfg = {
        50: ([3, 4, 6, 3], bottleneck),
    }
    defs, block_func = cfg[DEPTH]
    group_func = resnet_group

    with argscope(Conv2D, use_bias=False,
                  kernel_initializer=tf.variance_scaling_initializer(scale=2.0, mode='fan_out')), \
            argscope([Conv2D, MaxPooling, GlobalAvgPooling, BatchNorm],
                     data_format='channels_first'):
        l = Conv2D('conv0', input_, 64, 7, strides=2, activation=BNReLU)
        if option.gating_position[0]:
            l = gating_op(l, option)

        l = MaxPooling('pool0', l, 3, strides=2, padding='SAME')
        if option.gating_position[1]:
            l = gating_op(l, option)

        l = group_func('group0', l, block_func, 64, defs[0], 1, option)
        if option.gating_position[2]:
            l = gating_op(l, option)

        l = group_func('group1', l, block_func, 128, defs[1], 2, option)
        if option.gating_position[3]:
            l = gating_op(l, option)

        l = group_func('group2', l, block_func, 256, defs[2], 2, option)
        if option.gating_position[4]:
            l = gating_op(l, option)

        l = group_func('group3', l, block_func, 512, defs[3], 1, option)
        if option.gating_position[5]:
            l = gating_op(l, option)

        p_logits = GlobalAvgPooling('gap', l)
        logits = FullyConnected('linearnew', p_logits, option.number_of_class)
    return logits, l
Example #22
Source File: resnet.py From ADL with MIT License
def is_data_format_nchw():
    data_format = get_arg_scope()['Conv2D']['data_format']
    return data_format in ['NCHW', 'channels_first']
Example #23
Source File: RHP_ops.py From Regional-Homogeneity with MIT License
def conv_with_rn(gradient):
    out = Conv2D('conv', gradient, gradient.get_shape()[3], 1, strides=1,
                 activation=get_rn(),
                 kernel_initializer=tf.contrib.layers.variance_scaling_initializer(2.0))
    gradient = gradient + out
    return gradient
Example #24
Source File: graph.py From hover_net with MIT License
def decoder(name, i):
    pad = 'valid'  # to prevent boundary artifacts
    with tf.variable_scope(name):
        with tf.variable_scope('u3'):
            u3 = upsample2x('rz', i[-1])
            u3_sum = tf.add_n([u3, i[-2]])

            u3 = Conv2D('conva', u3_sum, 256, 5, strides=1, padding=pad)
            u3 = dense_blk('dense', u3, [128, 32], [1, 5], 8, split=4, padding=pad)
            u3 = Conv2D('convf', u3, 512, 1, strides=1)
        ####
        with tf.variable_scope('u2'):
            u2 = upsample2x('rz', u3)
            u2_sum = tf.add_n([u2, i[-3]])

            u2x = Conv2D('conva', u2_sum, 128, 5, strides=1, padding=pad)
            u2 = dense_blk('dense', u2x, [128, 32], [1, 5], 4, split=4, padding=pad)
            u2 = Conv2D('convf', u2, 256, 1, strides=1)
        ####
        with tf.variable_scope('u1'):
            u1 = upsample2x('rz', u2)
            u1_sum = tf.add_n([u1, i[-4]])

            u1 = Conv2D('conva', u1_sum, 64, 5, strides=1, padding='same')
    return [u3, u2x, u1]
####
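upsample2x is likewise not defined on this page. A hedged sketch of a 2x nearest-neighbor upsampling helper that fits these call sites (hover_net's actual helper may differ):

import numpy as np
from tensorpack.models import FixedUnPooling

def upsample2x(name, x):
    # Tile every input pixel into a 2x2 block (nearest-neighbor upsampling).
    return FixedUnPooling(name, x, 2,
                          unpool_mat=np.ones((2, 2), dtype='float32'),
                          data_format='channels_first')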