Python tensorpack.tfutils.argscope() Examples
The following are 13 code examples of tensorpack.tfutils.argscope().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module tensorpack.tfutils.
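Before the examples, here is a minimal sketch of what argscope does, based on the tensorpack API as exercised below: inside a with argscope(...) block, the given keyword arguments become the defaults for every call to the listed layer functions, and individual calls can still override them. Layer names and values in the sketch are illustrative.

import tensorflow as tf
from tensorpack import Conv2D
from tensorpack.tfutils import argscope

def simple_logits(image):
    # Every Conv2D call inside this block defaults to a 3x3 kernel with ReLU.
    with argscope(Conv2D, kernel_size=3, activation=tf.nn.relu):
        l = Conv2D('conv0', image, 64)                        # 3x3, ReLU
        l = Conv2D('conv1', l, 128, activation=tf.identity)   # explicit override
        return l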
Example #1
Source File: imagenet.py From LQ-Nets with MIT License
def get_logits(self, image):
    with argscope([Conv2D, MaxPooling, AvgPooling, GlobalAvgPooling, BatchNorm],
                  data_format=self.data_format), \
            argscope([QuantizedActiv], nbit=self.qa):
        if self.mode == 'vgg':
            return vgg_backbone(image, self.qw)
        elif self.mode == 'alexnet':
            return alexnet_backbone(image, self.qw)
        elif self.mode == 'googlenet':
            return googlenet_backbone(image, self.qw)
        elif self.mode == 'densenet':
            return densenet_backbone(image, self.qw)
        else:
            if self.mode == 'preact':
                group_func = preresnet_group
            elif self.mode == 'preact_typeA':
                group_func = preresnet_group_typeA
            else:
                group_func = resnet_group
            return resnet_backbone(
                image, self.num_blocks, group_func, self.block_func, self.qw)
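Example #1 stacks two argscope contexts with a line continuation; each context sets defaults for its own list of layers, and the defaults compose. A minimal sketch of the same pattern (layer names and values are illustrative):

with argscope([Conv2D, BatchNorm], data_format='NCHW'), \
        argscope(Conv2D, use_bias=False):
    l = Conv2D('conv', image, 64, 3)   # gets data_format='NCHW' and use_bias=False
    l = BatchNorm('bn', l)             # gets data_format='NCHW'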
Example #2
Source File: imagenet-resnet-gn.py From GroupNorm-reproduce with Apache License 2.0
def get_logits(self, image):
    def weight_standardization(v):
        if not self.use_WS:
            return v
        if (not v.name.endswith('/W:0')) or v.shape.ndims != 4:
            return v
        mean, var = tf.nn.moments(v, [0, 1, 2], keep_dims=True)
        v = (v - mean) / (tf.sqrt(var) + 1e-5)
        return v

    num_blocks = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3]}[self.depth]
    block_func = resnet_bottleneck
    with argscope([Conv2D, MaxPooling, GlobalAvgPooling], data_format=self.data_format), \
            varreplace.remap_variables(weight_standardization):
        return resnet_backbone(image, num_blocks, resnet_group, block_func)
Example #3
Source File: ghostnet.py From ghostnet with Apache License 2.0
def get_logits(self, inputs):
    sc = ghostnet_arg_scope(
        data_format=self.data_format,
        weight_decay=self.weight_decay,
        use_batch_norm=True,
        batch_norm_decay=0.9997,
        batch_norm_epsilon=0.001,
        regularize_depthwise=False)
    with slim.arg_scope(sc):
        with argscope(Conv2D, kernel_initializer=kernel_initializer):
            with argscope([Conv2D, BatchNorm], data_format=self.data_format):
                logits, end_points = ghost_net(
                    inputs,
                    dw_code=self.dw_code,
                    ratio_code=self.ratio_code,
                    se=self.se,
                    num_classes=self.num_classes,
                    dropout_keep_prob=self.dropout_keep_prob,
                    min_depth=8,
                    depth_multiplier=self.depth_multiplier,
                    depth=self.depth,
                    conv_defs=None,
                    prediction_fn=tf.contrib.layers.softmax,
                    spatial_squeeze=True,
                    reuse=None,
                    scope=self.scope,
                    global_pool=False)
                return logits


# =========================================================================== #
# Functional definition.
# =========================================================================== #
# Conv and Bottleneck namedtuples define layers of the GhostNet architecture:
# Conv defines 3x3 convolution layers;
# stride is the stride of the convolution;
# depth is the number of channels or filters in a layer.
Example #4
Source File: imagenet-resnet-horovod.py From benchmarks with The Unlicense
def get_logits(self, image):
    with argscope([Conv2D, MaxPooling, GlobalAvgPooling, BatchNorm], data_format='NCHW'), \
            argscope(Norm, type=self.norm):
        return resnet_backbone(image, self.num_blocks, resnet_group, resnet_bottleneck)
Example #5
Source File: imagenet-resnet.py From webvision-2.0-benchmarks with Apache License 2.0
def get_logits(self, image):
    with argscope([Conv2D, MaxPooling, GlobalAvgPooling, BatchNorm], data_format=self.data_format):
        return resnet_backbone(
            image, self.num_blocks,
            preresnet_group if self.mode == 'preact' else resnet_group,
            self.block_func)
Example #6
Source File: common.py From petridishnn with MIT License
def _get_dim(x, dim):
    """
    Get the dimension value of a 4-tensor. Helper for _get_C/H/W.
    get_C/H/W will use argscope to set the default dim.
    Otherwise we assume it is NCHW.
    """
    return x.get_shape().as_list()[dim]
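The docstring above refers to _get_C/H/W helpers that use argscope to pick the default dimension. As a hypothetical sketch of that idea (not the petridishnn implementation): tensorpack's get_arg_scope() returns the dict of defaults currently set by enclosing argscope blocks, so a helper could consult it and fall back to NCHW.

from tensorpack.tfutils.argscope import get_arg_scope

def _get_C(x):
    # Hypothetical: read the data_format default set for Conv2D, if any;
    # assume NCHW (channel axis 1) when nothing is set.
    fmt = get_arg_scope().get('Conv2D', {}).get('data_format', 'NCHW')
    return _get_dim(x, 1 if fmt in ('NCHW', 'channels_first') else 3)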
Example #7
Source File: adanet-resnet.py From adanet with MIT License
def get_logits(self, image):
    with argscope([Conv2D, MaxPooling, GlobalAvgPooling, BatchNorm], data_format=self.data_format):
        return resnet_backbone(
            image, self.num_blocks,
            preresnet_group if self.mode == 'preact' else resnet_group,
            self.block_func)
Example #8
Source File: imagenet-resnet.py From tensorpack with Apache License 2.0
def get_logits(self, image):
    with argscope([Conv2D, MaxPooling, GlobalAvgPooling, BatchNorm], data_format=self.data_format):
        return resnet_backbone(
            image, self.num_blocks,
            preact_group if self.mode == 'preact' else resnet_group,
            self.block_func)
Example #9
Source File: shufflenet.py From tensorpack with Apache License 2.0
def get_logits(self, image):
    with argscope([Conv2D, MaxPooling, AvgPooling, GlobalAvgPooling, BatchNorm],
                  data_format='channels_first'), \
            argscope(Conv2D, use_bias=False):
        group = args.group
        if not args.v2:
            # Copied from the paper
            channels = {
                3: [240, 480, 960],
                4: [272, 544, 1088],
                8: [384, 768, 1536]
            }
            mul = group * 4  # #chan has to be a multiple of this number
            channels = [int(math.ceil(x * args.ratio / mul) * mul)
                        for x in channels[group]]
            # The first channel must be a multiple of group
            first_chan = int(math.ceil(24 * args.ratio / group) * group)
        else:
            # Copied from the paper
            channels = {
                0.5: [48, 96, 192],
                1.: [116, 232, 464]
            }[args.ratio]
            first_chan = 24

        logger.info("#Channels: " + str([first_chan] + channels))

        l = Conv2D('conv1', image, first_chan, 3, strides=2, activation=BNReLU)
        l = MaxPooling('pool1', l, 3, 2, padding='SAME')

        l = shufflenet_stage('stage2', l, channels[0], 4, group)
        l = shufflenet_stage('stage3', l, channels[1], 8, group)
        l = shufflenet_stage('stage4', l, channels[2], 4, group)

        if args.v2:
            l = Conv2D('conv5', l, 1024, 1, activation=BNReLU)

        l = GlobalAvgPooling('gap', l)
        logits = FullyConnected('linear', l, 1000)
        return logits
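The channel arithmetic above rounds each scaled width up to a multiple of mul = group * 4, since the grouped convolutions need channel counts divisible by the group. A worked check of that rounding (values illustrative):

import math

group, ratio = 3, 0.5
mul = group * 4                                            # 12
scaled = [int(math.ceil(x * ratio / mul) * mul)
          for x in [240, 480, 960]]
print(scaled)                                              # [120, 240, 480]
first_chan = int(math.ceil(24 * ratio / group) * group)
print(first_chan)                                          # 12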
Example #10
Source File: vgg16.py From tensorpack with Apache License 2.0
def get_logits(self, image):
    with argscope(Conv2D, kernel_initializer=tf.variance_scaling_initializer(scale=2.)), \
            argscope([Conv2D, MaxPooling, BatchNorm], data_format='channels_first'):
        logits = (LinearWrap(image)
                  .apply(convnormrelu, 'conv1_1', 64)
                  .apply(convnormrelu, 'conv1_2', 64)
                  .MaxPooling('pool1', 2)  # 112
                  .apply(convnormrelu, 'conv2_1', 128)
                  .apply(convnormrelu, 'conv2_2', 128)
                  .MaxPooling('pool2', 2)  # 56
                  .apply(convnormrelu, 'conv3_1', 256)
                  .apply(convnormrelu, 'conv3_2', 256)
                  .apply(convnormrelu, 'conv3_3', 256)
                  .MaxPooling('pool3', 2)  # 28
                  .apply(convnormrelu, 'conv4_1', 512)
                  .apply(convnormrelu, 'conv4_2', 512)
                  .apply(convnormrelu, 'conv4_3', 512)
                  .MaxPooling('pool4', 2)  # 14
                  .apply(convnormrelu, 'conv5_1', 512)
                  .apply(convnormrelu, 'conv5_2', 512)
                  .apply(convnormrelu, 'conv5_3', 512)
                  .MaxPooling('pool5', 2)  # 7
                  .FullyConnected('fc6', 4096,
                                  kernel_initializer=tf.random_normal_initializer(stddev=0.001))
                  .tf.nn.relu(name='fc6_relu')
                  .Dropout('drop0', rate=0.5)
                  .FullyConnected('fc7', 4096,
                                  kernel_initializer=tf.random_normal_initializer(stddev=0.001))
                  .tf.nn.relu(name='fc7_relu')
                  .Dropout('drop1', rate=0.5)
                  .FullyConnected('fc8', 1000,
                                  kernel_initializer=tf.random_normal_initializer(stddev=0.01))())
    add_param_summary(('.*', ['histogram', 'rms']))
    return logits
Example #11
Source File: alexnet.py From tensorpack with Apache License 2.0
def get_logits(self, image):
    gauss_init = tf.random_normal_initializer(stddev=0.01)
    with argscope(Conv2D, kernel_initializer=tf.variance_scaling_initializer(scale=2.)), \
            argscope([Conv2D, FullyConnected], activation=tf.nn.relu), \
            argscope([Conv2D, MaxPooling], data_format='channels_last'):
        # necessary padding to get 55x55 after conv1
        image = tf.pad(image, [[0, 0], [2, 2], [2, 2], [0, 0]])
        l = Conv2D('conv1', image, filters=96, kernel_size=11,
                   strides=4, padding='VALID')  # size: 55
        visualize_conv1_weights(l.variables.W)
        l = tf.nn.lrn(l, 2, bias=1.0, alpha=2e-5, beta=0.75, name='norm1')
        l = MaxPooling('pool1', l, 3, strides=2, padding='VALID')  # 27
        l = Conv2D('conv2', l, filters=256, kernel_size=5, split=2)
        l = tf.nn.lrn(l, 2, bias=1.0, alpha=2e-5, beta=0.75, name='norm2')
        l = MaxPooling('pool2', l, 3, strides=2, padding='VALID')  # 13
        l = Conv2D('conv3', l, filters=384, kernel_size=3)
        l = Conv2D('conv4', l, filters=384, kernel_size=3, split=2)
        l = Conv2D('conv5', l, filters=256, kernel_size=3, split=2)
        l = MaxPooling('pool3', l, 3, strides=2, padding='VALID')

        l = FullyConnected('fc6', l, 4096,
                           kernel_initializer=gauss_init,
                           bias_initializer=tf.ones_initializer())
        l = Dropout(l, rate=0.5)
        l = FullyConnected('fc7', l, 4096, kernel_initializer=gauss_init)
        l = Dropout(l, rate=0.5)
        logits = FullyConnected('fc8', l, 1000, kernel_initializer=gauss_init)
        return logits
Example #12
Source File: backbone.py From tensorpack with Apache License 2.0
@contextmanager
def backbone_scope(freeze):
    """
    Args:
        freeze (bool): whether to freeze all the variables under the scope
    """
    def nonlin(x):
        x = get_norm()(x)
        return tf.nn.relu(x)

    with argscope([Conv2D, MaxPooling, BatchNorm], data_format='channels_first'), \
            argscope(Conv2D, use_bias=False, activation=nonlin,
                     kernel_initializer=tf.variance_scaling_initializer(
                         scale=2.0, mode='fan_out')), \
            ExitStack() as stack:
        if cfg.BACKBONE.NORM in ['FreezeBN', 'SyncBN']:
            if freeze or cfg.BACKBONE.NORM == 'FreezeBN':
                stack.enter_context(argscope(BatchNorm, training=False))
            else:
                stack.enter_context(argscope(
                    BatchNorm,
                    sync_statistics='nccl' if cfg.TRAINER == 'replicated' else 'horovod'))

        if freeze:
            stack.enter_context(freeze_variables(stop_gradient=False, skip_collection=True))
        else:
            # the layers are not completely frozen, but we may want to freeze only the affine
            if cfg.BACKBONE.FREEZE_AFFINE:
                stack.enter_context(custom_getter_scope(freeze_affine_getter))
        yield
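Since backbone_scope is a generator wrapped in contextlib.contextmanager (as in the original source), it is entered like any other context manager. A usage sketch, with the layer call purely illustrative:

with backbone_scope(freeze=True):
    # Conv2D here picks up the argscope defaults set above:
    # channels_first, no bias, He-style init, norm + ReLU activation.
    l = Conv2D('conv1', image, 64, 7, strides=2)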
Example #13
Source File: recurrrent.py From petridishnn with MIT License
def build_graph(self, seq, seq_len, label):
    batch_size = tf.shape(seq)[0]
    with argscope([Conv2D, Deconv2D, GroupedConv2D, AvgPooling, MaxPooling,
                   BatchNorm, GlobalAvgPooling, ResizeImages, SeparableConv2D],
                  data_format=self.data_format), \
            argscope([Conv2D, Deconv2D, GroupedConv2D, SeparableConv2D],
                     activation=tf.identity, use_bias=self.options.use_bias), \
            argscope([BatchNorm], center=False, scale=False,
                     decay=self.options.batch_norm_decay,
                     epsilon=self.options.batch_norm_epsilon), \
            argscope([candidate_gated_layer], eps=self.options.candidate_gate_eps):
        initializer = tf.random_uniform_initializer(-self.init_range, self.init_range)
        hid_to_fs_params = _init_feature_select(
            self.layer_info_list, 'master', self.options.feat_sel_lambda)
        seq, embedding_w = self._embed_input_if_int(seq, initializer=initializer)
        basic_cells = [
            self._basic_cell(initializer=initializer, hid_to_fs_params=hid_to_fs_params)
            for _ in range(self.num_lstms)
        ]
        cells = rnn.MultiRNNCell(basic_cells)
        state = cells.zero_state(batch_size, dtype=tf.float32)
        _, state = tf.nn.dynamic_rnn(
            cells, seq, initial_state=state, sequence_length=seq_len)
        cost, _variables = feature_to_prediction_and_loss(
            'rnn_pred', state, label, self.num_classes,
            self.options.prediction_feature, 1, is_last=True)
        self.cost = tf.identity(cost, name='cost')
        # TODO: this is broken right now, because dynamic_rnn is not compatible
        # with hallu stats computation.
        self._build_hallu_stats_graph(self.cost)
        return self.cost