Python tensorflow.contrib.framework.python.ops.arg_scope() Examples
The following are 30 code examples of tensorflow.contrib.framework.python.ops.arg_scope(). You can go to the original project or source file by following the links above each example, or check out all available functions and classes of the module tensorflow.contrib.framework.python.ops.
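Before the examples, a minimal sketch of what arg_scope does: it installs default keyword arguments for the listed (arg_scope-decorated) ops, so repeated layer calls share settings without restating them. This sketch assumes TensorFlow 1.x with tf.contrib available; the input placeholder is hypothetical and only for illustration.

import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope

# Hypothetical input tensor, only for illustration.
inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])

with arg_scope([layers.conv2d], padding='SAME', activation_fn=tf.nn.relu):
  # Both convolutions inherit padding='SAME' and the ReLU activation
  # from the surrounding arg_scope.
  net = layers.conv2d(inputs, 64, [3, 3], scope='conv1')
  net = layers.conv2d(net, 64, [3, 3], scope='conv2')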
Example #1
Source File: vgg16.py From Chinese-Character-and-Calligraphic-Image-Processing with MIT License | 6 votes |
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      biases_initializer=init_ops.zeros_initializer()):
    with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
      return arg_sc
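A hedged usage sketch for the scope returned above: the caller typically re-enters it with arg_scope when building the network, so every conv2d/fully_connected call picks up these defaults. The vgg_16 builder name and the input placeholder below are assumptions for illustration, not part of the example itself.

inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])  # hypothetical input
with arg_scope(vgg_arg_scope(weight_decay=0.0005)):
  # Layers built here inherit the ReLU activation, the L2 weight regularizer
  # and the zero bias initializer defined in vgg_arg_scope above.
  logits, end_points = vgg_16(inputs, num_classes=1000)  # assumed companion builder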
Example #2
Source File: resnet_v2_test.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def testFullyConvolutionalEndpointShapes(self):
  global_pool = False
  num_classes = 10
  inputs = create_test_input(2, 321, 321, 3)
  with arg_scope(resnet_utils.resnet_arg_scope()):
    _, end_points = self._resnet_small(
        inputs, num_classes, global_pool, scope='resnet')
    endpoint_to_shape = {
        'resnet/block1': [2, 41, 41, 4],
        'resnet/block2': [2, 21, 21, 8],
        'resnet/block3': [2, 11, 11, 16],
        'resnet/block4': [2, 11, 11, 32]
    }
    for endpoint in endpoint_to_shape:
      shape = endpoint_to_shape[endpoint]
      self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
Example #3
Source File: resnet_v2_test.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def testClassificationShapes(self):
  global_pool = True
  num_classes = 10
  inputs = create_test_input(2, 224, 224, 3)
  with arg_scope(resnet_utils.resnet_arg_scope()):
    _, end_points = self._resnet_small(
        inputs, num_classes, global_pool, scope='resnet')
    endpoint_to_shape = {
        'resnet/block1': [2, 28, 28, 4],
        'resnet/block2': [2, 14, 14, 8],
        'resnet/block3': [2, 7, 7, 16],
        'resnet/block4': [2, 7, 7, 32]
    }
    for endpoint in endpoint_to_shape:
      shape = endpoint_to_shape[endpoint]
      self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
Example #4
Source File: resnet_v1_test.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def testAtrousFullyConvolutionalValues(self):
  """Verify dense feature extraction with atrous convolution."""
  nominal_stride = 32
  for output_stride in [4, 8, 16, 32, None]:
    with arg_scope(resnet_utils.resnet_arg_scope(is_training=False)):
      with ops.Graph().as_default():
        with self.test_session() as sess:
          random_seed.set_random_seed(0)
          inputs = create_test_input(2, 81, 81, 3)
          # Dense feature extraction followed by subsampling.
          output, _ = self._resnet_small(
              inputs, None, global_pool=False, output_stride=output_stride)
          if output_stride is None:
            factor = 1
          else:
            factor = nominal_stride // output_stride
          output = resnet_utils.subsample(output, factor)
          # Make the two networks use the same weights.
          variable_scope.get_variable_scope().reuse_variables()
          # Feature extraction at the nominal network rate.
          expected, _ = self._resnet_small(inputs, None, global_pool=False)
          sess.run(variables.global_variables_initializer())
          self.assertAllClose(
              output.eval(), expected.eval(), atol=1e-4, rtol=1e-4)
Example #5
Source File: resnet_v2_test.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def testRootlessFullyConvolutionalEndpointShapes(self):
  global_pool = False
  num_classes = 10
  inputs = create_test_input(2, 128, 128, 3)
  with arg_scope(resnet_utils.resnet_arg_scope()):
    _, end_points = self._resnet_small(
        inputs, num_classes, global_pool, include_root_block=False,
        scope='resnet')
    endpoint_to_shape = {
        'resnet/block1': [2, 64, 64, 4],
        'resnet/block2': [2, 32, 32, 8],
        'resnet/block3': [2, 16, 16, 16],
        'resnet/block4': [2, 16, 16, 32]
    }
    for endpoint in endpoint_to_shape:
      shape = endpoint_to_shape[endpoint]
      self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
Example #6
Source File: resnet_v2_test.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def testAtrousFullyConvolutionalEndpointShapes(self):
  global_pool = False
  num_classes = 10
  output_stride = 8
  inputs = create_test_input(2, 321, 321, 3)
  with arg_scope(resnet_utils.resnet_arg_scope()):
    _, end_points = self._resnet_small(
        inputs, num_classes, global_pool, output_stride=output_stride,
        scope='resnet')
    endpoint_to_shape = {
        'resnet/block1': [2, 41, 41, 4],
        'resnet/block2': [2, 41, 41, 8],
        'resnet/block3': [2, 41, 41, 16],
        'resnet/block4': [2, 41, 41, 32]
    }
    for endpoint in endpoint_to_shape:
      shape = endpoint_to_shape[endpoint]
      self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
Example #7
Source File: resnet_v2_test.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def testAtrousFullyConvolutionalValues(self):
  """Verify dense feature extraction with atrous convolution."""
  nominal_stride = 32
  for output_stride in [4, 8, 16, 32, None]:
    with arg_scope(resnet_utils.resnet_arg_scope(is_training=False)):
      with ops.Graph().as_default():
        with self.test_session() as sess:
          random_seed.set_random_seed(0)
          inputs = create_test_input(2, 81, 81, 3)
          # Dense feature extraction followed by subsampling.
          output, _ = self._resnet_small(
              inputs, None, global_pool=False, output_stride=output_stride)
          if output_stride is None:
            factor = 1
          else:
            factor = nominal_stride // output_stride
          output = resnet_utils.subsample(output, factor)
          # Make the two networks use the same weights.
          variable_scope.get_variable_scope().reuse_variables()
          # Feature extraction at the nominal network rate.
          expected, _ = self._resnet_small(inputs, None, global_pool=False)
          sess.run(variables.global_variables_initializer())
          self.assertAllClose(
              output.eval(), expected.eval(), atol=1e-4, rtol=1e-4)
Example #8
Source File: resnet_v1_test.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def testAtrousFullyConvolutionalEndpointShapes(self):
  global_pool = False
  num_classes = 10
  output_stride = 8
  inputs = create_test_input(2, 321, 321, 3)
  with arg_scope(resnet_utils.resnet_arg_scope()):
    _, end_points = self._resnet_small(
        inputs, num_classes, global_pool, output_stride=output_stride,
        scope='resnet')
    endpoint_to_shape = {
        'resnet/block1': [2, 41, 41, 4],
        'resnet/block2': [2, 41, 41, 8],
        'resnet/block3': [2, 41, 41, 16],
        'resnet/block4': [2, 41, 41, 32]
    }
    for endpoint in endpoint_to_shape:
      shape = endpoint_to_shape[endpoint]
      self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
Example #9
Source File: resnet_v1_test.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def testRootlessFullyConvolutionalEndpointShapes(self):
  global_pool = False
  num_classes = 10
  inputs = create_test_input(2, 128, 128, 3)
  with arg_scope(resnet_utils.resnet_arg_scope()):
    _, end_points = self._resnet_small(
        inputs, num_classes, global_pool, include_root_block=False,
        scope='resnet')
    endpoint_to_shape = {
        'resnet/block1': [2, 64, 64, 4],
        'resnet/block2': [2, 32, 32, 8],
        'resnet/block3': [2, 16, 16, 16],
        'resnet/block4': [2, 16, 16, 32]
    }
    for endpoint in endpoint_to_shape:
      shape = endpoint_to_shape[endpoint]
      self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
Example #10
Source File: resnet_v1_test.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def testFullyConvolutionalEndpointShapes(self):
  global_pool = False
  num_classes = 10
  inputs = create_test_input(2, 321, 321, 3)
  with arg_scope(resnet_utils.resnet_arg_scope()):
    _, end_points = self._resnet_small(
        inputs, num_classes, global_pool, scope='resnet')
    endpoint_to_shape = {
        'resnet/block1': [2, 41, 41, 4],
        'resnet/block2': [2, 21, 21, 8],
        'resnet/block3': [2, 11, 11, 16],
        'resnet/block4': [2, 11, 11, 32]
    }
    for endpoint in endpoint_to_shape:
      shape = endpoint_to_shape[endpoint]
      self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
Example #11
Source File: vgg.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      biases_initializer=init_ops.zeros_initializer()):
    with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
      return arg_sc
Example #12
Source File: tf_train.py From iaf with MIT License | 6 votes |
def up(self, input, **_):
    hps = self.hps
    h_size = hps.h_size
    z_size = hps.z_size
    stride = [2, 2] if self.downsample else [1, 1]

    with arg_scope([conv2d]):
        x = tf.nn.elu(input)
        x = conv2d("up_conv1", x, 2 * z_size + 2 * h_size, stride=stride)
        self.qz_mean, self.qz_logsd, self.up_context, h = split(
            x, 1, [z_size, z_size, h_size, h_size])

        h = tf.nn.elu(h)
        h = conv2d("up_conv3", h, h_size)
        if self.downsample:
            input = resize_nearest_neighbor(input, 0.5)

        return input + 0.1 * h
Example #13
Source File: truncated_vgg.py From luminoth with BSD 3-Clause "New" or "Revised" License | 6 votes |
def vgg_arg_scope(weight_decay=0.0005):
    """Defines the VGG arg scope.

    Args:
        weight_decay: The l2 regularization coefficient.

    Returns:
        An arg_scope.
    """
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected],
        activation_fn=nn_ops.relu,
        weights_regularizer=regularizers.l2_regularizer(weight_decay),
        biases_initializer=init_ops.zeros_initializer()
    ):
        with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
            return arg_sc
Example #14
Source File: truncated_vgg.py From Tabulo with BSD 3-Clause "New" or "Revised" License | 6 votes |
def vgg_arg_scope(weight_decay=0.0005):
    """Defines the VGG arg scope.

    Args:
        weight_decay: The l2 regularization coefficient.

    Returns:
        An arg_scope.
    """
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected],
        activation_fn=nn_ops.relu,
        weights_regularizer=regularizers.l2_regularizer(weight_decay),
        biases_initializer=init_ops.zeros_initializer()
    ):
        with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
            return arg_sc
Example #15
Source File: truncated_vgg.py From Table-Detection-using-Deep-learning with BSD 3-Clause "New" or "Revised" License | 6 votes |
def vgg_arg_scope(weight_decay=0.0005):
    """Defines the VGG arg scope.

    Args:
        weight_decay: The l2 regularization coefficient.

    Returns:
        An arg_scope.
    """
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected],
        activation_fn=nn_ops.relu,
        weights_regularizer=regularizers.l2_regularizer(weight_decay),
        biases_initializer=init_ops.zeros_initializer()
    ):
        with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
            return arg_sc
Example #16
Source File: util.py From predictron with MIT License | 6 votes |
def predictron_arg_scope(weight_decay=0.0001,
                         batch_norm_decay=0.997,
                         batch_norm_epsilon=1e-5,
                         batch_norm_scale=True):
  batch_norm_params = {
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'updates_collections': tf.GraphKeys.UPDATE_OPS,
  }

  # Set weight_decay for weights in Conv and FC layers.
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope(
        [layers.conv2d],
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=None,
        normalizer_fn=layers_lib.batch_norm,
        normalizer_params=batch_norm_params) as sc:
      return sc
Example #17
Source File: vgg.py From lambda-packs with MIT License | 6 votes |
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      biases_initializer=init_ops.zeros_initializer()):
    with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
      return arg_sc
Example #18
Source File: resnet_v1_test.py From auto-alt-text-lambda-api with MIT License | 6 votes |
def testClassificationShapes(self):
  global_pool = True
  num_classes = 10
  inputs = create_test_input(2, 224, 224, 3)
  with arg_scope(resnet_utils.resnet_arg_scope()):
    _, end_points = self._resnet_small(
        inputs, num_classes, global_pool, scope='resnet')
    endpoint_to_shape = {
        'resnet/block1': [2, 28, 28, 4],
        'resnet/block2': [2, 14, 14, 8],
        'resnet/block3': [2, 7, 7, 16],
        'resnet/block4': [2, 7, 7, 32]
    }
    for endpoint in endpoint_to_shape:
      shape = endpoint_to_shape[endpoint]
      self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
Example #19
Source File: tfops.py From glow with MIT License | 6 votes |
def actnorm(name, x, scale=1., logdet=None, logscale_factor=3.,
            batch_variance=False, reverse=False, init=False, trainable=True):
    if arg_scope([get_variable_ddi], trainable=trainable):
        if not reverse:
            x = actnorm_center(name+"_center", x, reverse)
            x = actnorm_scale(name+"_scale", x, scale, logdet,
                              logscale_factor, batch_variance, reverse, init)
            if logdet != None:
                x, logdet = x
        else:
            x = actnorm_scale(name + "_scale", x, scale, logdet,
                              logscale_factor, batch_variance, reverse, init)
            if logdet != None:
                x, logdet = x
            x = actnorm_center(name+"_center", x, reverse)
        if logdet != None:
            return x, logdet
        return x


# Activation normalization
Example #20
Source File: resnet_v1_test.py From auto-alt-text-lambda-api with MIT License | 5 votes |
def testEndPointsV1(self):
  """Test the end points of a tiny v1 bottleneck network."""
  bottleneck = resnet_v1.bottleneck
  blocks = [
      resnet_utils.Block('block1', bottleneck, [(4, 1, 1), (4, 1, 2)]),
      resnet_utils.Block('block2', bottleneck, [(8, 2, 1), (8, 2, 1)])
  ]
  inputs = create_test_input(2, 32, 16, 3)
  with arg_scope(resnet_utils.resnet_arg_scope()):
    _, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
  expected = [
      'tiny/block1/unit_1/bottleneck_v1/shortcut',
      'tiny/block1/unit_1/bottleneck_v1/shortcut/BatchNorm',
      'tiny/block1/unit_1/bottleneck_v1/conv1',
      'tiny/block1/unit_1/bottleneck_v1/conv2',
      'tiny/block1/unit_1/bottleneck_v1/conv3',
      'tiny/block1/unit_1/bottleneck_v1/conv3/BatchNorm',
      'tiny/block1/unit_2/bottleneck_v1/conv1',
      'tiny/block1/unit_2/bottleneck_v1/conv2',
      'tiny/block1/unit_2/bottleneck_v1/conv3',
      'tiny/block1/unit_2/bottleneck_v1/conv3/BatchNorm',
      'tiny/block2/unit_1/bottleneck_v1/shortcut',
      'tiny/block2/unit_1/bottleneck_v1/shortcut/BatchNorm',
      'tiny/block2/unit_1/bottleneck_v1/conv1',
      'tiny/block2/unit_1/bottleneck_v1/conv2',
      'tiny/block2/unit_1/bottleneck_v1/conv3',
      'tiny/block2/unit_1/bottleneck_v1/conv3/BatchNorm',
      'tiny/block2/unit_2/bottleneck_v1/conv1',
      'tiny/block2/unit_2/bottleneck_v1/conv2',
      'tiny/block2/unit_2/bottleneck_v1/conv3',
      'tiny/block2/unit_2/bottleneck_v1/conv3/BatchNorm'
  ]
  self.assertItemsEqual(expected, end_points)
Example #21
Source File: resnet_v1_test.py From auto-alt-text-lambda-api with MIT License | 5 votes |
def testFullyConvolutionalUnknownHeightWidth(self):
  batch = 2
  height, width = 65, 65
  global_pool = False
  inputs = create_test_input(batch, None, None, 3)
  with arg_scope(resnet_utils.resnet_arg_scope()):
    output, _ = self._resnet_small(inputs, None, global_pool)
  self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
  images = create_test_input(batch, height, width, 3)
  with self.test_session() as sess:
    sess.run(variables.global_variables_initializer())
    output = sess.run(output, {inputs: images.eval()})
    self.assertEqual(output.shape, (batch, 3, 3, 32))
Example #22
Source File: resnet_v1_test.py From auto-alt-text-lambda-api with MIT License | 5 votes |
def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
  """A plain ResNet without extra layers before or after the ResNet blocks."""
  with variable_scope.variable_scope(scope, values=[inputs]):
    with arg_scope([layers.conv2d], outputs_collections='end_points'):
      net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
      end_points = utils.convert_collection_to_dict('end_points')
      return net, end_points
Example #23
Source File: inception_v3_test.py From auto-alt-text-lambda-api with MIT License | 5 votes |
def testModelHasExpectedNumberOfParameters(self):
  batch_size = 5
  height, width = 299, 299
  inputs = random_ops.random_uniform((batch_size, height, width, 3))
  with arg_scope(inception_v3.inception_v3_arg_scope()):
    inception_v3.inception_v3_base(inputs)
  total_params, _ = model_analyzer.analyze_vars(
      variables_lib.get_model_variables())
  self.assertAlmostEqual(21802784, total_params)
Example #24
Source File: encoder_resnet.py From conv-ensemble-str with Apache License 2.0 | 5 votes |
def __call__(self, features):
  """ Define tf graph.
  """
  inputs = features['image']

  with tf.variable_scope('encoder') as vsc:
    with slim.arg_scope(resnet_v2.resnet_arg_scope()):
      # conv1
      with arg_scope(
          [layers_lib.conv2d], activation_fn=None, normalizer_fn=None):
        net = resnet_utils.conv2d_same(
            inputs, 16, 5, stride=2, scope='conv1')
      tf.add_to_collection(vsc.original_name_scope, net)

      # resnet blocks
      blocks = []
      for i in range(len(self.encoder_params['block_name'])):
        block = resnet_v2.resnet_v2_block(
            scope=self.encoder_params['block_name'][i],
            base_depth=self.encoder_params['base_depth'][i],
            num_units=self.encoder_params['num_units'][i],
            stride=self.encoder_params['stride'][i])
        blocks.append(block)

      net, _ = resnet_v2.resnet_v2(
          net,
          blocks,
          is_training=(self.mode == ModeKeys.TRAIN),
          global_pool=False,
          output_stride=2,
          include_root_block=False,
          scope='resnet')
      tf.add_to_collection(vsc.original_name_scope, net)
  return net
Example #25
Source File: layers.py From iaf with MIT License | 5 votes |
def ar_multiconv2d(name, x, context, n_h, n_out, nl=tf.nn.elu, **_):
    with tf.variable_scope(name), arg_scope([ar_conv2d]):
        for i, size in enumerate(n_h):
            x = ar_conv2d("layer_%d" % i, x, size, zerodiagonal=False)
            if i == 0:
                x += context
            x = nl(x)
        return [ar_conv2d("layer_out_%d" % i, x, size, zerodiagonal=True)
                for i, size in enumerate(n_out)]
Example #26
Source File: layers.py From iaf with MIT License | 5 votes |
def ar_conv2d(name, x, num_filters, filter_size=(3, 3), stride=(1, 1),
              pad="SAME", init_scale=1., zerodiagonal=True, **_):
    h = filter_size[0]
    w = filter_size[1]
    n_in = int(x.get_shape()[1])
    n_out = num_filters
    mask = tf.constant(get_conv_ar_mask(h, w, n_in, n_out, zerodiagonal))
    with arg_scope([conv2d]):
        return conv2d(name, x, num_filters, filter_size, stride, pad,
                      init_scale, mask=mask)


# Auto-Regressive convnet with l2 normalization
Example #27
Source File: inception_v1_test.py From auto-alt-text-lambda-api with MIT License | 5 votes |
def testModelHasExpectedNumberOfParameters(self):
  batch_size = 5
  height, width = 224, 224
  inputs = random_ops.random_uniform((batch_size, height, width, 3))
  with arg_scope(inception_v1.inception_v1_arg_scope()):
    inception_v1.inception_v1_base(inputs)
  total_params, _ = model_analyzer.analyze_vars(
      variables_lib.get_model_variables())
  self.assertAlmostEqual(5607184, total_params)
Example #28
Source File: alexnet.py From auto-alt-text-lambda-api with MIT License | 5 votes |
def alexnet_v2_arg_scope(weight_decay=0.0005):
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      biases_initializer=init_ops.constant_initializer(0.1),
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope([layers.conv2d], padding='SAME'):
      with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:
        return arg_sc
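As the example above shows, arg_scope calls nest: an inner scope adds or overrides defaults for the ops it lists, while the outer defaults still apply to everything else. A short sketch of that behavior, assuming the same contrib imports as the surrounding examples (the inputs tensor is hypothetical):

with arg_scope([layers.conv2d, layers_lib.fully_connected],
               activation_fn=nn_ops.relu):
  with arg_scope([layers.conv2d], padding='VALID'):
    # conv2d gets both the outer ReLU default and the inner padding='VALID';
    # fully_connected only gets the outer ReLU default.
    net = layers.conv2d(inputs, 64, [3, 3], scope='conv')
    net = layers_lib.fully_connected(layers.flatten(net), 10, scope='fc')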
Example #29
Source File: resnet_v1_test.py From auto-alt-text-lambda-api with MIT License | 5 votes |
def testAtrousFullyConvolutionalUnknownHeightWidth(self):
  batch = 2
  height, width = 65, 65
  global_pool = False
  output_stride = 8
  inputs = create_test_input(batch, None, None, 3)
  with arg_scope(resnet_utils.resnet_arg_scope()):
    output, _ = self._resnet_small(
        inputs, None, global_pool, output_stride=output_stride)
  self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
  images = create_test_input(batch, height, width, 3)
  with self.test_session() as sess:
    sess.run(variables.global_variables_initializer())
    output = sess.run(output, {inputs: images.eval()})
    self.assertEqual(output.shape, (batch, 9, 9, 32))
Example #30
Source File: overfeat.py From auto-alt-text-lambda-api with MIT License | 5 votes |
def overfeat_arg_scope(weight_decay=0.0005):
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      biases_initializer=init_ops.zeros_initializer()):
    with arg_scope([layers.conv2d], padding='SAME'):
      with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:
        return arg_sc