Python tensorflow.contrib.layers.conv2d() Examples

The following are 30 code examples of tensorflow.contrib.layers.conv2d(), drawn from open-source projects. The link above each example points to the original project and source file.
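All of the examples below assume TensorFlow 1.x, where the tf.contrib namespace still exists. As a minimal orientation sketch (the input shape and scope name here are illustrative, not taken from any example):

import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib.framework import arg_scope

# NHWC input; the batch/height/width values are placeholders for illustration.
inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])

# A 3x3 convolution with 64 output channels. tf.contrib.layers.conv2d applies
# ReLU activation and 'SAME' padding by default.
net = layers.conv2d(inputs, num_outputs=64, kernel_size=3, stride=1, scope='conv1')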
Example #1
Source File: vgg16.py    From Chinese-Character-and-Calligraphic-Image-Processing with MIT License
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      biases_initializer=init_ops.zeros_initializer()):
    with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
      return arg_sc 
Example #2
Source File: configurable_ops_test.py    From morph-net with Apache License 2.0
def testStrict_FullParameterizationPasses(self, fallback_rule):
    full_parameterization = {'first/Conv2D': 3, 'second/Conv2D': 13}
    default_num_outputs = 7
    decorator = ops.ConfigurableOps(
        parameterization=full_parameterization, fallback_rule=fallback_rule)
    first = decorator.conv2d(
        self.inputs,
        num_outputs=default_num_outputs,
        kernel_size=3,
        scope='first')
    second = decorator.conv2d(
        self.inputs,
        num_outputs=default_num_outputs,
        kernel_size=1,
        scope='second')

    self.assertAllEqual(3, first.shape.as_list()[3])
    self.assertAllEqual(13, second.shape.as_list()[3]) 
Example #3
Source File: configurable_ops_test.py    From morph-net with Apache License 2.0
def testDifferentParameterization(self, parameterization,
                                    expected_first_shape, expected_conv2_shape):
    alternate_num_outputs = 7
    decorator = ops.ConfigurableOps(parameterization=parameterization)
    with arg_scope([layers.conv2d], padding='VALID'):
      first_out = decorator.conv2d(
          self.inputs,
          num_outputs=alternate_num_outputs,
          kernel_size=3,
          scope='first')
      conv2_out = decorator.conv2d(
          self.inputs,
          num_outputs=alternate_num_outputs,
          kernel_size=1,
          scope='second')
      self.assertAllEqual(expected_first_shape, first_out.shape.as_list())
      self.assertAllEqual(expected_conv2_shape, conv2_out.shape.as_list()) 
Example #4
Source File: configurable_ops_test.py    From morph-net with Apache License 2.0
def testComplexNet(self):
    parameterization = {'Branch0/Conv_1x1/Conv2D': 13, 'Conv3_1x1/Conv2D': 77}
    decorator = ops.ConfigurableOps(parameterization=parameterization)

    def conv2d(inputs, num_outputs, kernel_size, scope):
      return decorator.conv2d(
          inputs, num_outputs=num_outputs, kernel_size=kernel_size, scope=scope)

    net = self.inputs

    with tf.variable_scope('Branch0'):
      branch_0 = conv2d(net, 1, 1, scope='Conv_1x1')
    with tf.variable_scope('Branch1'):
      branch_1 = conv2d(net, 2, 1, scope='Conv_1x1')
      out_2 = conv2d(branch_1, 3, 3, scope='Conv_3x3')
    net = conv2d(net, 1, 1, scope='Conv3_1x1')
    output = tf.concat([net, branch_0, branch_1, out_2], -1)
    expected_output_shape = self.inputs_shape
    expected_output_shape[-1] = 95
    self.assertEqual(expected_output_shape, output.shape.as_list())
    self.assertEqual(2, decorator.constructed_ops['Branch1/Conv_1x1/Conv2D'])
    self.assertEqual(13, decorator.constructed_ops['Branch0/Conv_1x1/Conv2D'])
    self.assertEqual(77, decorator.constructed_ops['Conv3_1x1/Conv2D'])
    self.assertEqual(3, decorator.constructed_ops['Branch1/Conv_3x3/Conv2D']) 
Example #5
Source File: configurable_ops_test.py    From morph-net with Apache License 2.0
def testStrict_PartialParameterizationFails(self):
    partial_parameterization = {'first/Conv2D': 3}
    default_num_outputs = 7
    decorator = ops.ConfigurableOps(
        parameterization=partial_parameterization, fallback_rule='strict')
    decorator.conv2d(
        self.inputs,
        num_outputs=default_num_outputs,
        kernel_size=3,
        scope='first')
    with self.assertRaisesRegexp(
        KeyError, 'op_name \"second/Conv2D\" not found in parameterization'):
      decorator.conv2d(
          self.inputs,
          num_outputs=default_num_outputs,
          kernel_size=1,
          scope='second') 
Example #6
Source File: configurable_ops.py    From morph-net with Apache License 2.0
def conv2d(self, *args, **kwargs):
    """Masks num_outputs from the function pointed to by 'conv2d'.

    The object's parameterization has precedence over the given NUM_OUTPUTS
    argument. The resolution of the op names uses
    tf.contrib.framework.get_name_scope() and kwargs['scope'].

    Args:
      *args: Arguments for the operation.
      **kwargs: Key arguments for the operation.

    Returns:
      The result of applying function_dict['conv2d'] to the given 'inputs',
      '*args' and '**kwargs', while possibly overriding NUM_OUTPUTS according
      to the parameterization.

    Raises:
      ValueError: If kwargs does not contain a key named 'scope'.
    """
    fn, suffix = self._get_function_and_suffix('conv2d')
    return self._mask(fn, suffix, *args, **kwargs) 
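A minimal usage sketch (the scope name 'conv1' and channel counts are hypothetical; Example #2 above shows the real test): a parameterization entry keyed by the resolved op name overrides the num_outputs passed at the call site:

decorator = ops.ConfigurableOps(parameterization={'conv1/Conv2D': 8})
net = decorator.conv2d(inputs, num_outputs=64, kernel_size=3, scope='conv1')
# The parameterization takes precedence: net has 8 output channels, not 64.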
Example #7
Source File: layers.py    From tensornets with MIT License
def darkconv(*args, **kwargs):
    scope = kwargs.pop('scope', None)
    onlyconv = kwargs.pop('onlyconv', False)
    with tf.variable_scope(scope):
        conv_kwargs = {
            'padding': 'SAME',
            'activation_fn': None,
            'weights_initializer': variance_scaling_initializer(1.53846),
            'weights_regularizer': l2(5e-4),
            'biases_initializer': None,
            'scope': 'conv'}
        if onlyconv:
            conv_kwargs.pop('biases_initializer')
        with arg_scope([conv2d], **conv_kwargs):
            x = conv2d(*args, **kwargs)
            if onlyconv: return x
            x = batch_norm(x, decay=0.99, center=False, scale=True,
                           epsilon=1e-5, scope='bn')
            x = bias_add(x, scope='bias')
            x = leaky_relu(x, alpha=0.1, name='lrelu')
            return x 
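A usage sketch, assuming the positional arguments forward to conv2d as (inputs, num_outputs, kernel_size); the layer sizes and scope names here are hypothetical:

x = darkconv(inputs, 32, 3, scope='conv1')              # conv + BN + bias + leaky ReLU
head = darkconv(x, 18, 1, scope='head', onlyconv=True)  # bare conv; default biases restored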
Example #8
Source File: vgg.py    From auto-alt-text-lambda-api with MIT License
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      biases_initializer=init_ops.zeros_initializer()):
    with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
      return arg_sc 
Example #9
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testGetRegularizerForConcatWithNone(self, test_concat, depth):
    image = tf.constant(0.0, shape=[1, 17, 19, 3])
    conv2 = layers.conv2d(image, 5, [1, 1], padding='SAME', scope='conv2')
    other_input = tf.add(
        tf.identity(tf.constant(3.0, shape=[1, 17, 19, depth])), 3.0)
    # other_input has None as regularizer.
    concat = tf.concat([other_input, conv2], 3)
    output = tf.add(concat, concat, name='output_out')
    op = concat.op if test_concat else output.op

    # Instantiate OpRegularizerManager.
    op_handler_dict = self._default_op_handler_dict
    op_handler_dict['Conv2D'] = StubConvSourceOpHandler(add_concat_model_stub)
    op_reg_manager = orm.OpRegularizerManager([output.op], op_handler_dict)

    expected_alive = add_concat_model_stub.expected_alive()
    alive = op_reg_manager.get_regularizer(op).alive_vector
    self.assertAllEqual([True] * depth, alive[:depth])
    self.assertAllEqual(expected_alive['conv2'], alive[depth:]) 
Example #10
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testGroupingOps(self, tested_op):
    th = 0.5
    image = tf.constant(0.5, shape=[1, 17, 19, 3])

    conv1 = layers.conv2d(image, 5, [1, 1], padding='SAME', scope='conv1')
    conv2 = layers.conv2d(image, 5, [1, 1], padding='SAME', scope='conv2')
    res = tested_op(conv1, conv2)

    # Instantiate OpRegularizerManager.
    op_handler_dict = self._default_op_handler_dict
    op_handler_dict['Conv2D'] = RandomConvSourceOpHandler(th)
    op_reg_manager = orm.OpRegularizerManager([res.op], op_handler_dict)

    alive = op_reg_manager.get_regularizer(res.op).alive_vector
    conv1_reg = op_reg_manager.get_regularizer(conv1.op).regularization_vector
    conv2_reg = op_reg_manager.get_regularizer(conv2.op).regularization_vector
    with self.session():
      self.assertAllEqual(alive, np.logical_or(conv1_reg.eval() > th,
                                               conv2_reg.eval() > th)) 
Example #11
Source File: prnet.py    From LipReading with MIT License
def resBlock(x, num_outputs, kernel_size=4, stride=1, activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm,
    scope=None):
  assert num_outputs % 2 == 0  # num_outputs must be divisible by the channel factor (2 here)
  with tf.variable_scope(scope, 'resBlock'):
    shortcut = x
    if stride != 1 or x.get_shape()[3] != num_outputs:
      shortcut = tcl.conv2d(shortcut, num_outputs, kernel_size=1, stride=stride,
        activation_fn=None, normalizer_fn=None, scope='shortcut')
    x = tcl.conv2d(x, num_outputs // 2, kernel_size=1, stride=1, padding='SAME')
    x = tcl.conv2d(x, num_outputs // 2, kernel_size=kernel_size, stride=stride, padding='SAME')
    x = tcl.conv2d(x, num_outputs, kernel_size=1, stride=1, activation_fn=None, padding='SAME', normalizer_fn=None)

    x += shortcut
    x = normalizer_fn(x)
    x = activation_fn(x)
  return x 
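A usage sketch (tensor and scope names are illustrative): the block bottlenecks to num_outputs // 2 internally and restores num_outputs on the way out, projecting the input through a 1x1 convolution whenever the stride or channel count changes:

y = resBlock(x, num_outputs=128, kernel_size=4, stride=2, scope='res1')
# stride=2 and/or a channel mismatch triggers the 1x1 'shortcut' projection.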
Example #12
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testGather(self):
    gather_index = [5, 6, 7, 8, 9, 0, 1, 2, 3, 4]
    with arg_scope(self._batch_norm_scope()):
      inputs = tf.zeros([2, 4, 4, 3])
      c1 = layers.conv2d(inputs, num_outputs=10, kernel_size=3, scope='conv1')
      gather = tf.gather(c1, gather_index, axis=3)

    manager = orm.OpRegularizerManager(
        [gather.op], self._default_op_handler_dict)

    c1_reg = manager.get_regularizer(_get_op('conv1/Conv2D'))
    gather_reg = manager.get_regularizer(_get_op('GatherV2'))

    # Check regularizer indices.
    self.assertAllEqual(list(range(10)), c1_reg.regularization_vector)
    # This fails due to gather not being supported.  Once gather is supported,
    # this test can be enabled to verify that the regularization vector is
    # gathered in the same ordering as the tensor.
    # self.assertAllEqual(
    #     gather_index, gather_reg.regularization_vector)

    # This test shows that gather is not supported.  The regularization vector
    # has the same initial ordering after the gather op scrambled the
    # channels.  Remove this once gather is supported.
    self.assertAllEqual(list(range(10)), gather_reg.regularization_vector) 
Example #13
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testConcat(self):
    with arg_scope(self._batch_norm_scope()):
      inputs = tf.zeros([2, 4, 4, 3])
      c1 = layers.conv2d(inputs, num_outputs=10, kernel_size=3, scope='conv1')
      c2 = layers.conv2d(inputs, num_outputs=10, kernel_size=3, scope='conv2')
      concat = tf.concat([c1, c2], axis=3)
      tmp = c1 + c2

    manager = orm.OpRegularizerManager(
        [concat.op, tmp.op], self._default_op_handler_dict)

    # Fetch OpSlice to verify grouping.
    inputs_op_slice = manager.get_op_slices(inputs.op)[0]
    c1_op_slice = manager.get_op_slices(c1.op)[0]
    c2_op_slice = manager.get_op_slices(c2.op)[0]
    tmp_op_slice = manager.get_op_slices(tmp.op)[0]
    concat_op_slice0 = manager.get_op_slices(concat.op)[0]
    concat_op_slice1 = manager.get_op_slices(concat.op)[1]

    # Verify inputs and c1 have different group.
    self.assertNotEqual(manager.get_op_group(inputs_op_slice),
                        manager.get_op_group(c1_op_slice))

    # Verify inputs and c2 have different group.
    self.assertNotEqual(manager.get_op_group(inputs_op_slice),
                        manager.get_op_group(c2_op_slice))

    # Verify c1, c2, and add have the same group.
    self.assertEqual(manager.get_op_group(c1_op_slice),
                     manager.get_op_group(c2_op_slice))
    self.assertEqual(manager.get_op_group(c1_op_slice),
                     manager.get_op_group(tmp_op_slice))

    # Verify concat slices are grouped with c1, c2, and add.
    self.assertEqual(manager.get_op_group(c1_op_slice),
                     manager.get_op_group(concat_op_slice0))
    self.assertEqual(manager.get_op_group(c1_op_slice),
                     manager.get_op_group(concat_op_slice1)) 
Example #14
Source File: batch_norm_source_op_handler_test.py    From morph-net with Apache License 2.0
def _batch_norm_scope(self):
    params = {
        'trainable': True,
        'normalizer_fn': layers.batch_norm,
        'normalizer_params': {
            'scale': True,
        },
    }

    with arg_scope([layers.conv2d], **params) as sc:
      return sc 
Example #15
Source File: depthwise_convolution_op_handler_test.py    From morph-net with Apache License 2.0
def _batch_norm_scope(self):
    params = {
        'trainable': True,
        'normalizer_fn': layers.batch_norm,
        'normalizer_params': {
            'scale': True,
        },
    }

    with arg_scope([layers.conv2d], **params) as sc:
      return sc 
Example #16
Source File: grouping_op_handler_test.py    From morph-net with Apache License 2.0
def _batch_norm_scope(self):
    params = {
        'trainable': True,
        'normalizer_fn': layers.batch_norm,
        'normalizer_params': {
            'scale': True,
        },
    }

    with arg_scope([layers.conv2d], **params) as sc:
      return sc 
Example #17
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def _batch_norm_scope(self):
    params = {
        'trainable': True,
        'normalizer_fn': layers.batch_norm,
        'normalizer_params': {
            'scale': True
        }
    }

    with arg_scope([layers.conv2d], **params) as sc:
      return sc 
Example #18
Source File: configurable_ops_test.py    From morph-net with Apache License 2.0
def testTowerVanishes(self, parameterization):
    depth = self.inputs.shape.as_list()[3]
    decorator = ops.ConfigurableOps(parameterization=parameterization)

    net = decorator.conv2d(
        self.inputs, num_outputs=12, kernel_size=3, scope='first')
    net = decorator.conv2d(
        net, num_outputs=depth, kernel_size=1, scope='second')
    self.assertTrue(ops.is_vanished(net)) 
Example #19
Source File: inception_v2.py    From Chinese-Character-and-Calligraphic-Image-Processing with MIT License
def inception_v2_arg_scope(weight_decay=0.00004,
                           batch_norm_var_collection='moving_vars'):
  """Defines the default InceptionV2 arg scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.

  Returns:
    An `arg_scope` to use for the inception v3 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': 0.9997,
      # epsilon to prevent 0s in variance.
      'epsilon': 0.001,
      # collection containing update_ops.
      'updates_collections': ops.GraphKeys.UPDATE_OPS,
      # collection containing the moving mean and moving variance.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }

  # Set weight_decay for weights in Conv and FC layers.
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope(
        [layers.conv2d],
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=nn_ops.relu,
        normalizer_fn=layers_lib.batch_norm,
        normalizer_params=batch_norm_params) as sc:
      return sc 
Example #20
Source File: resnet_v1_test.py    From auto-alt-text-lambda-api with MIT License
def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
    """A plain ResNet without extra layers before or after the ResNet blocks."""
    with variable_scope.variable_scope(scope, values=[inputs]):
      with arg_scope([layers.conv2d], outputs_collections='end_points'):
        net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
        end_points = utils.convert_collection_to_dict('end_points')
        return net, end_points 
Example #21
Source File: output_non_passthrough_op_handler_test.py    From morph-net with Apache License 2.0
def _batch_norm_scope(self):
    params = {
        'trainable': True,
        'normalizer_fn': layers.batch_norm,
        'normalizer_params': {
            'scale': True,
        },
    }

    with framework.arg_scope([layers.conv2d], **params) as sc:
      return sc 
Example #22
Source File: configurable_ops_test.py    From morph-net with Apache License 2.0
def testScopeAndNameKwargs(self):
    function_dict = {
        'fully_connected': mock_fully_connected,
        'conv2d': mock_conv2d,
        'separable_conv2d': mock_separable_conv2d,
        'concat': mock_concat,
        'add_n': mock_add_n,
    }
    parameterization = {
        'fc/MatMul': 13,
        'conv/Conv2D': 15,
        'sep/separable_conv2d': 17
    }
    num_outputs = lambda res: res['args'][1]
    decorator = ops.ConfigurableOps(
        parameterization=parameterization, function_dict=function_dict)

    conv2d = decorator.conv2d(
        self.inputs, num_outputs=11, kernel_size=3, scope='conv')
    self.assertEqual('myconv2d', conv2d['mock_name'])
    self.assertEqual(parameterization['conv/Conv2D'], num_outputs(conv2d))

    conv2d = decorator.conv2d(
        self.inputs, num_outputs=11, kernel_size=3, name='conv')
    self.assertEqual('myconv2d', conv2d['mock_name'])
    self.assertEqual(parameterization['conv/Conv2D'], num_outputs(conv2d)) 
Example #23
Source File: configurable_ops_test.py    From morph-net with Apache License 2.0
def testMapBinding(self):
    # TODO(e1): Clean up this file/test. Split to different tests
    function_dict = {
        'fully_connected': mock_fully_connected,
        'conv2d': mock_conv2d,
        'separable_conv2d': mock_separable_conv2d,
        'concat': mock_concat,
        'add_n': mock_add_n,
    }
    parameterization = {
        'fc/MatMul': 13,
        'conv/Conv2D': 15,
        'sep/separable_conv2d': 17
    }
    num_outputs = lambda res: res['args'][1]
    decorator = ops.ConfigurableOps(
        parameterization=parameterization, function_dict=function_dict)
    fc = decorator.fully_connected(self.fc_inputs, num_outputs=88, scope='fc')
    self.assertEqual('myfully_connected', fc['mock_name'])
    self.assertEqual(parameterization['fc/MatMul'], num_outputs(fc))

    conv2d = decorator.conv2d(
        self.inputs, num_outputs=11, kernel_size=3, scope='conv')
    self.assertEqual('myconv2d', conv2d['mock_name'])
    self.assertEqual(parameterization['conv/Conv2D'], num_outputs(conv2d))

    separable_conv2d = decorator.separable_conv2d(
        self.inputs, num_outputs=88, kernel_size=3, scope='sep')
    self.assertEqual('myseparable_conv2d', separable_conv2d['mock_name'])
    self.assertEqual(parameterization['sep/separable_conv2d'],
                     num_outputs(separable_conv2d))

    concat = decorator.concat(axis=1, values=[1, None, 2])
    self.assertEqual(concat['args'][0], [1, 2])
    self.assertEqual(concat['kwargs']['axis'], 1)
    with self.assertRaises(ValueError):
      _ = decorator.concat(inputs=[1, None, 2])

    add_n = decorator.add_n(name='add_n', inputs=[1, None, 2])
    self.assertEqual(add_n['args'][0], [1, 2]) 
Example #24
Source File: configurable_ops.py    From morph-net with Apache License 2.0
def constructed_ops(self):
    """Returns a dictionary between op names built to their NUM_OUTPUTS.

       The dictionary will contain an op.name: NUM_OUTPUTS pair for each op
       constructed by the decorator. The dictionary is ordered according to the
       order items were added.
       The parameterization is accumulated during all the calls to the object's
       members, such as `conv2d`, `fully_connected` and `separable_conv2d`.
       The values used are either the values from the parameterization set for
       the object, or the values that where passed to the members.
    """
    return self._constructed_ops 
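A sketch of inspecting the accumulated mapping after a network is built (compare the assertions in Example #4; `decorator` is assumed to be a ConfigurableOps instance):

for op_name, num_outputs in decorator.constructed_ops.items():
  print('%s -> %d output channels' % (op_name, num_outputs))
# e.g. Branch0/Conv_1x1/Conv2D -> 13 where the parameterization overrode the call.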
Example #25
Source File: configurable_ops.py    From morph-net with Apache License 2.0
def is_vanished(maybe_tensor):
  """Checks if the argument represents a real tensor or None/vanished sentinel.

  For example:
    `is_vanished(ConfigurableOps({'conv1': 0}).conv2d(...,scope='conv1'))`
  returns True, since 0 channels in a conv2d produces vanished output.

  Args:
    maybe_tensor: A tensor or None or the vanished sentinel.

  Returns:
    A boolean: True if maybe_tensor is None or the vanished sentinel, i.e. it
    is not a real tensor.
  """
  return maybe_tensor == VANISHED or maybe_tensor is None 
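A usage sketch combining is_vanished with ConfigurableOps (compare testTowerVanishes in Example #18; the scope name and channel counts are hypothetical): parameterizing an op down to zero channels makes its output vanish, which downstream code can test for:

decorator = ops.ConfigurableOps(parameterization={'conv1/Conv2D': 0})
net = decorator.conv2d(inputs, num_outputs=16, kernel_size=3, scope='conv1')
assert ops.is_vanished(net)  # zero output channels yields the vanished sentinel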
Example #26
Source File: cost_calculator_test.py    From morph-net with Apache License 2.0
def test_get_input_activation2(self, rank, fn, op_name):
    g = tf.get_default_graph()
    inputs = tf.zeros([6] * rank)
    with arg_scope([
        layers.conv2d, layers.conv2d_transpose, layers.separable_conv2d,
        layers.conv3d
    ],
                   scope='test_layer'):
      _ = fn(inputs)
    for op in g.get_operations():
      print(op.name)
    self.assertEqual(
        inputs,
        cc.get_input_activation(
            g.get_operation_by_name('test_layer/' + op_name))) 
Example #27
Source File: cost_calculator_test.py    From morph-net with Apache License 2.0
def _batch_norm_scope(self):
    params = {
        'trainable': True,
        'normalizer_fn': layers.batch_norm,
        'normalizer_params': {
            'scale': True
        }
    }

    with arg_scope([layers.conv2d], **params) as sc:
      return sc 
Example #28
Source File: predictor.py    From LipReading with MIT License
def resBlock(x, num_outputs, kernel_size=4, stride=1, activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm, scope=None):
    assert num_outputs % 2 == 0  # num_outputs must be divisible by the channel factor (2 here)
    with tf.variable_scope(scope, 'resBlock'):
        shortcut = x
        if stride != 1 or x.get_shape()[3] != num_outputs:
            shortcut = tcl.conv2d(shortcut, num_outputs, kernel_size=1, stride=stride,
                        activation_fn=None, normalizer_fn=None, scope='shortcut')
        x = tcl.conv2d(x, num_outputs // 2, kernel_size=1, stride=1, padding='SAME')
        x = tcl.conv2d(x, num_outputs // 2, kernel_size=kernel_size, stride=stride, padding='SAME')
        x = tcl.conv2d(x, num_outputs, kernel_size=1, stride=1, activation_fn=None, padding='SAME', normalizer_fn=None)

        x += shortcut       
        x = normalizer_fn(x)
        x = activation_fn(x)
    return x 
Example #29
Source File: overfeat.py    From auto-alt-text-lambda-api with MIT License
def overfeat_arg_scope(weight_decay=0.0005):
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      biases_initializer=init_ops.zeros_initializer()):
    with arg_scope([layers.conv2d], padding='SAME'):
      with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:
        return arg_sc 
Example #30
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testGroupingConcat(self):
    with arg_scope(self._batch_norm_scope()):
      inputs = tf.zeros([2, 4, 4, 3])
      c1 = layers.conv2d(inputs, num_outputs=5, kernel_size=3, scope='conv1')
      c2 = layers.conv2d(inputs, num_outputs=5, kernel_size=3, scope='conv2')
      concat = tf.concat([c1, c2], axis=2)

    manager = orm.OpRegularizerManager([concat.op],
                                       self._default_op_handler_dict)

    # Fetch OpSlice to verify grouping.
    inputs_op_slice = manager.get_op_slices(inputs.op)[0]
    c1_op_slice = manager.get_op_slices(c1.op)[0]
    c2_op_slice = manager.get_op_slices(c2.op)[0]
    concat_op_slice = manager.get_op_slices(concat.op)[0]

    # Verify inputs and c1 have different group.
    self.assertNotEqual(
        manager.get_op_group(inputs_op_slice),
        manager.get_op_group(c1_op_slice))

    # Verify inputs and c2 have different group.
    self.assertNotEqual(
        manager.get_op_group(inputs_op_slice),
        manager.get_op_group(c2_op_slice))

    # Verify c1, c2, and concat have the same group.
    self.assertEqual(
        manager.get_op_group(c1_op_slice), manager.get_op_group(c2_op_slice))
    self.assertEqual(
        manager.get_op_group(c1_op_slice),
        manager.get_op_group(concat_op_slice))