Python tensorflow.contrib.slim.nets.resnet_v1.resnet_v1() Examples

The following are 13 code examples of tensorflow.contrib.slim.nets.resnet_v1.resnet_v1(), drawn from open-source projects; the source file, project, and license are noted above each example. You may also want to check out all available functions and classes of the module tensorflow.contrib.slim.nets.resnet_v1.
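Before diving into the examples, here is a minimal, hedged sketch of calling the generic resnet_v1() builder directly. It assumes a TensorFlow 1.x environment (tf.contrib was removed in TensorFlow 2.x); the block depths, class count, and scope name are illustrative, and each unit tuple is (depth, depth_bottleneck, stride), matching the test examples below.

import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow.contrib.slim.nets import resnet_utils, resnet_v1

# Two small bottleneck blocks; the last unit of block1 downsamples (stride 2).
blocks = [
    resnet_utils.Block('block1', resnet_v1.bottleneck,
                       [(64, 16, 1)] * 2 + [(64, 16, 2)]),
    resnet_utils.Block('block2', resnet_v1.bottleneck,
                       [(128, 32, 1)] * 2),
]

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
    net, end_points = resnet_v1.resnet_v1(images, blocks, num_classes=10,
                                          global_pool=True, scope='demo')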
Example #1
Source File: resnet_v1_test.py    From deep_image_model with Apache License 2.0
def _resnet_small(self,
                    inputs,
                    num_classes=None,
                    global_pool=True,
                    output_stride=None,
                    include_root_block=True,
                    reuse=None,
                    scope='resnet_v1_small'):
    """A shallow and thin ResNet v1 for faster tests."""
    bottleneck = resnet_v1.bottleneck
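    # Each unit tuple below is (depth, depth_bottleneck, stride); the last
    # unit of block1-block3 downsamples with stride 2, block4 keeps stride 1.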
    blocks = [
        resnet_utils.Block(
            'block1', bottleneck, [(4, 1, 1)] * 2 + [(4, 1, 2)]),
        resnet_utils.Block(
            'block2', bottleneck, [(8, 2, 1)] * 2 + [(8, 2, 2)]),
        resnet_utils.Block(
            'block3', bottleneck, [(16, 4, 1)] * 2 + [(16, 4, 2)]),
        resnet_utils.Block(
            'block4', bottleneck, [(32, 8, 1)] * 2)]
    return resnet_v1.resnet_v1(inputs, blocks, num_classes, global_pool,
                               output_stride, include_root_block, reuse, scope) 
Example #2
Source File: resnet_v1_test.py    From deep_image_model with Apache License 2.0
def testEndPointsV1(self):
    """Test the end points of a tiny v1 bottleneck network."""
    bottleneck = resnet_v1.bottleneck
    blocks = [resnet_utils.Block('block1', bottleneck, [(4, 1, 1), (4, 1, 2)]),
              resnet_utils.Block('block2', bottleneck, [(8, 2, 1), (8, 2, 1)])]
    inputs = create_test_input(2, 32, 16, 3)
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      _, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
    expected = [
        'tiny/block1/unit_1/bottleneck_v1/shortcut',
        'tiny/block1/unit_1/bottleneck_v1/conv1',
        'tiny/block1/unit_1/bottleneck_v1/conv2',
        'tiny/block1/unit_1/bottleneck_v1/conv3',
        'tiny/block1/unit_2/bottleneck_v1/conv1',
        'tiny/block1/unit_2/bottleneck_v1/conv2',
        'tiny/block1/unit_2/bottleneck_v1/conv3',
        'tiny/block2/unit_1/bottleneck_v1/shortcut',
        'tiny/block2/unit_1/bottleneck_v1/conv1',
        'tiny/block2/unit_1/bottleneck_v1/conv2',
        'tiny/block2/unit_1/bottleneck_v1/conv3',
        'tiny/block2/unit_2/bottleneck_v1/conv1',
        'tiny/block2/unit_2/bottleneck_v1/conv2',
        'tiny/block2/unit_2/bottleneck_v1/conv3']
    self.assertItemsEqual(expected, end_points) 
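Because end_points is an ordinary dict keyed by these names, any intermediate activation can be fetched directly (illustrative):

conv2 = end_points['tiny/block1/unit_1/bottleneck_v1/conv2']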
Example #3
Source File: resnet.py    From R2CNN_Faster-RCNN_Tensorflow with MIT License (the same function appears verbatim in resnet.py of R2CNN-Plus-Plus_Tensorflow)
def restnet_head(input, is_training, scope_name):
    block4 = [resnet_v1_block('block4', base_depth=512, num_units=3, stride=1)]

    with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
        C5, _ = resnet_v1.resnet_v1(input,
                                    block4,
                                    global_pool=False,
                                    include_root_block=False,
                                    scope=scope_name)
        # C5 = tf.Print(C5, [tf.shape(C5)], summarize=10, message='C5_shape')
        C5_flatten = tf.reduce_mean(C5, axis=[1, 2], keep_dims=False, name='global_average_pooling')
        # C5_flatten = tf.Print(C5_flatten, [tf.shape(C5_flatten)], summarize=10, message='C5_flatten_shape')

    # global average pooling C5 to obtain fc layers
    return C5_flatten 
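A hypothetical call site for restnet_head(), assuming ROI-pooled features of shape [num_rois, 7, 7, 1024] as in a typical Faster R-CNN pipeline; the shapes and variable names below are assumptions, not code from the repo.

# block4 has base_depth=512, so its bottleneck units output 4 * 512 = 2048
# channels; after the global average pooling inside restnet_head, the result
# has shape [num_rois, 2048].
roi_features = tf.placeholder(tf.float32, [None, 7, 7, 1024])
fc_features = restnet_head(roi_features, is_training=True,
                           scope_name='resnet_v1_101')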
Example #4
Source File: flop_regularizer_test.py    From multilabel-image-classification-tensorflow with MIT License (also appears verbatim in g-tensorflow-models under Apache License 2.0)
def getOp(self, short_name):
    if short_name == 'FC':
      return tf.get_default_graph().get_operation_by_name('FC/MatMul')
    tokens = short_name.split('/')
    name = ('resnet_v1/block1/' + tokens[0] + '/bottleneck_v1/' + tokens[1] +
            '/Conv2D')
    return tf.get_default_graph().get_operation_by_name(name) 
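For example, self.getOp('unit_1/conv1') returns the graph op named 'resnet_v1/block1/unit_1/bottleneck_v1/conv1/Conv2D', while self.getOp('FC') returns 'FC/MatMul'.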
Example #5
Source File: flop_regularizer_test.py    From multilabel-image-classification-tensorflow with MIT License (also appears verbatim in g-tensorflow-models under Apache License 2.0)
def getGamma(self, short_name):
    tokens = short_name.split('/')
    name = ('resnet_v1/block1/' + tokens[0] + '/bottleneck_v1/' + tokens[1] +
            '/BatchNorm/gamma')
    return self._gammas[name] 
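Similarly, self.getGamma('unit_1/conv1') looks up the cached batch-norm scale variable 'resnet_v1/block1/unit_1/bottleneck_v1/conv1/BatchNorm/gamma'.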
Example #6
Source File: gamma_mapper_test.py    From multilabel-image-classification-tensorflow with MIT License (also appears verbatim in g-tensorflow-models under Apache License 2.0)
def testSuccessResnetV1(self):
    build_resnet(resnet_v1.resnet_v1_block, resnet_v1.resnet_v1)
    mapper = gamma_mapper.ConvGammaMapperByConnectivity()
    # Here the mapping between convolutions and batch-norms is simple one to
    # one.
    for block in (1, 2):
      self.assertGammaMatchesConv(
          mapper, 'resnet_v1/block%d/unit_1/bottleneck_v1/shortcut' % block)

      for unit in (1, 2):
        for conv in (1, 2, 3):
          self.assertGammaMatchesConv(
              mapper, 'resnet_v1/block%d/unit_%d/bottleneck_v1/conv%d' %
              (block, unit, conv)) 
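Note that only unit_1 of each block has a shortcut convolution: it is the unit where the depth (and possibly the stride) changes, while the remaining units use identity shortcuts that need no convolution or batch norm.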
Example #7
Source File: resnet_v1_test.py    From deep_image_model with Apache License 2.0
def testAtrousValuesBottleneck(self):
    self._atrousValues(resnet_v1.bottleneck) 
Example #8
Source File: resnet_v1.py    From RGB-N with MIT License (identical definitions appear in resnet_fusion.py, resnet_v1_noise.py, and resnet_fusion_noise.py in the same project)
def resnet_arg_scope(is_training=True,
                     weight_decay=cfg.TRAIN.WEIGHT_DECAY,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
  batch_norm_params = {
    # NOTE 'is_training' here does not work because inside resnet it gets reset:
    # https://github.com/tensorflow/models/blob/master/slim/nets/resnet_v1.py#L187
    'is_training': False,
    'decay': batch_norm_decay,
    'epsilon': batch_norm_epsilon,
    'scale': batch_norm_scale,
    'trainable': cfg.RESNET.BN_TRAIN,
    'updates_collections': ops.GraphKeys.UPDATE_OPS
  }

  with arg_scope(
      [slim.conv2d],
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      weights_initializer=initializers.variance_scaling_initializer(),
      trainable=is_training,
      activation_fn=nn_ops.relu,
      normalizer_fn=layers.batch_norm,
      normalizer_params=batch_norm_params):
    with arg_scope([layers.batch_norm], **batch_norm_params) as arg_sc:
      return arg_sc 
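The design choice here is to freeze the batch-norm moving statistics ('is_training': False) while leaving the affine parameters optionally trainable via cfg.RESNET.BN_TRAIN; this is a common recipe when fine-tuning a pretrained ResNet with small batch sizes. A hedged usage sketch (the builder call and image tensor are assumptions, not code from the repo):

image = tf.placeholder(tf.float32, [1, None, None, 3])
with slim.arg_scope(resnet_arg_scope(is_training=True)):
    # Weights are trainable, but batch norm always runs in inference mode.
    net, _ = resnet_v1.resnet_v1_101(image, num_classes=None, global_pool=False)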
Example #9
Source File: flop_regularizer_test.py    From morph-net with Apache License 2.0
def GetOp(self, short_name):
    if short_name == 'FC':
      return tf.get_default_graph().get_operation_by_name('FC/MatMul')
    tokens = short_name.split('/')
    name = ('resnet_v1/block1/' + tokens[0] + '/bottleneck_v1/' + tokens[1] +
            '/Conv2D')
    return tf.get_default_graph().get_operation_by_name(name) 
Example #10
Source File: flop_regularizer_test.py    From morph-net with Apache License 2.0
def GetGamma(self, short_name):
    tokens = short_name.split('/')
    name = ('resnet_v1/block1/' + tokens[0] + '/bottleneck_v1/' + tokens[1] +
            '/BatchNorm/gamma')
    return self._gammas[name] 
Example #11
Source File: flop_regularizer_test.py    From morph-net with Apache License 2.0
def testCost(self):
    self.BuildGraphWithBatchNorm(resnet_v1.resnet_v1, resnet_v1.resnet_v1_block)
    self.InitGamma()
    res_alive = np.logical_or(
        np.logical_or(
            self.GetGamma('unit_1/shortcut') > self._threshold,
            self.GetGamma('unit_1/conv3') > self._threshold),
        self.GetGamma('unit_2/conv3') > self._threshold)

    self.gamma_flop_reg = flop_regularizer.GammaFlopsRegularizer(
        [self.net.op], self._threshold)

    expected = {}
    expected['unit_1/shortcut'] = (
        self.GetCoeff('unit_1/shortcut') * np.sum(res_alive) * NUM_CHANNELS)
    expected['unit_1/conv1'] = (
        self.GetCoeff('unit_1/conv1') * self.NumAlive('unit_1/conv1') *
        NUM_CHANNELS)
    expected['unit_1/conv2'] = (
        self.GetCoeff('unit_1/conv2') * self.NumAlive('unit_1/conv2') *
        self.NumAlive('unit_1/conv1'))
    expected['unit_1/conv3'] = (
        self.GetCoeff('unit_1/conv3') * np.sum(res_alive) *
        self.NumAlive('unit_1/conv2'))
    expected['unit_2/conv1'] = (
        self.GetCoeff('unit_2/conv1') * self.NumAlive('unit_2/conv1') *
        np.sum(res_alive))
    expected['unit_2/conv2'] = (
        self.GetCoeff('unit_2/conv2') * self.NumAlive('unit_2/conv2') *
        self.NumAlive('unit_2/conv1'))
    expected['unit_2/conv3'] = (
        self.GetCoeff('unit_2/conv3') * np.sum(res_alive) *
        self.NumAlive('unit_2/conv2'))
    expected['FC'] = 2.0 * np.sum(res_alive) * 23.0

    # TODO(e1): Is there a way to use Parametrized Tests to make this more
    # elegant?
    with self.cached_session():
      for short_name in expected:
        cost = self.gamma_flop_reg.get_cost([self.GetOp(short_name)]).eval()
        self.assertEqual(expected[short_name], cost)

      self.assertEqual(
          sum(expected.values()),
          self.gamma_flop_reg.get_cost().eval()) 
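In words, the rule checked above is cost(conv) = coeff(conv) x alive output channels x alive input channels, where the coefficient encodes kernel size and output spatial resolution. The shortcut and each unit's conv3 feed the same residual sum, so they share the single alive count computed in res_alive rather than having independent ones.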
Example #12
Source File: flop_regularizer_test.py    From g-tensorflow-models with Apache License 2.0 (the same test appears verbatim in multilabel-image-classification-tensorflow)
def testCost(self):
    self.buildGraphWithBatchNorm(resnet_v1.resnet_v1, resnet_v1.resnet_v1_block)
    self.initGamma()
    res_alive = np.logical_or(
        np.logical_or(
            self.getGamma('unit_1/shortcut') > self._threshold,
            self.getGamma('unit_1/conv3') > self._threshold),
        self.getGamma('unit_2/conv3') > self._threshold)

    self.gamma_flop_reg = flop_regularizer.GammaFlopsRegularizer(
        [self.net.op], self._threshold)

    expected = {}
    expected['unit_1/shortcut'] = (
        self.getCoeff('unit_1/shortcut') * np.sum(res_alive) * NUM_CHANNELS)
    expected['unit_1/conv1'] = (
        self.getCoeff('unit_1/conv1') * self.numAlive('unit_1/conv1') *
        NUM_CHANNELS)
    expected['unit_1/conv2'] = (
        self.getCoeff('unit_1/conv2') * self.numAlive('unit_1/conv2') *
        self.numAlive('unit_1/conv1'))
    expected['unit_1/conv3'] = (
        self.getCoeff('unit_1/conv3') * np.sum(res_alive) *
        self.numAlive('unit_1/conv2'))
    expected['unit_2/conv1'] = (
        self.getCoeff('unit_2/conv1') * self.numAlive('unit_2/conv1') *
        np.sum(res_alive))
    expected['unit_2/conv2'] = (
        self.getCoeff('unit_2/conv2') * self.numAlive('unit_2/conv2') *
        self.numAlive('unit_2/conv1'))
    expected['unit_2/conv3'] = (
        self.getCoeff('unit_2/conv3') * np.sum(res_alive) *
        self.numAlive('unit_2/conv2'))
    expected['FC'] = 2.0 * np.sum(res_alive) * 23.0

    # TODO: Is there a way to use Parametrized Tests to make this more
    # elegant?
    with self.test_session():
      for short_name in expected:
        cost = self.gamma_flop_reg.get_cost([self.getOp(short_name)]).eval()
        self.assertEqual(expected[short_name], cost)

      self.assertEqual(
          sum(expected.values()),
          self.gamma_flop_reg.get_cost().eval()) 
Example #13
Source File: resnet.py    From R2CNN_Faster-RCNN_Tensorflow with MIT License
def resnet_base(img_batch, scope_name, is_training=True):
    '''
    This code is derived from Light-Head R-CNN:
    https://github.com/zengarden/light_head_rcnn

    It makes it convenient to freeze blocks, so we adopt this style.
    '''
    if scope_name == 'resnet_v1_50':
        middle_num_units = 6
    elif scope_name == 'resnet_v1_101':
        middle_num_units = 23
    else:
        raise NotImplementedError('We only support resnet_v1_50 or resnet_v1_101. Check your network name.')

    blocks = [resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
              resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
              # Use stride 1 for the last conv4 (block3) stage; when using FPN,
              # the stride list is [1, 2, 2] instead.
              resnet_v1_block('block3', base_depth=256, num_units=middle_num_units, stride=1)]

    with slim.arg_scope(resnet_arg_scope(is_training=False)):
        with tf.variable_scope(scope_name, scope_name):
            # Do the first few layers manually, because 'SAME' padding can behave inconsistently
            # for images of different sizes: sometimes 0, sometimes 1
            net = resnet_utils.conv2d_same(
                img_batch, 64, 7, stride=2, scope='conv1')
            net = tf.pad(net, [[0, 0], [1, 1], [1, 1], [0, 0]])
            net = slim.max_pool2d(
                net, [3, 3], stride=2, padding='VALID', scope='pool1')

    not_freezed = [False] * cfgs.FIXED_BLOCKS + (4-cfgs.FIXED_BLOCKS)*[True]
    # Fixed_Blocks can be 1~3

    with slim.arg_scope(resnet_arg_scope(is_training=(is_training and not_freezed[0]))):
        C2, _ = resnet_v1.resnet_v1(net,
                                    blocks[0:1],
                                    global_pool=False,
                                    include_root_block=False,
                                    scope=scope_name)

    # C2 = tf.Print(C2, [tf.shape(C2)], summarize=10, message='C2_shape')

    with slim.arg_scope(resnet_arg_scope(is_training=(is_training and not_freezed[1]))):
        C3, _ = resnet_v1.resnet_v1(C2,
                                    blocks[1:2],
                                    global_pool=False,
                                    include_root_block=False,
                                    scope=scope_name)

    # C3 = tf.Print(C3, [tf.shape(C3)], summarize=10, message='C3_shape')

    with slim.arg_scope(resnet_arg_scope(is_training=(is_training and not_freezed[2]))):
        C4, _ = resnet_v1.resnet_v1(C3,
                                    blocks[2:3],
                                    global_pool=False,
                                    include_root_block=False,
                                    scope=scope_name)

    # C4 = tf.Print(C4, [tf.shape(C4)], summarize=10, message='C4_shape')
    return C4 
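For example, with cfgs.FIXED_BLOCKS = 1 the list is [False, True, True, True]: block1 (C2) is then built with is_training=False, freezing its weights and batch norm under this resnet_arg_scope, while block2 and block3 remain trainable. The stem conv1/pool1 layers are always built with is_training=False.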