Python nets.nasnet.nasnet Examples

The following are code examples of the module nets.nasnet.nasnet. You can go to the original project or source file by following the links above each example, check out all other available functions/classes of the module nets.nasnet.nasnet, or try the search function. Because the indexed projects all vendor the same faster_rcnn_nas_feature_extractor.py from the TensorFlow Object Detection API, identical copies of a snippet are grouped under a single example, with the other projects that carry it listed in its source line.
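
All of the snippets below assume a TF 1.x / TF-Slim preamble. A minimal sketch of the imports they rely on follows; the bindings use the TF 1.x contrib convention, and the exact import paths vary by repo, so treat them as assumptions:

import tensorflow as tf
from nets.nasnet import nasnet

# TF 1.x-style bindings for slim and arg_scope (repo-dependent; assumed here).
slim = tf.contrib.slim
arg_scope = tf.contrib.framework.arg_scope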
Example #1
Source File: faster_rcnn_nas_feature_extractor.py    From g-tensorflow-models with Apache License 2.0 (the identical function also appears in object_detection_with_tensorflow, Elphas, Live-feed-object-device-identification-using-Tensorflow-and-OpenCV, vehicle_counting_tensorflow, BMW-TensorFlow-Training-GUI, MAX-Object-Detector, ros_tensorflow, Gun-Detector, Traffic-Rule-Violation-Detection-System, yolo_v2, models, cartoonify, Person-Detection-and-Tracking, multilabel-image-classification-tensorflow, and ros_people_object_detection_tensorflow)
def nasnet_large_arg_scope_for_detection(is_batch_norm_training=False):
  """Defines the default arg scope for the NASNet-A Large for object detection.

  This provides a small edit to switch batch norm training on and off.

  Args:
    is_batch_norm_training: Boolean indicating whether to train with batch norm.

  Returns:
    An `arg_scope` to use for the NASNet Large Model.
  """
  imagenet_scope = nasnet.nasnet_large_arg_scope()
  with arg_scope(imagenet_scope):
    with arg_scope([slim.batch_norm], is_training=is_batch_norm_training) as sc:
      return sc


# Note: This is largely a copy of _build_nasnet_base inside nasnet.py but
# with special edits to remove instantiation of the stem and the special
# ability to receive as input a pair of hidden states. 
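
The scope returned by nasnet_large_arg_scope_for_detection is intended to wrap graph construction so that every slim.batch_norm in the network picks up the is_training override. A minimal, hedged usage sketch (331x331 is NASNet-A Large's ImageNet input size; the inference settings here are assumptions):

# Build NASNet-A Large for inference with batch norm frozen.
images = tf.placeholder(tf.float32, [1, 331, 331, 3])
detection_scope = nasnet_large_arg_scope_for_detection(is_batch_norm_training=False)
with slim.arg_scope(detection_scope):
  logits, end_points = nasnet.build_nasnet_large(
      images, num_classes=1001, is_training=False)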
Example #2
Source File: faster_rcnn_nas_feature_extractor.py    From BMW-TensorFlow-Training-GUI with Apache License 2.0 (the identical function also appears in g-tensorflow-models, MAX-Object-Detector, models, multilabel-image-classification-tensorflow, Live-feed-object-device-identification-using-Tensorflow-and-OpenCV, ros_tensorflow, and Gun-Detector)
def _extract_proposal_features(self, preprocessed_inputs, scope):
    """Extracts first stage RPN features.

    Extracts features using the first half of the NASNet network.
    We construct the network in `align_feature_maps=True` mode, which means
    that all VALID paddings in the network are changed to SAME padding so that
    the feature maps are aligned.

    Args:
      preprocessed_inputs: A [batch, height, width, channels] float32 tensor
        representing a batch of images.
      scope: A scope name.

    Returns:
      rpn_feature_map: A tensor with shape [batch, height, width, depth]
      end_points: A dictionary mapping feature extractor tensor names to tensors

    Raises:
      ValueError: If the created network is missing the required activation.
    """
    del scope

    if len(preprocessed_inputs.get_shape().as_list()) != 4:
      raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a '
                       'tensor of shape %s' % preprocessed_inputs.get_shape())

    with slim.arg_scope(nasnet_large_arg_scope_for_detection(
        is_batch_norm_training=self._train_batch_norm)):
      with arg_scope([slim.conv2d,
                      slim.batch_norm,
                      slim.separable_conv2d],
                     reuse=self._reuse_weights):
        _, end_points = nasnet.build_nasnet_large(
            preprocessed_inputs, num_classes=None,
            is_training=self._is_training,
            final_endpoint='Cell_11')

    # Note that both 'Cell_10' and 'Cell_11' have equal depth = 2016.
    rpn_feature_map = tf.concat([end_points['Cell_10'],
                                 end_points['Cell_11']], 3)

    # nasnet.py does not maintain the batch size in the first dimension.
    # This workaround lets us restore the static batch dimension for use below.
    batch = preprocessed_inputs.get_shape().as_list()[0]
    shape_without_batch = rpn_feature_map.get_shape().as_list()[1:]
    rpn_feature_map_shape = [batch] + shape_without_batch
    rpn_feature_map.set_shape(rpn_feature_map_shape)

    return rpn_feature_map, end_points 
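
The set_shape step at the end deserves a closer look: nasnet.py leaves the leading dimension of its outputs unknown, so the static batch size is read back from the input tensor and merged into the feature map's shape. A toy sketch of that merge, with hypothetical stand-in shapes (4032 is the 2016 + 2016 depth of the two concatenated cells):

# Stand-in for the NASNet output, whose static batch dimension is unknown.
features = tf.placeholder(tf.float32, [None, 14, 14, 4032])
# Stand-in for the preprocessed inputs, whose batch size is statically known.
inputs = tf.placeholder(tf.float32, [8, 331, 331, 3])

batch = inputs.get_shape().as_list()[0]          # 8
features.set_shape([batch] + features.get_shape().as_list()[1:])
print(features.get_shape())                      # (8, 14, 14, 4032)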
Example #3
Source File: faster_rcnn_nas_feature_extractor.py    From object_detection_with_tensorflow with MIT License (the identical function also appears in Elphas, Traffic-Rule-Violation-Detection-System, and yolo_v2)
def _extract_proposal_features(self, preprocessed_inputs, scope):
    """Extracts first stage RPN features.

    Extracts features using the first half of the NASNet network.
    We construct the network in `align_feature_maps=True` mode, which means
    that all VALID paddings in the network are changed to SAME padding so that
    the feature maps are aligned.

    Args:
      preprocessed_inputs: A [batch, height, width, channels] float32 tensor
        representing a batch of images.
      scope: A scope name.

    Returns:
      rpn_feature_map: A tensor with shape [batch, height, width, depth]

    Raises:
      ValueError: If the created network is missing the required activation.
    """
    del scope

    if len(preprocessed_inputs.get_shape().as_list()) != 4:
      raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a '
                       'tensor of shape %s' % preprocessed_inputs.get_shape())

    with slim.arg_scope(nasnet_large_arg_scope_for_detection(
        is_batch_norm_training=self._train_batch_norm)):
      _, end_points = nasnet.build_nasnet_large(
          preprocessed_inputs, num_classes=None,
          is_training=self._is_training,
          final_endpoint='Cell_11')

    # Note that both 'Cell_10' and 'Cell_11' have equal depth = 2016.
    rpn_feature_map = tf.concat([end_points['Cell_10'],
                                 end_points['Cell_11']], 3)

    # nasnet.py does not maintain the batch size in the first dimension.
    # This workaround lets us restore the static batch dimension for use below.
    batch = preprocessed_inputs.get_shape().as_list()[0]
    shape_without_batch = rpn_feature_map.get_shape().as_list()[1:]
    rpn_feature_map_shape = [batch] + shape_without_batch
    rpn_feature_map.set_shape(rpn_feature_map_shape)

    return rpn_feature_map 
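
The same truncated backbone can also be built outside the extractor class; a hedged sketch that mirrors the body above (the input size is hypothetical, and both cells have depth 2016, so the concatenated map has depth 4032):

images = tf.placeholder(tf.float32, [1, 600, 600, 3])
with slim.arg_scope(nasnet_large_arg_scope_for_detection(
    is_batch_norm_training=False)):
  _, end_points = nasnet.build_nasnet_large(
      images, num_classes=None, is_training=False,
      final_endpoint='Cell_11')

# Concatenate the two deepest cells along the channel axis, as above.
rpn_feature_map = tf.concat([end_points['Cell_10'], end_points['Cell_11']], 3)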