Python object_detection.utils.shape_utils.combined_static_and_dynamic_shape() Examples

The following are 17 code examples of object_detection.utils.shape_utils.combined_static_and_dynamic_shape(), drawn from open-source projects built on the TensorFlow Object Detection API. The source project and file are noted above each example. You may also want to check out the other available functions and classes of the object_detection.utils.shape_utils module.
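For reference, the utility walks a tensor's static shape and substitutes the matching slice of tf.shape() wherever a dimension is unknown. A sketch of that behavior, closely following the Object Detection API implementation:

import tensorflow as tf

def combined_static_and_dynamic_shape(tensor):
  """Returns a list of the tensor's dimensions, preferring static values.

  Each entry is a Python int where the dimension is statically known, and a
  scalar int32 Tensor (a slice of tf.shape(tensor)) where it is not.
  """
  static_tensor_shape = tensor.shape.as_list()
  dynamic_tensor_shape = tf.shape(tensor)
  combined_shape = []
  for index, dim in enumerate(static_tensor_shape):
    combined_shape.append(dim if dim is not None
                          else dynamic_tensor_shape[index])
  return combined_shape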
Example #1
Source File: faster_rcnn_meta_arch.py    From Traffic-Rule-Violation-Detection-System with MIT License
def _flatten_first_two_dimensions(self, inputs):
    """Flattens `K-d` tensor along batch dimension to be a `(K-1)-d` tensor.

    Converts `inputs` with shape [A, B, ..., depth] into a tensor of shape
    [A * B, ..., depth].

    Args:
      inputs: A float tensor with shape [A, B, ..., depth].  Note that the first
        two and last dimensions must be statically defined.
    Returns:
      A float tensor with shape [A * B, ..., depth] (where the first and last
        dimensions are statically defined).
    """
    combined_shape = shape_utils.combined_static_and_dynamic_shape(inputs)
    flattened_shape = tf.stack([combined_shape[0] * combined_shape[1]] +
                               combined_shape[2:])
    return tf.reshape(inputs, flattened_shape) 
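For context, a minimal standalone sketch of the same pattern (TF1 style; the free-function name flatten_first_two_dimensions is hypothetical):

import tensorflow as tf
from object_detection.utils import shape_utils

def flatten_first_two_dimensions(inputs):
  # Merge the first two dimensions, keeping static sizes where available.
  combined_shape = shape_utils.combined_static_and_dynamic_shape(inputs)
  flattened_shape = tf.stack([combined_shape[0] * combined_shape[1]] +
                             combined_shape[2:])
  return tf.reshape(inputs, flattened_shape)

# [batch, num_boxes, height, width, depth] -> [batch * num_boxes, ...]
features = tf.placeholder(tf.float32, shape=[2, 8, None, None, 64])
flattened = flatten_first_two_dimensions(features)  # static shape [16, ?, ?, 64]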
Example #2
Source File: ops.py    From vehicle_counting_tensorflow with MIT License
def matmul_gather_on_zeroth_axis(params, indices, scope=None):
  """Matrix multiplication based implementation of tf.gather on zeroth axis.

  TODO(rathodv, jonathanhuang): enable sparse matmul option.

  Args:
    params: A float32 Tensor. The tensor from which to gather values.
      Must be at least rank 1.
    indices: A Tensor. Must be one of the following types: int32, int64.
      Must be in range [0, params.shape[0])
    scope: A name for the operation (optional).

  Returns:
    A Tensor. Has the same type as params. Values from params gathered
    from indices given by indices, with shape indices.shape + params.shape[1:].
  """
  with tf.name_scope(scope, 'MatMulGather'):
    params_shape = shape_utils.combined_static_and_dynamic_shape(params)
    indices_shape = shape_utils.combined_static_and_dynamic_shape(indices)
    params2d = tf.reshape(params, [params_shape[0], -1])
    indicator_matrix = tf.one_hot(indices, params_shape[0])
    gathered_result_flattened = tf.matmul(indicator_matrix, params2d)
    return tf.reshape(gathered_result_flattened,
                      tf.stack(indices_shape + params_shape[1:])) 
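As a quick sanity check, the matmul-based gather should agree with tf.gather. A sketch in TF1 style, assuming the function above is in scope:

import numpy as np
import tensorflow as tf

params = tf.constant(np.arange(12., dtype=np.float32).reshape(4, 3))
indices = tf.constant([2, 0, 2], dtype=tf.int32)
matmul_result = matmul_gather_on_zeroth_axis(params, indices)
reference = tf.gather(params, indices)
with tf.Session() as sess:
  a, b = sess.run([matmul_result, reference])
np.testing.assert_allclose(a, b)  # rows 2, 0, 2 of params, in that order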
Example #3
Source File: ops.py    From BMW-TensorFlow-Inference-API-CPU with Apache License 2.0
def nearest_neighbor_upsampling(input_tensor, scale):
  """Nearest neighbor upsampling implementation.

  Nearest neighbor upsampling function that maps input tensor with shape
  [batch_size, height, width, channels] to [batch_size, height * scale,
  width * scale, channels]. This implementation only uses reshape and
  broadcasting to make it TPU compatible.

  Args:
    input_tensor: A float32 tensor of size [batch, height_in, width_in,
      channels].
    scale: An integer multiple to scale resolution of input data.
  Returns:
    data_up: A float32 tensor of size
      [batch, height_in*scale, width_in*scale, channels].
  """
  with tf.name_scope('nearest_neighbor_upsampling'):
    (batch_size, height, width,
     channels) = shape_utils.combined_static_and_dynamic_shape(input_tensor)
    output_tensor = tf.reshape(
        input_tensor, [batch_size, height, 1, width, 1, channels]) * tf.ones(
            [1, 1, scale, 1, scale, 1], dtype=input_tensor.dtype)
    return tf.reshape(output_tensor,
                      [batch_size, height * scale, width * scale, channels]) 
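A minimal usage sketch (TF1 style): with scale=2, every input pixel becomes a 2x2 block in the output.

import tensorflow as tf

image = tf.constant([[[[1.], [2.]],
                      [[3.], [4.]]]])                   # shape [1, 2, 2, 1]
upsampled = nearest_neighbor_upsampling(image, scale=2)  # shape [1, 4, 4, 1]
with tf.Session() as sess:
  print(sess.run(upsampled)[0, :, :, 0])
  # [[1. 1. 2. 2.]
  #  [1. 1. 2. 2.]
  #  [3. 3. 4. 4.]
  #  [3. 3. 4. 4.]]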
Example #4
Source File: ops.py    From ros_people_object_detection_tensorflow with Apache License 2.0
def nearest_neighbor_upsampling(input_tensor, scale):
  """Nearest neighbor upsampling implementation.

  Nearest neighbor upsampling function that maps input tensor with shape
  [batch_size, height, width, channels] to [batch_size, height * scale,
  width * scale, channels]. This implementation only uses reshape and tile to
  make it compatible with certain hardware.

  Args:
    input_tensor: A float32 tensor of size [batch, height_in, width_in,
      channels].
    scale: An integer multiple to scale resolution of input data.
  Returns:
    data_up: A float32 tensor of size
      [batch, height_in*scale, width_in*scale, channels].
  """
  shape = shape_utils.combined_static_and_dynamic_shape(input_tensor)
  shape_before_tile = [shape[0], shape[1], 1, shape[2], 1, shape[3]]
  shape_after_tile = [shape[0], shape[1] * scale, shape[2] * scale, shape[3]]
  data_reshaped = tf.reshape(input_tensor, shape_before_tile)
  resized_tensor = tf.tile(data_reshaped, [1, 1, scale, 1, scale, 1])
  resized_tensor = tf.reshape(resized_tensor, shape_after_tile)
  return resized_tensor 
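This tile-based variant produces the same result as the broadcast-based one in the previous example; the broadcast form merely avoids materializing the intermediate through tf.tile, which is what makes it TPU friendly. A quick shape and replication check, assuming this nearest_neighbor_upsampling is in scope:

import numpy as np
import tensorflow as tf

x = tf.constant(np.random.rand(1, 3, 5, 2).astype(np.float32))
up = nearest_neighbor_upsampling(x, scale=2)
with tf.Session() as sess:
  out = sess.run(up)
assert out.shape == (1, 6, 10, 2)
# Each input pixel is replicated into a scale x scale block:
np.testing.assert_allclose(out[0, 0, 0], out[0, 1, 1])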
Example #5
Source File: ssd_meta_arch.py    From garbage-object-detection-tensorflow with MIT License
def _batch_decode(self, box_encodings):
    """Decodes a batch of box encodings with respect to the anchors.

    Args:
      box_encodings: A float32 tensor of shape
        [batch_size, num_anchors, box_code_size] containing box encodings.

    Returns:
      decoded_boxes: A float32 tensor of shape
        [batch_size, num_anchors, 4] containing the decoded boxes.
    """
    combined_shape = shape_utils.combined_static_and_dynamic_shape(
        box_encodings)
    batch_size = combined_shape[0]
    tiled_anchor_boxes = tf.tile(
        tf.expand_dims(self.anchors.get(), 0), [batch_size, 1, 1])
    tiled_anchors_boxlist = box_list.BoxList(
        tf.reshape(tiled_anchor_boxes, [-1, self._box_coder.code_size]))
    decoded_boxes = self._box_coder.decode(
        tf.reshape(box_encodings, [-1, self._box_coder.code_size]),
        tiled_anchors_boxlist)
    return tf.reshape(decoded_boxes.get(),
                      tf.stack([combined_shape[0], combined_shape[1],
                                4])) 
Example #6
Source File: box_list_ops.py    From Person-Detection-and-Tracking with MIT License
def select_random_box(boxlist,
                      default_box=None,
                      seed=None,
                      scope=None):
  """Selects a random bounding box from a `BoxList`.

  Args:
    boxlist: A BoxList.
    default_box: A [1, 4] float32 tensor. If no boxes are present in `boxlist`,
      this default box will be returned. If None, will use a default box of
      [[-1., -1., -1., -1.]].
    seed: Random seed.
    scope: Name scope.

  Returns:
    bbox: A [1, 4] tensor with a random bounding box.
    valid: A bool tensor indicating whether a valid bounding box is returned
      (True) or whether the default box is returned (False).
  """
  with tf.name_scope(scope, 'SelectRandomBox'):
    bboxes = boxlist.get()
    combined_shape = shape_utils.combined_static_and_dynamic_shape(bboxes)
    number_of_boxes = combined_shape[0]
    default_box = default_box or tf.constant([[-1., -1., -1., -1.]])

    def select_box():
      random_index = tf.random_uniform([],
                                       maxval=number_of_boxes,
                                       dtype=tf.int32,
                                       seed=seed)
      return tf.expand_dims(bboxes[random_index], axis=0), tf.constant(True)

  return tf.cond(
      tf.greater_equal(number_of_boxes, 1),
      true_fn=select_box,
      false_fn=lambda: (default_box, tf.constant(False))) 
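A usage sketch (TF1 style; BoxList comes from object_detection.core.box_list):

import tensorflow as tf
from object_detection.core import box_list

boxes = box_list.BoxList(tf.constant([[0.1, 0.1, 0.4, 0.4],
                                      [0.5, 0.5, 0.9, 0.9]]))
bbox, valid = select_random_box(boxes, seed=0)
with tf.Session() as sess:
  sampled_box, is_valid = sess.run([bbox, valid])
# sampled_box is one of the two boxes, shape [1, 4]; is_valid is True.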
Example #7
Source File: faster_rcnn_meta_arch.py    From Person-Detection-and-Tracking with MIT License
def _batch_decode_boxes(self, box_encodings, anchor_boxes):
    """Decodes box encodings with respect to the anchor boxes.

    Args:
      box_encodings: a 4-D tensor with shape
        [batch_size, num_anchors, num_classes, self._box_coder.code_size]
        representing box encodings.
      anchor_boxes: [batch_size, num_anchors, self._box_coder.code_size]
        representing decoded bounding boxes. If using a shared box across
        classes the shape will instead be
        [total_num_proposals, 1, self._box_coder.code_size].

    Returns:
      decoded_boxes: a
        [batch_size, num_anchors, num_classes, self._box_coder.code_size]
        float tensor representing bounding box predictions (for each image in
        batch, proposal and class). If using a shared box across classes the
        shape will instead be
        [batch_size, num_anchors, 1, self._box_coder.code_size].
    """
    combined_shape = shape_utils.combined_static_and_dynamic_shape(
        box_encodings)
    num_classes = combined_shape[2]
    tiled_anchor_boxes = tf.tile(
        tf.expand_dims(anchor_boxes, 2), [1, 1, num_classes, 1])
    tiled_anchors_boxlist = box_list.BoxList(
        tf.reshape(tiled_anchor_boxes, [-1, 4]))
    decoded_boxes = self._box_coder.decode(
        tf.reshape(box_encodings, [-1, self._box_coder.code_size]),
        tiled_anchors_boxlist)
    return tf.reshape(decoded_boxes.get(),
                      tf.stack([combined_shape[0], combined_shape[1],
                                num_classes, 4])) 
Example #8
Source File: shape_utils_test.py    From vehicle_counting_tensorflow with MIT License
def test_unequal_static_shape_along_first_dim_raises_exception(self):
    shape_a = tf.constant(np.zeros([4, 2, 2, 1]))
    shape_b = tf.constant(np.zeros([6, 2, 3, 1]))
    with self.assertRaisesRegexp(
        ValueError, 'Unequal first dimension'):
      shape_utils.assert_shape_equal_along_first_dimension(
          shape_utils.combined_static_and_dynamic_shape(shape_a),
          shape_utils.combined_static_and_dynamic_shape(shape_b)) 
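For intuition: because combined_static_and_dynamic_shape returns Python ints for static dimensions, the assertion helper can fail immediately at graph-construction time with a ValueError, and only falls back to a runtime assert op when a dimension is dynamic (compare the InvalidArgumentError test further below). A sketch of that behavior with a hypothetical name, not the exact library code:

import tensorflow as tf

def assert_first_dims_equal(shape_a, shape_b):
  # shape_a / shape_b are lists mixing Python ints (static dims) and scalar
  # Tensors (dynamic dims), as returned by combined_static_and_dynamic_shape.
  if isinstance(shape_a[0], int) and isinstance(shape_b[0], int):
    if shape_a[0] != shape_b[0]:
      raise ValueError('Unequal first dimension')   # graph-construction time
    return tf.no_op()
  return tf.assert_equal(shape_a[0], shape_b[0])    # run time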
Example #9
Source File: ssd_meta_arch.py    From Person-Detection-and-Tracking with MIT License
def _compute_clip_window(self, preprocessed_images, true_image_shapes):
    """Computes clip window to use during post_processing.

    Computes a new clip window to use during post-processing based on
    `resized_image_shapes` and `true_image_shapes` only if `preprocess` method
    has been called. Otherwise returns a default clip window of [0, 0, 1, 1].

    Args:
      preprocessed_images: the [batch, height, width, channels] image
          tensor.
      true_image_shapes: int32 tensor of shape [batch, 3] where each row is
        of the form [height, width, channels] indicating the shapes
        of true images in the resized images, as resized images can be padded
        with zeros. Or None if the clip window should cover the full image.

    Returns:
      a 2-D float32 tensor of the form [batch_size, 4] containing the clip
      window for each image in the batch in normalized coordinates (relative to
      the resized dimensions) where each clip window is of the form [ymin, xmin,
      ymax, xmax] or a default clip window of [0, 0, 1, 1].

    """
    if true_image_shapes is None:
      return tf.constant([0, 0, 1, 1], dtype=tf.float32)

    resized_inputs_shape = shape_utils.combined_static_and_dynamic_shape(
        preprocessed_images)
    true_heights, true_widths, _ = tf.unstack(
        tf.to_float(true_image_shapes), axis=1)
    padded_height = tf.to_float(resized_inputs_shape[1])
    padded_width = tf.to_float(resized_inputs_shape[2])
    return tf.stack(
        [
            tf.zeros_like(true_heights),
            tf.zeros_like(true_widths), true_heights / padded_height,
            true_widths / padded_width
        ],
        axis=1) 
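As a worked example: if the padded inputs are 300x300 and the true image inside the padding is 240x150, the clip window for that image is [0, 0, 240/300, 150/300] = [0, 0, 0.8, 0.5]. The same arithmetic as a standalone sketch:

import tensorflow as tf

true_image_shapes = tf.constant([[240, 150, 3]])   # [height, width, channels]
padded_height, padded_width = 300., 300.           # from the resized inputs
true_heights, true_widths, _ = tf.unstack(tf.to_float(true_image_shapes), axis=1)
clip_window = tf.stack([tf.zeros_like(true_heights),
                        tf.zeros_like(true_widths),
                        true_heights / padded_height,
                        true_widths / padded_width], axis=1)
with tf.Session() as sess:
  print(sess.run(clip_window))  # [[0.  0.  0.8 0.5]]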
Example #10
Source File: shape_utils_test.py    From Person-Detection-and-Tracking with MIT License
def test_equal_dynamic_shape_along_first_dim_succeeds(self):
    tensor_a = tf.placeholder(tf.float32, shape=[None, None, None, 3])
    tensor_b = tf.placeholder(tf.float32, shape=[None])
    op = shape_utils.assert_shape_equal_along_first_dimension(
        shape_utils.combined_static_and_dynamic_shape(tensor_a),
        shape_utils.combined_static_and_dynamic_shape(tensor_b))
    with self.test_session() as sess:
      sess.run(op, feed_dict={tensor_a: np.zeros([5, 2, 2, 3]),
                              tensor_b: np.zeros([5])}) 
Example #11
Source File: shape_utils_test.py    From Person-Detection-and-Tracking with MIT License
def test_unequal_dynamic_shape_along_first_dim_raises_tf_assert(self):
    tensor_a = tf.placeholder(tf.float32, shape=[None, None, None, 3])
    tensor_b = tf.placeholder(tf.float32, shape=[None, None, 3])
    op = shape_utils.assert_shape_equal_along_first_dimension(
        shape_utils.combined_static_and_dynamic_shape(tensor_a),
        shape_utils.combined_static_and_dynamic_shape(tensor_b))
    with self.test_session() as sess:
      with self.assertRaises(tf.errors.InvalidArgumentError):
        sess.run(op, feed_dict={tensor_a: np.zeros([1, 2, 2, 3]),
                                tensor_b: np.zeros([2, 4, 3])}) 
Example #12
Source File: ssd_meta_arch.py    From Person-Detection-and-Tracking with MIT License
def _get_feature_map_spatial_dims(self, feature_maps):
    """Return list of spatial dimensions for each feature map in a list.

    Args:
      feature_maps: a list of tensors where the ith tensor has shape
          [batch, height_i, width_i, depth_i].

    Returns:
      a list of pairs (height, width) for each feature map in feature_maps
    """
    feature_map_shapes = [
        shape_utils.combined_static_and_dynamic_shape(
            feature_map) for feature_map in feature_maps
    ]
    return [(shape[1], shape[2]) for shape in feature_map_shapes] 
Example #13
Source File: shape_utils_test.py    From garbage-object-detection-tensorflow with MIT License
def test_combines_static_dynamic_shape(self):
    tensor = tf.placeholder(tf.float32, shape=(None, 2, 3))
    combined_shape = shape_utils.combined_static_and_dynamic_shape(
        tensor)
    self.assertTrue(tf.contrib.framework.is_tensor(combined_shape[0]))
    self.assertListEqual(combined_shape[1:], [2, 3]) 
Example #14
Source File: test_utils.py    From garbage-object-detection-tensorflow with MIT License
def _predict(self, image_features, num_predictions_per_location):
    combined_feature_shape = shape_utils.combined_static_and_dynamic_shape(
        image_features)
    batch_size = combined_feature_shape[0]
    num_anchors = (combined_feature_shape[1] * combined_feature_shape[2])
    code_size = 4
    zero = tf.reduce_sum(0 * image_features)
    box_encodings = zero + tf.zeros(
        (batch_size, num_anchors, 1, code_size), dtype=tf.float32)
    class_predictions_with_background = zero + tf.zeros(
        (batch_size, num_anchors, self.num_classes + 1), dtype=tf.float32)
    return {box_predictor.BOX_ENCODINGS: box_encodings,
            box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND:
            class_predictions_with_background} 
Example #15
Source File: shape_utils_test.py    From vehicle_counting_tensorflow with MIT License
def test_equal_dynamic_shape_succeeds(self):
    tensor_a = tf.placeholder(tf.float32, shape=[1, None, None, 3])
    tensor_b = tf.placeholder(tf.float32, shape=[1, None, None, 3])
    op = shape_utils.assert_shape_equal(
        shape_utils.combined_static_and_dynamic_shape(tensor_a),
        shape_utils.combined_static_and_dynamic_shape(tensor_b))
    with self.test_session() as sess:
      sess.run(op, feed_dict={tensor_a: np.zeros([1, 2, 2, 3]),
                              tensor_b: np.zeros([1, 2, 2, 3])}) 
Example #16
Source File: shape_utils_test.py    From vehicle_counting_tensorflow with MIT License
def test_equal_static_shape_succeeds(self):
    shape_a = tf.constant(np.zeros([4, 2, 2, 1]))
    shape_b = tf.constant(np.zeros([4, 2, 2, 1]))
    with self.test_session() as sess:
      op = shape_utils.assert_shape_equal(
          shape_utils.combined_static_and_dynamic_shape(shape_a),
          shape_utils.combined_static_and_dynamic_shape(shape_b))
      sess.run(op) 
Example #17
Source File: ssd_meta_arch.py    From vehicle_counting_tensorflow with MIT License
def _batch_decode(self, box_encodings):
    """Decodes a batch of box encodings with respect to the anchors.

    Args:
      box_encodings: A float32 tensor of shape
        [batch_size, num_anchors, box_code_size] containing box encodings.

    Returns:
      decoded_boxes: A float32 tensor of shape
        [batch_size, num_anchors, 4] containing the decoded boxes.
      decoded_keypoints: A float32 tensor of shape
        [batch_size, num_anchors, num_keypoints, 2] containing the decoded
        keypoints if present in the input `box_encodings`, None otherwise.
    """
    combined_shape = shape_utils.combined_static_and_dynamic_shape(
        box_encodings)
    batch_size = combined_shape[0]
    tiled_anchor_boxes = tf.tile(
        tf.expand_dims(self.anchors.get(), 0), [batch_size, 1, 1])
    tiled_anchors_boxlist = box_list.BoxList(
        tf.reshape(tiled_anchor_boxes, [-1, 4]))
    decoded_boxes = self._box_coder.decode(
        tf.reshape(box_encodings, [-1, self._box_coder.code_size]),
        tiled_anchors_boxlist)
    decoded_keypoints = None
    if decoded_boxes.has_field(fields.BoxListFields.keypoints):
      decoded_keypoints = decoded_boxes.get_field(
          fields.BoxListFields.keypoints)
      num_keypoints = decoded_keypoints.get_shape()[1]
      decoded_keypoints = tf.reshape(
          decoded_keypoints,
          tf.stack([combined_shape[0], combined_shape[1], num_keypoints, 2]))
    decoded_boxes = tf.reshape(decoded_boxes.get(), tf.stack(
        [combined_shape[0], combined_shape[1], 4]))
    return decoded_boxes, decoded_keypoints 