Python object_detection.core.box_list_ops.scale() Examples
The following are 30 code examples of object_detection.core.box_list_ops.scale(), drawn from open-source projects. The source file, project, and license are noted above each example. You may also want to check out all available functions/classes of the module object_detection.core.box_list_ops.
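Before the examples, a brief orientation may help: box_list_ops.scale(boxlist, y_scale, x_scale) returns a new BoxList whose [ymin, xmin, ymax, xmax] coordinates have been multiplied by y_scale (ymin/ymax) and x_scale (xmin/xmax), with any extra fields carried over. The minimal sketch below is illustrative only; it assumes the TensorFlow Object Detection API is importable and mirrors the values used in the test_scale examples that follow.

import tensorflow as tf
from object_detection.core import box_list
from object_detection.core import box_list_ops

# Two boxes in pixel coordinates of a 100 (height) x 200 (width) image,
# each row in [ymin, xmin, ymax, xmax] order.
corners = tf.constant([[0., 0., 100., 200.],
                       [50., 120., 100., 140.]], dtype=tf.float32)
boxes = box_list.BoxList(corners)

# Multiplying y-coordinates by 1/height and x-coordinates by 1/width
# converts the boxes to normalized [0, 1] coordinates.
normalized = box_list_ops.scale(boxes, 1.0 / 100, 1.0 / 200)

# Expected result (see the test_scale examples below):
# [[0.0, 0.0, 1.0, 1.0], [0.5, 0.6, 1.0, 0.7]]
print(normalized.get())

In TF2 eager mode, normalized.get() evaluates directly as above; under TF1 graph mode, evaluate it inside a session, as most of the test examples below do.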
Example #1
Source File: center_net_meta_arch.py From models with Apache License 2.0 | 6 votes |
def __new__(cls,
            localization_loss,
            scale_loss_weight,
            offset_loss_weight,
            task_loss_weight=1.0):
  """Constructor with default values for ObjectDetectionParams.

  Args:
    localization_loss: a object_detection.core.losses.Loss object to compute
      the loss for the center offset and height/width predictions in
      CenterNet.
    scale_loss_weight: float, The weight for localizing box size. Note that
      the scale loss is dependent on the input image size, since we penalize
      the raw height and width. This constant may need to be adjusted
      depending on the input size.
    offset_loss_weight: float, The weight for localizing center offsets.
    task_loss_weight: float, the weight of the object detection loss.

  Returns:
    An initialized ObjectDetectionParams namedtuple.
  """
  return super(ObjectDetectionParams,
               cls).__new__(cls, localization_loss, scale_loss_weight,
                            offset_loss_weight, task_loss_weight)
Example #2
Source File: center_net_meta_arch.py From models with Apache License 2.0 | 6 votes |
def preprocess(self, inputs):
  """Converts a batch of unscaled images to a scale suitable for the model.

  This method normalizes the image using the given `channel_means` and
  `channels_stds` values at initialization time while optionally flipping
  the channel order if `bgr_ordering` is set.

  Args:
    inputs: a [batch, height, width, channels] float32 tensor

  Returns:
    outputs: a [batch, height, width, channels] float32 tensor
  """
  if self._bgr_ordering:
    red, green, blue = tf.unstack(inputs, axis=3)
    inputs = tf.stack([blue, green, red], axis=3)

  channel_means = tf.reshape(tf.constant(self._channel_means), [1, 1, 1, -1])
  channel_stds = tf.reshape(tf.constant(self._channel_stds), [1, 1, 1, -1])

  return (inputs - channel_means)/channel_stds
Example #3
Source File: preprocessor.py From object_detection_with_tensorflow with MIT License | 5 votes |
def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):
  """Scales boxes from normalized to pixel coordinates.

  Args:
    image: A 3D float32 tensor of shape [height, width, channels].
    boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding
      boxes in normalized coordinates. Each row is of the form
      [ymin, xmin, ymax, xmax].
    keypoints: (optional) rank 3 float32 tensor with shape
      [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
      coordinates.

  Returns:
    image: unchanged input image.
    scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the
      bounding boxes in pixel coordinates.
    scaled_keypoints: a 3D float32 tensor with shape
      [num_instances, num_keypoints, 2] containing the keypoints in pixel
      coordinates.
  """
  boxlist = box_list.BoxList(boxes)
  image_height = tf.shape(image)[0]
  image_width = tf.shape(image)[1]
  scaled_boxes = box_list_ops.scale(boxlist, image_height, image_width).get()
  result = [image, scaled_boxes]
  if keypoints is not None:
    scaled_keypoints = keypoint_ops.scale(keypoints, image_height, image_width)
    result.append(scaled_keypoints)
  return tuple(result)


# pylint: disable=g-doc-return-or-yield
Example #4
Source File: box_list_ops_test.py From Accident-Detection-on-Indian-Roads with GNU Affero General Public License v3.0 | 5 votes |
def test_scale(self):
  corners = tf.constant([[0, 0, 100, 200], [50, 120, 100, 140]],
                        dtype=tf.float32)
  boxes = box_list.BoxList(corners)
  boxes.add_field('extra_data', tf.constant([[1], [2]]))

  y_scale = tf.constant(1.0/100)
  x_scale = tf.constant(1.0/200)
  scaled_boxes = box_list_ops.scale(boxes, y_scale, x_scale)
  exp_output = [[0, 0, 1, 1], [0.5, 0.6, 1.0, 0.7]]
  with self.test_session() as sess:
    scaled_corners_out = sess.run(scaled_boxes.get())
    self.assertAllClose(scaled_corners_out, exp_output)
    extra_data_out = sess.run(scaled_boxes.get_field('extra_data'))
    self.assertAllEqual(extra_data_out, [[1], [2]])
Example #5
Source File: preprocessor.py From MAX-Object-Detector with Apache License 2.0 | 5 votes |
def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):
  """Scales boxes from normalized to pixel coordinates.

  Args:
    image: A 3D float32 tensor of shape [height, width, channels].
    boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding
      boxes in normalized coordinates. Each row is of the form
      [ymin, xmin, ymax, xmax].
    keypoints: (optional) rank 3 float32 tensor with shape
      [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
      coordinates.

  Returns:
    image: unchanged input image.
    scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the
      bounding boxes in pixel coordinates.
    scaled_keypoints: a 3D float32 tensor with shape
      [num_instances, num_keypoints, 2] containing the keypoints in pixel
      coordinates.
  """
  boxlist = box_list.BoxList(boxes)
  image_height = tf.shape(image)[0]
  image_width = tf.shape(image)[1]
  scaled_boxes = box_list_ops.scale(boxlist, image_height, image_width).get()
  result = [image, scaled_boxes]
  if keypoints is not None:
    scaled_keypoints = keypoint_ops.scale(keypoints, image_height, image_width)
    result.append(scaled_keypoints)
  return tuple(result)


# TODO(alirezafathi): Investigate if instead the function should return None if
# masks is None.
# pylint: disable=g-doc-return-or-yield
Example #6
Source File: box_list_ops_test.py From MAX-Object-Detector with Apache License 2.0 | 5 votes |
def test_scale(self):
  corners = tf.constant([[0, 0, 100, 200], [50, 120, 100, 140]],
                        dtype=tf.float32)
  boxes = box_list.BoxList(corners)
  boxes.add_field('extra_data', tf.constant([[1], [2]]))

  y_scale = tf.constant(1.0/100)
  x_scale = tf.constant(1.0/200)
  scaled_boxes = box_list_ops.scale(boxes, y_scale, x_scale)
  exp_output = [[0, 0, 1, 1], [0.5, 0.6, 1.0, 0.7]]
  with self.test_session() as sess:
    scaled_corners_out = sess.run(scaled_boxes.get())
    self.assertAllClose(scaled_corners_out, exp_output)
    extra_data_out = sess.run(scaled_boxes.get_field('extra_data'))
    self.assertAllEqual(extra_data_out, [[1], [2]])
Example #7
Source File: center_net_meta_arch.py From models with Apache License 2.0 | 5 votes |
def _compute_object_detection_losses(self,
                                     input_height,
                                     input_width,
                                     prediction_dict,
                                     per_pixel_weights):
  """Computes the weighted object detection losses.

  This wrapper function calls the function which computes the losses for
  object detection task and applies corresponding weights to the losses.

  Args:
    input_height: An integer scalar tensor representing input image height.
    input_width: An integer scalar tensor representing input image width.
    prediction_dict: A dictionary holding predicted tensors output by
      "predict" function. See "predict" function for more detailed
      description.
    per_pixel_weights: A float tensor of shape [batch_size,
      out_height * out_width, 1] with 1s in locations where the spatial
      coordinates fall within the height and width in true_image_shapes.

  Returns:
    A dictionary of scalar float tensors representing the weighted losses for
    object detection task:
      BOX_SCALE: the weighted scale (height/width) loss.
      BOX_OFFSET: the weighted object offset loss.
  """
  od_scale_loss, od_offset_loss = self._compute_box_scale_and_offset_loss(
      scale_predictions=prediction_dict[BOX_SCALE],
      offset_predictions=prediction_dict[BOX_OFFSET],
      input_height=input_height,
      input_width=input_width)
  loss_dict = {}
  loss_dict[BOX_SCALE] = (
      self._od_params.scale_loss_weight * od_scale_loss)
  loss_dict[BOX_OFFSET] = (
      self._od_params.offset_loss_weight * od_offset_loss)
  return loss_dict
Example #8
Source File: box_list_ops_test.py From models with Apache License 2.0 | 5 votes |
def test_scale(self):
  def graph_fn():
    corners = tf.constant([[0, 0, 100, 200], [50, 120, 100, 140]],
                          dtype=tf.float32)
    boxes = box_list.BoxList(corners)
    boxes.add_field('extra_data', tf.constant([[1], [2]]))

    y_scale = tf.constant(1.0/100)
    x_scale = tf.constant(1.0/200)
    scaled_boxes = box_list_ops.scale(boxes, y_scale, x_scale)
    return scaled_boxes.get(), scaled_boxes.get_field('extra_data')
  scaled_corners_out, extra_data_out = self.execute(graph_fn, [])
  exp_output = [[0, 0, 1, 1], [0.5, 0.6, 1.0, 0.7]]
  self.assertAllClose(scaled_corners_out, exp_output)
  self.assertAllEqual(extra_data_out, [[1], [2]])
Example #9
Source File: preprocessor.py From models with Apache License 2.0 | 5 votes |
def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):
  """Scales boxes from normalized to pixel coordinates.

  Args:
    image: A 3D float32 tensor of shape [height, width, channels].
    boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding
      boxes in normalized coordinates. Each row is of the form
      [ymin, xmin, ymax, xmax].
    keypoints: (optional) rank 3 float32 tensor with shape
      [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
      coordinates.

  Returns:
    image: unchanged input image.
    scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the
      bounding boxes in pixel coordinates.
    scaled_keypoints: a 3D float32 tensor with shape
      [num_instances, num_keypoints, 2] containing the keypoints in pixel
      coordinates.
  """
  boxlist = box_list.BoxList(boxes)
  image_height = tf.shape(image)[0]
  image_width = tf.shape(image)[1]
  scaled_boxes = box_list_ops.scale(boxlist, image_height, image_width).get()
  result = [image, scaled_boxes]
  if keypoints is not None:
    scaled_keypoints = keypoint_ops.scale(keypoints, image_height, image_width)
    result.append(scaled_keypoints)
  return tuple(result)


# TODO(alirezafathi): Investigate if instead the function should return None if
# masks is None.
# pylint: disable=g-doc-return-or-yield
Example #10
Source File: box_list_ops_test.py From motion-rcnn with MIT License | 5 votes |
def test_scale(self):
  corners = tf.constant([[0, 0, 100, 200], [50, 120, 100, 140]],
                        dtype=tf.float32)
  boxes = box_list.BoxList(corners)
  boxes.add_field('extra_data', tf.constant([[1], [2]]))

  y_scale = tf.constant(1.0/100)
  x_scale = tf.constant(1.0/200)
  scaled_boxes = box_list_ops.scale(boxes, y_scale, x_scale)
  exp_output = [[0, 0, 1, 1], [0.5, 0.6, 1.0, 0.7]]
  with self.test_session() as sess:
    scaled_corners_out = sess.run(scaled_boxes.get())
    self.assertAllClose(scaled_corners_out, exp_output)
    extra_data_out = sess.run(scaled_boxes.get_field('extra_data'))
    self.assertAllEqual(extra_data_out, [[1], [2]])
Example #11
Source File: preprocessor.py From motion-rcnn with MIT License | 5 votes |
def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):
  """Scales boxes from normalized to pixel coordinates.

  Args:
    image: A 3D float32 tensor of shape [height, width, channels].
    boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding
      boxes in normalized coordinates. Each row is of the form
      [ymin, xmin, ymax, xmax].
    keypoints: (optional) rank 3 float32 tensor with shape
      [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
      coordinates.

  Returns:
    image: unchanged input image.
    scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the
      bounding boxes in pixel coordinates.
    scaled_keypoints: a 3D float32 tensor with shape
      [num_instances, num_keypoints, 2] containing the keypoints in pixel
      coordinates.
  """
  boxlist = box_list.BoxList(boxes)
  image_height = tf.shape(image)[0]
  image_width = tf.shape(image)[1]
  scaled_boxes = box_list_ops.scale(boxlist, image_height, image_width).get()
  result = [image, scaled_boxes]
  if keypoints is not None:
    scaled_keypoints = keypoint_ops.scale(keypoints, image_height, image_width)
    result.append(scaled_keypoints)
  return tuple(result)


# pylint: disable=g-doc-return-or-yield
Example #12
Source File: box_list_ops_test.py From mtl-ssl with Apache License 2.0 | 5 votes |
def test_scale(self):
  corners = tf.constant([[0, 0, 100, 200], [50, 120, 100, 140]],
                        dtype=tf.float32)
  boxes = box_list.BoxList(corners)
  boxes.add_field('extra_data', tf.constant([[1], [2]]))

  y_scale = tf.constant(1.0/100)
  x_scale = tf.constant(1.0/200)
  scaled_boxes = box_list_ops.scale(boxes, y_scale, x_scale)
  exp_output = [[0, 0, 1, 1], [0.5, 0.6, 1.0, 0.7]]
  with self.test_session() as sess:
    scaled_corners_out = sess.run(scaled_boxes.get())
    self.assertAllClose(scaled_corners_out, exp_output)
    extra_data_out = sess.run(scaled_boxes.get_field('extra_data'))
    self.assertAllEqual(extra_data_out, [[1], [2]])
Example #13
Source File: preprocessor.py From mtl-ssl with Apache License 2.0 | 5 votes |
def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):
  """Scales boxes from normalized to pixel coordinates.

  Args:
    image: A 3D float32 tensor of shape [height, width, channels].
    boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding
      boxes in normalized coordinates. Each row is of the form
      [ymin, xmin, ymax, xmax].
    keypoints: (optional) rank 3 float32 tensor with shape
      [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
      coordinates.

  Returns:
    image: unchanged input image.
    scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the
      bounding boxes in pixel coordinates.
    scaled_keypoints: a 3D float32 tensor with shape
      [num_instances, num_keypoints, 2] containing the keypoints in pixel
      coordinates.
  """
  boxlist = box_list.BoxList(boxes)
  image_height = tf.shape(image)[0]
  image_width = tf.shape(image)[1]
  scaled_boxes = box_list_ops.scale(boxlist, image_height, image_width).get()
  result = [image, scaled_boxes]
  if keypoints is not None:
    scaled_keypoints = keypoint_ops.scale(keypoints, image_height, image_width)
    result.append(scaled_keypoints)
  return tuple(result)


# pylint: disable=g-doc-return-or-yield
Example #14
Source File: center_net_meta_arch.py From models with Apache License 2.0 | 5 votes |
def convert_strided_predictions_to_normalized_boxes(boxes, stride,
                                                    true_image_shapes):
  """Converts predictions in the output space to normalized boxes.

  Boxes falling outside the valid image boundary are clipped to be on the
  boundary.

  Args:
    boxes: A tensor of shape [batch_size, num_boxes, 4] holding the raw
      coordinates of boxes in the model's output space.
    stride: The stride in the output space.
    true_image_shapes: A tensor of shape [batch_size, 3] representing the
      true shape of the input not considering padding.

  Returns:
    boxes: A tensor of shape [batch_size, num_boxes, 4] representing the
      coordinates of the normalized boxes.
  """
  def _normalize_boxlist(args):
    boxes, height, width = args
    boxes = box_list_ops.scale(boxes, stride, stride)
    boxes = box_list_ops.to_normalized_coordinates(boxes, height, width)
    boxes = box_list_ops.clip_to_window(boxes, [0., 0., 1., 1.],
                                        filter_nonoverlapping=False)
    return boxes

  box_lists = [box_list.BoxList(boxes) for boxes in tf.unstack(boxes, axis=0)]
  true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1)

  true_heights_list = tf.unstack(true_heights, axis=0)
  true_widths_list = tf.unstack(true_widths, axis=0)

  box_lists = list(map(_normalize_boxlist,
                       zip(box_lists, true_heights_list, true_widths_list)))
  boxes = tf.stack([box_list_instance.get()
                    for box_list_instance in box_lists], axis=0)

  return boxes
Example #15
Source File: preprocessor.py From monopsr with MIT License | 5 votes |
def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):
  """Scales boxes from normalized to pixel coordinates.

  Args:
    image: A 3D float32 tensor of shape [height, width, channels].
    boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding
      boxes in normalized coordinates. Each row is of the form
      [ymin, xmin, ymax, xmax].
    keypoints: (optional) rank 3 float32 tensor with shape
      [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
      coordinates.

  Returns:
    image: unchanged input image.
    scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the
      bounding boxes in pixel coordinates.
    scaled_keypoints: a 3D float32 tensor with shape
      [num_instances, num_keypoints, 2] containing the keypoints in pixel
      coordinates.
  """
  boxlist = box_list.BoxList(boxes)
  image_height = tf.shape(image)[0]
  image_width = tf.shape(image)[1]
  scaled_boxes = box_list_ops.scale(boxlist, image_height, image_width).get()
  result = [image, scaled_boxes]
  if keypoints is not None:
    scaled_keypoints = keypoint_ops.scale(keypoints, image_height, image_width)
    result.append(scaled_keypoints)
  return tuple(result)


# TODO(alirezafathi): Investigate if instead the function should return None if
# masks is None.
# pylint: disable=g-doc-return-or-yield
Example #16
Source File: box_list_ops_test.py From monopsr with MIT License | 5 votes |
def test_scale(self):
  corners = tf.constant([[0, 0, 100, 200], [50, 120, 100, 140]],
                        dtype=tf.float32)
  boxes = box_list.BoxList(corners)
  boxes.add_field('extra_data', tf.constant([[1], [2]]))

  y_scale = tf.constant(1.0 / 100)
  x_scale = tf.constant(1.0 / 200)
  scaled_boxes = box_list_ops.scale(boxes, y_scale, x_scale)
  exp_output = [[0, 0, 1, 1], [0.5, 0.6, 1.0, 0.7]]
  with self.test_session() as sess:
    scaled_corners_out = sess.run(scaled_boxes.get())
    self.assertAllClose(scaled_corners_out, exp_output)
    extra_data_out = sess.run(scaled_boxes.get_field('extra_data'))
    self.assertAllEqual(extra_data_out, [[1], [2]])
Example #17
Source File: box_list_ops_test.py From object_detection_with_tensorflow with MIT License | 5 votes |
def test_scale(self):
  corners = tf.constant([[0, 0, 100, 200], [50, 120, 100, 140]],
                        dtype=tf.float32)
  boxes = box_list.BoxList(corners)
  boxes.add_field('extra_data', tf.constant([[1], [2]]))

  y_scale = tf.constant(1.0/100)
  x_scale = tf.constant(1.0/200)
  scaled_boxes = box_list_ops.scale(boxes, y_scale, x_scale)
  exp_output = [[0, 0, 1, 1], [0.5, 0.6, 1.0, 0.7]]
  with self.test_session() as sess:
    scaled_corners_out = sess.run(scaled_boxes.get())
    self.assertAllClose(scaled_corners_out, exp_output)
    extra_data_out = sess.run(scaled_boxes.get_field('extra_data'))
    self.assertAllEqual(extra_data_out, [[1], [2]])
Example #18
Source File: preprocessor.py From AniSeg with Apache License 2.0 | 5 votes |
def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):
  """Scales boxes from normalized to pixel coordinates.

  Args:
    image: A 3D float32 tensor of shape [height, width, channels].
    boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding
      boxes in normalized coordinates. Each row is of the form
      [ymin, xmin, ymax, xmax].
    keypoints: (optional) rank 3 float32 tensor with shape
      [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
      coordinates.

  Returns:
    image: unchanged input image.
    scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the
      bounding boxes in pixel coordinates.
    scaled_keypoints: a 3D float32 tensor with shape
      [num_instances, num_keypoints, 2] containing the keypoints in pixel
      coordinates.
  """
  boxlist = box_list.BoxList(boxes)
  image_height = tf.shape(image)[0]
  image_width = tf.shape(image)[1]
  scaled_boxes = box_list_ops.scale(boxlist, image_height, image_width).get()
  result = [image, scaled_boxes]
  if keypoints is not None:
    scaled_keypoints = keypoint_ops.scale(keypoints, image_height, image_width)
    result.append(scaled_keypoints)
  return tuple(result)


# TODO(alirezafathi): Investigate if instead the function should return None if
# masks is None.
# pylint: disable=g-doc-return-or-yield
Example #19
Source File: box_list_ops_test.py From object_detection_with_tensorflow with MIT License | 5 votes |
def test_scale(self):
  corners = tf.constant([[0, 0, 100, 200], [50, 120, 100, 140]],
                        dtype=tf.float32)
  boxes = box_list.BoxList(corners)
  boxes.add_field('extra_data', tf.constant([[1], [2]]))

  y_scale = tf.constant(1.0/100)
  x_scale = tf.constant(1.0/200)
  scaled_boxes = box_list_ops.scale(boxes, y_scale, x_scale)
  exp_output = [[0, 0, 1, 1], [0.5, 0.6, 1.0, 0.7]]
  with self.test_session() as sess:
    scaled_corners_out = sess.run(scaled_boxes.get())
    self.assertAllClose(scaled_corners_out, exp_output)
    extra_data_out = sess.run(scaled_boxes.get_field('extra_data'))
    self.assertAllEqual(extra_data_out, [[1], [2]])
Example #20
Source File: preprocessor.py From Elphas with Apache License 2.0 | 5 votes |
def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):
  """Scales boxes from normalized to pixel coordinates.

  Args:
    image: A 3D float32 tensor of shape [height, width, channels].
    boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding
      boxes in normalized coordinates. Each row is of the form
      [ymin, xmin, ymax, xmax].
    keypoints: (optional) rank 3 float32 tensor with shape
      [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
      coordinates.

  Returns:
    image: unchanged input image.
    scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the
      bounding boxes in pixel coordinates.
    scaled_keypoints: a 3D float32 tensor with shape
      [num_instances, num_keypoints, 2] containing the keypoints in pixel
      coordinates.
  """
  boxlist = box_list.BoxList(boxes)
  image_height = tf.shape(image)[0]
  image_width = tf.shape(image)[1]
  scaled_boxes = box_list_ops.scale(boxlist, image_height, image_width).get()
  result = [image, scaled_boxes]
  if keypoints is not None:
    scaled_keypoints = keypoint_ops.scale(keypoints, image_height, image_width)
    result.append(scaled_keypoints)
  return tuple(result)


# TODO: Investigate if instead the function should return None if
# masks is None.
# pylint: disable=g-doc-return-or-yield
Example #21
Source File: box_list_ops_test.py From Elphas with Apache License 2.0 | 5 votes |
def test_scale(self):
  corners = tf.constant([[0, 0, 100, 200], [50, 120, 100, 140]],
                        dtype=tf.float32)
  boxes = box_list.BoxList(corners)
  boxes.add_field('extra_data', tf.constant([[1], [2]]))

  y_scale = tf.constant(1.0/100)
  x_scale = tf.constant(1.0/200)
  scaled_boxes = box_list_ops.scale(boxes, y_scale, x_scale)
  exp_output = [[0, 0, 1, 1], [0.5, 0.6, 1.0, 0.7]]
  with self.test_session() as sess:
    scaled_corners_out = sess.run(scaled_boxes.get())
    self.assertAllClose(scaled_corners_out, exp_output)
    extra_data_out = sess.run(scaled_boxes.get_field('extra_data'))
    self.assertAllEqual(extra_data_out, [[1], [2]])
Example #22
Source File: preprocessor.py From MBMD with MIT License | 5 votes |
def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):
  """Scales boxes from normalized to pixel coordinates.

  Args:
    image: A 3D float32 tensor of shape [height, width, channels].
    boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding
      boxes in normalized coordinates. Each row is of the form
      [ymin, xmin, ymax, xmax].
    keypoints: (optional) rank 3 float32 tensor with shape
      [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
      coordinates.

  Returns:
    image: unchanged input image.
    scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the
      bounding boxes in pixel coordinates.
    scaled_keypoints: a 3D float32 tensor with shape
      [num_instances, num_keypoints, 2] containing the keypoints in pixel
      coordinates.
  """
  boxlist = box_list.BoxList(boxes)
  image_height = tf.shape(image)[0]
  image_width = tf.shape(image)[1]
  scaled_boxes = box_list_ops.scale(boxlist, image_height, image_width).get()
  result = [image, scaled_boxes]
  if keypoints is not None:
    scaled_keypoints = keypoint_ops.scale(keypoints, image_height, image_width)
    result.append(scaled_keypoints)
  return tuple(result)


# pylint: disable=g-doc-return-or-yield
Example #23
Source File: box_list_ops_test.py From MBMD with MIT License | 5 votes |
def test_scale(self):
  corners = tf.constant([[0, 0, 100, 200], [50, 120, 100, 140]],
                        dtype=tf.float32)
  boxes = box_list.BoxList(corners)
  boxes.add_field('extra_data', tf.constant([[1], [2]]))

  y_scale = tf.constant(1.0/100)
  x_scale = tf.constant(1.0/200)
  scaled_boxes = box_list_ops.scale(boxes, y_scale, x_scale)
  exp_output = [[0, 0, 1, 1], [0.5, 0.6, 1.0, 0.7]]
  with self.test_session() as sess:
    scaled_corners_out = sess.run(scaled_boxes.get())
    self.assertAllClose(scaled_corners_out, exp_output)
    extra_data_out = sess.run(scaled_boxes.get_field('extra_data'))
    self.assertAllEqual(extra_data_out, [[1], [2]])
Example #24
Source File: preprocessor.py From object_detection_kitti with Apache License 2.0 | 5 votes |
def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):
  """Scales boxes from normalized to pixel coordinates.

  Args:
    image: A 3D float32 tensor of shape [height, width, channels].
    boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding
      boxes in normalized coordinates. Each row is of the form
      [ymin, xmin, ymax, xmax].
    keypoints: (optional) rank 3 float32 tensor with shape
      [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
      coordinates.

  Returns:
    image: unchanged input image.
    scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the
      bounding boxes in pixel coordinates.
    scaled_keypoints: a 3D float32 tensor with shape
      [num_instances, num_keypoints, 2] containing the keypoints in pixel
      coordinates.
  """
  boxlist = box_list.BoxList(boxes)
  image_height = tf.shape(image)[0]
  image_width = tf.shape(image)[1]
  scaled_boxes = box_list_ops.scale(boxlist, image_height, image_width).get()
  result = [image, scaled_boxes]
  if keypoints is not None:
    scaled_keypoints = keypoint_ops.scale(keypoints, image_height, image_width)
    result.append(scaled_keypoints)
  return tuple(result)


# pylint: disable=g-doc-return-or-yield
Example #25
Source File: box_list_ops_test.py From DOTA_models with Apache License 2.0 | 5 votes |
def test_scale(self):
  corners = tf.constant([[0, 0, 100, 200], [50, 120, 100, 140]],
                        dtype=tf.float32)
  boxes = box_list.BoxList(corners)
  boxes.add_field('extra_data', tf.constant([[1], [2]]))

  y_scale = tf.constant(1.0/100)
  x_scale = tf.constant(1.0/200)
  scaled_boxes = box_list_ops.scale(boxes, y_scale, x_scale)
  exp_output = [[0, 0, 1, 1], [0.5, 0.6, 1.0, 0.7]]
  with self.test_session() as sess:
    scaled_corners_out = sess.run(scaled_boxes.get())
    self.assertAllClose(scaled_corners_out, exp_output)
    extra_data_out = sess.run(scaled_boxes.get_field('extra_data'))
    self.assertAllEqual(extra_data_out, [[1], [2]])
Example #26
Source File: preprocessor.py From hands-detection with MIT License | 5 votes |
def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):
  """Scales boxes from normalized to pixel coordinates.

  Args:
    image: A 3D float32 tensor of shape [height, width, channels].
    boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding
      boxes in normalized coordinates. Each row is of the form
      [ymin, xmin, ymax, xmax].
    keypoints: (optional) rank 3 float32 tensor with shape
      [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
      coordinates.

  Returns:
    image: unchanged input image.
    scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the
      bounding boxes in pixel coordinates.
    scaled_keypoints: a 3D float32 tensor with shape
      [num_instances, num_keypoints, 2] containing the keypoints in pixel
      coordinates.
  """
  boxlist = box_list.BoxList(boxes)
  image_height = tf.shape(image)[0]
  image_width = tf.shape(image)[1]
  scaled_boxes = box_list_ops.scale(boxlist, image_height, image_width).get()
  result = [image, scaled_boxes]
  if keypoints is not None:
    scaled_keypoints = keypoint_ops.scale(keypoints, image_height, image_width)
    result.append(scaled_keypoints)
  return tuple(result)


# pylint: disable=g-doc-return-or-yield
Example #27
Source File: box_list_ops_test.py From hands-detection with MIT License | 5 votes |
def test_scale(self):
  corners = tf.constant([[0, 0, 100, 200], [50, 120, 100, 140]],
                        dtype=tf.float32)
  boxes = box_list.BoxList(corners)
  boxes.add_field('extra_data', tf.constant([[1], [2]]))

  y_scale = tf.constant(1.0/100)
  x_scale = tf.constant(1.0/200)
  scaled_boxes = box_list_ops.scale(boxes, y_scale, x_scale)
  exp_output = [[0, 0, 1, 1], [0.5, 0.6, 1.0, 0.7]]
  with self.test_session() as sess:
    scaled_corners_out = sess.run(scaled_boxes.get())
    self.assertAllClose(scaled_corners_out, exp_output)
    extra_data_out = sess.run(scaled_boxes.get_field('extra_data'))
    self.assertAllEqual(extra_data_out, [[1], [2]])
Example #28
Source File: preprocessor.py From moveo_ros with MIT License | 5 votes |
def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):
  """Scales boxes from normalized to pixel coordinates.

  Args:
    image: A 3D float32 tensor of shape [height, width, channels].
    boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding
      boxes in normalized coordinates. Each row is of the form
      [ymin, xmin, ymax, xmax].
    keypoints: (optional) rank 3 float32 tensor with shape
      [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
      coordinates.

  Returns:
    image: unchanged input image.
    scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the
      bounding boxes in pixel coordinates.
    scaled_keypoints: a 3D float32 tensor with shape
      [num_instances, num_keypoints, 2] containing the keypoints in pixel
      coordinates.
  """
  boxlist = box_list.BoxList(boxes)
  image_height = tf.shape(image)[0]
  image_width = tf.shape(image)[1]
  scaled_boxes = box_list_ops.scale(boxlist, image_height, image_width).get()
  result = [image, scaled_boxes]
  if keypoints is not None:
    scaled_keypoints = keypoint_ops.scale(keypoints, image_height, image_width)
    result.append(scaled_keypoints)
  return tuple(result)


# pylint: disable=g-doc-return-or-yield
Example #29
Source File: box_list_ops_test.py From moveo_ros with MIT License | 5 votes |
def test_scale(self):
  corners = tf.constant([[0, 0, 100, 200], [50, 120, 100, 140]],
                        dtype=tf.float32)
  boxes = box_list.BoxList(corners)
  boxes.add_field('extra_data', tf.constant([[1], [2]]))

  y_scale = tf.constant(1.0/100)
  x_scale = tf.constant(1.0/200)
  scaled_boxes = box_list_ops.scale(boxes, y_scale, x_scale)
  exp_output = [[0, 0, 1, 1], [0.5, 0.6, 1.0, 0.7]]
  with self.test_session() as sess:
    scaled_corners_out = sess.run(scaled_boxes.get())
    self.assertAllClose(scaled_corners_out, exp_output)
    extra_data_out = sess.run(scaled_boxes.get_field('extra_data'))
    self.assertAllEqual(extra_data_out, [[1], [2]])
Example #30
Source File: preprocessor.py From BMW-TensorFlow-Training-GUI with Apache License 2.0 | 5 votes |
def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):
  """Scales boxes from normalized to pixel coordinates.

  Args:
    image: A 3D float32 tensor of shape [height, width, channels].
    boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding
      boxes in normalized coordinates. Each row is of the form
      [ymin, xmin, ymax, xmax].
    keypoints: (optional) rank 3 float32 tensor with shape
      [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
      coordinates.

  Returns:
    image: unchanged input image.
    scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the
      bounding boxes in pixel coordinates.
    scaled_keypoints: a 3D float32 tensor with shape
      [num_instances, num_keypoints, 2] containing the keypoints in pixel
      coordinates.
  """
  boxlist = box_list.BoxList(boxes)
  image_height = tf.shape(image)[0]
  image_width = tf.shape(image)[1]
  scaled_boxes = box_list_ops.scale(boxlist, image_height, image_width).get()
  result = [image, scaled_boxes]
  if keypoints is not None:
    scaled_keypoints = keypoint_ops.scale(keypoints, image_height, image_width)
    result.append(scaled_keypoints)
  return tuple(result)


# TODO(alirezafathi): Investigate if instead the function should return None if
# masks is None.
# pylint: disable=g-doc-return-or-yield