Python object_detection.utils.ops.batch_position_sensitive_crop_regions() Examples
The following are 21 code examples of object_detection.utils.ops.batch_position_sensitive_crop_regions(), taken from open-source projects. Each example lists the project and source file it comes from. You may also want to check out all available functions/classes of the module object_detection.utils.ops.
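Before the examples, here is a minimal usage sketch assembled from the calls in the tests below. It is an illustrative assumption rather than official documentation: it assumes a TF 1.x graph-mode environment (tf.random_uniform and tf.Session are TF 1.x APIs), that boxes are given in normalized [ymin, xmin, ymax, xmax] order as the "first row" box in the later tests suggests, and that the channel count is divisible by the number of spatial bins (the tests pair 4-channel inputs with a 2x2 bin grid). The hypothetical tensor shapes below are chosen only for the sketch.

import tensorflow as tf
from object_detection.utils import ops

# A batch of feature maps and per-image boxes, mirroring the tests below.
images = tf.random_uniform([2, 8, 8, 4])   # [batch, height, width, channels]
boxes = tf.random_uniform((2, 3, 4))       # [batch, num_boxes, 4], normalized coords
crop_size = [2, 2]
num_spatial_bins = [2, 2]                  # 4 channels / (2 * 2) bins = 1 channel per bin

# Position-sensitive crop, followed by average pooling over each bin.
ps_features = ops.batch_position_sensitive_crop_regions(
    images, boxes, crop_size, num_spatial_bins, global_pool=True)

with tf.Session() as sess:
  print(sess.run(ps_features).shape)

The examples exercise the two main configurations: global_pool=True versus False, and a single spatial bin versus a 2x2 grid of bins.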
Example #1
Source File: ops_test.py From multilabel-image-classification-tensorflow with MIT License | 6 votes |
def test_position_sensitive_with_global_pool_false_and_single_bin(self):
  num_spatial_bins = [1, 1]
  image_shape = [2, 3, 3, 4]
  crop_size = [1, 1]

  images = tf.random_uniform(image_shape)
  boxes = tf.random_uniform((2, 3, 4))
  # box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)

  # Since single_bin is used and crop_size = [1, 1] (i.e., no crop resize),
  # the outputs are the same whatever the global_pool value is.
  ps_crop_and_pool = ops.batch_position_sensitive_crop_regions(
      images, boxes, crop_size, num_spatial_bins, global_pool=True)
  ps_crop = ops.batch_position_sensitive_crop_regions(
      images, boxes, crop_size, num_spatial_bins, global_pool=False)

  with self.test_session() as sess:
    pooled_output, unpooled_output = sess.run((ps_crop_and_pool, ps_crop))
    self.assertAllClose(pooled_output, unpooled_output)
Example #2
Source File: ops_test.py From vehicle_counting_tensorflow with MIT License | 6 votes |
def test_position_sensitive_with_global_pool_false_and_single_bin(self):
  num_spatial_bins = [1, 1]
  image_shape = [2, 3, 3, 4]
  crop_size = [1, 1]

  images = tf.random_uniform(image_shape)
  boxes = tf.random_uniform((2, 3, 4))
  # box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)

  # Since single_bin is used and crop_size = [1, 1] (i.e., no crop resize),
  # the outputs are the same whatever the global_pool value is.
  ps_crop_and_pool = ops.batch_position_sensitive_crop_regions(
      images, boxes, crop_size, num_spatial_bins, global_pool=True)
  ps_crop = ops.batch_position_sensitive_crop_regions(
      images, boxes, crop_size, num_spatial_bins, global_pool=False)

  with self.test_session() as sess:
    pooled_output, unpooled_output = sess.run((ps_crop_and_pool, ps_crop))
    self.assertAllClose(pooled_output, unpooled_output)
Example #3
Source File: ops_test.py From BMW-TensorFlow-Training-GUI with Apache License 2.0 | 6 votes |
def test_position_sensitive_with_single_bin(self):
  num_spatial_bins = [1, 1]
  image_shape = [2, 3, 3, 4]
  crop_size = [2, 2]

  image = tf.random_uniform(image_shape)
  boxes = tf.random_uniform((2, 3, 4))
  box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)

  # When a single bin is used, position-sensitive crop and pool should be
  # the same as non-position sensitive crop and pool.
  crop = tf.image.crop_and_resize(image, tf.reshape(boxes, [-1, 4]), box_ind,
                                  crop_size)
  crop_and_pool = tf.reduce_mean(crop, [1, 2], keepdims=True)
  crop_and_pool = tf.reshape(crop_and_pool, [2, 3, 1, 1, 4])

  ps_crop_and_pool = ops.batch_position_sensitive_crop_regions(
      image, boxes, crop_size, num_spatial_bins, global_pool=True)

  with self.test_session() as sess:
    expected_output, output = sess.run((crop_and_pool, ps_crop_and_pool))
    self.assertAllClose(output, expected_output)
Example #4
Source File: ops_test.py From BMW-TensorFlow-Training-GUI with Apache License 2.0 | 6 votes |
def test_position_sensitive_with_global_pool_false_and_single_bin(self):
  num_spatial_bins = [1, 1]
  image_shape = [2, 3, 3, 4]
  crop_size = [1, 1]

  images = tf.random_uniform(image_shape)
  boxes = tf.random_uniform((2, 3, 4))
  # box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)

  # Since single_bin is used and crop_size = [1, 1] (i.e., no crop resize),
  # the outputs are the same whatever the global_pool value is.
  ps_crop_and_pool = ops.batch_position_sensitive_crop_regions(
      images, boxes, crop_size, num_spatial_bins, global_pool=True)
  ps_crop = ops.batch_position_sensitive_crop_regions(
      images, boxes, crop_size, num_spatial_bins, global_pool=False)

  with self.test_session() as sess:
    pooled_output, unpooled_output = sess.run((ps_crop_and_pool, ps_crop))
    self.assertAllClose(pooled_output, unpooled_output)
Example #5
Source File: ops_test.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0 | 6 votes |
def test_position_sensitive_with_single_bin(self):
  num_spatial_bins = [1, 1]
  image_shape = [2, 3, 3, 4]
  crop_size = [2, 2]

  image = tf.random_uniform(image_shape)
  boxes = tf.random_uniform((2, 3, 4))
  box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)

  # When a single bin is used, position-sensitive crop and pool should be
  # the same as non-position sensitive crop and pool.
  crop = tf.image.crop_and_resize(image, tf.reshape(boxes, [-1, 4]), box_ind,
                                  crop_size)
  crop_and_pool = tf.reduce_mean(crop, [1, 2], keepdims=True)
  crop_and_pool = tf.reshape(crop_and_pool, [2, 3, 1, 1, 4])

  ps_crop_and_pool = ops.batch_position_sensitive_crop_regions(
      image, boxes, crop_size, num_spatial_bins, global_pool=True)

  with self.test_session() as sess:
    expected_output, output = sess.run((crop_and_pool, ps_crop_and_pool))
    self.assertAllClose(output, expected_output)
Example #6
Source File: ops_test.py From multilabel-image-classification-tensorflow with MIT License | 6 votes |
def test_position_sensitive_with_single_bin(self):
  num_spatial_bins = [1, 1]
  image_shape = [2, 3, 3, 4]
  crop_size = [2, 2]

  image = tf.random_uniform(image_shape)
  boxes = tf.random_uniform((2, 3, 4))
  box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)

  # When a single bin is used, position-sensitive crop and pool should be
  # the same as non-position sensitive crop and pool.
  crop = tf.image.crop_and_resize(image, tf.reshape(boxes, [-1, 4]), box_ind,
                                  crop_size)
  crop_and_pool = tf.reduce_mean(crop, [1, 2], keepdims=True)
  crop_and_pool = tf.reshape(crop_and_pool, [2, 3, 1, 1, 4])

  ps_crop_and_pool = ops.batch_position_sensitive_crop_regions(
      image, boxes, crop_size, num_spatial_bins, global_pool=True)

  with self.test_session() as sess:
    expected_output, output = sess.run((crop_and_pool, ps_crop_and_pool))
    self.assertAllClose(output, expected_output)
Example #7
Source File: ops_test.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0 | 6 votes |
def test_position_sensitive_with_global_pool_false_and_single_bin(self):
  num_spatial_bins = [1, 1]
  image_shape = [2, 3, 3, 4]
  crop_size = [1, 1]

  images = tf.random_uniform(image_shape)
  boxes = tf.random_uniform((2, 3, 4))
  # box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)

  # Since single_bin is used and crop_size = [1, 1] (i.e., no crop resize),
  # the outputs are the same whatever the global_pool value is.
  ps_crop_and_pool = ops.batch_position_sensitive_crop_regions(
      images, boxes, crop_size, num_spatial_bins, global_pool=True)
  ps_crop = ops.batch_position_sensitive_crop_regions(
      images, boxes, crop_size, num_spatial_bins, global_pool=False)

  with self.test_session() as sess:
    pooled_output, unpooled_output = sess.run((ps_crop_and_pool, ps_crop))
    self.assertAllClose(pooled_output, unpooled_output)
Example #8
Source File: ops_test.py From MAX-Object-Detector with Apache License 2.0 | 6 votes |
def test_position_sensitive_with_single_bin(self):
  num_spatial_bins = [1, 1]
  image_shape = [2, 3, 3, 4]
  crop_size = [2, 2]

  image = tf.random_uniform(image_shape)
  boxes = tf.random_uniform((2, 3, 4))
  box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)

  # When a single bin is used, position-sensitive crop and pool should be
  # the same as non-position sensitive crop and pool.
  crop = tf.image.crop_and_resize(image, tf.reshape(boxes, [-1, 4]), box_ind,
                                  crop_size)
  crop_and_pool = tf.reduce_mean(crop, [1, 2], keepdims=True)
  crop_and_pool = tf.reshape(crop_and_pool, [2, 3, 1, 1, 4])

  ps_crop_and_pool = ops.batch_position_sensitive_crop_regions(
      image, boxes, crop_size, num_spatial_bins, global_pool=True)

  with self.test_session() as sess:
    expected_output, output = sess.run((crop_and_pool, ps_crop_and_pool))
    self.assertAllClose(output, expected_output)
Example #9
Source File: ops_test.py From vehicle_counting_tensorflow with MIT License | 6 votes |
def test_position_sensitive_with_single_bin(self):
  num_spatial_bins = [1, 1]
  image_shape = [2, 3, 3, 4]
  crop_size = [2, 2]

  image = tf.random_uniform(image_shape)
  boxes = tf.random_uniform((2, 3, 4))
  box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)

  # When a single bin is used, position-sensitive crop and pool should be
  # the same as non-position sensitive crop and pool.
  crop = tf.image.crop_and_resize(image, tf.reshape(boxes, [-1, 4]), box_ind,
                                  crop_size)
  crop_and_pool = tf.reduce_mean(crop, [1, 2], keepdims=True)
  crop_and_pool = tf.reshape(crop_and_pool, [2, 3, 1, 1, 4])

  ps_crop_and_pool = ops.batch_position_sensitive_crop_regions(
      image, boxes, crop_size, num_spatial_bins, global_pool=True)

  with self.test_session() as sess:
    expected_output, output = sess.run((crop_and_pool, ps_crop_and_pool))
    self.assertAllClose(output, expected_output)
Example #10
Source File: ops_test.py From MAX-Object-Detector with Apache License 2.0 | 6 votes |
def test_position_sensitive_with_global_pool_false_and_single_bin(self):
  num_spatial_bins = [1, 1]
  image_shape = [2, 3, 3, 4]
  crop_size = [1, 1]

  images = tf.random_uniform(image_shape)
  boxes = tf.random_uniform((2, 3, 4))
  # box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)

  # Since single_bin is used and crop_size = [1, 1] (i.e., no crop resize),
  # the outputs are the same whatever the global_pool value is.
  ps_crop_and_pool = ops.batch_position_sensitive_crop_regions(
      images, boxes, crop_size, num_spatial_bins, global_pool=True)
  ps_crop = ops.batch_position_sensitive_crop_regions(
      images, boxes, crop_size, num_spatial_bins, global_pool=False)

  with self.test_session() as sess:
    pooled_output, unpooled_output = sess.run((ps_crop_and_pool, ps_crop))
    self.assertAllClose(pooled_output, unpooled_output)
Example #11
Source File: ops_test.py From g-tensorflow-models with Apache License 2.0 | 6 votes |
def test_position_sensitive_with_single_bin(self):
  num_spatial_bins = [1, 1]
  image_shape = [2, 3, 3, 4]
  crop_size = [2, 2]

  image = tf.random_uniform(image_shape)
  boxes = tf.random_uniform((2, 3, 4))
  box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)

  # When a single bin is used, position-sensitive crop and pool should be
  # the same as non-position sensitive crop and pool.
  crop = tf.image.crop_and_resize(image, tf.reshape(boxes, [-1, 4]), box_ind,
                                  crop_size)
  crop_and_pool = tf.reduce_mean(crop, [1, 2], keepdims=True)
  crop_and_pool = tf.reshape(crop_and_pool, [2, 3, 1, 1, 4])

  ps_crop_and_pool = ops.batch_position_sensitive_crop_regions(
      image, boxes, crop_size, num_spatial_bins, global_pool=True)

  with self.test_session() as sess:
    expected_output, output = sess.run((crop_and_pool, ps_crop_and_pool))
    self.assertAllClose(output, expected_output)
Example #12
Source File: ops_test.py From models with Apache License 2.0 | 6 votes |
def test_position_sensitive_with_global_pool_false_and_single_bin(self):
  num_spatial_bins = [1, 1]
  image_shape = [2, 3, 3, 4]
  crop_size = [1, 1]

  def graph_fn():
    images = tf.random_uniform(image_shape)
    boxes = tf.random_uniform((2, 3, 4))
    # box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)

    # Since single_bin is used and crop_size = [1, 1] (i.e., no crop resize),
    # the outputs are the same whatever the global_pool value is.
    ps_crop_and_pool = ops.batch_position_sensitive_crop_regions(
        images, boxes, crop_size, num_spatial_bins, global_pool=True)
    ps_crop = ops.batch_position_sensitive_crop_regions(
        images, boxes, crop_size, num_spatial_bins, global_pool=False)
    return ps_crop_and_pool, ps_crop

  pooled_output, unpooled_output = self.execute(graph_fn, [])
  self.assertAllClose(pooled_output, unpooled_output)

# The following tests are only executed on CPU because the output
# shape is not constant.
Example #13
Source File: ops_test.py From g-tensorflow-models with Apache License 2.0 | 6 votes |
def test_position_sensitive_with_global_pool_false_and_single_bin(self):
  num_spatial_bins = [1, 1]
  image_shape = [2, 3, 3, 4]
  crop_size = [1, 1]

  images = tf.random_uniform(image_shape)
  boxes = tf.random_uniform((2, 3, 4))
  # box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)

  # Since single_bin is used and crop_size = [1, 1] (i.e., no crop resize),
  # the outputs are the same whatever the global_pool value is.
  ps_crop_and_pool = ops.batch_position_sensitive_crop_regions(
      images, boxes, crop_size, num_spatial_bins, global_pool=True)
  ps_crop = ops.batch_position_sensitive_crop_regions(
      images, boxes, crop_size, num_spatial_bins, global_pool=False)

  with self.test_session() as sess:
    pooled_output, unpooled_output = sess.run((ps_crop_and_pool, ps_crop))
    self.assertAllClose(pooled_output, unpooled_output)
Example #14
Source File: ops_test.py From models with Apache License 2.0 | 6 votes |
def test_position_sensitive_with_single_bin(self):
  num_spatial_bins = [1, 1]
  image_shape = [2, 3, 3, 4]
  crop_size = [2, 2]

  def graph_fn():
    image = tf.random_uniform(image_shape)
    boxes = tf.random_uniform((2, 3, 4))
    box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)

    # When a single bin is used, position-sensitive crop and pool should be
    # the same as non-position sensitive crop and pool.
    crop = tf.image.crop_and_resize(image, tf.reshape(boxes, [-1, 4]), box_ind,
                                    crop_size)
    crop_and_pool = tf.reduce_mean(crop, [1, 2], keepdims=True)
    crop_and_pool = tf.reshape(crop_and_pool, [2, 3, 1, 1, 4])

    ps_crop_and_pool = ops.batch_position_sensitive_crop_regions(
        image, boxes, crop_size, num_spatial_bins, global_pool=True)
    return crop_and_pool, ps_crop_and_pool

  # Crop and resize is not supported on TPUs.
  expected_output, output = self.execute_cpu(graph_fn, [])
  self.assertAllClose(output, expected_output)
Example #15
Source File: ops_test.py From MAX-Object-Detector with Apache License 2.0 | 5 votes |
def test_position_sensitive_with_global_pool_false_and_known_boxes(self):
  num_spatial_bins = [2, 2]
  image_shape = [2, 2, 2, 4]
  crop_size = [2, 2]

  # list(...) keeps this valid under Python 3, where range * int is not allowed.
  images = tf.constant(list(range(1, 2 * 2 * 4 + 1)) * 2, dtype=tf.float32,
                       shape=image_shape)

  # First box contains whole image, and second box contains only first row.
  boxes = tf.constant(np.array([[[0., 0., 1., 1.]],
                                [[0., 0., 0.5, 1.]]]), dtype=tf.float32)
  # box_ind = tf.constant([0, 1], dtype=tf.int32)

  expected_output = []

  # Expected output, when the box containing whole image.
  expected_output.append(
      np.reshape(np.array([[4, 7],
                           [10, 13]]),
                 (1, 2, 2, 1))
  )

  # Expected output, when the box containing only first row.
  expected_output.append(
      np.reshape(np.array([[3, 6],
                           [7, 10]]),
                 (1, 2, 2, 1))
  )
  expected_output = np.stack(expected_output, axis=0)

  ps_crop = ops.batch_position_sensitive_crop_regions(
      images, boxes, crop_size, num_spatial_bins, global_pool=False)

  with self.test_session() as sess:
    output = sess.run(ps_crop)
    self.assertAllEqual(output, expected_output)
Example #16
Source File: ops_test.py From multilabel-image-classification-tensorflow with MIT License | 5 votes |
def test_position_sensitive_with_global_pool_false_and_known_boxes(self):
  num_spatial_bins = [2, 2]
  image_shape = [2, 2, 2, 4]
  crop_size = [2, 2]

  # list(...) keeps this valid under Python 3, where range * int is not allowed.
  images = tf.constant(list(range(1, 2 * 2 * 4 + 1)) * 2, dtype=tf.float32,
                       shape=image_shape)

  # First box contains whole image, and second box contains only first row.
  boxes = tf.constant(np.array([[[0., 0., 1., 1.]],
                                [[0., 0., 0.5, 1.]]]), dtype=tf.float32)
  # box_ind = tf.constant([0, 1], dtype=tf.int32)

  expected_output = []

  # Expected output, when the box containing whole image.
  expected_output.append(
      np.reshape(np.array([[4, 7],
                           [10, 13]]),
                 (1, 2, 2, 1))
  )

  # Expected output, when the box containing only first row.
  expected_output.append(
      np.reshape(np.array([[3, 6],
                           [7, 10]]),
                 (1, 2, 2, 1))
  )
  expected_output = np.stack(expected_output, axis=0)

  ps_crop = ops.batch_position_sensitive_crop_regions(
      images, boxes, crop_size, num_spatial_bins, global_pool=False)

  with self.test_session() as sess:
    output = sess.run(ps_crop)
    self.assertAllEqual(output, expected_output)
Example #17
Source File: ops_test.py From models with Apache License 2.0 | 5 votes |
def test_position_sensitive_with_global_pool_false_and_known_boxes(self):
  num_spatial_bins = [2, 2]
  image_shape = [2, 2, 2, 4]
  crop_size = [2, 2]

  # box_ind = tf.constant([0, 1], dtype=tf.int32)
  expected_output = []

  # Expected output, when the box containing whole image.
  expected_output.append(
      np.reshape(np.array([[4, 7],
                           [10, 13]]),
                 (1, 2, 2, 1))
  )

  # Expected output, when the box containing only first row.
  expected_output.append(
      np.reshape(np.array([[3, 6],
                           [7, 10]]),
                 (1, 2, 2, 1))
  )
  expected_output = np.stack(expected_output, axis=0)

  def graph_fn():
    images = tf.constant(
        list(range(1, 2 * 2 * 4 + 1)) * 2, dtype=tf.float32,
        shape=image_shape)

    # First box contains whole image, and second box contains only first row.
    boxes = tf.constant(np.array([[[0., 0., 1., 1.]],
                                  [[0., 0., 0.5, 1.]]]), dtype=tf.float32)

    ps_crop = ops.batch_position_sensitive_crop_regions(
        images, boxes, crop_size, num_spatial_bins, global_pool=False)
    return ps_crop

  output = self.execute(graph_fn, [])
  self.assertAllEqual(output, expected_output)
Example #18
Source File: ops_test.py From g-tensorflow-models with Apache License 2.0 | 5 votes |
def test_position_sensitive_with_global_pool_false_and_known_boxes(self):
  num_spatial_bins = [2, 2]
  image_shape = [2, 2, 2, 4]
  crop_size = [2, 2]

  # list(...) keeps this valid under Python 3, where range * int is not allowed.
  images = tf.constant(list(range(1, 2 * 2 * 4 + 1)) * 2, dtype=tf.float32,
                       shape=image_shape)

  # First box contains whole image, and second box contains only first row.
  boxes = tf.constant(np.array([[[0., 0., 1., 1.]],
                                [[0., 0., 0.5, 1.]]]), dtype=tf.float32)
  # box_ind = tf.constant([0, 1], dtype=tf.int32)

  expected_output = []

  # Expected output, when the box containing whole image.
  expected_output.append(
      np.reshape(np.array([[4, 7],
                           [10, 13]]),
                 (1, 2, 2, 1))
  )

  # Expected output, when the box containing only first row.
  expected_output.append(
      np.reshape(np.array([[3, 6],
                           [7, 10]]),
                 (1, 2, 2, 1))
  )
  expected_output = np.stack(expected_output, axis=0)

  ps_crop = ops.batch_position_sensitive_crop_regions(
      images, boxes, crop_size, num_spatial_bins, global_pool=False)

  with self.test_session() as sess:
    output = sess.run(ps_crop)
    self.assertAllEqual(output, expected_output)
Example #19
Source File: ops_test.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0 | 5 votes |
def test_position_sensitive_with_global_pool_false_and_known_boxes(self):
  num_spatial_bins = [2, 2]
  image_shape = [2, 2, 2, 4]
  crop_size = [2, 2]

  images = tf.constant(
      list(range(1, 2 * 2 * 4 + 1)) * 2, dtype=tf.float32,
      shape=image_shape)

  # First box contains whole image, and second box contains only first row.
  boxes = tf.constant(np.array([[[0., 0., 1., 1.]],
                                [[0., 0., 0.5, 1.]]]), dtype=tf.float32)
  # box_ind = tf.constant([0, 1], dtype=tf.int32)

  expected_output = []

  # Expected output, when the box containing whole image.
  expected_output.append(
      np.reshape(np.array([[4, 7],
                           [10, 13]]),
                 (1, 2, 2, 1))
  )

  # Expected output, when the box containing only first row.
  expected_output.append(
      np.reshape(np.array([[3, 6],
                           [7, 10]]),
                 (1, 2, 2, 1))
  )
  expected_output = np.stack(expected_output, axis=0)

  ps_crop = ops.batch_position_sensitive_crop_regions(
      images, boxes, crop_size, num_spatial_bins, global_pool=False)

  with self.test_session() as sess:
    output = sess.run(ps_crop)
    self.assertAllEqual(output, expected_output)
Example #20
Source File: ops_test.py From BMW-TensorFlow-Training-GUI with Apache License 2.0 | 5 votes |
def test_position_sensitive_with_global_pool_false_and_known_boxes(self):
  num_spatial_bins = [2, 2]
  image_shape = [2, 2, 2, 4]
  crop_size = [2, 2]

  # list(...) keeps this valid under Python 3, where range * int is not allowed.
  images = tf.constant(list(range(1, 2 * 2 * 4 + 1)) * 2, dtype=tf.float32,
                       shape=image_shape)

  # First box contains whole image, and second box contains only first row.
  boxes = tf.constant(np.array([[[0., 0., 1., 1.]],
                                [[0., 0., 0.5, 1.]]]), dtype=tf.float32)
  # box_ind = tf.constant([0, 1], dtype=tf.int32)

  expected_output = []

  # Expected output, when the box containing whole image.
  expected_output.append(
      np.reshape(np.array([[4, 7],
                           [10, 13]]),
                 (1, 2, 2, 1))
  )

  # Expected output, when the box containing only first row.
  expected_output.append(
      np.reshape(np.array([[3, 6],
                           [7, 10]]),
                 (1, 2, 2, 1))
  )
  expected_output = np.stack(expected_output, axis=0)

  ps_crop = ops.batch_position_sensitive_crop_regions(
      images, boxes, crop_size, num_spatial_bins, global_pool=False)

  with self.test_session() as sess:
    output = sess.run(ps_crop)
    self.assertAllEqual(output, expected_output)
Example #21
Source File: ops_test.py From vehicle_counting_tensorflow with MIT License | 5 votes |
def test_position_sensitive_with_global_pool_false_and_known_boxes(self):
  num_spatial_bins = [2, 2]
  image_shape = [2, 2, 2, 4]
  crop_size = [2, 2]

  # list(...) keeps this valid under Python 3, where range * int is not allowed.
  images = tf.constant(list(range(1, 2 * 2 * 4 + 1)) * 2, dtype=tf.float32,
                       shape=image_shape)

  # First box contains whole image, and second box contains only first row.
  boxes = tf.constant(np.array([[[0., 0., 1., 1.]],
                                [[0., 0., 0.5, 1.]]]), dtype=tf.float32)
  # box_ind = tf.constant([0, 1], dtype=tf.int32)

  expected_output = []

  # Expected output, when the box containing whole image.
  expected_output.append(
      np.reshape(np.array([[4, 7],
                           [10, 13]]),
                 (1, 2, 2, 1))
  )

  # Expected output, when the box containing only first row.
  expected_output.append(
      np.reshape(np.array([[3, 6],
                           [7, 10]]),
                 (1, 2, 2, 1))
  )
  expected_output = np.stack(expected_output, axis=0)

  ps_crop = ops.batch_position_sensitive_crop_regions(
      images, boxes, crop_size, num_spatial_bins, global_pool=False)

  with self.test_session() as sess:
    output = sess.run(ps_crop)
    self.assertAllEqual(output, expected_output)