Python object_detection.models.feature_map_generators.fpn_top_down_feature_maps() Examples
The following are 26 code examples of object_detection.models.feature_map_generators.fpn_top_down_feature_maps().
Each example is taken from an open-source project; the source file, project, and license are noted above each example. You may also want to check out the other functions and classes available in the module object_detection.models.feature_map_generators.
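All of the examples below follow the same basic pattern, so here is a minimal usage sketch first (assuming the TF1-era object_detection API these projects vendor). The input is a list of feature maps ordered from highest to lowest resolution; when they are passed as (name, tensor) pairs the output dict is keyed 'top_down_<name>', and when they are passed as bare tensors (as in some older examples below) it is keyed 'top_down_feature_map_<index>':

import tensorflow as tf
from object_detection.models import feature_map_generators

# Backbone feature maps, finest to coarsest; the names determine the keys
# of the returned ordered dict.
image_features = [
    ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
    ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
]

# Every output map is projected to `depth` channels.
feature_maps = feature_map_generators.fpn_top_down_feature_maps(
    image_features=image_features, depth=128)
print(list(feature_maps.keys()))  # ['top_down_block2', 'top_down_block3']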
Example #1
Source File: feature_map_generators_test.py From vehicle_counting_tensorflow with MIT License
def test_get_expected_feature_map_shapes_with_depthwise(self):
  image_features = [
      ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
      ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
      ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
      ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
  ]
  feature_maps = feature_map_generators.fpn_top_down_feature_maps(
      image_features=image_features, depth=128, use_depthwise=True)

  expected_feature_map_shapes = {
      'top_down_block2': (4, 8, 8, 128),
      'top_down_block3': (4, 4, 4, 128),
      'top_down_block4': (4, 2, 2, 128),
      'top_down_block5': (4, 1, 1, 128)
  }

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = {key: value.shape
                              for key, value in out_feature_maps.items()}
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #2
Source File: feature_map_generators_test.py From multilabel-image-classification-tensorflow with MIT License
def test_get_expected_feature_map_shapes_with_depthwise(self):
  image_features = [
      ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
      ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
      ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
      ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
  ]
  feature_maps = feature_map_generators.fpn_top_down_feature_maps(
      image_features=image_features, depth=128, use_depthwise=True)

  expected_feature_map_shapes = {
      'top_down_block2': (4, 8, 8, 128),
      'top_down_block3': (4, 4, 4, 128),
      'top_down_block4': (4, 2, 2, 128),
      'top_down_block5': (4, 1, 1, 128)
  }

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = {key: value.shape
                              for key, value in out_feature_maps.items()}
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #3
Source File: feature_map_generators_test.py From multilabel-image-classification-tensorflow with MIT License
def test_get_expected_feature_map_shapes(self):
  image_features = [
      ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
      ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
      ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
      ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
  ]
  feature_maps = feature_map_generators.fpn_top_down_feature_maps(
      image_features=image_features, depth=128)

  expected_feature_map_shapes = {
      'top_down_block2': (4, 8, 8, 128),
      'top_down_block3': (4, 4, 4, 128),
      'top_down_block4': (4, 2, 2, 128),
      'top_down_block5': (4, 1, 1, 128)
  }

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = {key: value.shape
                              for key, value in out_feature_maps.items()}
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #4
Source File: feature_map_generators_test.py From g-tensorflow-models with Apache License 2.0
def test_get_expected_feature_map_shapes_with_depthwise(self):
  image_features = [
      ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
      ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
      ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
      ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
  ]
  feature_maps = feature_map_generators.fpn_top_down_feature_maps(
      image_features=image_features, depth=128, use_depthwise=True)

  expected_feature_map_shapes = {
      'top_down_block2': (4, 8, 8, 128),
      'top_down_block3': (4, 4, 4, 128),
      'top_down_block4': (4, 2, 2, 128),
      'top_down_block5': (4, 1, 1, 128)
  }

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = {key: value.shape
                              for key, value in out_feature_maps.items()}
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #5
Source File: feature_map_generators_test.py From g-tensorflow-models with Apache License 2.0
def test_get_expected_feature_map_shapes(self):
  image_features = [
      ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
      ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
      ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
      ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
  ]
  feature_maps = feature_map_generators.fpn_top_down_feature_maps(
      image_features=image_features, depth=128)

  expected_feature_map_shapes = {
      'top_down_block2': (4, 8, 8, 128),
      'top_down_block3': (4, 4, 4, 128),
      'top_down_block4': (4, 2, 2, 128),
      'top_down_block5': (4, 1, 1, 128)
  }

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = {key: value.shape
                              for key, value in out_feature_maps.items()}
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #6
Source File: feature_map_generators_test.py From MAX-Object-Detector with Apache License 2.0
def test_get_expected_feature_map_shapes_with_depthwise(self):
  image_features = [
      ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
      ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
      ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
      ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
  ]
  feature_maps = feature_map_generators.fpn_top_down_feature_maps(
      image_features=image_features, depth=128, use_depthwise=True)

  expected_feature_map_shapes = {
      'top_down_block2': (4, 8, 8, 128),
      'top_down_block3': (4, 4, 4, 128),
      'top_down_block4': (4, 2, 2, 128),
      'top_down_block5': (4, 1, 1, 128)
  }

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = {key: value.shape
                              for key, value in out_feature_maps.items()}
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #7
Source File: feature_map_generators_test.py From MAX-Object-Detector with Apache License 2.0
def test_get_expected_feature_map_shapes(self):
  image_features = [
      ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
      ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
      ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
      ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
  ]
  feature_maps = feature_map_generators.fpn_top_down_feature_maps(
      image_features=image_features, depth=128)

  expected_feature_map_shapes = {
      'top_down_block2': (4, 8, 8, 128),
      'top_down_block3': (4, 4, 4, 128),
      'top_down_block4': (4, 2, 2, 128),
      'top_down_block5': (4, 1, 1, 128)
  }

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = {key: value.shape
                              for key, value in out_feature_maps.items()}
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #8
Source File: feature_map_generators_test.py From Elphas with Apache License 2.0
def test_get_expected_feature_map_shapes(self):
  image_features = [
      tf.random_uniform([4, 8, 8, 256], dtype=tf.float32),
      tf.random_uniform([4, 4, 4, 256], dtype=tf.float32),
      tf.random_uniform([4, 2, 2, 256], dtype=tf.float32),
      tf.random_uniform([4, 1, 1, 256], dtype=tf.float32),
  ]
  feature_maps = feature_map_generators.fpn_top_down_feature_maps(
      image_features=image_features, depth=128)

  expected_feature_map_shapes = {
      'top_down_feature_map_0': (4, 8, 8, 128),
      'top_down_feature_map_1': (4, 4, 4, 128),
      'top_down_feature_map_2': (4, 2, 2, 128),
      'top_down_feature_map_3': (4, 1, 1, 128)
  }

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = {key: value.shape
                              for key, value in out_feature_maps.items()}
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #9
Source File: feature_map_generators_test.py From BMW-TensorFlow-Training-GUI with Apache License 2.0
def test_get_expected_feature_map_shapes(self):
  image_features = [
      ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
      ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
      ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
      ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
  ]
  feature_maps = feature_map_generators.fpn_top_down_feature_maps(
      image_features=image_features, depth=128)

  expected_feature_map_shapes = {
      'top_down_block2': (4, 8, 8, 128),
      'top_down_block3': (4, 4, 4, 128),
      'top_down_block4': (4, 2, 2, 128),
      'top_down_block5': (4, 1, 1, 128)
  }

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = {key: value.shape
                              for key, value in out_feature_maps.items()}
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #10
Source File: feature_map_generators_test.py From ros_tensorflow with Apache License 2.0
def test_get_expected_feature_map_shapes(self):
  image_features = [
      tf.random_uniform([4, 8, 8, 256], dtype=tf.float32),
      tf.random_uniform([4, 4, 4, 256], dtype=tf.float32),
      tf.random_uniform([4, 2, 2, 256], dtype=tf.float32),
      tf.random_uniform([4, 1, 1, 256], dtype=tf.float32),
  ]
  feature_maps = feature_map_generators.fpn_top_down_feature_maps(
      image_features=image_features, depth=128)

  expected_feature_map_shapes = {
      'top_down_feature_map_0': (4, 8, 8, 128),
      'top_down_feature_map_1': (4, 4, 4, 128),
      'top_down_feature_map_2': (4, 2, 2, 128),
      'top_down_feature_map_3': (4, 1, 1, 128)
  }

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = {key: value.shape
                              for key, value in out_feature_maps.items()}
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #11
Source File: feature_map_generators_test.py From Gun-Detector with Apache License 2.0
def test_get_expected_feature_map_shapes(self):
  image_features = [
      tf.random_uniform([4, 8, 8, 256], dtype=tf.float32),
      tf.random_uniform([4, 4, 4, 256], dtype=tf.float32),
      tf.random_uniform([4, 2, 2, 256], dtype=tf.float32),
      tf.random_uniform([4, 1, 1, 256], dtype=tf.float32),
  ]
  feature_maps = feature_map_generators.fpn_top_down_feature_maps(
      image_features=image_features, depth=128)

  expected_feature_map_shapes = {
      'top_down_feature_map_0': (4, 8, 8, 128),
      'top_down_feature_map_1': (4, 4, 4, 128),
      'top_down_feature_map_2': (4, 2, 2, 128),
      'top_down_feature_map_3': (4, 1, 1, 128)
  }

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = {key: value.shape
                              for key, value in out_feature_maps.items()}
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #12
Source File: feature_map_generators_test.py From Traffic-Rule-Violation-Detection-System with MIT License
def test_get_expected_feature_map_shapes(self):
  image_features = [
      tf.random_uniform([4, 8, 8, 256], dtype=tf.float32),
      tf.random_uniform([4, 4, 4, 256], dtype=tf.float32),
      tf.random_uniform([4, 2, 2, 256], dtype=tf.float32),
      tf.random_uniform([4, 1, 1, 256], dtype=tf.float32),
  ]
  feature_maps = feature_map_generators.fpn_top_down_feature_maps(
      image_features=image_features, depth=128)

  expected_feature_map_shapes = {
      'top_down_feature_map_0': (4, 8, 8, 128),
      'top_down_feature_map_1': (4, 4, 4, 128),
      'top_down_feature_map_2': (4, 2, 2, 128),
      'top_down_feature_map_3': (4, 1, 1, 128)
  }

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = {key: value.shape
                              for key, value in out_feature_maps.items()}
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #13
Source File: feature_map_generators_test.py From Person-Detection-and-Tracking with MIT License
def test_get_expected_feature_map_shapes(self):
  image_features = [
      ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
      ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
      ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
      ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
  ]
  feature_maps = feature_map_generators.fpn_top_down_feature_maps(
      image_features=image_features, depth=128)

  expected_feature_map_shapes = {
      'top_down_block2': (4, 8, 8, 128),
      'top_down_block3': (4, 4, 4, 128),
      'top_down_block4': (4, 2, 2, 128),
      'top_down_block5': (4, 1, 1, 128)
  }

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = {key: value.shape
                              for key, value in out_feature_maps.items()}
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #14
Source File: feature_map_generators_test.py From ros_people_object_detection_tensorflow with Apache License 2.0
def test_get_expected_feature_map_shapes(self):
  image_features = [
      tf.random_uniform([4, 8, 8, 256], dtype=tf.float32),
      tf.random_uniform([4, 4, 4, 256], dtype=tf.float32),
      tf.random_uniform([4, 2, 2, 256], dtype=tf.float32),
      tf.random_uniform([4, 1, 1, 256], dtype=tf.float32),
  ]
  feature_maps = feature_map_generators.fpn_top_down_feature_maps(
      image_features=image_features, depth=128)

  expected_feature_map_shapes = {
      'top_down_feature_map_0': (4, 8, 8, 128),
      'top_down_feature_map_1': (4, 4, 4, 128),
      'top_down_feature_map_2': (4, 2, 2, 128),
      'top_down_feature_map_3': (4, 1, 1, 128)
  }

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = {key: value.shape
                              for key, value in out_feature_maps.items()}
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #15
Source File: feature_map_generators_test.py From vehicle_counting_tensorflow with MIT License
def test_get_expected_feature_map_shapes(self):
  image_features = [
      ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
      ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
      ('block4', tf.random_uniform([4, 2, 2, 256], dtype=tf.float32)),
      ('block5', tf.random_uniform([4, 1, 1, 256], dtype=tf.float32))
  ]
  feature_maps = feature_map_generators.fpn_top_down_feature_maps(
      image_features=image_features, depth=128)

  expected_feature_map_shapes = {
      'top_down_block2': (4, 8, 8, 128),
      'top_down_block3': (4, 4, 4, 128),
      'top_down_block4': (4, 2, 2, 128),
      'top_down_block5': (4, 1, 1, 128)
  }

  init_op = tf.global_variables_initializer()
  with self.test_session() as sess:
    sess.run(init_op)
    out_feature_maps = sess.run(feature_maps)
    out_feature_map_shapes = {key: value.shape
                              for key, value in out_feature_maps.items()}
    self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example #16
Source File: feature_map_generators_test.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def _build_feature_map_generator(
    self, image_features, depth, use_keras,
    use_bounded_activations=False,
    use_native_resize_op=False,
    use_explicit_padding=False,
    use_depthwise=False):
  if use_keras:
    return feature_map_generators.KerasFpnTopDownFeatureMaps(
        num_levels=len(image_features),
        depth=depth,
        is_training=True,
        conv_hyperparams=self._build_conv_hyperparams(),
        freeze_batchnorm=False,
        use_depthwise=use_depthwise,
        use_explicit_padding=use_explicit_padding,
        use_bounded_activations=use_bounded_activations,
        use_native_resize_op=use_native_resize_op,
        scope=None,
        name='FeatureMaps',
    )
  else:
    def feature_map_generator(image_features):
      return feature_map_generators.fpn_top_down_feature_maps(
          image_features=image_features,
          depth=depth,
          use_depthwise=use_depthwise,
          use_explicit_padding=use_explicit_padding,
          use_bounded_activations=use_bounded_activations,
          use_native_resize_op=use_native_resize_op)
    return feature_map_generator
Example #17
Source File: feature_map_generators_test.py From models with Apache License 2.0
def _build_feature_map_generator(
    self, image_features, depth,
    use_bounded_activations=False,
    use_native_resize_op=False,
    use_explicit_padding=False,
    use_depthwise=False):
  if tf_version.is_tf2():
    return feature_map_generators.KerasFpnTopDownFeatureMaps(
        num_levels=len(image_features),
        depth=depth,
        is_training=True,
        conv_hyperparams=self._build_conv_hyperparams(),
        freeze_batchnorm=False,
        use_depthwise=use_depthwise,
        use_explicit_padding=use_explicit_padding,
        use_bounded_activations=use_bounded_activations,
        use_native_resize_op=use_native_resize_op,
        scope=None,
        name='FeatureMaps',
    )
  else:
    def feature_map_generator(image_features):
      return feature_map_generators.fpn_top_down_feature_maps(
          image_features=image_features,
          depth=depth,
          use_depthwise=use_depthwise,
          use_explicit_padding=use_explicit_padding,
          use_bounded_activations=use_bounded_activations,
          use_native_resize_op=use_native_resize_op)
    return feature_map_generator
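In the test file these helpers come from, the returned object is used the same way in both branches, because the Keras layer and the TF1 closure are both callable on the (name, tensor) list. The following is a rough sketch of such a call site, not code from the original file; it assumes the surrounding test class, including the _build_conv_hyperparams() helper that the Keras branch needs:

def test_feature_map_generator_call(self):
  # Hypothetical test method illustrating the call pattern.
  image_features = [
      ('block2', tf.random_uniform([4, 8, 8, 256], dtype=tf.float32)),
      ('block3', tf.random_uniform([4, 4, 4, 256], dtype=tf.float32)),
  ]
  feature_map_generator = self._build_feature_map_generator(
      image_features=image_features, depth=128)
  # Either a KerasFpnTopDownFeatureMaps layer (TF2) or the plain closure
  # over fpn_top_down_feature_maps (TF1); both return the ordered dict of
  # 'top_down_*' tensors.
  feature_maps = feature_map_generator(image_features)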
Example #18
Source File: ssd_resnet_v1_fpn_feature_extractor.py From Gun-Detector with Apache License 2.0
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]

  Raises:
    ValueError: depth multiplier is not supported.
  """
  if self._depth_multiplier != 1.0:
    raise ValueError('Depth multiplier not supported.')

  preprocessed_inputs = shape_utils.check_min_image_dim(
      129, preprocessed_inputs)
  with tf.variable_scope(
      self._resnet_scope_name, reuse=self._reuse_weights) as scope:
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
      with (slim.arg_scope(self._conv_hyperparams_fn())
            if self._override_base_feature_extractor_hyperparams else
            context_manager.IdentityContextManager()):
        _, image_features = self._resnet_base_fn(
            inputs=ops.pad_to_multiple(preprocessed_inputs,
                                       self._pad_to_multiple),
            num_classes=None,
            is_training=None,
            global_pool=False,
            output_stride=None,
            store_non_strided_activations=True,
            scope=scope)
    image_features = self._filter_features(image_features)
    last_feature_map = image_features['block4']
  with tf.variable_scope(self._fpn_scope_name, reuse=self._reuse_weights):
    with slim.arg_scope(self._conv_hyperparams_fn()):
      for i in range(5, 7):
        last_feature_map = slim.conv2d(
            last_feature_map,
            num_outputs=256,
            kernel_size=[3, 3],
            stride=2,
            padding='SAME',
            scope='block{}'.format(i))
        image_features['bottomup_{}'.format(i)] = last_feature_map
      feature_maps = feature_map_generators.fpn_top_down_feature_maps(
          [image_features[key] for key in
           ['block2', 'block3', 'block4', 'bottomup_5', 'bottomup_6']],
          depth=256,
          scope='top_down_features')
  return feature_maps.values()
Example #19
Source File: ssd_resnet_v1_fpn_feature_extractor.py From ros_tensorflow with Apache License 2.0
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]

  Raises:
    ValueError: depth multiplier is not supported.
  """
  if self._depth_multiplier != 1.0:
    raise ValueError('Depth multiplier not supported.')

  preprocessed_inputs = shape_utils.check_min_image_dim(
      129, preprocessed_inputs)
  with tf.variable_scope(
      self._resnet_scope_name, reuse=self._reuse_weights) as scope:
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
      with (slim.arg_scope(self._conv_hyperparams_fn())
            if self._override_base_feature_extractor_hyperparams else
            context_manager.IdentityContextManager()):
        _, image_features = self._resnet_base_fn(
            inputs=ops.pad_to_multiple(preprocessed_inputs,
                                       self._pad_to_multiple),
            num_classes=None,
            is_training=None,
            global_pool=False,
            output_stride=None,
            store_non_strided_activations=True,
            scope=scope)
    image_features = self._filter_features(image_features)
    last_feature_map = image_features['block4']
  with tf.variable_scope(self._fpn_scope_name, reuse=self._reuse_weights):
    with slim.arg_scope(self._conv_hyperparams_fn()):
      for i in range(5, 7):
        last_feature_map = slim.conv2d(
            last_feature_map,
            num_outputs=256,
            kernel_size=[3, 3],
            stride=2,
            padding='SAME',
            scope='block{}'.format(i))
        image_features['bottomup_{}'.format(i)] = last_feature_map
      feature_maps = feature_map_generators.fpn_top_down_feature_maps(
          [image_features[key] for key in
           ['block2', 'block3', 'block4', 'bottomup_5', 'bottomup_6']],
          depth=256,
          scope='top_down_features')
  return feature_maps.values()
Example #20
Source File: ssd_mobilenet_v1_fpn_feature_extractor.py From BMW-TensorFlow-Training-GUI with Apache License 2.0
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]
  """
  preprocessed_inputs = shape_utils.check_min_image_dim(
      33, preprocessed_inputs)
  with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights) as scope:
    with slim.arg_scope(
        mobilenet_v1.mobilenet_v1_arg_scope(
            is_training=None, regularize_depthwise=True)):
      with (slim.arg_scope(self._conv_hyperparams_fn())
            if self._override_base_feature_extractor_hyperparams else
            context_manager.IdentityContextManager()):
        _, image_features = mobilenet_v1.mobilenet_v1_base(
            ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
            final_endpoint='Conv2d_13_pointwise',
            min_depth=self._min_depth,
            depth_multiplier=self._depth_multiplier,
            use_explicit_padding=self._use_explicit_padding,
            scope=scope)
    depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth)
    with slim.arg_scope(self._conv_hyperparams_fn()):
      with tf.variable_scope('fpn', reuse=self._reuse_weights):
        feature_blocks = [
            'Conv2d_3_pointwise', 'Conv2d_5_pointwise', 'Conv2d_11_pointwise',
            'Conv2d_13_pointwise'
        ]
        base_fpn_max_level = min(self._fpn_max_level, 5)
        feature_block_list = []
        for level in range(self._fpn_min_level, base_fpn_max_level + 1):
          feature_block_list.append(feature_blocks[level - 2])
        fpn_features = feature_map_generators.fpn_top_down_feature_maps(
            [(key, image_features[key]) for key in feature_block_list],
            depth=depth_fn(256))
        feature_maps = []
        for level in range(self._fpn_min_level, base_fpn_max_level + 1):
          feature_maps.append(fpn_features['top_down_{}'.format(
              feature_blocks[level - 2])])
        last_feature_map = fpn_features['top_down_{}'.format(
            feature_blocks[base_fpn_max_level - 2])]
        # Construct coarse features
        for i in range(base_fpn_max_level + 1, self._fpn_max_level + 1):
          last_feature_map = slim.conv2d(
              last_feature_map,
              num_outputs=depth_fn(256),
              kernel_size=[3, 3],
              stride=2,
              padding='SAME',
              scope='bottom_up_Conv2d_{}'.format(i - base_fpn_max_level + 13))
          feature_maps.append(last_feature_map)
  return feature_maps
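The level arithmetic in the MobileNet example above is easy to misread, so the following standalone sketch isolates just the index bookkeeping. The values fpn_min_level=3 and fpn_max_level=7 are illustrative assumptions (in the real extractor they are constructor arguments):

# Pyramid level L maps to endpoint feature_blocks[L - 2]; MobileNetV1 only
# provides levels 2-5, so anything above base_fpn_max_level is synthesized
# by extra stride-2 convolutions.
feature_blocks = ['Conv2d_3_pointwise', 'Conv2d_5_pointwise',
                  'Conv2d_11_pointwise', 'Conv2d_13_pointwise']
fpn_min_level, fpn_max_level = 3, 7  # assumed illustrative settings
base_fpn_max_level = min(fpn_max_level, 5)

fpn_inputs = [feature_blocks[level - 2]
              for level in range(fpn_min_level, base_fpn_max_level + 1)]
print(fpn_inputs)
# ['Conv2d_5_pointwise', 'Conv2d_11_pointwise', 'Conv2d_13_pointwise']

coarse_scopes = ['bottom_up_Conv2d_{}'.format(i - base_fpn_max_level + 13)
                 for i in range(base_fpn_max_level + 1, fpn_max_level + 1)]
print(coarse_scopes)
# ['bottom_up_Conv2d_14', 'bottom_up_Conv2d_15']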
Example #21
Source File: ssd_resnet_v1_fpn_feature_extractor.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]
  """
  preprocessed_inputs = shape_utils.check_min_image_dim(
      129, preprocessed_inputs)
  with tf.variable_scope(
      self._resnet_scope_name, reuse=self._reuse_weights) as scope:
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
      with (slim.arg_scope(self._conv_hyperparams_fn())
            if self._override_base_feature_extractor_hyperparams else
            context_manager.IdentityContextManager()):
        _, image_features = self._resnet_base_fn(
            inputs=ops.pad_to_multiple(preprocessed_inputs,
                                       self._pad_to_multiple),
            num_classes=None,
            is_training=None,
            global_pool=False,
            output_stride=None,
            store_non_strided_activations=True,
            min_base_depth=self._min_depth,
            depth_multiplier=self._depth_multiplier,
            scope=scope)
      image_features = self._filter_features(image_features)
    depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth)
    with slim.arg_scope(self._conv_hyperparams_fn()):
      with tf.variable_scope(self._fpn_scope_name, reuse=self._reuse_weights):
        base_fpn_max_level = min(self._fpn_max_level, 5)
        feature_block_list = []
        for level in range(self._fpn_min_level, base_fpn_max_level + 1):
          feature_block_list.append('block{}'.format(level - 1))
        fpn_features = feature_map_generators.fpn_top_down_feature_maps(
            [(key, image_features[key]) for key in feature_block_list],
            depth=depth_fn(self._additional_layer_depth))
        feature_maps = []
        for level in range(self._fpn_min_level, base_fpn_max_level + 1):
          feature_maps.append(
              fpn_features['top_down_block{}'.format(level - 1)])
        last_feature_map = fpn_features['top_down_block{}'.format(
            base_fpn_max_level - 1)]
        # Construct coarse features
        for i in range(base_fpn_max_level, self._fpn_max_level):
          last_feature_map = slim.conv2d(
              last_feature_map,
              num_outputs=depth_fn(self._additional_layer_depth),
              kernel_size=[3, 3],
              stride=2,
              padding='SAME',
              scope='bottom_up_block{}'.format(i))
          feature_maps.append(last_feature_map)
  return feature_maps
Example #22
Source File: ssd_resnet_v1_fpn_feature_extractor.py From Traffic-Rule-Violation-Detection-System with MIT License
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]

  Raises:
    ValueError: depth multiplier is not supported.
  """
  if self._depth_multiplier != 1.0:
    raise ValueError('Depth multiplier not supported.')

  preprocessed_inputs = shape_utils.check_min_image_dim(
      129, preprocessed_inputs)
  with tf.variable_scope(
      self._resnet_scope_name, reuse=self._reuse_weights) as scope:
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
      _, image_features = self._resnet_base_fn(
          inputs=ops.pad_to_multiple(preprocessed_inputs,
                                     self._pad_to_multiple),
          num_classes=None,
          is_training=self._is_training and self._batch_norm_trainable,
          global_pool=False,
          output_stride=None,
          store_non_strided_activations=True,
          scope=scope)
    image_features = self._filter_features(image_features)
    last_feature_map = image_features['block4']
  with tf.variable_scope(self._fpn_scope_name, reuse=self._reuse_weights):
    with slim.arg_scope(self._conv_hyperparams):
      for i in range(5, 7):
        last_feature_map = slim.conv2d(
            last_feature_map,
            num_outputs=256,
            kernel_size=[3, 3],
            stride=2,
            padding='SAME',
            scope='block{}'.format(i))
        image_features['bottomup_{}'.format(i)] = last_feature_map
      feature_maps = feature_map_generators.fpn_top_down_feature_maps(
          [image_features[key] for key in
           ['block2', 'block3', 'block4', 'bottomup_5', 'bottomup_6']],
          depth=256,
          scope='top_down_features')
  return feature_maps.values()
Example #23
Source File: ssd_resnet_v1_fpn_feature_extractor.py From Elphas with Apache License 2.0
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]

  Raises:
    ValueError: depth multiplier is not supported.
  """
  if self._depth_multiplier != 1.0:
    raise ValueError('Depth multiplier not supported.')

  preprocessed_inputs = shape_utils.check_min_image_dim(
      129, preprocessed_inputs)
  with tf.variable_scope(
      self._resnet_scope_name, reuse=self._reuse_weights) as scope:
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
      _, image_features = self._resnet_base_fn(
          inputs=ops.pad_to_multiple(preprocessed_inputs,
                                     self._pad_to_multiple),
          num_classes=None,
          is_training=self._is_training and self._batch_norm_trainable,
          global_pool=False,
          output_stride=None,
          store_non_strided_activations=True,
          scope=scope)
    image_features = self._filter_features(image_features)
    last_feature_map = image_features['block4']
  with tf.variable_scope(self._fpn_scope_name, reuse=self._reuse_weights):
    with slim.arg_scope(self._conv_hyperparams):
      for i in range(5, 7):
        last_feature_map = slim.conv2d(
            last_feature_map,
            num_outputs=256,
            kernel_size=[3, 3],
            stride=2,
            padding='SAME',
            scope='block{}'.format(i))
        image_features['bottomup_{}'.format(i)] = last_feature_map
      feature_maps = feature_map_generators.fpn_top_down_feature_maps(
          [image_features[key] for key in
           ['block2', 'block3', 'block4', 'bottomup_5', 'bottomup_6']],
          depth=256,
          scope='top_down_features')
  return feature_maps.values()
Example #24
Source File: ssd_resnet_v1_fpn_feature_extractor.py From Person-Detection-and-Tracking with MIT License
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]

  Raises:
    ValueError: depth multiplier is not supported.
  """
  if self._depth_multiplier != 1.0:
    raise ValueError('Depth multiplier not supported.')

  preprocessed_inputs = shape_utils.check_min_image_dim(
      129, preprocessed_inputs)
  with tf.variable_scope(
      self._resnet_scope_name, reuse=self._reuse_weights) as scope:
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
      with (slim.arg_scope(self._conv_hyperparams_fn())
            if self._override_base_feature_extractor_hyperparams else
            context_manager.IdentityContextManager()):
        _, image_features = self._resnet_base_fn(
            inputs=ops.pad_to_multiple(preprocessed_inputs,
                                       self._pad_to_multiple),
            num_classes=None,
            is_training=None,
            global_pool=False,
            output_stride=None,
            store_non_strided_activations=True,
            scope=scope)
      image_features = self._filter_features(image_features)
    with slim.arg_scope(self._conv_hyperparams_fn()):
      with tf.variable_scope(self._fpn_scope_name, reuse=self._reuse_weights):
        fpn_features = feature_map_generators.fpn_top_down_feature_maps(
            [(key, image_features[key])
             for key in ['block2', 'block3', 'block4']],
            depth=256)
        last_feature_map = fpn_features['top_down_block4']
        coarse_features = {}
        for i in range(5, 7):
          last_feature_map = slim.conv2d(
              last_feature_map,
              num_outputs=256,
              kernel_size=[3, 3],
              stride=2,
              padding='SAME',
              scope='bottom_up_block{}'.format(i))
          coarse_features['bottom_up_block{}'.format(i)] = last_feature_map
  return [fpn_features['top_down_block2'],
          fpn_features['top_down_block3'],
          fpn_features['top_down_block4'],
          coarse_features['bottom_up_block5'],
          coarse_features['bottom_up_block6']]
Example #25
Source File: ssd_resnet_v1_fpn_feature_extractor.py From ros_people_object_detection_tensorflow with Apache License 2.0
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]

  Raises:
    ValueError: depth multiplier is not supported.
  """
  if self._depth_multiplier != 1.0:
    raise ValueError('Depth multiplier not supported.')

  preprocessed_inputs = shape_utils.check_min_image_dim(
      129, preprocessed_inputs)
  with tf.variable_scope(
      self._resnet_scope_name, reuse=self._reuse_weights) as scope:
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
      _, image_features = self._resnet_base_fn(
          inputs=ops.pad_to_multiple(preprocessed_inputs,
                                     self._pad_to_multiple),
          num_classes=None,
          is_training=self._is_training and self._batch_norm_trainable,
          global_pool=False,
          output_stride=None,
          store_non_strided_activations=True,
          scope=scope)
    image_features = self._filter_features(image_features)
    last_feature_map = image_features['block4']
  with tf.variable_scope(self._fpn_scope_name, reuse=self._reuse_weights):
    with slim.arg_scope(self._conv_hyperparams):
      for i in range(5, 7):
        last_feature_map = slim.conv2d(
            last_feature_map,
            num_outputs=256,
            kernel_size=[3, 3],
            stride=2,
            padding='SAME',
            scope='block{}'.format(i))
        image_features['bottomup_{}'.format(i)] = last_feature_map
      feature_maps = feature_map_generators.fpn_top_down_feature_maps(
          [image_features[key] for key in
           ['block2', 'block3', 'block4', 'bottomup_5', 'bottomup_6']],
          depth=256,
          scope='top_down_features')
  return feature_maps.values()
Example #26
Source File: ssd_resnet_v1_fpn_feature_extractor.py From models with Apache License 2.0
def extract_features(self, preprocessed_inputs):
  """Extract features from preprocessed inputs.

  Args:
    preprocessed_inputs: a [batch, height, width, channels] float tensor
      representing a batch of images.

  Returns:
    feature_maps: a list of tensors where the ith tensor has shape
      [batch, height_i, width_i, depth_i]
  """
  preprocessed_inputs = shape_utils.check_min_image_dim(
      129, preprocessed_inputs)
  with tf.variable_scope(
      self._resnet_scope_name, reuse=self._reuse_weights) as scope:
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
      with (slim.arg_scope(self._conv_hyperparams_fn())
            if self._override_base_feature_extractor_hyperparams else
            context_manager.IdentityContextManager()):
        _, image_features = self._resnet_base_fn(
            inputs=ops.pad_to_multiple(preprocessed_inputs,
                                       self._pad_to_multiple),
            num_classes=None,
            is_training=None,
            global_pool=False,
            output_stride=None,
            store_non_strided_activations=True,
            min_base_depth=self._min_depth,
            depth_multiplier=self._depth_multiplier,
            scope=scope)
      image_features = self._filter_features(image_features)
    depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth)
    with slim.arg_scope(self._conv_hyperparams_fn()):
      with tf.variable_scope(self._fpn_scope_name, reuse=self._reuse_weights):
        base_fpn_max_level = min(self._fpn_max_level, 5)
        feature_block_list = []
        for level in range(self._fpn_min_level, base_fpn_max_level + 1):
          feature_block_list.append('block{}'.format(level - 1))
        fpn_features = feature_map_generators.fpn_top_down_feature_maps(
            [(key, image_features[key]) for key in feature_block_list],
            depth=depth_fn(self._additional_layer_depth),
            use_native_resize_op=self._use_native_resize_op)
        feature_maps = []
        for level in range(self._fpn_min_level, base_fpn_max_level + 1):
          feature_maps.append(
              fpn_features['top_down_block{}'.format(level - 1)])
        last_feature_map = fpn_features['top_down_block{}'.format(
            base_fpn_max_level - 1)]
        # Construct coarse features
        for i in range(base_fpn_max_level, self._fpn_max_level):
          last_feature_map = slim.conv2d(
              last_feature_map,
              num_outputs=depth_fn(self._additional_layer_depth),
              kernel_size=[3, 3],
              stride=2,
              padding='SAME',
              scope='bottom_up_block{}'.format(i))
          feature_maps.append(last_feature_map)
  return feature_maps
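One detail worth noting in this last example is depth_fn: it scales a nominal channel count by the depth multiplier but never lets it drop below min_depth. A quick standalone check, with illustrative (assumed) values for the extractor attributes:

# depth_fn exactly as defined in the example above, with assumed values
# standing in for self._depth_multiplier and self._min_depth.
min_depth = 16
depth_multiplier = 0.25
depth_fn = lambda d: max(int(d * depth_multiplier), min_depth)

print(depth_fn(256))  # 64  -> scaled by the multiplier
print(depth_fn(32))   # 16  -> clamped up to min_depth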