Python object_detection.protos.eval_pb2.EvalConfig() Examples
The following are 30
code examples of object_detection.protos.eval_pb2.EvalConfig().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module
object_detection.protos.eval_pb2, or try the search function.
Example #1
Source File: eval.py From hands-detection with MIT License | 6 votes |
def get_configs_from_pipeline_file():
  """Loads model, eval and input reader configs from one pipeline proto.

  The text-format pipeline_pb2.TrainEvalPipelineConfig is read from the path
  given by the --pipeline_config_path flag.

  Returns:
    model_config: a model_pb2.DetectionModel
    eval_config: a eval_pb2.EvalConfig
    input_config: a input_reader_pb2.InputReader
  """
  pipeline = pipeline_pb2.TrainEvalPipelineConfig()
  with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as pipeline_file:
    text_format.Merge(pipeline_file.read(), pipeline)
  # --eval_training_data switches evaluation over to the training config.
  chosen_eval_config = (pipeline.train_config if FLAGS.eval_training_data
                        else pipeline.eval_config)
  return pipeline.model, chosen_eval_config, pipeline.eval_input_reader
Example #2
Source File: eval.py From MBMD with MIT License | 6 votes |
def get_configs_from_pipeline_file():
  """Loads model, eval and input reader configs from one pipeline proto.

  The text-format pipeline_pb2.TrainEvalPipelineConfig is read from the path
  given by the --pipeline_config_path flag.

  Returns:
    model_config: a model_pb2.DetectionModel
    eval_config: a eval_pb2.EvalConfig
    input_config: a input_reader_pb2.InputReader
  """
  pipeline = pipeline_pb2.TrainEvalPipelineConfig()
  with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as pipeline_file:
    text_format.Merge(pipeline_file.read(), pipeline)
  # --eval_training_data switches evaluation over to the training config.
  chosen_eval_config = (pipeline.train_config if FLAGS.eval_training_data
                        else pipeline.eval_config)
  return pipeline.model, chosen_eval_config, pipeline.eval_input_reader
Example #3
Source File: eval_util_test.py From vehicle_counting_tensorflow with MIT License | 6 votes |
def test_get_eval_metric_ops_for_coco_detections_and_resized_masks(
    self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False):
  """Box and mask mAP are perfect with resized groundtruth masks.

  Args:
    batch_size: batch size used when building the evaluation dict.
    max_gt_boxes: optional limit on groundtruth boxes per image.
    scale_to_absolute: whether boxes are scaled to absolute coordinates.
  """
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(
      ['coco_detection_metrics', 'coco_mask_metrics'])
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict(batch_size=batch_size,
                                         max_gt_boxes=max_gt_boxes,
                                         scale_to_absolute=scale_to_absolute,
                                         resized_groundtruth_masks=True)
  metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
      eval_config, categories, eval_dict)
  _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP']
  _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP']
  with self.test_session() as sess:
    metrics = {}
    # Bug fix: dict.iteritems() is Python 2 only and was removed in
    # Python 3; .items() works on both.
    for key, (value_op, _) in metric_ops.items():
      metrics[key] = value_op
    sess.run(update_op_boxes)
    sess.run(update_op_masks)
    metrics = sess.run(metrics)
    self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
    self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP'])
Example #4
Source File: eval.py From DOTA_models with Apache License 2.0 | 6 votes |
def get_configs_from_pipeline_file():
  """Loads model, eval and input reader configs from one pipeline proto.

  The text-format pipeline_pb2.TrainEvalPipelineConfig is read from the path
  given by the --pipeline_config_path flag.

  Returns:
    model_config: a model_pb2.DetectionModel
    eval_config: a eval_pb2.EvalConfig
    input_config: a input_reader_pb2.InputReader
  """
  pipeline = pipeline_pb2.TrainEvalPipelineConfig()
  with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as pipeline_file:
    text_format.Merge(pipeline_file.read(), pipeline)
  # --eval_training_data switches evaluation over to the training config.
  chosen_eval_config = (pipeline.train_config if FLAGS.eval_training_data
                        else pipeline.eval_config)
  return pipeline.model, chosen_eval_config, pipeline.eval_input_reader
Example #5
Source File: eval_util_test.py From vehicle_counting_tensorflow with MIT License | 6 votes |
def test_get_eval_metric_ops_for_coco_detections(self, batch_size=1,
                                                 max_gt_boxes=None,
                                                 scale_to_absolute=False):
  """Detection-only metrics give perfect box mAP and no mask metrics.

  Args:
    batch_size: batch size used when building the evaluation dict.
    max_gt_boxes: optional limit on groundtruth boxes per image.
    scale_to_absolute: whether boxes are scaled to absolute coordinates.
  """
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(['coco_detection_metrics'])
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict(batch_size=batch_size,
                                         max_gt_boxes=max_gt_boxes,
                                         scale_to_absolute=scale_to_absolute)
  metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
      eval_config, categories, eval_dict)
  _, update_op = metric_ops['DetectionBoxes_Precision/mAP']
  with self.test_session() as sess:
    metrics = {}
    # Bug fix: dict.iteritems() is Python 2 only and was removed in
    # Python 3; .items() works on both.
    for key, (value_op, _) in metric_ops.items():
      metrics[key] = value_op
    sess.run(update_op)
    metrics = sess.run(metrics)
    self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
    self.assertNotIn('DetectionMasks_Precision/mAP', metrics)
Example #6
Source File: eval_util_test.py From vehicle_counting_tensorflow with MIT License | 6 votes |
def test_get_eval_metric_ops_for_coco_detections_and_masks(
    self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False):
  """Box and mask mAP are both perfect when both metric sets are enabled.

  Args:
    batch_size: batch size used when building the evaluation dict.
    max_gt_boxes: optional limit on groundtruth boxes per image.
    scale_to_absolute: whether boxes are scaled to absolute coordinates.
  """
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(
      ['coco_detection_metrics', 'coco_mask_metrics'])
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict(batch_size=batch_size,
                                         max_gt_boxes=max_gt_boxes,
                                         scale_to_absolute=scale_to_absolute)
  metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
      eval_config, categories, eval_dict)
  _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP']
  _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP']
  with self.test_session() as sess:
    metrics = {}
    # Bug fix: dict.iteritems() is Python 2 only and was removed in
    # Python 3; .items() works on both.
    for key, (value_op, _) in metric_ops.items():
      metrics[key] = value_op
    sess.run(update_op_boxes)
    sess.run(update_op_masks)
    metrics = sess.run(metrics)
    self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
    self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP'])
Example #7
Source File: eval.py From object_detection_kitti with Apache License 2.0 | 6 votes |
def get_configs_from_pipeline_file():
  """Loads model, eval and input reader configs from one pipeline proto.

  The text-format pipeline_pb2.TrainEvalPipelineConfig is read from the path
  given by the --pipeline_config_path flag.

  Returns:
    model_config: a model_pb2.DetectionModel
    eval_config: a eval_pb2.EvalConfig
    input_config: a input_reader_pb2.InputReader
  """
  pipeline = pipeline_pb2.TrainEvalPipelineConfig()
  with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as pipeline_file:
    text_format.Merge(pipeline_file.read(), pipeline)
  # --eval_training_data switches evaluation over to the training config.
  chosen_eval_config = (pipeline.train_config if FLAGS.eval_training_data
                        else pipeline.eval_config)
  return pipeline.model, chosen_eval_config, pipeline.eval_input_reader
Example #8
Source File: eval.py From object_detector_app with MIT License | 6 votes |
def get_configs_from_pipeline_file():
  """Loads model, eval and input reader configs from one pipeline proto.

  The text-format pipeline_pb2.TrainEvalPipelineConfig is read from the path
  given by the --pipeline_config_path flag.

  Returns:
    model_config: a model_pb2.DetectionModel
    eval_config: a eval_pb2.EvalConfig
    input_config: a input_reader_pb2.InputReader
  """
  pipeline = pipeline_pb2.TrainEvalPipelineConfig()
  with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as pipeline_file:
    text_format.Merge(pipeline_file.read(), pipeline)
  # --eval_training_data switches evaluation over to the training config.
  chosen_eval_config = (pipeline.train_config if FLAGS.eval_training_data
                        else pipeline.eval_config)
  return pipeline.model, chosen_eval_config, pipeline.eval_input_reader
Example #9
Source File: eval.py From garbage-object-detection-tensorflow with MIT License | 6 votes |
def get_configs_from_pipeline_file():
  """Loads model, eval and input reader configs from one pipeline proto.

  The text-format pipeline_pb2.TrainEvalPipelineConfig is read from the path
  given by the --pipeline_config_path flag.

  Returns:
    model_config: a model_pb2.DetectionModel
    eval_config: a eval_pb2.EvalConfig
    input_config: a input_reader_pb2.InputReader
  """
  pipeline = pipeline_pb2.TrainEvalPipelineConfig()
  with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as pipeline_file:
    text_format.Merge(pipeline_file.read(), pipeline)
  # --eval_training_data switches evaluation over to the training config.
  chosen_eval_config = (pipeline.train_config if FLAGS.eval_training_data
                        else pipeline.eval_config)
  return pipeline.model, chosen_eval_config, pipeline.eval_input_reader
Example #10
Source File: eval_util_test.py From g-tensorflow-models with Apache License 2.0 | 6 votes |
def test_get_eval_metric_ops_for_coco_detections_and_resized_masks(
    self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False):
  """Box and mask mAP are perfect with resized groundtruth masks.

  Args:
    batch_size: batch size used when building the evaluation dict.
    max_gt_boxes: optional limit on groundtruth boxes per image.
    scale_to_absolute: whether boxes are scaled to absolute coordinates.
  """
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(
      ['coco_detection_metrics', 'coco_mask_metrics'])
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict(batch_size=batch_size,
                                         max_gt_boxes=max_gt_boxes,
                                         scale_to_absolute=scale_to_absolute,
                                         resized_groundtruth_masks=True)
  metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
      eval_config, categories, eval_dict)
  _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP']
  _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP']
  with self.test_session() as sess:
    metrics = {}
    # Bug fix: dict.iteritems() is Python 2 only and was removed in
    # Python 3; .items() works on both.
    for key, (value_op, _) in metric_ops.items():
      metrics[key] = value_op
    sess.run(update_op_boxes)
    sess.run(update_op_masks)
    metrics = sess.run(metrics)
    self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
    self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP'])
Example #11
Source File: eval_util_test.py From g-tensorflow-models with Apache License 2.0 | 6 votes |
def test_get_eval_metric_ops_for_coco_detections_and_masks(
    self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False):
  """Box and mask mAP are both perfect when both metric sets are enabled.

  Args:
    batch_size: batch size used when building the evaluation dict.
    max_gt_boxes: optional limit on groundtruth boxes per image.
    scale_to_absolute: whether boxes are scaled to absolute coordinates.
  """
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(
      ['coco_detection_metrics', 'coco_mask_metrics'])
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict(batch_size=batch_size,
                                         max_gt_boxes=max_gt_boxes,
                                         scale_to_absolute=scale_to_absolute)
  metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
      eval_config, categories, eval_dict)
  _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP']
  _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP']
  with self.test_session() as sess:
    metrics = {}
    # Bug fix: dict.iteritems() is Python 2 only and was removed in
    # Python 3; .items() works on both.
    for key, (value_op, _) in metric_ops.items():
      metrics[key] = value_op
    sess.run(update_op_boxes)
    sess.run(update_op_masks)
    metrics = sess.run(metrics)
    self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
    self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP'])
Example #12
Source File: eval.py From HereIsWally with MIT License | 6 votes |
def get_configs_from_pipeline_file():
  """Loads model, eval and input reader configs from one pipeline proto.

  The text-format pipeline_pb2.TrainEvalPipelineConfig is read from the path
  given by the --pipeline_config_path flag.

  Returns:
    model_config: a model_pb2.DetectionModel
    eval_config: a eval_pb2.EvalConfig
    input_config: a input_reader_pb2.InputReader
  """
  pipeline = pipeline_pb2.TrainEvalPipelineConfig()
  with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as pipeline_file:
    text_format.Merge(pipeline_file.read(), pipeline)
  # --eval_training_data switches evaluation over to the training config.
  chosen_eval_config = (pipeline.train_config if FLAGS.eval_training_data
                        else pipeline.eval_config)
  return pipeline.model, chosen_eval_config, pipeline.eval_input_reader
Example #13
Source File: eval_util_test.py From g-tensorflow-models with Apache License 2.0 | 6 votes |
def test_get_eval_metric_ops_for_coco_detections(self, batch_size=1,
                                                 max_gt_boxes=None,
                                                 scale_to_absolute=False):
  """Detection-only metrics give perfect box mAP and no mask metrics.

  Args:
    batch_size: batch size used when building the evaluation dict.
    max_gt_boxes: optional limit on groundtruth boxes per image.
    scale_to_absolute: whether boxes are scaled to absolute coordinates.
  """
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(['coco_detection_metrics'])
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict(batch_size=batch_size,
                                         max_gt_boxes=max_gt_boxes,
                                         scale_to_absolute=scale_to_absolute)
  metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
      eval_config, categories, eval_dict)
  _, update_op = metric_ops['DetectionBoxes_Precision/mAP']
  with self.test_session() as sess:
    metrics = {}
    # Bug fix: dict.iteritems() is Python 2 only and was removed in
    # Python 3; .items() works on both.
    for key, (value_op, _) in metric_ops.items():
      metrics[key] = value_op
    sess.run(update_op)
    metrics = sess.run(metrics)
    self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
    self.assertNotIn('DetectionMasks_Precision/mAP', metrics)
Example #14
Source File: eval.py From moveo_ros with MIT License | 6 votes |
def get_configs_from_pipeline_file():
  """Loads model, eval and input reader configs from one pipeline proto.

  The text-format pipeline_pb2.TrainEvalPipelineConfig is read from the path
  given by the --pipeline_config_path flag.

  Returns:
    model_config: a model_pb2.DetectionModel
    eval_config: a eval_pb2.EvalConfig
    input_config: a input_reader_pb2.InputReader
  """
  pipeline = pipeline_pb2.TrainEvalPipelineConfig()
  with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as pipeline_file:
    text_format.Merge(pipeline_file.read(), pipeline)
  # --eval_training_data switches evaluation over to the training config.
  chosen_eval_config = (pipeline.train_config if FLAGS.eval_training_data
                        else pipeline.eval_config)
  return pipeline.model, chosen_eval_config, pipeline.eval_input_reader
Example #15
Source File: eval_util_test.py From MAX-Object-Detector with Apache License 2.0 | 6 votes |
def test_get_eval_metric_ops_for_coco_detections(self, batch_size=1,
                                                 max_gt_boxes=None,
                                                 scale_to_absolute=False):
  """Detection-only metrics give perfect box mAP and no mask metrics.

  Args:
    batch_size: batch size used when building the evaluation dict.
    max_gt_boxes: optional limit on groundtruth boxes per image.
    scale_to_absolute: whether boxes are scaled to absolute coordinates.
  """
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(['coco_detection_metrics'])
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict(batch_size=batch_size,
                                         max_gt_boxes=max_gt_boxes,
                                         scale_to_absolute=scale_to_absolute)
  metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
      eval_config, categories, eval_dict)
  _, update_op = metric_ops['DetectionBoxes_Precision/mAP']
  with self.test_session() as sess:
    metrics = {}
    # Bug fix: dict.iteritems() is Python 2 only and was removed in
    # Python 3; .items() works on both.
    for key, (value_op, _) in metric_ops.items():
      metrics[key] = value_op
    sess.run(update_op)
    metrics = sess.run(metrics)
    self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
    self.assertNotIn('DetectionMasks_Precision/mAP', metrics)
Example #16
Source File: eval.py From Hands-On-Machine-Learning-with-OpenCV-4 with MIT License | 6 votes |
def get_configs_from_pipeline_file():
  """Loads model, eval and input reader configs from one pipeline proto.

  The text-format pipeline_pb2.TrainEvalPipelineConfig is read from the path
  given by the --pipeline_config_path flag.

  Returns:
    model_config: a model_pb2.DetectionModel
    eval_config: a eval_pb2.EvalConfig
    input_config: a input_reader_pb2.InputReader
  """
  pipeline = pipeline_pb2.TrainEvalPipelineConfig()
  with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as pipeline_file:
    text_format.Merge(pipeline_file.read(), pipeline)
  # --eval_training_data switches evaluation over to the training config.
  chosen_eval_config = (pipeline.train_config if FLAGS.eval_training_data
                        else pipeline.eval_config)
  return pipeline.model, chosen_eval_config, pipeline.eval_input_reader
Example #17
Source File: eval_util_test.py From MAX-Object-Detector with Apache License 2.0 | 6 votes |
def test_get_eval_metric_ops_for_coco_detections_and_masks(
    self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False):
  """Box and mask mAP are both perfect when both metric sets are enabled.

  Args:
    batch_size: batch size used when building the evaluation dict.
    max_gt_boxes: optional limit on groundtruth boxes per image.
    scale_to_absolute: whether boxes are scaled to absolute coordinates.
  """
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(
      ['coco_detection_metrics', 'coco_mask_metrics'])
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict(batch_size=batch_size,
                                         max_gt_boxes=max_gt_boxes,
                                         scale_to_absolute=scale_to_absolute)
  metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
      eval_config, categories, eval_dict)
  _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP']
  _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP']
  with self.test_session() as sess:
    metrics = {}
    # Bug fix: dict.iteritems() is Python 2 only and was removed in
    # Python 3; .items() works on both.
    for key, (value_op, _) in metric_ops.items():
      metrics[key] = value_op
    sess.run(update_op_boxes)
    sess.run(update_op_masks)
    metrics = sess.run(metrics)
    self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
    self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP'])
Example #18
Source File: eval.py From tensorflow with BSD 2-Clause "Simplified" License | 6 votes |
def get_configs_from_pipeline_file():
  """Loads model, eval and input reader configs from one pipeline proto.

  The text-format pipeline_pb2.TrainEvalPipelineConfig is read from the path
  given by the --pipeline_config_path flag.

  Returns:
    model_config: a model_pb2.DetectionModel
    eval_config: a eval_pb2.EvalConfig
    input_config: a input_reader_pb2.InputReader
  """
  pipeline = pipeline_pb2.TrainEvalPipelineConfig()
  with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as pipeline_file:
    text_format.Merge(pipeline_file.read(), pipeline)
  # --eval_training_data switches evaluation over to the training config.
  chosen_eval_config = (pipeline.train_config if FLAGS.eval_training_data
                        else pipeline.eval_config)
  return pipeline.model, chosen_eval_config, pipeline.eval_input_reader
Example #19
Source File: eval_util_test.py From BMW-TensorFlow-Training-GUI with Apache License 2.0 | 6 votes |
def test_get_eval_metric_ops_for_coco_detections_and_resized_masks(self):
  """Box and mask mAP are perfect with resized groundtruth masks."""
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(
      ['coco_detection_metrics', 'coco_mask_metrics'])
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict(resized_groundtruth_masks=True)
  metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
      eval_config, categories, eval_dict)
  _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP']
  _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP']
  with self.test_session() as sess:
    metrics = {}
    # Bug fix: dict.iteritems() is Python 2 only and was removed in
    # Python 3; .items() works on both.
    for key, (value_op, _) in metric_ops.items():
      metrics[key] = value_op
    sess.run(update_op_boxes)
    sess.run(update_op_masks)
    metrics = sess.run(metrics)
    self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
    self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP'])
Example #20
Source File: eval_util_test.py From MAX-Object-Detector with Apache License 2.0 | 6 votes |
def test_get_eval_metric_ops_for_coco_detections_and_resized_masks(
    self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False):
  """Box and mask mAP are perfect with resized groundtruth masks.

  Args:
    batch_size: batch size used when building the evaluation dict.
    max_gt_boxes: optional limit on groundtruth boxes per image.
    scale_to_absolute: whether boxes are scaled to absolute coordinates.
  """
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(
      ['coco_detection_metrics', 'coco_mask_metrics'])
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict(batch_size=batch_size,
                                         max_gt_boxes=max_gt_boxes,
                                         scale_to_absolute=scale_to_absolute,
                                         resized_groundtruth_masks=True)
  metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
      eval_config, categories, eval_dict)
  _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP']
  _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP']
  with self.test_session() as sess:
    metrics = {}
    # Bug fix: dict.iteritems() is Python 2 only and was removed in
    # Python 3; .items() works on both.
    for key, (value_op, _) in metric_ops.items():
      metrics[key] = value_op
    sess.run(update_op_boxes)
    sess.run(update_op_masks)
    metrics = sess.run(metrics)
    self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
    self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP'])
Example #21
Source File: eval_util_test.py From BMW-TensorFlow-Training-GUI with Apache License 2.0 | 6 votes |
def test_get_eval_metric_ops_for_coco_detections(self):
  """Detection-only metrics give perfect box mAP and no mask metrics."""
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(['coco_detection_metrics'])
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict()
  metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
      eval_config, categories, eval_dict)
  _, update_op = metric_ops['DetectionBoxes_Precision/mAP']
  with self.test_session() as sess:
    metrics = {}
    # Bug fix: dict.iteritems() is Python 2 only and was removed in
    # Python 3; .items() works on both.
    for key, (value_op, _) in metric_ops.items():
      metrics[key] = value_op
    sess.run(update_op)
    metrics = sess.run(metrics)
    print(metrics)
    self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
    self.assertNotIn('DetectionMasks_Precision/mAP', metrics)
Example #22
Source File: eval_util_test.py From BMW-TensorFlow-Training-GUI with Apache License 2.0 | 6 votes |
def test_get_eval_metric_ops_for_coco_detections_and_masks(self):
  """Box and mask mAP are both perfect when both metric sets are enabled."""
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.extend(
      ['coco_detection_metrics', 'coco_mask_metrics'])
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict()
  metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
      eval_config, categories, eval_dict)
  _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP']
  _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP']
  with self.test_session() as sess:
    metrics = {}
    # Bug fix: dict.iteritems() is Python 2 only and was removed in
    # Python 3; .items() works on both.
    for key, (value_op, _) in metric_ops.items():
      metrics[key] = value_op
    sess.run(update_op_boxes)
    sess.run(update_op_masks)
    metrics = sess.run(metrics)
    self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
    self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP'])
Example #23
Source File: eval.py From object_detection_kitti with Apache License 2.0 | 5 votes |
def get_configs_from_multiple_files():
  """Loads model, eval and input reader configs from three separate files.

  Reads the text-format protos from the paths given by the
  --model_config_path, --eval_config_path and --input_config_path flags.

  Returns:
    model_config: a model_pb2.DetectionModel
    eval_config: a eval_pb2.EvalConfig
    input_config: a input_reader_pb2.InputReader
  """
  def _merge_from_file(path, message):
    # Parses the text-format proto at `path` into `message` and returns it.
    with tf.gfile.GFile(path, 'r') as proto_file:
      text_format.Merge(proto_file.read(), message)
    return message

  eval_config = _merge_from_file(FLAGS.eval_config_path, eval_pb2.EvalConfig())
  model_config = _merge_from_file(FLAGS.model_config_path,
                                  model_pb2.DetectionModel())
  input_config = _merge_from_file(FLAGS.input_config_path,
                                  input_reader_pb2.InputReader())
  return model_config, eval_config, input_config
Example #24
Source File: eval.py From DOTA_models with Apache License 2.0 | 5 votes |
def get_configs_from_multiple_files():
  """Loads model, eval and input reader configs from three separate files.

  Reads the text-format protos from the paths given by the
  --model_config_path, --eval_config_path and --input_config_path flags.

  Returns:
    model_config: a model_pb2.DetectionModel
    eval_config: a eval_pb2.EvalConfig
    input_config: a input_reader_pb2.InputReader
  """
  def _merge_from_file(path, message):
    # Parses the text-format proto at `path` into `message` and returns it.
    with tf.gfile.GFile(path, 'r') as proto_file:
      text_format.Merge(proto_file.read(), message)
    return message

  eval_config = _merge_from_file(FLAGS.eval_config_path, eval_pb2.EvalConfig())
  model_config = _merge_from_file(FLAGS.model_config_path,
                                  model_pb2.DetectionModel())
  input_config = _merge_from_file(FLAGS.input_config_path,
                                  input_reader_pb2.InputReader())
  return model_config, eval_config, input_config
Example #25
Source File: eval_util_test.py From BMW-TensorFlow-Training-GUI with Apache License 2.0 | 5 votes |
def test_get_evaluator_with_no_evaluator_options(self):
  """With evaluator_options=None, per-category metrics stay disabled.

  Setting include_metrics_per_category on the eval_config alone must not
  reach the evaluator: get_evaluators only forwards options passed through
  the `evaluator_options` argument.
  """
  config = eval_pb2.EvalConfig()
  config.metrics_set.extend(['coco_detection_metrics'])
  config.include_metrics_per_category = True
  categories = self._get_categories_list()
  evaluators = eval_util.get_evaluators(
      config, categories, evaluator_options=None)
  # The flag set on the config proto is ignored by the evaluator.
  self.assertFalse(evaluators[0]._include_metrics_per_category)
Example #26
Source File: eval_util_test.py From BMW-TensorFlow-Training-GUI with Apache License 2.0 | 5 votes |
def test_get_eval_metric_ops_for_evaluators(self):
  """evaluator_options_from_eval_config propagates the per-category flag."""
  config = eval_pb2.EvalConfig()
  config.metrics_set.extend(
      ['coco_detection_metrics', 'coco_mask_metrics'])
  config.include_metrics_per_category = True
  options = eval_util.evaluator_options_from_eval_config(config)
  # Both metric sets must pick up the flag from the eval config.
  for metric_name in ('coco_detection_metrics', 'coco_mask_metrics'):
    self.assertTrue(options[metric_name]['include_metrics_per_category'])
Example #27
Source File: eval_util_test.py From BMW-TensorFlow-Training-GUI with Apache License 2.0 | 5 votes |
def test_get_eval_metric_ops_raises_error_with_unsupported_metric(self):
  """An unknown metrics_set entry makes the metric-op factory raise."""
  config = eval_pb2.EvalConfig()
  config.metrics_set.extend(['unsupported_metric'])
  categories = self._get_categories_list()
  eval_dict = self._make_evaluation_dict()
  with self.assertRaises(ValueError):
    eval_util.get_eval_metric_ops_for_evaluators(
        config, categories, eval_dict)
Example #28
Source File: eval.py From MBMD with MIT License | 5 votes |
def get_configs_from_multiple_files():
  """Loads model, eval and input reader configs from three separate files.

  Reads the text-format protos from the paths given by the
  --model_config_path, --eval_config_path and --input_config_path flags.

  Returns:
    model_config: a model_pb2.DetectionModel
    eval_config: a eval_pb2.EvalConfig
    input_config: a input_reader_pb2.InputReader
  """
  def _merge_from_file(path, message):
    # Parses the text-format proto at `path` into `message` and returns it.
    with tf.gfile.GFile(path, 'r') as proto_file:
      text_format.Merge(proto_file.read(), message)
    return message

  eval_config = _merge_from_file(FLAGS.eval_config_path, eval_pb2.EvalConfig())
  model_config = _merge_from_file(FLAGS.model_config_path,
                                  model_pb2.DetectionModel())
  input_config = _merge_from_file(FLAGS.input_config_path,
                                  input_reader_pb2.InputReader())
  return model_config, eval_config, input_config
Example #29
Source File: eval.py From hands-detection with MIT License | 5 votes |
def get_configs_from_multiple_files():
  """Loads model, eval and input reader configs from three separate files.

  Reads the text-format protos from the paths given by the
  --model_config_path, --eval_config_path and --input_config_path flags.

  Returns:
    model_config: a model_pb2.DetectionModel
    eval_config: a eval_pb2.EvalConfig
    input_config: a input_reader_pb2.InputReader
  """
  def _merge_from_file(path, message):
    # Parses the text-format proto at `path` into `message` and returns it.
    with tf.gfile.GFile(path, 'r') as proto_file:
      text_format.Merge(proto_file.read(), message)
    return message

  eval_config = _merge_from_file(FLAGS.eval_config_path, eval_pb2.EvalConfig())
  model_config = _merge_from_file(FLAGS.model_config_path,
                                  model_pb2.DetectionModel())
  input_config = _merge_from_file(FLAGS.input_config_path,
                                  input_reader_pb2.InputReader())
  return model_config, eval_config, input_config
Example #30
Source File: visualize_res.py From MBMD with MIT License | 5 votes |
def get_configs_from_multiple_files():
  """Loads model, eval and input reader configs from three separate files.

  Reads the text-format protos from the paths given by the
  --model_config_path, --eval_config_path and --input_config_path flags.

  Returns:
    model_config: a model_pb2.DetectionModel
    eval_config: a eval_pb2.EvalConfig
    input_config: a input_reader_pb2.InputReader
  """
  def _merge_from_file(path, message):
    # Parses the text-format proto at `path` into `message` and returns it.
    with tf.gfile.GFile(path, 'r') as proto_file:
      text_format.Merge(proto_file.read(), message)
    return message

  eval_config = _merge_from_file(FLAGS.eval_config_path, eval_pb2.EvalConfig())
  model_config = _merge_from_file(FLAGS.model_config_path,
                                  model_pb2.DetectionModel())
  input_config = _merge_from_file(FLAGS.input_config_path,
                                  input_reader_pb2.InputReader())
  return model_config, eval_config, input_config