Python object_detection.utils.object_detection_evaluation.OpenImagesDetectionChallengeEvaluator() Examples
The following are 6 code examples of object_detection.utils.object_detection_evaluation.OpenImagesDetectionChallengeEvaluator().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module object_detection.utils.object_detection_evaluation, or try the search function.
Example #1
Source File: oid_od_challenge_evaluation.py From vehicle_counting_tensorflow with MIT License | 5 votes |
def main(parsed_args):
  """Run the Open Images object-detection challenge evaluation.

  Reads ground-truth box and image-label annotation CSVs, merges them,
  feeds ground truth and predictions to an
  OpenImagesDetectionChallengeEvaluator, and writes the computed metrics
  out as a CSV file.

  Args:
    parsed_args: Namespace with attributes input_annotations_boxes,
      input_annotations_labels, input_class_labelmap, input_predictions
      and output_metrics, each a file path.
  """
  all_box_annotations = pd.read_csv(parsed_args.input_annotations_boxes)
  all_label_annotations = pd.read_csv(parsed_args.input_annotations_labels)
  # Rename so the image-label confidence does not collide with the box
  # 'Confidence' column once the two frames are concatenated.
  all_label_annotations.rename(
      columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True)
  all_annotations = pd.concat([all_box_annotations, all_label_annotations])

  class_label_map, categories = _load_labelmap(parsed_args.input_class_labelmap)
  challenge_evaluator = (
      object_detection_evaluation.OpenImagesDetectionChallengeEvaluator(
          categories))

  # groupby() already yields (image_id, group) pairs; no enumerate() needed.
  for image_id, image_groundtruth in all_annotations.groupby('ImageID'):
    groundtruth_dictionary = utils.build_groundtruth_boxes_dictionary(
        image_groundtruth, class_label_map)
    challenge_evaluator.add_single_ground_truth_image_info(
        image_id, groundtruth_dictionary)

  all_predictions = pd.read_csv(parsed_args.input_predictions)
  for image_id, image_predictions in all_predictions.groupby('ImageID'):
    prediction_dictionary = utils.build_predictions_dictionary(
        image_predictions, class_label_map)
    challenge_evaluator.add_single_detected_image_info(image_id,
                                                       prediction_dictionary)

  metrics = challenge_evaluator.evaluate()
  with open(parsed_args.output_metrics, 'w') as fid:
    io_utils.write_csv(fid, metrics)
Example #2
Source File: oid_od_challenge_evaluation.py From BMW-TensorFlow-Training-GUI with Apache License 2.0 | 5 votes |
def main(parsed_args):
  """Run the Open Images object-detection challenge evaluation.

  Reads ground-truth box and image-label annotation CSVs, merges them,
  feeds ground truth and predictions to an
  OpenImagesDetectionChallengeEvaluator, and writes the computed metrics
  out as a CSV file.

  Args:
    parsed_args: Namespace with attributes input_annotations_boxes,
      input_annotations_labels, input_class_labelmap, input_predictions
      and output_metrics, each a file path.
  """
  all_box_annotations = pd.read_csv(parsed_args.input_annotations_boxes)
  all_label_annotations = pd.read_csv(parsed_args.input_annotations_labels)
  # Rename so the image-label confidence does not collide with the box
  # 'Confidence' column once the two frames are concatenated.
  all_label_annotations.rename(
      columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True)
  all_annotations = pd.concat([all_box_annotations, all_label_annotations])

  class_label_map, categories = _load_labelmap(parsed_args.input_class_labelmap)
  challenge_evaluator = (
      object_detection_evaluation.OpenImagesDetectionChallengeEvaluator(
          categories))

  # groupby() already yields (image_id, group) pairs; no enumerate() needed.
  for image_id, image_groundtruth in all_annotations.groupby('ImageID'):
    groundtruth_dictionary = utils.build_groundtruth_boxes_dictionary(
        image_groundtruth, class_label_map)
    challenge_evaluator.add_single_ground_truth_image_info(
        image_id, groundtruth_dictionary)

  all_predictions = pd.read_csv(parsed_args.input_predictions)
  for image_id, image_predictions in all_predictions.groupby('ImageID'):
    prediction_dictionary = utils.build_predictions_dictionary(
        image_predictions, class_label_map)
    challenge_evaluator.add_single_detected_image_info(image_id,
                                                       prediction_dictionary)

  metrics = challenge_evaluator.evaluate()
  with open(parsed_args.output_metrics, 'w') as fid:
    io_utils.write_csv(fid, metrics)
Example #3
Source File: oid_od_challenge_evaluation.py From MAX-Object-Detector with Apache License 2.0 | 5 votes |
def main(parsed_args):
  """Run the Open Images object-detection challenge evaluation.

  Reads ground-truth box and image-label annotation CSVs, merges them,
  feeds ground truth and predictions to an
  OpenImagesDetectionChallengeEvaluator, and writes the computed metrics
  out as a CSV file.

  Args:
    parsed_args: Namespace with attributes input_annotations_boxes,
      input_annotations_labels, input_class_labelmap, input_predictions
      and output_metrics, each a file path.
  """
  all_box_annotations = pd.read_csv(parsed_args.input_annotations_boxes)
  all_label_annotations = pd.read_csv(parsed_args.input_annotations_labels)
  # Rename so the image-label confidence does not collide with the box
  # 'Confidence' column once the two frames are concatenated.
  all_label_annotations.rename(
      columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True)
  all_annotations = pd.concat([all_box_annotations, all_label_annotations])

  class_label_map, categories = _load_labelmap(parsed_args.input_class_labelmap)
  challenge_evaluator = (
      object_detection_evaluation.OpenImagesDetectionChallengeEvaluator(
          categories))

  # groupby() already yields (image_id, group) pairs; no enumerate() needed.
  for image_id, image_groundtruth in all_annotations.groupby('ImageID'):
    groundtruth_dictionary = utils.build_groundtruth_boxes_dictionary(
        image_groundtruth, class_label_map)
    challenge_evaluator.add_single_ground_truth_image_info(
        image_id, groundtruth_dictionary)

  all_predictions = pd.read_csv(parsed_args.input_predictions)
  for image_id, image_predictions in all_predictions.groupby('ImageID'):
    prediction_dictionary = utils.build_predictions_dictionary(
        image_predictions, class_label_map)
    challenge_evaluator.add_single_detected_image_info(image_id,
                                                       prediction_dictionary)

  metrics = challenge_evaluator.evaluate()
  with open(parsed_args.output_metrics, 'w') as fid:
    io_utils.write_csv(fid, metrics)
Example #4
Source File: oid_od_challenge_evaluation.py From open-solution-googleai-object-detection with MIT License | 5 votes |
def main(parsed_args):
  """Run the Open Images object-detection challenge evaluation.

  Reads ground-truth box and image-label annotation CSVs, merges them,
  feeds ground truth and predictions to an
  OpenImagesDetectionChallengeEvaluator, and writes the computed metrics
  out as a CSV file.

  Args:
    parsed_args: Namespace with attributes input_annotations_boxes,
      input_annotations_labels, input_class_labelmap, input_predictions
      and output_metrics, each a file path.
  """
  all_box_annotations = pd.read_csv(parsed_args.input_annotations_boxes)
  all_label_annotations = pd.read_csv(parsed_args.input_annotations_labels)
  # Rename so the image-label confidence does not collide with the box
  # 'Confidence' column once the two frames are concatenated.
  all_label_annotations.rename(
      columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True)
  all_annotations = pd.concat([all_box_annotations, all_label_annotations])

  class_label_map, categories = _load_labelmap(parsed_args.input_class_labelmap)
  challenge_evaluator = (
      object_detection_evaluation.OpenImagesDetectionChallengeEvaluator(
          categories))

  # groupby() already yields (image_id, group) pairs; no enumerate() needed.
  for image_id, image_groundtruth in all_annotations.groupby('ImageID'):
    groundtruth_dictionary = utils.build_groundtruth_boxes_dictionary(
        image_groundtruth, class_label_map)
    challenge_evaluator.add_single_ground_truth_image_info(
        image_id, groundtruth_dictionary)

  all_predictions = pd.read_csv(parsed_args.input_predictions)
  for image_id, image_predictions in all_predictions.groupby('ImageID'):
    prediction_dictionary = utils.build_predictions_dictionary(
        image_predictions, class_label_map)
    challenge_evaluator.add_single_detected_image_info(image_id,
                                                       prediction_dictionary)

  metrics = challenge_evaluator.evaluate()
  with open(parsed_args.output_metrics, 'w') as fid:
    io_utils.write_csv(fid, metrics)
Example #5
Source File: oid_od_challenge_evaluation.py From g-tensorflow-models with Apache License 2.0 | 5 votes |
def main(parsed_args):
  """Run the Open Images object-detection challenge evaluation.

  Reads ground-truth box and image-label annotation CSVs, merges them,
  feeds ground truth and predictions to an
  OpenImagesDetectionChallengeEvaluator, and writes the computed metrics
  out as a CSV file.

  Args:
    parsed_args: Namespace with attributes input_annotations_boxes,
      input_annotations_labels, input_class_labelmap, input_predictions
      and output_metrics, each a file path.
  """
  all_box_annotations = pd.read_csv(parsed_args.input_annotations_boxes)
  all_label_annotations = pd.read_csv(parsed_args.input_annotations_labels)
  # Rename so the image-label confidence does not collide with the box
  # 'Confidence' column once the two frames are concatenated.
  all_label_annotations.rename(
      columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True)
  all_annotations = pd.concat([all_box_annotations, all_label_annotations])

  class_label_map, categories = _load_labelmap(parsed_args.input_class_labelmap)
  challenge_evaluator = (
      object_detection_evaluation.OpenImagesDetectionChallengeEvaluator(
          categories))

  # groupby() already yields (image_id, group) pairs; no enumerate() needed.
  for image_id, image_groundtruth in all_annotations.groupby('ImageID'):
    groundtruth_dictionary = utils.build_groundtruth_boxes_dictionary(
        image_groundtruth, class_label_map)
    challenge_evaluator.add_single_ground_truth_image_info(
        image_id, groundtruth_dictionary)

  all_predictions = pd.read_csv(parsed_args.input_predictions)
  for image_id, image_predictions in all_predictions.groupby('ImageID'):
    prediction_dictionary = utils.build_predictions_dictionary(
        image_predictions, class_label_map)
    challenge_evaluator.add_single_detected_image_info(image_id,
                                                       prediction_dictionary)

  metrics = challenge_evaluator.evaluate()
  with open(parsed_args.output_metrics, 'w') as fid:
    io_utils.write_csv(fid, metrics)
Example #6
Source File: oid_od_challenge_evaluation.py From multilabel-image-classification-tensorflow with MIT License | 5 votes |
def main(parsed_args):
  """Run the Open Images object-detection challenge evaluation.

  Reads ground-truth box and image-label annotation CSVs, merges them,
  feeds ground truth and predictions to an
  OpenImagesDetectionChallengeEvaluator, and writes the computed metrics
  out as a CSV file.

  Args:
    parsed_args: Namespace with attributes input_annotations_boxes,
      input_annotations_labels, input_class_labelmap, input_predictions
      and output_metrics, each a file path.
  """
  all_box_annotations = pd.read_csv(parsed_args.input_annotations_boxes)
  all_label_annotations = pd.read_csv(parsed_args.input_annotations_labels)
  # Rename so the image-label confidence does not collide with the box
  # 'Confidence' column once the two frames are concatenated.
  all_label_annotations.rename(
      columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True)
  all_annotations = pd.concat([all_box_annotations, all_label_annotations])

  class_label_map, categories = _load_labelmap(parsed_args.input_class_labelmap)
  challenge_evaluator = (
      object_detection_evaluation.OpenImagesDetectionChallengeEvaluator(
          categories))

  # groupby() already yields (image_id, group) pairs; no enumerate() needed.
  for image_id, image_groundtruth in all_annotations.groupby('ImageID'):
    groundtruth_dictionary = utils.build_groundtruth_boxes_dictionary(
        image_groundtruth, class_label_map)
    challenge_evaluator.add_single_ground_truth_image_info(
        image_id, groundtruth_dictionary)

  all_predictions = pd.read_csv(parsed_args.input_predictions)
  for image_id, image_predictions in all_predictions.groupby('ImageID'):
    prediction_dictionary = utils.build_predictions_dictionary(
        image_predictions, class_label_map)
    challenge_evaluator.add_single_detected_image_info(image_id,
                                                       prediction_dictionary)

  metrics = challenge_evaluator.evaluate()
  with open(parsed_args.output_metrics, 'w') as fid:
    io_utils.write_csv(fid, metrics)