Python datasets.task_evaluation.check_expected_results() Examples
The following are 11 code examples of datasets.task_evaluation.check_expected_results().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module
datasets.task_evaluation, or try the search function.
Example #1
Source File: test_net.py From NucleiDetectron with Apache License 2.0 | 5 votes |
def main(ind_range=None, multi_gpu_testing=False):
    """Run inference and, in the parent process, validate and persist results.

    Args:
        ind_range: optional (start, end) range of dataset indices; None means
            this is the parent process evaluating the full dataset.
        multi_gpu_testing: whether inference fans out to one subprocess per GPU.

    NOTE(review): indentation reconstructed from a whitespace-mangled source;
    the result-checking/dumping statements are assumed to sit inside the
    ``if not ind_range`` block, per the upstream Detectron test_net.py layout.
    """
    output_dir = get_output_dir(training=False)
    all_results = run_inference(
        output_dir, ind_range=ind_range, multi_gpu_testing=multi_gpu_testing
    )
    # Only the parent process (no index range) checks and publishes results.
    if not ind_range:
        task_evaluation.check_expected_results(
            all_results,
            atol=cfg.EXPECTED_RESULTS_ATOL,
            rtol=cfg.EXPECTED_RESULTS_RTOL
        )
        import json
        # Use a context manager so the file handle is closed deterministically
        # (the original passed a bare open() into json.dump and leaked it).
        with open(os.path.join(output_dir, 'bbox_results_all.json'), 'w') as f:
            json.dump(all_results, f)
        task_evaluation.log_copy_paste_friendly_results(all_results)
Example #2
Source File: test_engine.py From Detectron.pytorch with MIT License | 4 votes |
def run_inference(
        args, ind_range=None, multi_gpu_testing=False, gpu_id=0,
        check_expected_results=False):
    """Run inference over every dataset in cfg.TEST.DATASETS.

    Args:
        args: namespace providing at least ``output_dir``.
        ind_range: None in the parent process; an index range in a child
            subprocess restricted to part of the first dataset.
        multi_gpu_testing: parent launches one subprocess per GPU when True.
        gpu_id: GPU ordinal used by the child inference function.
        check_expected_results: compare results against cfg expected values
            (parent process only).

    Returns:
        dict mapping dataset names to their evaluation results.

    NOTE(review): reformatted from a whitespace-mangled one-line source;
    structure follows the standard Detectron test_engine layout.
    """
    parent_func, child_func = get_eval_functions()
    is_parent = ind_range is None

    def result_getter():
        if is_parent:
            # Parent case:
            # In this case we're either running inference on the entire dataset in a
            # single process or (if multi_gpu_testing is True) using this process to
            # launch subprocesses that each run inference on a range of the dataset
            all_results = {}
            for i in range(len(cfg.TEST.DATASETS)):
                dataset_name, proposal_file = get_inference_dataset(i)
                output_dir = args.output_dir
                results = parent_func(
                    args, dataset_name, proposal_file, output_dir,
                    multi_gpu=multi_gpu_testing
                )
                all_results.update(results)
            return all_results
        else:
            # Subprocess child case:
            # In this case test_net was called via subprocess.Popen to execute on a
            # range of inputs on a single dataset
            dataset_name, proposal_file = get_inference_dataset(0, is_parent=False)
            output_dir = args.output_dir
            return child_func(
                args, dataset_name, proposal_file, output_dir,
                ind_range=ind_range, gpu_id=gpu_id
            )

    all_results = result_getter()
    if check_expected_results and is_parent:
        task_evaluation.check_expected_results(
            all_results,
            atol=cfg.EXPECTED_RESULTS_ATOL,
            rtol=cfg.EXPECTED_RESULTS_RTOL
        )
        task_evaluation.log_copy_paste_friendly_results(all_results)

    return all_results
Example #3
Source File: test_engine.py From FPN-Pytorch with MIT License | 4 votes |
def run_inference(
        args, ind_range=None, multi_gpu_testing=False, gpu_id=0,
        check_expected_results=False):
    """Run inference over every dataset in cfg.TEST.DATASETS.

    Args:
        args: namespace providing at least ``output_dir``.
        ind_range: None in the parent process; an index range in a child
            subprocess restricted to part of the first dataset.
        multi_gpu_testing: parent launches one subprocess per GPU when True.
        gpu_id: GPU ordinal used by the child inference function.
        check_expected_results: compare results against cfg expected values
            (parent process only).

    Returns:
        dict mapping dataset names to their evaluation results.

    NOTE(review): reformatted from a whitespace-mangled one-line source;
    structure follows the standard Detectron test_engine layout.
    """
    parent_func, child_func = get_eval_functions()
    is_parent = ind_range is None

    def result_getter():
        if is_parent:
            # Parent case:
            # In this case we're either running inference on the entire dataset in a
            # single process or (if multi_gpu_testing is True) using this process to
            # launch subprocesses that each run inference on a range of the dataset
            all_results = {}
            for i in range(len(cfg.TEST.DATASETS)):
                dataset_name, proposal_file = get_inference_dataset(i)
                output_dir = args.output_dir
                results = parent_func(
                    args, dataset_name, proposal_file, output_dir,
                    multi_gpu=multi_gpu_testing
                )
                all_results.update(results)
            return all_results
        else:
            # Subprocess child case:
            # In this case test_net was called via subprocess.Popen to execute on a
            # range of inputs on a single dataset
            dataset_name, proposal_file = get_inference_dataset(0, is_parent=False)
            output_dir = args.output_dir
            return child_func(
                args, dataset_name, proposal_file, output_dir,
                ind_range=ind_range, gpu_id=gpu_id
            )

    all_results = result_getter()
    if check_expected_results and is_parent:
        task_evaluation.check_expected_results(
            all_results,
            atol=cfg.EXPECTED_RESULTS_ATOL,
            rtol=cfg.EXPECTED_RESULTS_RTOL
        )
        task_evaluation.log_copy_paste_friendly_results(all_results)

    return all_results
Example #4
Source File: test_engine.py From pcl.pytorch with MIT License | 4 votes |
def run_inference(
        args, ind_range=None, multi_gpu_testing=False, gpu_id=0,
        check_expected_results=False):
    """Run inference over every dataset in cfg.TEST.DATASETS.

    Args:
        args: namespace providing at least ``output_dir``.
        ind_range: None in the parent process; an index range in a child
            subprocess restricted to part of the first dataset.
        multi_gpu_testing: parent launches one subprocess per GPU when True.
        gpu_id: GPU ordinal used by the child inference function.
        check_expected_results: compare results against cfg expected values
            (parent process only).

    Returns:
        dict mapping dataset names to their evaluation results.

    NOTE(review): reformatted from a whitespace-mangled one-line source;
    structure follows the standard Detectron test_engine layout.
    """
    parent_func, child_func = get_eval_functions()
    is_parent = ind_range is None

    def result_getter():
        if is_parent:
            # Parent case:
            # In this case we're either running inference on the entire dataset in a
            # single process or (if multi_gpu_testing is True) using this process to
            # launch subprocesses that each run inference on a range of the dataset
            all_results = {}
            for i in range(len(cfg.TEST.DATASETS)):
                dataset_name, proposal_file = get_inference_dataset(i)
                output_dir = args.output_dir
                results = parent_func(
                    args, dataset_name, proposal_file, output_dir,
                    multi_gpu=multi_gpu_testing
                )
                all_results.update(results)
            return all_results
        else:
            # Subprocess child case:
            # In this case test_net was called via subprocess.Popen to execute on a
            # range of inputs on a single dataset
            dataset_name, proposal_file = get_inference_dataset(0, is_parent=False)
            output_dir = args.output_dir
            return child_func(
                args, dataset_name, proposal_file, output_dir,
                ind_range=ind_range, gpu_id=gpu_id
            )

    all_results = result_getter()
    if check_expected_results and is_parent:
        task_evaluation.check_expected_results(
            all_results,
            atol=cfg.EXPECTED_RESULTS_ATOL,
            rtol=cfg.EXPECTED_RESULTS_RTOL
        )
        task_evaluation.log_copy_paste_friendly_results(all_results)

    return all_results
Example #5
Source File: test_engine.py From Detectron.pytorch with MIT License | 4 votes |
def run_inference(
        args, ind_range=None, multi_gpu_testing=False, gpu_id=0,
        check_expected_results=False):
    """Run inference over every dataset in cfg.TEST.DATASETS.

    Args:
        args: namespace providing at least ``output_dir``.
        ind_range: None in the parent process; an index range in a child
            subprocess restricted to part of the first dataset.
        multi_gpu_testing: parent launches one subprocess per GPU when True.
        gpu_id: GPU ordinal used by the child inference function.
        check_expected_results: compare results against cfg expected values
            (parent process only).

    Returns:
        dict mapping dataset names to their evaluation results.

    NOTE(review): reformatted from a whitespace-mangled one-line source;
    structure follows the standard Detectron test_engine layout.
    """
    parent_func, child_func = get_eval_functions()
    is_parent = ind_range is None

    def result_getter():
        if is_parent:
            # Parent case:
            # In this case we're either running inference on the entire dataset in a
            # single process or (if multi_gpu_testing is True) using this process to
            # launch subprocesses that each run inference on a range of the dataset
            all_results = {}
            for i in range(len(cfg.TEST.DATASETS)):
                dataset_name, proposal_file = get_inference_dataset(i)
                output_dir = args.output_dir
                results = parent_func(
                    args, dataset_name, proposal_file, output_dir,
                    multi_gpu=multi_gpu_testing
                )
                all_results.update(results)
            return all_results
        else:
            # Subprocess child case:
            # In this case test_net was called via subprocess.Popen to execute on a
            # range of inputs on a single dataset
            dataset_name, proposal_file = get_inference_dataset(0, is_parent=False)
            output_dir = args.output_dir
            return child_func(
                args, dataset_name, proposal_file, output_dir,
                ind_range=ind_range, gpu_id=gpu_id
            )

    all_results = result_getter()
    if check_expected_results and is_parent:
        task_evaluation.check_expected_results(
            all_results,
            atol=cfg.EXPECTED_RESULTS_ATOL,
            rtol=cfg.EXPECTED_RESULTS_RTOL
        )
        task_evaluation.log_copy_paste_friendly_results(all_results)

    return all_results
Example #6
Source File: test_engine.py From Context-aware-ZSR with MIT License | 4 votes |
def run_inference(
        args, ind_range=None, multi_gpu_testing=False, gpu_id=0,
        check_expected_results=False):
    """Run inference over every dataset in cfg.TEST.DATASETS.

    This variant (Context-aware-ZSR) always takes the parent path — the
    author hard-coded ``if True:`` over the ``is_parent`` test — and forwards
    ``ind_range`` to the parent evaluation function instead of spawning the
    child path.

    Args:
        args: namespace providing at least ``output_dir``.
        ind_range: forwarded to the parent evaluation function.
        multi_gpu_testing: parent launches one subprocess per GPU when True.
        gpu_id: GPU ordinal for the (currently unreachable) child path.
        check_expected_results: compare results against cfg expected values.

    Returns:
        dict mapping dataset names to their evaluation results.

    NOTE(review): reformatted from a whitespace-mangled one-line source;
    the ``if True:`` override is preserved exactly as the author wrote it.
    """
    parent_func, child_func = get_eval_functions()
    is_parent = ind_range is None

    def result_getter():
        if True: #is_parent:
            # Parent case:
            # In this case we're either running inference on the entire dataset in a
            # single process or (if multi_gpu_testing is True) using this process to
            # launch subprocesses that each run inference on a range of the dataset
            all_results = {}
            for i in range(len(cfg.TEST.DATASETS)):
                dataset_name, proposal_file = get_inference_dataset(i)
                output_dir = args.output_dir
                results = parent_func(
                    args, dataset_name, proposal_file, output_dir,
                    ind_range=ind_range, multi_gpu=multi_gpu_testing
                )
                all_results.update(results)
            return all_results
        else:
            # Subprocess child case (dead code while the branch above is
            # hard-coded to True):
            # In this case test_net was called via subprocess.Popen to execute on a
            # range of inputs on a single dataset
            dataset_name, proposal_file = get_inference_dataset(0, is_parent=False)
            output_dir = args.output_dir
            return child_func(
                args, dataset_name, proposal_file, output_dir,
                ind_range=ind_range, gpu_id=gpu_id
            )

    all_results = result_getter()
    if check_expected_results and is_parent:
        task_evaluation.check_expected_results(
            all_results,
            atol=cfg.EXPECTED_RESULTS_ATOL,
            rtol=cfg.EXPECTED_RESULTS_RTOL
        )
        task_evaluation.log_copy_paste_friendly_results(all_results)

    return all_results
Example #7
Source File: test_engine.py From PANet with MIT License | 4 votes |
def run_inference(
        args, ind_range=None, multi_gpu_testing=False, gpu_id=0,
        check_expected_results=False):
    """Run inference over every dataset in cfg.TEST.DATASETS.

    Args:
        args: namespace providing at least ``output_dir``.
        ind_range: None in the parent process; an index range in a child
            subprocess restricted to part of the first dataset.
        multi_gpu_testing: parent launches one subprocess per GPU when True.
        gpu_id: GPU ordinal used by the child inference function.
        check_expected_results: compare results against cfg expected values
            (parent process only).

    Returns:
        dict mapping dataset names to their evaluation results.

    NOTE(review): reformatted from a whitespace-mangled one-line source;
    structure follows the standard Detectron test_engine layout.
    """
    parent_func, child_func = get_eval_functions()
    is_parent = ind_range is None

    def result_getter():
        if is_parent:
            # Parent case:
            # In this case we're either running inference on the entire dataset in a
            # single process or (if multi_gpu_testing is True) using this process to
            # launch subprocesses that each run inference on a range of the dataset
            all_results = {}
            for i in range(len(cfg.TEST.DATASETS)):
                dataset_name, proposal_file = get_inference_dataset(i)
                output_dir = args.output_dir
                results = parent_func(
                    args, dataset_name, proposal_file, output_dir,
                    multi_gpu=multi_gpu_testing
                )
                all_results.update(results)
            return all_results
        else:
            # Subprocess child case:
            # In this case test_net was called via subprocess.Popen to execute on a
            # range of inputs on a single dataset
            dataset_name, proposal_file = get_inference_dataset(0, is_parent=False)
            output_dir = args.output_dir
            return child_func(
                args, dataset_name, proposal_file, output_dir,
                ind_range=ind_range, gpu_id=gpu_id
            )

    all_results = result_getter()
    if check_expected_results and is_parent:
        task_evaluation.check_expected_results(
            all_results,
            atol=cfg.EXPECTED_RESULTS_ATOL,
            rtol=cfg.EXPECTED_RESULTS_RTOL
        )
        task_evaluation.log_copy_paste_friendly_results(all_results)

    return all_results
Example #8
Source File: test_engine.py From seg_every_thing with Apache License 2.0 | 4 votes |
def run_inference(
        weights_file, ind_range=None, multi_gpu_testing=False, gpu_id=0,
        check_expected_results=False, ):
    """Run inference over every dataset in cfg.TEST.DATASETS.

    This variant (seg_every_thing / Detectron-caffe2 style) takes the model
    weights file path directly and derives each output directory from the
    dataset name via get_output_dir().

    Args:
        weights_file: path to the trained model weights.
        ind_range: None in the parent process; an index range in a child
            subprocess restricted to part of the first dataset.
        multi_gpu_testing: parent launches one subprocess per GPU when True.
        gpu_id: GPU ordinal used by the child inference function.
        check_expected_results: compare results against cfg expected values
            (parent process only).

    Returns:
        dict mapping dataset names to their evaluation results.

    NOTE(review): reformatted from a whitespace-mangled one-line source;
    structure follows the standard Detectron test_engine layout.
    """
    parent_func, child_func = get_eval_functions()
    is_parent = ind_range is None

    def result_getter():
        if is_parent:
            # Parent case:
            # In this case we're either running inference on the entire dataset in a
            # single process or (if multi_gpu_testing is True) using this process to
            # launch subprocesses that each run inference on a range of the dataset
            all_results = {}
            for i in range(len(cfg.TEST.DATASETS)):
                dataset_name, proposal_file = get_inference_dataset(i)
                output_dir = get_output_dir(dataset_name, training=False)
                results = parent_func(
                    weights_file, dataset_name, proposal_file, output_dir,
                    multi_gpu=multi_gpu_testing
                )
                all_results.update(results)
            return all_results
        else:
            # Subprocess child case:
            # In this case test_net was called via subprocess.Popen to execute on a
            # range of inputs on a single dataset
            dataset_name, proposal_file = get_inference_dataset(0, is_parent=False)
            output_dir = get_output_dir(dataset_name, training=False)
            return child_func(
                weights_file, dataset_name, proposal_file, output_dir,
                ind_range=ind_range, gpu_id=gpu_id
            )

    all_results = result_getter()
    if check_expected_results and is_parent:
        task_evaluation.check_expected_results(
            all_results,
            atol=cfg.EXPECTED_RESULTS_ATOL,
            rtol=cfg.EXPECTED_RESULTS_RTOL
        )
        task_evaluation.log_copy_paste_friendly_results(all_results)

    return all_results
Example #9
Source File: test_engine.py From Large-Scale-VRD.pytorch with MIT License | 4 votes |
def run_inference(
        args, ind_range=None, multi_gpu_testing=False, gpu_id=0,
        check_expected_results=False):
    """Run inference over every dataset in cfg.TEST.DATASETS.

    Args:
        args: namespace providing at least ``output_dir``.
        ind_range: None in the parent process; an index range in a child
            subprocess restricted to part of the first dataset.
        multi_gpu_testing: parent launches one subprocess per GPU when True.
        gpu_id: GPU ordinal used by the child inference function.
        check_expected_results: compare results against cfg expected values
            (parent process only).

    Returns:
        dict mapping dataset names to their evaluation results.

    NOTE(review): reformatted from a whitespace-mangled one-line source;
    structure follows the standard Detectron test_engine layout.
    """
    parent_func, child_func = get_eval_functions()
    is_parent = ind_range is None

    def result_getter():
        if is_parent:
            # Parent case:
            # In this case we're either running inference on the entire dataset in a
            # single process or (if multi_gpu_testing is True) using this process to
            # launch subprocesses that each run inference on a range of the dataset
            all_results = {}
            for i in range(len(cfg.TEST.DATASETS)):
                dataset_name, proposal_file = get_inference_dataset(i)
                output_dir = args.output_dir
                results = parent_func(
                    args, dataset_name, proposal_file, output_dir,
                    multi_gpu=multi_gpu_testing
                )
                all_results.update(results)
            return all_results
        else:
            # Subprocess child case:
            # In this case test_net was called via subprocess.Popen to execute on a
            # range of inputs on a single dataset
            dataset_name, proposal_file = get_inference_dataset(0, is_parent=False)
            output_dir = args.output_dir
            return child_func(
                args, dataset_name, proposal_file, output_dir,
                ind_range=ind_range, gpu_id=gpu_id
            )

    all_results = result_getter()
    if check_expected_results and is_parent:
        task_evaluation.check_expected_results(
            all_results,
            atol=cfg.EXPECTED_RESULTS_ATOL,
            rtol=cfg.EXPECTED_RESULTS_RTOL
        )
        task_evaluation.log_copy_paste_friendly_results(all_results)

    return all_results
Example #10
Source File: test_engine.py From detectron-self-train with MIT License | 4 votes |
def run_inference(
        args, ind_range=None, multi_gpu_testing=False, gpu_id=0,
        check_expected_results=False):
    """Run inference over every dataset in cfg.TEST.DATASETS.

    Args:
        args: namespace providing at least ``output_dir``.
        ind_range: None in the parent process; an index range in a child
            subprocess restricted to part of the first dataset.
        multi_gpu_testing: parent launches one subprocess per GPU when True.
        gpu_id: GPU ordinal used by the child inference function.
        check_expected_results: compare results against cfg expected values
            (parent process only).

    Returns:
        dict mapping dataset names to their evaluation results.

    NOTE(review): reformatted from a whitespace-mangled one-line source;
    structure follows the standard Detectron test_engine layout.
    """
    parent_func, child_func = get_eval_functions()
    is_parent = ind_range is None

    def result_getter():
        if is_parent:
            # Parent case:
            # In this case we're either running inference on the entire dataset in a
            # single process or (if multi_gpu_testing is True) using this process to
            # launch subprocesses that each run inference on a range of the dataset
            all_results = {}
            for i in range(len(cfg.TEST.DATASETS)):
                dataset_name, proposal_file = get_inference_dataset(i)
                output_dir = args.output_dir
                results = parent_func(
                    args, dataset_name, proposal_file, output_dir,
                    multi_gpu=multi_gpu_testing
                )
                all_results.update(results)
            return all_results
        else:
            # Subprocess child case:
            # In this case test_net was called via subprocess.Popen to execute on a
            # range of inputs on a single dataset
            dataset_name, proposal_file = get_inference_dataset(0, is_parent=False)
            output_dir = args.output_dir
            return child_func(
                args, dataset_name, proposal_file, output_dir,
                ind_range=ind_range, gpu_id=gpu_id
            )

    all_results = result_getter()
    if check_expected_results and is_parent:
        task_evaluation.check_expected_results(
            all_results,
            atol=cfg.EXPECTED_RESULTS_ATOL,
            rtol=cfg.EXPECTED_RESULTS_RTOL
        )
        task_evaluation.log_copy_paste_friendly_results(all_results)

    return all_results
Example #11
Source File: test_engine.py From DIoU-pytorch-detectron with GNU General Public License v3.0 | 4 votes |
def run_inference(
        args, ind_range=None, multi_gpu_testing=False, gpu_id=0,
        check_expected_results=False):
    """Run inference over every dataset in cfg.TEST.DATASETS.

    Args:
        args: namespace providing at least ``output_dir``.
        ind_range: None in the parent process; an index range in a child
            subprocess restricted to part of the first dataset.
        multi_gpu_testing: parent launches one subprocess per GPU when True.
        gpu_id: GPU ordinal used by the child inference function.
        check_expected_results: compare results against cfg expected values
            (parent process only).

    Returns:
        dict mapping dataset names to their evaluation results.

    NOTE(review): reformatted from a whitespace-mangled one-line source;
    structure follows the standard Detectron test_engine layout.
    """
    parent_func, child_func = get_eval_functions()
    is_parent = ind_range is None

    def result_getter():
        if is_parent:
            # Parent case:
            # In this case we're either running inference on the entire dataset in a
            # single process or (if multi_gpu_testing is True) using this process to
            # launch subprocesses that each run inference on a range of the dataset
            all_results = {}
            for i in range(len(cfg.TEST.DATASETS)):
                dataset_name, proposal_file = get_inference_dataset(i)
                output_dir = args.output_dir
                results = parent_func(
                    args, dataset_name, proposal_file, output_dir,
                    multi_gpu=multi_gpu_testing
                )
                all_results.update(results)
            return all_results
        else:
            # Subprocess child case:
            # In this case test_net was called via subprocess.Popen to execute on a
            # range of inputs on a single dataset
            dataset_name, proposal_file = get_inference_dataset(0, is_parent=False)
            output_dir = args.output_dir
            return child_func(
                args, dataset_name, proposal_file, output_dir,
                ind_range=ind_range, gpu_id=gpu_id
            )

    all_results = result_getter()
    if check_expected_results and is_parent:
        task_evaluation.check_expected_results(
            all_results,
            atol=cfg.EXPECTED_RESULTS_ATOL,
            rtol=cfg.EXPECTED_RESULTS_RTOL
        )
        task_evaluation.log_copy_paste_friendly_results(all_results)

    return all_results