Python detectron.utils.timer.Timer() Examples

The following are 30 code examples of detectron.utils.timer.Timer(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module detectron.utils.timer, or try the search function.
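Every example below follows the same tic/toc pattern, so it is worth seeing in isolation first. The sketch that follows is not taken from any of the projects above; it assumes only the Timer behavior visible in the examples (tic() starts a measurement, toc() ends it and returns the elapsed time, and the average_time attribute holds the running mean over all tic/toc pairs), and work_item() is a hypothetical stand-in for whatever is being timed.

import time

from detectron.utils.timer import Timer


def work_item():
    # Hypothetical stand-in for the operation being timed,
    # e.g. loading a minibatch or running inference on one image.
    time.sleep(0.01)


# Pattern from the data_loader_benchmark.py examples: time every
# iteration and report the running average via average_time.
timer = Timer()
for i in range(5):
    timer.tic()
    work_item()
    timer.toc()
    print('{:d}/5: average time: {:.3f}s'.format(i + 1, timer.average_time))

# Pattern from the generate_testdev_from_test.py examples: time a
# single block and print the value returned by toc().
t = Timer()
t.tic()
work_item()
print('Done ({:.2f}s)!'.format(t.toc()))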
Example #1
Source File: training_stats.py    From KL-Loss with Apache License 2.0
def __init__(self, model):
        # Window size for smoothing tracked values (with median filtering)
        self.WIN_SZ = 20
        # Output logging period in SGD iterations
        self.LOG_PERIOD = 20
        self.smoothed_losses_and_metrics = {
            key: SmoothedValue(self.WIN_SZ)
            for key in model.losses + model.metrics
        }
        self.losses_and_metrics = {
            key: 0
            for key in model.losses + model.metrics
        }
        self.smoothed_total_loss = SmoothedValue(self.WIN_SZ)
        self.smoothed_mb_qsize = SmoothedValue(self.WIN_SZ)
        self.iter_total_loss = np.nan
        self.iter_timer = Timer()
        self.model = model
        self.log = [] 
Example #2
Source File: training_stats.py    From Detectron with Apache License 2.0
def __init__(self, model):
        # Window size for smoothing tracked values (with median filtering)
        self.WIN_SZ = 20
        # Output logging period in SGD iterations
        self.LOG_PERIOD = 20
        self.smoothed_losses_and_metrics = {
            key: SmoothedValue(self.WIN_SZ)
            for key in model.losses + model.metrics
        }
        self.losses_and_metrics = {
            key: 0
            for key in model.losses + model.metrics
        }
        self.smoothed_total_loss = SmoothedValue(self.WIN_SZ)
        self.smoothed_mb_qsize = SmoothedValue(self.WIN_SZ)
        self.iter_total_loss = np.nan
        self.iter_timer = Timer()
        self.model = model 
Example #3
Source File: training_stats.py    From CBNet with Apache License 2.0
def __init__(self, model):
        # Window size for smoothing tracked values (with median filtering)
        self.WIN_SZ = 20
        # Output logging period in SGD iterations
        self.LOG_PERIOD = 20
        self.smoothed_losses_and_metrics = {
            key: SmoothedValue(self.WIN_SZ)
            for key in model.losses + model.metrics
        }
        self.losses_and_metrics = {
            key: 0
            for key in model.losses + model.metrics
        }
        self.smoothed_total_loss = SmoothedValue(self.WIN_SZ)
        self.smoothed_mb_qsize = SmoothedValue(self.WIN_SZ)
        self.iter_total_loss = np.nan
        self.iter_timer = Timer()
        self.model = model 
Example #4
Source File: training_stats.py    From Clustered-Object-Detection-in-Aerial-Image with Apache License 2.0
def __init__(self, model):
        # Window size for smoothing tracked values (with median filtering)
        self.WIN_SZ = 20
        # Output logging period in SGD iterations
        self.LOG_PERIOD = 20
        self.smoothed_losses_and_metrics = {
            key: SmoothedValue(self.WIN_SZ)
            for key in model.losses + model.metrics
        }
        self.losses_and_metrics = {
            key: 0
            for key in model.losses + model.metrics
        }
        self.smoothed_total_loss = SmoothedValue(self.WIN_SZ)
        self.smoothed_mb_qsize = SmoothedValue(self.WIN_SZ)
        self.iter_total_loss = np.nan
        self.iter_timer = Timer()
        self.model = model 
Example #5
Source File: training_stats.py    From Detectron-Cascade-RCNN with Apache License 2.0
def __init__(self, model):
        # Window size for smoothing tracked values (with median filtering)
        self.WIN_SZ = 20
        # Output logging period in SGD iterations
        self.LOG_PERIOD = 20
        self.smoothed_losses_and_metrics = {
            key: SmoothedValue(self.WIN_SZ)
            for key in model.losses + model.metrics
        }
        self.losses_and_metrics = {
            key: 0
            for key in model.losses + model.metrics
        }
        self.smoothed_total_loss = SmoothedValue(self.WIN_SZ)
        self.smoothed_mb_qsize = SmoothedValue(self.WIN_SZ)
        self.iter_total_loss = np.nan
        self.iter_timer = Timer()
        self.model = model 
Example #6
Source File: training_stats.py    From Detectron-DA-Faster-RCNN with Apache License 2.0
def __init__(self, model):
        # Window size for smoothing tracked values (with median filtering)
        self.WIN_SZ = 20
        # Output logging period in SGD iterations
        self.LOG_PERIOD = 20
        self.smoothed_losses_and_metrics = {
            key: SmoothedValue(self.WIN_SZ)
            for key in model.losses + model.metrics
        }
        self.losses_and_metrics = {
            key: 0
            for key in model.losses + model.metrics
        }
        self.smoothed_total_loss = SmoothedValue(self.WIN_SZ)
        self.smoothed_mb_qsize = SmoothedValue(self.WIN_SZ)
        self.iter_total_loss = np.nan
        self.iter_timer = Timer()
        self.model = model 
Example #7
Source File: generate_testdev_from_test.py    From Detectron-Cascade-RCNN with Apache License 2.0
def convert(json_file, output_dir):
    print('Reading: {}'.format(json_file))
    with open(json_file, 'r') as fid:
        dt = json.load(fid)
    print('done!')

    test_image_info = get_ann_fn('coco_2017_test')
    with open(test_image_info, 'r') as fid:
        info_test = json.load(fid)
    image_test = info_test['images']
    image_test_id = [i['id'] for i in image_test]
    print('{} has {} images'.format(test_image_info, len(image_test_id)))

    test_dev_image_info = get_ann_fn('coco_2017_test-dev')
    with open(test_dev_image_info, 'r') as fid:
        info_testdev = json.load(fid)
    image_testdev = info_testdev['images']
    image_testdev_id = [i['id'] for i in image_testdev]
    print('{} has {} images'.format(test_dev_image_info, len(image_testdev_id)))

    dt_testdev = []
    print('Filtering test-dev from test...')
    t = Timer()
    t.tic()
    for i in range(len(dt)):
        if i % 1000 == 0:
            print('{}/{}'.format(i, len(dt)))
        if dt[i]['image_id'] in image_testdev_id:
            dt_testdev.append(dt[i])
    print('Done filtering ({:.2f}s)!'.format(t.toc()))

    filename, file_extension = os.path.splitext(os.path.basename(json_file))
    filename = filename + '_test-dev'
    filename = os.path.join(output_dir, filename + file_extension)
    with open(filename, 'w') as fid:
        json.dump(dt_testdev, fid)
    print('Done writing: {}!'.format(filename)) 
Example #8
Source File: generate_testdev_from_test.py    From CBNet with Apache License 2.0
def convert(json_file, output_dir):
    print('Reading: {}'.format(json_file))
    with open(json_file, 'r') as fid:
        dt = json.load(fid)
    print('done!')

    test_image_info = get_ann_fn('coco_2017_test')
    with open(test_image_info, 'r') as fid:
        info_test = json.load(fid)
    image_test = info_test['images']
    image_test_id = [i['id'] for i in image_test]
    print('{} has {} images'.format(test_image_info, len(image_test_id)))

    test_dev_image_info = get_ann_fn('coco_2017_test-dev')
    with open(test_dev_image_info, 'r') as fid:
        info_testdev = json.load(fid)
    image_testdev = info_testdev['images']
    image_testdev_id = [i['id'] for i in image_testdev]
    print('{} has {} images'.format(test_dev_image_info, len(image_testdev_id)))

    dt_testdev = []
    print('Filtering test-dev from test...')
    t = Timer()
    t.tic()
    for i in range(len(dt)):
        if i % 1000 == 0:
            print('{}/{}'.format(i, len(dt)))
        if dt[i]['image_id'] in image_testdev_id:
            dt_testdev.append(dt[i])
    print('Done filtering ({:.2f}s)!'.format(t.toc()))

    filename, file_extension = os.path.splitext(os.path.basename(json_file))
    filename = filename + '_test-dev'
    filename = os.path.join(output_dir, filename + file_extension)
    with open(filename, 'w') as fid:
        json.dump(dt_testdev, fid)
    print('Done writing: {}!'.format(filename)) 
Example #9
Source File: data_loader_benchmark.py    From Detectron with Apache License 2.0
def loader_loop(roi_data_loader):
    load_timer = Timer()
    iters = 100
    for i in range(iters):
        load_timer.tic()
        roi_data_loader.get_next_minibatch()
        load_timer.toc()
        print('{:d}/{:d}: Average get_next_minibatch time: {:.3f}s'.format(
              i + 1, iters, load_timer.average_time)) 
Example #10
Source File: json_dataset.py    From Detectron with Apache License 2.0
def __init__(self, name):
        assert dataset_catalog.contains(name), \
            'Unknown dataset name: {}'.format(name)
        assert os.path.exists(dataset_catalog.get_im_dir(name)), \
            'Im dir \'{}\' not found'.format(dataset_catalog.get_im_dir(name))
        assert os.path.exists(dataset_catalog.get_ann_fn(name)), \
            'Ann fn \'{}\' not found'.format(dataset_catalog.get_ann_fn(name))
        logger.debug('Creating: {}'.format(name))
        self.name = name
        self.image_directory = dataset_catalog.get_im_dir(name)
        self.image_prefix = dataset_catalog.get_im_prefix(name)
        self.COCO = COCO(dataset_catalog.get_ann_fn(name))
        self.debug_timer = Timer()
        # Set up dataset classes
        category_ids = self.COCO.getCatIds()
        categories = [c['name'] for c in self.COCO.loadCats(category_ids)]
        self.category_to_id_map = dict(zip(categories, category_ids))
        self.classes = ['__background__'] + categories
        self.num_classes = len(self.classes)
        self.json_category_id_to_contiguous_id = {
            v: i + 1
            for i, v in enumerate(self.COCO.getCatIds())
        }
        self.contiguous_category_id_to_json_id = {
            v: k
            for k, v in self.json_category_id_to_contiguous_id.items()
        }
        self._init_keypoints() 
Example #11
Source File: test_engine.py    From Detectron with Apache License 2.0
def test_net_on_dataset(
    weights_file,
    dataset_name,
    proposal_file,
    output_dir,
    multi_gpu=False,
    gpu_id=0
):
    """Run inference on a dataset."""
    dataset = JsonDataset(dataset_name)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            weights_file, dataset_name, proposal_file, num_images, output_dir
        )
    else:
        all_boxes, all_segms, all_keyps = test_net(
            weights_file, dataset_name, proposal_file, output_dir, gpu_id=gpu_id
        )
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))
    results = task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, output_dir
    )
    return results 
Example #12
Source File: rpn_generator.py    From Detectron with Apache License 2.0
def generate_proposals_on_roidb(
    model, roidb, start_ind=None, end_ind=None, total_num_images=None,
    gpu_id=0,
):
    """Generate RPN proposals on all images in an imdb."""
    _t = Timer()
    num_images = len(roidb)
    roidb_boxes = [[] for _ in range(num_images)]
    roidb_scores = [[] for _ in range(num_images)]
    roidb_ids = [[] for _ in range(num_images)]
    if start_ind is None:
        start_ind = 0
        end_ind = num_images
        total_num_images = num_images
    for i in range(num_images):
        roidb_ids[i] = roidb[i]['id']
        im = cv2.imread(roidb[i]['image'])
        with c2_utils.NamedCudaScope(gpu_id):
            _t.tic()
            roidb_boxes[i], roidb_scores[i] = im_proposals(model, im)
            _t.toc()
        if i % 10 == 0:
            ave_time = _t.average_time
            eta_seconds = ave_time * (num_images - i - 1)
            eta = str(datetime.timedelta(seconds=int(eta_seconds)))
            logger.info(
                (
                    'rpn_generate: range [{:d}, {:d}] of {:d}: '
                    '{:d}/{:d} {:.3f}s (eta: {})'
                ).format(
                    start_ind + 1, end_ind, total_num_images, start_ind + i + 1,
                    start_ind + num_images, ave_time, eta
                )
            )

    return roidb_boxes, roidb_scores, roidb_ids 
Example #13
Source File: generate_testdev_from_test.py    From Detectron with Apache License 2.0
def convert(json_file, output_dir):
    print('Reading: {}'.format(json_file))
    with open(json_file, 'r') as fid:
        dt = json.load(fid)
    print('done!')

    test_image_info = get_ann_fn('coco_2017_test')
    with open(test_image_info, 'r') as fid:
        info_test = json.load(fid)
    image_test = info_test['images']
    image_test_id = [i['id'] for i in image_test]
    print('{} has {} images'.format(test_image_info, len(image_test_id)))

    test_dev_image_info = get_ann_fn('coco_2017_test-dev')
    with open(test_dev_image_info, 'r') as fid:
        info_testdev = json.load(fid)
    image_testdev = info_testdev['images']
    image_testdev_id = [i['id'] for i in image_testdev]
    print('{} has {} images'.format(test_dev_image_info, len(image_testdev_id)))

    dt_testdev = []
    print('Filtering test-dev from test...')
    t = Timer()
    t.tic()
    for i in range(len(dt)):
        if i % 1000 == 0:
            print('{}/{}'.format(i, len(dt)))
        if dt[i]['image_id'] in image_testdev_id:
            dt_testdev.append(dt[i])
    print('Done filtering ({:.2f}s)!'.format(t.toc()))

    filename, file_extension = os.path.splitext(os.path.basename(json_file))
    filename = filename + '_test-dev'
    filename = os.path.join(output_dir, filename + file_extension)
    with open(filename, 'w') as fid:
        json.dump(dt_testdev, fid)
    print('Done writing: {}!'.format(filename)) 
Example #14
Source File: data_loader_benchmark.py    From Detectron-DA-Faster-RCNN with Apache License 2.0
def loader_loop(roi_data_loader):
    load_timer = Timer()
    iters = 100
    for i in range(iters):
        load_timer.tic()
        roi_data_loader.get_next_minibatch()
        load_timer.toc()
        print('{:d}/{:d}: Average get_next_minibatch time: {:.3f}s'.format(
              i + 1, iters, load_timer.average_time)) 
Example #15
Source File: json_dataset.py    From Detectron-DA-Faster-RCNN with Apache License 2.0
def __init__(self, name):
        assert dataset_catalog.contains(name), \
            'Unknown dataset name: {}'.format(name)
        assert os.path.exists(dataset_catalog.get_im_dir(name)), \
            'Im dir \'{}\' not found'.format(dataset_catalog.get_im_dir(name))
        assert os.path.exists(dataset_catalog.get_ann_fn(name)), \
            'Ann fn \'{}\' not found'.format(dataset_catalog.get_ann_fn(name))
        logger.debug('Creating: {}'.format(name))
        self.name = name
        self.image_directory = dataset_catalog.get_im_dir(name)
        self.image_prefix = dataset_catalog.get_im_prefix(name)
        self.COCO = COCO(dataset_catalog.get_ann_fn(name))
        self.debug_timer = Timer()
        # Set up dataset classes
        category_ids = self.COCO.getCatIds()
        categories = [c['name'] for c in self.COCO.loadCats(category_ids)]
        self.category_to_id_map = dict(zip(categories, category_ids))
        self.classes = ['__background__'] + categories
        self.num_classes = len(self.classes)
        self.json_category_id_to_contiguous_id = {
            v: i + 1
            for i, v in enumerate(self.COCO.getCatIds())
        }
        self.contiguous_category_id_to_json_id = {
            v: k
            for k, v in self.json_category_id_to_contiguous_id.items()
        }
        self._init_keypoints() 
Example #16
Source File: test_engine.py    From Detectron-DA-Faster-RCNN with Apache License 2.0
def test_net_on_dataset(
    weights_file,
    dataset_name,
    proposal_file,
    output_dir,
    multi_gpu=False,
    gpu_id=0
):
    """Run inference on a dataset."""
    dataset = JsonDataset(dataset_name)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            weights_file, dataset_name, proposal_file, num_images, output_dir
        )
    else:
        all_boxes, all_segms, all_keyps = test_net(
            weights_file, dataset_name, proposal_file, output_dir, gpu_id=gpu_id
        )
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))
    results = task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, output_dir
    )
    return results 
Example #17
Source File: rpn_generator.py    From Detectron-DA-Faster-RCNN with Apache License 2.0
def generate_proposals_on_roidb(
    model, roidb, start_ind=None, end_ind=None, total_num_images=None,
    gpu_id=0,
):
    """Generate RPN proposals on all images in an imdb."""
    _t = Timer()
    num_images = len(roidb)
    roidb_boxes = [[] for _ in range(num_images)]
    roidb_scores = [[] for _ in range(num_images)]
    roidb_ids = [[] for _ in range(num_images)]
    if start_ind is None:
        start_ind = 0
        end_ind = num_images
        total_num_images = num_images
    for i in range(num_images):
        roidb_ids[i] = roidb[i]['id']
        im = cv2.imread(roidb[i]['image'])
        with c2_utils.NamedCudaScope(gpu_id):
            _t.tic()
            roidb_boxes[i], roidb_scores[i] = im_proposals(model, im)
            _t.toc()
        if i % 10 == 0:
            ave_time = _t.average_time
            eta_seconds = ave_time * (num_images - i - 1)
            eta = str(datetime.timedelta(seconds=int(eta_seconds)))
            logger.info(
                (
                    'rpn_generate: range [{:d}, {:d}] of {:d}: '
                    '{:d}/{:d} {:.3f}s (eta: {})'
                ).format(
                    start_ind + 1, end_ind, total_num_images, start_ind + i + 1,
                    start_ind + num_images, ave_time, eta
                )
            )

    return roidb_boxes, roidb_scores, roidb_ids 
Example #18
Source File: generate_testdev_from_test.py    From Detectron-DA-Faster-RCNN with Apache License 2.0
def convert(json_file, output_dir):
    print('Reading: {}'.format(json_file))
    with open(json_file, 'r') as fid:
        dt = json.load(fid)
    print('done!')

    test_image_info = get_ann_fn('coco_2017_test')
    with open(test_image_info, 'r') as fid:
        info_test = json.load(fid)
    image_test = info_test['images']
    image_test_id = [i['id'] for i in image_test]
    print('{} has {} images'.format(test_image_info, len(image_test_id)))

    test_dev_image_info = get_ann_fn('coco_2017_test-dev')
    with open(test_dev_image_info, 'r') as fid:
        info_testdev = json.load(fid)
    image_testdev = info_testdev['images']
    image_testdev_id = [i['id'] for i in image_testdev]
    print('{} has {} images'.format(test_dev_image_info, len(image_testdev_id)))

    dt_testdev = []
    print('Filtering test-dev from test...')
    t = Timer()
    t.tic()
    for i in range(len(dt)):
        if i % 1000 == 0:
            print('{}/{}'.format(i, len(dt)))
        if dt[i]['image_id'] in image_testdev_id:
            dt_testdev.append(dt[i])
    print('Done filtering ({:.2f}s)!'.format(t.toc()))

    filename, file_extension = os.path.splitext(os.path.basename(json_file))
    filename = filename + '_test-dev'
    filename = os.path.join(output_dir, filename + file_extension)
    with open(filename, 'w') as fid:
        json.dump(dt_testdev, fid)
    print('Done writing: {}!'.format(filename)) 
Example #19
Source File: data_loader_benchmark.py    From CBNet with Apache License 2.0
def loader_loop(roi_data_loader):
    load_timer = Timer()
    iters = 100
    for i in range(iters):
        load_timer.tic()
        roi_data_loader.get_next_minibatch()
        load_timer.toc()
        print('{:d}/{:d}: Average get_next_minibatch time: {:.3f}s'.format(
              i + 1, iters, load_timer.average_time)) 
Example #20
Source File: json_dataset.py    From CBNet with Apache License 2.0
def __init__(self, name):
        assert dataset_catalog.contains(name), \
            'Unknown dataset name: {}'.format(name)
        assert os.path.exists(dataset_catalog.get_im_dir(name)), \
            'Im dir \'{}\' not found'.format(dataset_catalog.get_im_dir(name))
        assert os.path.exists(dataset_catalog.get_ann_fn(name)), \
            'Ann fn \'{}\' not found'.format(dataset_catalog.get_ann_fn(name))
        logger.debug('Creating: {}'.format(name))
        self.name = name
        self.image_directory = dataset_catalog.get_im_dir(name)
        self.image_prefix = dataset_catalog.get_im_prefix(name)
        self.COCO = COCO(dataset_catalog.get_ann_fn(name))
        self.debug_timer = Timer()
        # Set up dataset classes
        category_ids = self.COCO.getCatIds()
        categories = [c['name'] for c in self.COCO.loadCats(category_ids)]
        self.category_to_id_map = dict(zip(categories, category_ids))
        self.classes = ['__background__'] + categories
        self.num_classes = len(self.classes)
        self.json_category_id_to_contiguous_id = {
            v: i + 1
            for i, v in enumerate(self.COCO.getCatIds())
        }
        self.contiguous_category_id_to_json_id = {
            v: k
            for k, v in self.json_category_id_to_contiguous_id.items()
        }
        self._init_keypoints() 
Example #21
Source File: test_engine.py    From CBNet with Apache License 2.0
def test_net_on_dataset(
    weights_file,
    dataset_name,
    proposal_file,
    output_dir,
    multi_gpu=False,
    gpu_id=0
):
    """Run inference on a dataset."""
    dataset = JsonDataset(dataset_name)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            weights_file, dataset_name, proposal_file, num_images, output_dir
        )
    else:
        all_boxes, all_segms, all_keyps = test_net(
            weights_file, dataset_name, proposal_file, output_dir, gpu_id=gpu_id
        )
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))
    results = task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, output_dir
    )
    return results 
Example #22
Source File: rpn_generator.py    From CBNet with Apache License 2.0
def generate_proposals_on_roidb(
    model, roidb, start_ind=None, end_ind=None, total_num_images=None,
    gpu_id=0,
):
    """Generate RPN proposals on all images in an imdb."""
    _t = Timer()
    num_images = len(roidb)
    roidb_boxes = [[] for _ in range(num_images)]
    roidb_scores = [[] for _ in range(num_images)]
    roidb_ids = [[] for _ in range(num_images)]
    if start_ind is None:
        start_ind = 0
        end_ind = num_images
        total_num_images = num_images
    for i in range(num_images):
        roidb_ids[i] = roidb[i]['id']
        im = cv2.imread(roidb[i]['image'])
        with c2_utils.NamedCudaScope(gpu_id):
            _t.tic()
            roidb_boxes[i], roidb_scores[i] = im_proposals(model, im)
            _t.toc()
        if i % 10 == 0:
            ave_time = _t.average_time
            eta_seconds = ave_time * (num_images - i - 1)
            eta = str(datetime.timedelta(seconds=int(eta_seconds)))
            logger.info(
                (
                    'rpn_generate: range [{:d}, {:d}] of {:d}: '
                    '{:d}/{:d} {:.3f}s (eta: {})'
                ).format(
                    start_ind + 1, end_ind, total_num_images, start_ind + i + 1,
                    start_ind + num_images, ave_time, eta
                )
            )

    return roidb_boxes, roidb_scores, roidb_ids 
Example #23
Source File: test_engine.py    From Clustered-Object-Detection-in-Aerial-Image with Apache License 2.0
def test_net_on_dataset(
    weights_file,
    dataset_name,
    proposal_file,
    output_dir,
    multi_gpu=False,
    gpu_id=0
):
    """Run inference on a dataset."""
    dataset = JsonDataset(dataset_name)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            weights_file, dataset_name, proposal_file, num_images, output_dir
        )
    else:
        all_boxes, all_segms, all_keyps = test_net(
            weights_file, dataset_name, proposal_file, output_dir, gpu_id=gpu_id
        )
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))
    results = task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, output_dir
    )
    return results 
Example #24
Source File: data_loader_benchmark.py    From KL-Loss with Apache License 2.0
def loader_loop(roi_data_loader):
    load_timer = Timer()
    iters = 100
    for i in range(iters):
        load_timer.tic()
        roi_data_loader.get_next_minibatch()
        load_timer.toc()
        print('{:d}/{:d}: Average get_next_minibatch time: {:.3f}s'.format(
              i + 1, iters, load_timer.average_time)) 
Example #25
Source File: json_dataset.py    From KL-Loss with Apache License 2.0
def __init__(self, name):
        assert dataset_catalog.contains(name), \
            'Unknown dataset name: {}'.format(name)
        assert os.path.exists(dataset_catalog.get_im_dir(name)), \
            'Im dir \'{}\' not found'.format(dataset_catalog.get_im_dir(name))
        assert os.path.exists(dataset_catalog.get_ann_fn(name)), \
            'Ann fn \'{}\' not found'.format(dataset_catalog.get_ann_fn(name))
        logger.debug('Creating: {}'.format(name))
        self.name = name
        self.image_directory = dataset_catalog.get_im_dir(name)
        self.image_prefix = dataset_catalog.get_im_prefix(name)
        self.COCO = COCO(dataset_catalog.get_ann_fn(name))
        self.debug_timer = Timer()
        # Set up dataset classes
        category_ids = self.COCO.getCatIds()
        categories = [c['name'] for c in self.COCO.loadCats(category_ids)]
        self.category_to_id_map = dict(zip(categories, category_ids))
        self.classes = ['__background__'] + categories
        self.num_classes = len(self.classes)
        self.json_category_id_to_contiguous_id = {
            v: i + 1
            for i, v in enumerate(self.COCO.getCatIds())
        }
        self.contiguous_category_id_to_json_id = {
            v: k
            for k, v in self.json_category_id_to_contiguous_id.items()
        }
        self._init_keypoints() 
Example #26
Source File: test_engine.py    From KL-Loss with Apache License 2.0
def test_net_on_dataset(
    weights_file,
    dataset_name,
    proposal_file,
    output_dir,
    multi_gpu=False,
    gpu_id=0
):
    """Run inference on a dataset."""
    dataset = JsonDataset(dataset_name)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
            weights_file, dataset_name, proposal_file, num_images, output_dir
        )
    else:
        all_boxes, all_segms, all_keyps = test_net(
            weights_file, dataset_name, proposal_file, output_dir, gpu_id=gpu_id
        )
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))
    results = task_evaluation.evaluate_all(
        dataset, all_boxes, all_segms, all_keyps, output_dir
    )
    return results 
Example #27
Source File: rpn_generator.py    From KL-Loss with Apache License 2.0
def generate_rpn_on_dataset(
    weights_file,
    dataset_name,
    _proposal_file_ignored,
    output_dir,
    multi_gpu=False,
    gpu_id=0
):
    """Run inference on a dataset."""
    dataset = JsonDataset(dataset_name)
    test_timer = Timer()
    test_timer.tic()
    if multi_gpu:
        num_images = len(dataset.get_roidb())
        _boxes, _scores, _ids, rpn_file = multi_gpu_generate_rpn_on_dataset(
            weights_file, dataset_name, _proposal_file_ignored, num_images,
            output_dir
        )
    else:
        # Processes entire dataset range by default
        _boxes, _scores, _ids, rpn_file = generate_rpn_on_range(
            weights_file,
            dataset_name,
            _proposal_file_ignored,
            output_dir,
            gpu_id=gpu_id
        )
    test_timer.toc()
    logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))
    return evaluate_proposal_file(dataset, rpn_file, output_dir) 
Example #28
Source File: generate_testdev_from_test.py    From KL-Loss with Apache License 2.0
def convert(json_file, output_dir):
    print('Reading: {}'.format(json_file))
    with open(json_file, 'r') as fid:
        dt = json.load(fid)
    print('done!')

    test_image_info = get_ann_fn('coco_2017_test')
    with open(test_image_info, 'r') as fid:
        info_test = json.load(fid)
    image_test = info_test['images']
    image_test_id = [i['id'] for i in image_test]
    print('{} has {} images'.format(test_image_info, len(image_test_id)))

    test_dev_image_info = get_ann_fn('coco_2017_test-dev')
    with open(test_dev_image_info, 'r') as fid:
        info_testdev = json.load(fid)
    image_testdev = info_testdev['images']
    image_testdev_id = [i['id'] for i in image_testdev]
    print('{} has {} images'.format(test_dev_image_info, len(image_testdev_id)))

    dt_testdev = []
    print('Filtering test-dev from test...')
    t = Timer()
    t.tic()
    for i in range(len(dt)):
        if i % 1000 == 0:
            print('{}/{}'.format(i, len(dt)))
        if dt[i]['image_id'] in image_testdev_id:
            dt_testdev.append(dt[i])
    print('Done filtering ({:.2f}s)!'.format(t.toc()))

    filename, file_extension = os.path.splitext(os.path.basename(json_file))
    filename = filename + '_test-dev'
    filename = os.path.join(output_dir, filename + file_extension)
    with open(filename, 'w') as fid:
        json.dump(dt_testdev, fid)
    print('Done writing: {}!'.format(filename)) 
Example #29
Source File: data_loader_benchmark.py    From Clustered-Object-Detection-in-Aerial-Image with Apache License 2.0
def loader_loop(roi_data_loader):
    load_timer = Timer()
    iters = 100
    for i in range(iters):
        load_timer.tic()
        roi_data_loader.get_next_minibatch()
        load_timer.toc()
        print('{:d}/{:d}: Average get_next_minibatch time: {:.3f}s'.format(
              i + 1, iters, load_timer.average_time)) 
Example #30
Source File: json_dataset.py    From Clustered-Object-Detection-in-Aerial-Image with Apache License 2.0
def __init__(self, name):
        assert dataset_catalog.contains(name), \
            'Unknown dataset name: {}'.format(name)
        assert os.path.exists(dataset_catalog.get_im_dir(name)), \
            'Im dir \'{}\' not found'.format(dataset_catalog.get_im_dir(name))
        assert os.path.exists(dataset_catalog.get_ann_fn(name)), \
            'Ann fn \'{}\' not found'.format(dataset_catalog.get_ann_fn(name))
        logger.debug('Creating: {}'.format(name))
        self.name = name
        self.image_directory = dataset_catalog.get_im_dir(name)
        self.image_prefix = dataset_catalog.get_im_prefix(name)
        self.COCO = COCO(dataset_catalog.get_ann_fn(name))
        self.debug_timer = Timer()
        # Set up dataset classes
        category_ids = self.COCO.getCatIds()
        categories = [c['name'] for c in self.COCO.loadCats(category_ids)]
        self.category_to_id_map = dict(zip(categories, category_ids))
        self.classes = ['__background__'] + categories
        self.num_classes = len(self.classes)
        self.json_category_id_to_contiguous_id = {
            v: i + 1
            for i, v in enumerate(self.COCO.getCatIds())
        }
        self.contiguous_category_id_to_json_id = {
            v: k
            for k, v in self.json_category_id_to_contiguous_id.items()
        }
        self._init_keypoints()