Python mmcv.imread() Examples

The following are 30 code examples of mmcv.imread(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module mmcv, or try the search function.
Example #1
Source File: inference.py From mmdetection with Apache License 2.0 | 6 votes |
def __call__(self, results):
    """Call function to load images into results.

    Args:
        results (dict): A result dict contains the file name
            of the image to be read.

    Returns:
        dict: ``results`` will be returned containing loaded image.
    """
    src = results['img']
    if isinstance(src, str):
        # A path was supplied; remember it as the (original) filename.
        results['filename'] = src
        results['ori_filename'] = src
    else:
        # Already-loaded array: there is no filename to record.
        results['filename'] = None
        results['ori_filename'] = None
    img = mmcv.imread(src)
    results['img'] = img
    results['img_fields'] = ['img']
    results['img_shape'] = img.shape
    results['ori_shape'] = img.shape
    return results
Example #2
Source File: test_optflow.py From mmcv with Apache License 2.0 | 6 votes |
def test_flowwrite():
    """mmcv.flowwrite round-trips .flo files and quantized jpg pairs."""
    flow = np.random.rand(100, 100, 2).astype(np.float32)

    # write to a .flo file and read it back unchanged
    _, filename = tempfile.mkstemp()
    mmcv.flowwrite(flow, filename)
    assert_array_equal(flow, mmcv.flowread(filename))
    os.remove(filename)

    # write to two .jpg files: quantized dx/dy are concatenated into one image
    tmp_filename = osp.join(tempfile.gettempdir(), 'mmcv_test_flow.jpg')
    for concat_axis in (0, 1):
        mmcv.flowwrite(
            flow, tmp_filename, quantize=True, concat_axis=concat_axis)
        expected_shape = (200, 100) if concat_axis == 0 else (100, 200)
        assert osp.isfile(tmp_filename)
        assert mmcv.imread(
            tmp_filename, flag='unchanged').shape == expected_shape
        os.remove(tmp_filename)

    # test exceptions: only axis 0 or 1 is a valid concat axis
    with pytest.raises(AssertionError):
        mmcv.flowwrite(flow, tmp_filename, quantize=True, concat_axis=2)
Example #3
Source File: inference.py From Libra_R-CNN with Apache License 2.0 | 5 votes |
def _inference_single(model, img, img_transform, device):
    """Run the detector on a single image (path or loaded array).

    Args:
        model: Detector whose ``cfg`` attribute supplies test settings.
        img (str or np.ndarray): Image filename or already-loaded image.
        img_transform: Preprocessing transform applied before inference.
        device: Device on which the prepared data is placed.

    Returns:
        The raw detection result produced by the model.
    """
    loaded = mmcv.imread(img)
    batch = _prepare_data(loaded, img_transform, model.cfg, device)
    # Inference only: gradients are never needed here.
    with torch.no_grad():
        return model(return_loss=False, rescale=True, **batch)
Example #4
Source File: loading.py From Cascade-RPN with Apache License 2.0 | 5 votes |
def __call__(self, results):
    """Load the image named in ``results['img_info']`` into ``results``.

    Args:
        results (dict): Must contain 'img_info' with a 'filename' entry,
            and optionally 'img_prefix'.

    Returns:
        dict: ``results`` updated with 'filename', 'img', 'img_shape'
        and 'ori_shape'.
    """
    # Tolerate a missing/None prefix so absolute paths also work
    # (osp.join(None, ...) would raise TypeError otherwise).
    if results.get('img_prefix') is not None:
        filename = osp.join(results['img_prefix'],
                            results['img_info']['filename'])
    else:
        filename = results['img_info']['filename']
    img = mmcv.imread(filename)
    if self.to_float32:
        img = img.astype(np.float32)
    results['filename'] = filename
    results['img'] = img
    results['img_shape'] = img.shape
    results['ori_shape'] = img.shape
    return results
Example #5
Source File: rawframes_dataset.py From mmaction with Apache License 2.0 | 5 votes |
def _load_image(self, directory, image_tmpl, modality, idx): if modality in ['RGB', 'RGBDiff']: return [mmcv.imread(osp.join(directory, image_tmpl.format(idx)))] elif modality == 'Flow': x_imgs = mmcv.imread( osp.join(directory, image_tmpl.format('x', idx)), flag='grayscale') y_imgs = mmcv.imread( osp.join(directory, image_tmpl.format('y', idx)), flag='grayscale') return [x_imgs, y_imgs] else: raise ValueError( 'Not implemented yet; modality should be ' '["RGB", "RGBDiff", "Flow"]')
Example #6
Source File: loading.py From kaggle-kuzushiji-recognition with MIT License | 5 votes |
def _load_semantic_seg(self, results):
    """Read the semantic segmentation map and store it in ``results``.

    The map is loaded unchanged (no color conversion) and squeezed to 2-D.
    """
    seg_path = osp.join(results['seg_prefix'],
                        results['ann_info']['seg_map'])
    seg_map = mmcv.imread(seg_path, flag='unchanged')
    results['gt_semantic_seg'] = seg_map.squeeze()
    return results
Example #7
Source File: high_api.py From hrnet with MIT License | 5 votes |
def human_boxes_get(model, img, score_thr=0.5):
    """Detect humans in ``img`` and return boxes/scores above ``score_thr``.

    Args:
        model: Detector used by ``inference_detector``.
        img (str or np.ndarray): Image path or already-loaded image.
        score_thr (float): Minimum score for a detection to be kept.

    Returns:
        tuple: (bboxes, scores) filtered by ``score_thr``.
    """
    if isinstance(img, str):
        img = mmcv.imread(img)
    detections = inference_detector(model, img, cfg, device='cuda:0')
    boxes, confidences = re_result(detections, score_thr=score_thr)
    return boxes, confidences
Example #8
Source File: inference.py From FoveaBox with Apache License 2.0 | 5 votes |
def _inference_single(model, img, img_transform, device):
    """Run the detector on a single image (path or loaded array).

    Args:
        model: Detector whose ``cfg`` attribute supplies test settings.
        img (str or np.ndarray): Image filename or already-loaded image.
        img_transform: Preprocessing transform applied before inference.
        device: Device on which the prepared data is placed.

    Returns:
        The raw detection result produced by the model.
    """
    loaded = mmcv.imread(img)
    batch = _prepare_data(loaded, img_transform, model.cfg, device)
    # Inference only: gradients are never needed here.
    with torch.no_grad():
        return model(return_loss=False, rescale=True, **batch)
Example #9
Source File: inference.py From hrnet with MIT License | 5 votes |
def _inference_single(model, img, img_transform, cfg, device):
    """Run the detector on a single image (path or loaded array).

    Args:
        model: Detector to evaluate.
        img (str or np.ndarray): Image filename or already-loaded image.
        img_transform: Preprocessing transform applied before inference.
        cfg: Config object supplying test settings.
        device: Device on which the prepared data is placed.

    Returns:
        The raw detection result produced by the model.
    """
    loaded = mmcv.imread(img)
    batch = _prepare_data(loaded, img_transform, cfg, device)
    # Inference only: gradients are never needed here.
    with torch.no_grad():
        return model(return_loss=False, rescale=True, **batch)
Example #10
Source File: inference.py From Cascade-RPN with Apache License 2.0 | 5 votes |
def __call__(self, results):
    """Load ``results['img']`` (path or array) and record its metadata."""
    src = results['img']
    # Keep the path when one was given; arrays have no filename.
    results['filename'] = src if isinstance(src, str) else None
    img = mmcv.imread(src)
    results['img'] = img
    results['img_shape'] = img.shape
    results['ori_shape'] = img.shape
    return results
Example #11
Source File: inference.py From Feature-Selective-Anchor-Free-Module-for-Single-Shot-Object-Detection with Apache License 2.0 | 5 votes |
def __call__(self, results):
    """Load ``results['img']`` (path or array) and record its metadata."""
    src = results['img']
    # Keep the path when one was given; arrays have no filename.
    results['filename'] = src if isinstance(src, str) else None
    img = mmcv.imread(src)
    results['img'] = img
    results['img_shape'] = img.shape
    results['ori_shape'] = img.shape
    return results
Example #12
Source File: ssn_dataset.py From mmaction with Apache License 2.0 | 5 votes |
def _load_image(self, directory, image_tmpl, modality, idx): if modality in ['RGB', 'RGBDiff']: return [mmcv.imread(osp.join(directory, image_tmpl.format(idx)))] elif modality == 'Flow': x_imgs = mmcv.imread( osp.join(directory, image_tmpl.format('x', idx)), flag='grayscale') y_imgs = mmcv.imread( osp.join(directory, image_tmpl.format('y', idx)), flag='grayscale') return [x_imgs, y_imgs] else: raise ValueError( 'Not implemented yet; modality should be ' '["RGB", "RGBDiff", "Flow"]')
Example #13
Source File: loading.py From Feature-Selective-Anchor-Free-Module-for-Single-Shot-Object-Detection with Apache License 2.0 | 5 votes |
def __call__(self, results):
    """Load the image named in ``results['img_info']`` into ``results``.

    Args:
        results (dict): Must contain 'img_info' with a 'filename' entry,
            and optionally 'img_prefix'.

    Returns:
        dict: ``results`` updated with 'filename', 'img', 'img_shape'
        and 'ori_shape'.
    """
    # Tolerate a missing/None prefix so absolute paths also work
    # (osp.join(None, ...) would raise TypeError otherwise).
    if results.get('img_prefix') is not None:
        filename = osp.join(results['img_prefix'],
                            results['img_info']['filename'])
    else:
        filename = results['img_info']['filename']
    img = mmcv.imread(filename)
    if self.to_float32:
        img = img.astype(np.float32)
    results['filename'] = filename
    results['img'] = img
    results['img_shape'] = img.shape
    results['ori_shape'] = img.shape
    return results
Example #14
Source File: loading.py From Feature-Selective-Anchor-Free-Module-for-Single-Shot-Object-Detection with Apache License 2.0 | 5 votes |
def _load_semantic_seg(self, results):
    """Read the semantic segmentation map and store it in ``results``.

    The map is loaded unchanged (no color conversion) and squeezed to 2-D.
    """
    seg_path = osp.join(results['seg_prefix'],
                        results['ann_info']['seg_map'])
    seg_map = mmcv.imread(seg_path, flag='unchanged')
    results['gt_semantic_seg'] = seg_map.squeeze()
    return results
Example #15
Source File: inference.py From kaggle-imaterialist with MIT License | 5 votes |
def _inference_single(model, img, img_transform, device):
    """Run the detector on a single image (path or loaded array).

    Args:
        model: Detector whose ``cfg`` attribute supplies test settings.
        img (str or np.ndarray): Image filename or already-loaded image.
        img_transform: Preprocessing transform applied before inference.
        device: Device on which the prepared data is placed.

    Returns:
        The raw detection result produced by the model.
    """
    loaded = mmcv.imread(img)
    batch = _prepare_data(loaded, img_transform, model.cfg, device)
    # Inference only: gradients are never needed here.
    with torch.no_grad():
        return model(return_loss=False, rescale=True, **batch)
Example #16
Source File: ava_dataset.py From mmaction with Apache License 2.0 | 5 votes |
def _load_image(self, directory, image_tmpl, modality, idx): if modality in ['RGB', 'RGBDiff']: return [mmcv.imread(osp.join(directory, image_tmpl.format(idx)))] elif modality == 'Flow': x_imgs = mmcv.imread( osp.join(directory, image_tmpl.format('x', idx)), flag='grayscale') y_imgs = mmcv.imread( osp.join(directory, image_tmpl.format('y', idx)), flag='grayscale') return [x_imgs, y_imgs] else: raise ValueError( 'Not implemented yet; modality should be ' '["RGB", "RGBDiff", "Flow"]')
Example #17
Source File: loading.py From Cascade-RPN with Apache License 2.0 | 5 votes |
def _load_semantic_seg(self, results):
    """Read the semantic segmentation map and store it in ``results``.

    The map is loaded unchanged (no color conversion) and squeezed to 2-D.
    """
    seg_path = osp.join(results['seg_prefix'],
                        results['ann_info']['seg_map'])
    seg_map = mmcv.imread(seg_path, flag='unchanged')
    results['gt_semantic_seg'] = seg_map.squeeze()
    return results
Example #18
Source File: inference.py From Libra_R-CNN with Apache License 2.0 | 5 votes |
def show_result(img, result, class_names, score_thr=0.3, out_file=None):
    """Visualize the detection results on the image.

    Args:
        img (str or np.ndarray): Image filename or loaded image.
        result (tuple[list] or list): The detection result, can be either
            (bbox, segm) or just bbox.
        class_names (list[str] or tuple[str]): A list of class names.
        score_thr (float): The threshold to visualize the bboxes and masks.
        out_file (str, optional): If specified, the visualization result will
            be written to the out file instead of shown in a window.
    """
    assert isinstance(class_names, (tuple, list))
    img = mmcv.imread(img)
    if isinstance(result, tuple):
        bbox_result, segm_result = result
    else:
        bbox_result, segm_result = result, None
    bboxes = np.vstack(bbox_result)
    # draw segmentation masks
    if segm_result is not None:
        segms = mmcv.concat_list(segm_result)
        inds = np.where(bboxes[:, -1] > score_thr)[0]
        for i in inds:
            color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
            # np.bool was removed in NumPy 1.24; the builtin bool yields
            # the identical boolean mask on every NumPy version.
            mask = maskUtils.decode(segms[i]).astype(bool)
            img[mask] = img[mask] * 0.5 + color_mask * 0.5
    # draw bounding boxes
    labels = [
        np.full(bbox.shape[0], i, dtype=np.int32)
        for i, bbox in enumerate(bbox_result)
    ]
    labels = np.concatenate(labels)
    mmcv.imshow_det_bboxes(
        img.copy(),
        bboxes,
        labels,
        class_names=class_names,
        score_thr=score_thr,
        show=out_file is None,
        out_file=out_file)
Example #19
Source File: inference.py From hrnet with MIT License | 5 votes |
def show_result(img, result, dataset='coco', score_thr=0.5,
                out_file=None, wait_time=0):
    """Visualize the detection results on the image.

    Args:
        img (str or np.ndarray): Image filename or loaded image.
        result (tuple[list] or list): Detection result, either
            (bbox, segm) or just bbox.
        dataset (str): Dataset name used to look up class names.
        score_thr (float): Threshold for masks/boxes to be drawn.
        out_file (str, optional): If given, write instead of showing.
        wait_time (int): Display wait time passed to the viewer.
    """
    img = mmcv.imread(img)
    class_names = get_classes(dataset)
    if isinstance(result, tuple):
        bbox_result, segm_result = result
    else:
        bbox_result, segm_result = result, None
    bboxes = np.vstack(bbox_result)
    # draw segmentation masks
    if segm_result is not None:
        segms = mmcv.concat_list(segm_result)
        inds = np.where(bboxes[:, -1] > score_thr)[0]
        for i in inds:
            color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
            # np.bool was removed in NumPy 1.24; the builtin bool yields
            # the identical boolean mask on every NumPy version.
            mask = maskUtils.decode(segms[i]).astype(bool)
            img[mask] = img[mask] * 0.5 + color_mask * 0.5
    # draw bounding boxes
    labels = [
        np.full(bbox.shape[0], i, dtype=np.int32)
        for i, bbox in enumerate(bbox_result)
    ]
    labels = np.concatenate(labels)
    mmcv.imshow_det_bboxes(
        img.copy(),
        bboxes,
        labels,
        class_names=class_names,
        score_thr=score_thr,
        show=out_file is None,
        wait_time=wait_time)
    # NOTE(review): re_bboxes/re_scores are never defined in this function,
    # so this return raises NameError unless they exist as module globals —
    # confirm against the original module before relying on the return value.
    return re_bboxes, re_scores
Example #20
Source File: inference.py From Reasoning-RCNN with Apache License 2.0 | 5 votes |
def show_result(img, result, dataset='coco', score_thr=0.3):
    """Draw the detection bboxes from ``result`` onto ``img``.

    Args:
        img (str or np.ndarray): Image filename or loaded image.
        result (list): Per-class list of bbox arrays.
        dataset (str): Dataset name used to look up class names.
        score_thr (float): Threshold for boxes to be drawn.
    """
    class_names = get_classes(dataset)
    # One label entry per bbox, indexed by its class position.
    labels = np.concatenate([
        np.full(per_class.shape[0], cls_idx, dtype=np.int32)
        for cls_idx, per_class in enumerate(result)
    ])
    bboxes = np.vstack(result)
    img = mmcv.imread(img)
    mmcv.imshow_det_bboxes(
        img.copy(),
        bboxes,
        labels,
        class_names=class_names,
        score_thr=score_thr)
Example #21
Source File: inference.py From Reasoning-RCNN with Apache License 2.0 | 5 votes |
def _inference_single(model, img, img_transform, cfg, device):
    """Run the detector on a single image (path or loaded array).

    Args:
        model: Detector to evaluate.
        img (str or np.ndarray): Image filename or already-loaded image.
        img_transform: Preprocessing transform applied before inference.
        cfg: Config object supplying test settings.
        device: Device on which the prepared data is placed.

    Returns:
        The raw detection result produced by the model.
    """
    loaded = mmcv.imread(img)
    batch = _prepare_data(loaded, img_transform, cfg, device)
    # Inference only: gradients are never needed here.
    with torch.no_grad():
        return model(return_loss=False, rescale=True, **batch)
Example #22
Source File: loading.py From IoU-Uniform-R-CNN with Apache License 2.0 | 5 votes |
def _load_semantic_seg(self, results):
    """Read the semantic segmentation map and store it in ``results``.

    The map is loaded unchanged (no color conversion) and squeezed to 2-D.
    """
    seg_path = osp.join(results['seg_prefix'],
                        results['ann_info']['seg_map'])
    seg_map = mmcv.imread(seg_path, flag='unchanged')
    results['gt_semantic_seg'] = seg_map.squeeze()
    return results
Example #23
Source File: loading.py From IoU-Uniform-R-CNN with Apache License 2.0 | 5 votes |
def __call__(self, results):
    """Load the image named in ``results['img_info']`` into ``results``.

    Joins 'img_prefix' when one is set; otherwise the filename is used
    as-is (e.g. an absolute path).
    """
    prefix = results['img_prefix']
    rel_name = results['img_info']['filename']
    filename = rel_name if prefix is None else osp.join(prefix, rel_name)
    img = mmcv.imread(filename)
    if self.to_float32:
        img = img.astype(np.float32)
    results['filename'] = filename
    results['img'] = img
    results['img_shape'] = img.shape
    results['ori_shape'] = img.shape
    return results
Example #24
Source File: inference.py From IoU-Uniform-R-CNN with Apache License 2.0 | 5 votes |
def __call__(self, results):
    """Load ``results['img']`` (path or array) and record its metadata."""
    src = results['img']
    # Keep the path when one was given; arrays have no filename.
    results['filename'] = src if isinstance(src, str) else None
    img = mmcv.imread(src)
    results['img'] = img
    results['img_shape'] = img.shape
    results['ori_shape'] = img.shape
    return results
Example #25
Source File: loading.py From RDSNet with Apache License 2.0 | 5 votes |
def _load_semantic_seg(self, results):
    """Read the semantic segmentation map and store it in ``results``.

    The map is loaded unchanged (no color conversion) and squeezed to 2-D.
    """
    seg_path = osp.join(results['seg_prefix'],
                        results['ann_info']['seg_map'])
    seg_map = mmcv.imread(seg_path, flag='unchanged')
    results['gt_semantic_seg'] = seg_map.squeeze()
    return results
Example #26
Source File: loading.py From RDSNet with Apache License 2.0 | 5 votes |
def __call__(self, results):
    """Load the image named in ``results['img_info']`` into ``results``.

    Args:
        results (dict): Must contain 'img_info' with a 'filename' entry,
            and optionally 'img_prefix'.

    Returns:
        dict: ``results`` updated with 'filename', 'img', 'img_shape'
        and 'ori_shape'.
    """
    # Tolerate a missing/None prefix so absolute paths also work
    # (osp.join(None, ...) would raise TypeError otherwise).
    if results.get('img_prefix') is not None:
        filename = osp.join(results['img_prefix'],
                            results['img_info']['filename'])
    else:
        filename = results['img_info']['filename']
    img = mmcv.imread(filename)
    if self.to_float32:
        img = img.astype(np.float32)
    results['filename'] = filename
    results['img'] = img
    results['img_shape'] = img.shape
    results['ori_shape'] = img.shape
    return results
Example #27
Source File: inference.py From CenterNet with Apache License 2.0 | 5 votes |
def _inference_single(model, img, img_transform, device):
    """Run the detector on a single image (path or loaded array).

    Args:
        model: Detector whose ``cfg`` attribute supplies test settings.
        img (str or np.ndarray): Image filename or already-loaded image.
        img_transform: Preprocessing transform applied before inference.
        device: Device on which the prepared data is placed.

    Returns:
        The raw detection result produced by the model.
    """
    loaded = mmcv.imread(img)
    batch = _prepare_data(loaded, img_transform, model.cfg, device)
    # Inference only: gradients are never needed here.
    with torch.no_grad():
        return model(return_loss=False, rescale=True, **batch)
Example #28
Source File: inference.py From RDSNet with Apache License 2.0 | 5 votes |
def __call__(self, results):
    """Load ``results['img']`` (path or array) and record its metadata."""
    src = results['img']
    # Keep the path when one was given; arrays have no filename.
    results['filename'] = src if isinstance(src, str) else None
    img = mmcv.imread(src)
    results['img'] = img
    results['img_shape'] = img.shape
    results['ori_shape'] = img.shape
    return results
Example #29
Source File: inference.py From Grid-R-CNN with Apache License 2.0 | 5 votes |
def show_result(img, result, class_names, score_thr=0.3, out_file=None):
    """Visualize the detection results on the image.

    Args:
        img (str or np.ndarray): Image filename or loaded image.
        result (tuple[list] or list): The detection result, can be either
            (bbox, segm) or just bbox.
        class_names (list[str] or tuple[str]): A list of class names.
        score_thr (float): The threshold to visualize the bboxes and masks.
        out_file (str, optional): If specified, the visualization result will
            be written to the out file instead of shown in a window.
    """
    assert isinstance(class_names, (tuple, list))
    img = mmcv.imread(img)
    if isinstance(result, tuple):
        bbox_result, segm_result = result
    else:
        bbox_result, segm_result = result, None
    bboxes = np.vstack(bbox_result)
    # draw segmentation masks
    if segm_result is not None:
        segms = mmcv.concat_list(segm_result)
        inds = np.where(bboxes[:, -1] > score_thr)[0]
        for i in inds:
            color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
            # np.bool was removed in NumPy 1.24; the builtin bool yields
            # the identical boolean mask on every NumPy version.
            mask = maskUtils.decode(segms[i]).astype(bool)
            img[mask] = img[mask] * 0.5 + color_mask * 0.5
    # draw bounding boxes
    labels = [
        np.full(bbox.shape[0], i, dtype=np.int32)
        for i, bbox in enumerate(bbox_result)
    ]
    labels = np.concatenate(labels)
    mmcv.imshow_det_bboxes(
        img.copy(),
        bboxes,
        labels,
        class_names=class_names,
        score_thr=score_thr,
        show=out_file is None,
        out_file=out_file)
Example #30
Source File: inference.py From Grid-R-CNN with Apache License 2.0 | 5 votes |
def _inference_single(model, img, img_transform, device):
    """Run the detector on a single image (path or loaded array).

    Args:
        model: Detector whose ``cfg`` attribute supplies test settings.
        img (str or np.ndarray): Image filename or already-loaded image.
        img_transform: Preprocessing transform applied before inference.
        device: Device on which the prepared data is placed.

    Returns:
        The raw detection result produced by the model.
    """
    loaded = mmcv.imread(img)
    batch = _prepare_data(loaded, img_transform, model.cfg, device)
    # Inference only: gradients are never needed here.
    with torch.no_grad():
        return model(return_loss=False, rescale=True, **batch)