Python mmcv.impad() Examples
The following are 30 code examples of mmcv.impad(), collected from open source projects. You can go to the original project or source file by following the link above each example. You may also want to check out all available functions and classes of the mmcv module.
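Before the project examples, here is a minimal standalone sketch of how mmcv.impad() and mmcv.impad_to_multiple() are typically called. It assumes a recent mmcv release where impad accepts a shape keyword (older versions take the target shape positionally, as several examples below do), and the image is synthetic, for illustration only.

import numpy as np
import mmcv

# Synthetic 100x150 BGR image used only for illustration.
img = np.random.randint(0, 256, (100, 150, 3), dtype=np.uint8)

# Pad to a fixed (height, width); pad_val fills the new border pixels.
padded = mmcv.impad(img, shape=(128, 160), pad_val=0)
print(padded.shape)  # (128, 160, 3)

# Pad so that height and width become multiples of a divisor,
# as the Pad transform below does with size_divisor.
padded_div = mmcv.impad_to_multiple(img, 32, pad_val=0)
print(padded_div.shape)  # (128, 160, 3)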
Example #1
Source File: transforms.py From ttfnet with Apache License 2.0 | 6 votes |
def __call__(self, results):
    if results['keep_ratio']:
        gt_seg = mmcv.imrescale(
            results['gt_semantic_seg'],
            results['scale'],
            interpolation='nearest')
    else:
        gt_seg = mmcv.imresize(
            results['gt_semantic_seg'],
            results['scale'],
            interpolation='nearest')
    if results['flip']:
        gt_seg = mmcv.imflip(gt_seg)
    if gt_seg.shape != results['pad_shape']:
        gt_seg = mmcv.impad(gt_seg, results['pad_shape'][:2])
    if self.scale_factor != 1:
        gt_seg = mmcv.imrescale(
            gt_seg, self.scale_factor, interpolation='nearest')
    results['gt_semantic_seg'] = gt_seg
    return results
Example #2
Source File: transforms.py From IoU-Uniform-R-CNN with Apache License 2.0 | 6 votes |
def __call__(self, results):
    if results['keep_ratio']:
        gt_seg = mmcv.imrescale(
            results['gt_semantic_seg'],
            results['scale'],
            interpolation='nearest')
    else:
        gt_seg = mmcv.imresize(
            results['gt_semantic_seg'],
            results['scale'],
            interpolation='nearest')
    if results['flip']:
        gt_seg = mmcv.imflip(gt_seg)
    if gt_seg.shape != results['pad_shape']:
        gt_seg = mmcv.impad(gt_seg, results['pad_shape'][:2])
    if self.scale_factor != 1:
        gt_seg = mmcv.imrescale(
            gt_seg, self.scale_factor, interpolation='nearest')
    results['gt_semantic_seg'] = gt_seg
    return results
Example #3
Source File: transforms.py From RDSNet with Apache License 2.0 | 6 votes |
def __call__(self, results):
    if results['keep_ratio']:
        gt_seg = mmcv.imrescale(
            results['gt_semantic_seg'],
            results['scale'],
            interpolation='nearest')
    else:
        gt_seg = mmcv.imresize(
            results['gt_semantic_seg'],
            results['scale'],
            interpolation='nearest')
    if results['flip']:
        gt_seg = mmcv.imflip(gt_seg)
    if gt_seg.shape != results['pad_shape']:
        gt_seg = mmcv.impad(gt_seg, results['pad_shape'][:2])
    if self.scale_factor != 1:
        gt_seg = mmcv.imrescale(
            gt_seg, self.scale_factor, interpolation='nearest')
    results['gt_semantic_seg'] = gt_seg
    return results
Example #4
Source File: transforms.py From Feature-Selective-Anchor-Free-Module-for-Single-Shot-Object-Detection with Apache License 2.0 | 6 votes |
def __call__(self, results):
    if results['keep_ratio']:
        gt_seg = mmcv.imrescale(
            results['gt_semantic_seg'],
            results['scale'],
            interpolation='nearest')
    else:
        gt_seg = mmcv.imresize(
            results['gt_semantic_seg'],
            results['scale'],
            interpolation='nearest')
    if results['flip']:
        gt_seg = mmcv.imflip(gt_seg)
    if gt_seg.shape != results['pad_shape']:
        gt_seg = mmcv.impad(gt_seg, results['pad_shape'][:2])
    if self.scale_factor != 1:
        gt_seg = mmcv.imrescale(
            gt_seg, self.scale_factor, interpolation='nearest')
    results['gt_semantic_seg'] = gt_seg
    return results
Example #5
Source File: transforms.py From Cascade-RPN with Apache License 2.0 | 6 votes |
def __call__(self, results):
    if results['keep_ratio']:
        gt_seg = mmcv.imrescale(
            results['gt_semantic_seg'],
            results['scale'],
            interpolation='nearest')
    else:
        gt_seg = mmcv.imresize(
            results['gt_semantic_seg'],
            results['scale'],
            interpolation='nearest')
    if results['flip']:
        gt_seg = mmcv.imflip(gt_seg)
    if gt_seg.shape != results['pad_shape']:
        gt_seg = mmcv.impad(gt_seg, results['pad_shape'][:2])
    if self.scale_factor != 1:
        gt_seg = mmcv.imrescale(
            gt_seg, self.scale_factor, interpolation='nearest')
    results['gt_semantic_seg'] = gt_seg
    return results
Example #6
Source File: transforms.py From kaggle-kuzushiji-recognition with MIT License | 6 votes |
def __call__(self, results):
    if results['keep_ratio']:
        gt_seg = mmcv.imrescale(
            results['gt_semantic_seg'],
            results['scale'],
            interpolation='nearest')
    else:
        gt_seg = mmcv.imresize(
            results['gt_semantic_seg'],
            results['scale'],
            interpolation='nearest')
    if results['flip']:
        gt_seg = mmcv.imflip(gt_seg)
    if gt_seg.shape != results['pad_shape']:
        gt_seg = mmcv.impad(gt_seg, results['pad_shape'][:2])
    if self.scale_factor != 1:
        gt_seg = mmcv.imrescale(
            gt_seg, self.scale_factor, interpolation='nearest')
    results['gt_semantic_seg'] = gt_seg
    return results
Example #7
Source File: transforms.py From Feature-Selective-Anchor-Free-Module-for-Single-Shot-Object-Detection with Apache License 2.0 | 5 votes |
def _pad_img(self, results):
    if self.size is not None:
        padded_img = mmcv.impad(results['img'], self.size)
    elif self.size_divisor is not None:
        padded_img = mmcv.impad_to_multiple(
            results['img'], self.size_divisor, pad_val=self.pad_val)
    results['img'] = padded_img
    results['pad_shape'] = padded_img.shape
    results['pad_fixed_size'] = self.size
    results['pad_size_divisor'] = self.size_divisor
Example #8
Source File: transforms.py From Libra_R-CNN with Apache License 2.0 | 5 votes |
def __call__(self, masks, pad_shape, scale_factor, flip=False):
    masks = [
        mmcv.imrescale(mask, scale_factor, interpolation='nearest')
        for mask in masks
    ]
    if flip:
        masks = [mask[:, ::-1] for mask in masks]
    padded_masks = [
        mmcv.impad(mask, pad_shape[:2], pad_val=0) for mask in masks
    ]
    padded_masks = np.stack(padded_masks, axis=0)
    return padded_masks
Example #9
Source File: transforms.py From FoveaBox with Apache License 2.0 | 5 votes |
def __call__(self, masks, pad_shape, scale_factor, flip=False):
    # aspect ratio unchanged
    if isinstance(scale_factor, float):
        masks = [
            mmcv.imrescale(mask, scale_factor, interpolation='nearest')
            for mask in masks
        ]
    # aspect ratio changed
    else:
        w_ratio, h_ratio = scale_factor[:2]
        if masks:
            h, w = masks[0].shape[:2]
            new_h = int(np.round(h * h_ratio))
            new_w = int(np.round(w * w_ratio))
            new_size = (new_w, new_h)
            masks = [
                mmcv.imresize(mask, new_size, interpolation='nearest')
                for mask in masks
            ]
    if flip:
        masks = [mask[:, ::-1] for mask in masks]
    padded_masks = [
        mmcv.impad(mask, pad_shape[:2], pad_val=0) for mask in masks
    ]
    padded_masks = np.stack(padded_masks, axis=0)
    return padded_masks
Example #10
Source File: transforms.py From Cascade-RPN with Apache License 2.0 | 5 votes |
def __call__(self, masks, pad_shape, scale_factor, flip=False):
    # aspect ratio unchanged
    if isinstance(scale_factor, float):
        masks = [
            mmcv.imrescale(mask, scale_factor, interpolation='nearest')
            for mask in masks
        ]
    # aspect ratio changed
    else:
        w_ratio, h_ratio = scale_factor[:2]
        if masks:
            h, w = masks[0].shape[:2]
            new_h = int(np.round(h * h_ratio))
            new_w = int(np.round(w * w_ratio))
            new_size = (new_w, new_h)
            masks = [
                mmcv.imresize(mask, new_size, interpolation='nearest')
                for mask in masks
            ]
    if flip:
        masks = [mask[:, ::-1] for mask in masks]
    padded_masks = [
        mmcv.impad(mask, pad_shape[:2], pad_val=0) for mask in masks
    ]
    padded_masks = np.stack(padded_masks, axis=0)
    return padded_masks
Example #11
Source File: transforms.py From Cascade-RPN with Apache License 2.0 | 5 votes |
def _pad_img(self, results):
    if self.size is not None:
        padded_img = mmcv.impad(results['img'], self.size)
    elif self.size_divisor is not None:
        padded_img = mmcv.impad_to_multiple(
            results['img'], self.size_divisor, pad_val=self.pad_val)
    results['img'] = padded_img
    results['pad_shape'] = padded_img.shape
    results['pad_fixed_size'] = self.size
    results['pad_size_divisor'] = self.size_divisor
Example #12
Source File: transforms.py From Cascade-RPN with Apache License 2.0 | 5 votes |
def _pad_masks(self, results):
    pad_shape = results['pad_shape'][:2]
    for key in results.get('mask_fields', []):
        padded_masks = [
            mmcv.impad(mask, pad_shape, pad_val=self.pad_val)
            for mask in results[key]
        ]
        results[key] = np.stack(padded_masks, axis=0)
Example #13
Source File: transforms.py From Feature-Selective-Anchor-Free-Module-for-Single-Shot-Object-Detection with Apache License 2.0 | 5 votes |
def __call__(self, masks, pad_shape, scale_factor, flip=False):
    # aspect ratio unchanged
    if isinstance(scale_factor, float):
        masks = [
            mmcv.imrescale(mask, scale_factor, interpolation='nearest')
            for mask in masks
        ]
    # aspect ratio changed
    else:
        w_ratio, h_ratio = scale_factor[:2]
        if masks:
            h, w = masks[0].shape[:2]
            new_h = int(np.round(h * h_ratio))
            new_w = int(np.round(w * w_ratio))
            new_size = (new_w, new_h)
            masks = [
                mmcv.imresize(mask, new_size, interpolation='nearest')
                for mask in masks
            ]
    if flip:
        masks = [mask[:, ::-1] for mask in masks]
    padded_masks = [
        mmcv.impad(mask, pad_shape[:2], pad_val=0) for mask in masks
    ]
    padded_masks = np.stack(padded_masks, axis=0)
    return padded_masks
Example #14
Source File: transforms.py From AugFPN with Apache License 2.0 | 5 votes |
def __call__(self, masks, pad_shape, scale_factor, flip=False):
    masks = [
        mmcv.imrescale(mask, scale_factor, interpolation='nearest')
        for mask in masks
    ]
    if flip:
        masks = [mask[:, ::-1] for mask in masks]
    padded_masks = [
        mmcv.impad(mask, pad_shape[:2], pad_val=0) for mask in masks
    ]
    padded_masks = np.stack(padded_masks, axis=0)
    return padded_masks
Example #15
Source File: transforms.py From Feature-Selective-Anchor-Free-Module-for-Single-Shot-Object-Detection with Apache License 2.0 | 5 votes |
def _pad_masks(self, results):
    pad_shape = results['pad_shape'][:2]
    for key in results.get('mask_fields', []):
        padded_masks = [
            mmcv.impad(mask, pad_shape, pad_val=self.pad_val)
            for mask in results[key]
        ]
        results[key] = np.stack(padded_masks, axis=0)
Example #16
Source File: transforms.py From kaggle-imaterialist with MIT License | 5 votes |
def __call__(self, masks, pad_shape, scale_factor, flip=False):
    masks = [
        mmcv.imrescale(mask, scale_factor, interpolation='nearest')
        for mask in masks
    ]
    if flip:
        masks = [mask[:, ::-1] for mask in masks]
    padded_masks = [
        mmcv.impad(mask, pad_shape[:2], pad_val=0) for mask in masks
    ]
    padded_masks = np.stack(padded_masks, axis=0)
    return padded_masks
Example #17
Source File: transforms.py From hrnet with MIT License | 5 votes |
def __call__(self, masks, pad_shape, scale_factor, flip=False):
    masks = [
        mmcv.imrescale(mask, scale_factor, interpolation='nearest')
        for mask in masks
    ]
    if flip:
        masks = [mask[:, ::-1] for mask in masks]
    padded_masks = [
        mmcv.impad(mask, pad_shape[:2], pad_val=0) for mask in masks
    ]
    padded_masks = np.stack(padded_masks, axis=0)
    return padded_masks
Example #18
Source File: transforms.py From CenterNet with Apache License 2.0 | 5 votes |
def __call__(self, masks, pad_shape, scale_factor, flip=False):
    masks = [
        mmcv.imrescale(mask, scale_factor, interpolation='nearest')
        for mask in masks
    ]
    if flip:
        masks = [mask[:, ::-1] for mask in masks]
    padded_masks = [
        mmcv.impad(mask, pad_shape[:2], pad_val=0) for mask in masks
    ]
    padded_masks = np.stack(padded_masks, axis=0)
    return padded_masks
Example #19
Source File: transforms.py From ttfnet with Apache License 2.0 | 5 votes |
def _pad_img(self, results):
    if self.size is not None:
        padded_img = mmcv.impad(results['img'], self.size)
    elif self.size_divisor is not None:
        padded_img = mmcv.impad_to_multiple(
            results['img'], self.size_divisor, pad_val=self.pad_val)
    results['img'] = padded_img
    results['pad_shape'] = padded_img.shape
    results['pad_fixed_size'] = self.size
    results['pad_size_divisor'] = self.size_divisor
Example #20
Source File: transforms.py From ttfnet with Apache License 2.0 | 5 votes |
def _pad_masks(self, results):
    pad_shape = results['pad_shape'][:2]
    for key in results.get('mask_fields', []):
        padded_masks = [
            mmcv.impad(mask, pad_shape, pad_val=self.pad_val)
            for mask in results[key]
        ]
        results[key] = np.stack(padded_masks, axis=0)
Example #21
Source File: transforms.py From mmdetection with Apache License 2.0 | 5 votes |
def _pad_img(self, results): """Pad images according to ``self.size``.""" for key in results.get('img_fields', ['img']): if self.size is not None: padded_img = mmcv.impad( results[key], shape=self.size, pad_val=self.pad_val) elif self.size_divisor is not None: padded_img = mmcv.impad_to_multiple( results[key], self.size_divisor, pad_val=self.pad_val) results[key] = padded_img results['pad_shape'] = padded_img.shape results['pad_fixed_size'] = self.size results['pad_size_divisor'] = self.size_divisor
Example #22
Source File: transforms.py From kaggle-kuzushiji-recognition with MIT License | 5 votes |
def _pad_img(self, results):
    if self.size is not None:
        padded_img = mmcv.impad(results['img'], self.size)
    elif self.size_divisor is not None:
        padded_img = mmcv.impad_to_multiple(
            results['img'], self.size_divisor, pad_val=self.pad_val)
    results['img'] = padded_img
    results['pad_shape'] = padded_img.shape
    results['pad_fixed_size'] = self.size
    results['pad_size_divisor'] = self.size_divisor
Example #23
Source File: transforms.py From mmdetection with Apache License 2.0 | 5 votes |
def _pad_seg(self, results): """Pad semantic segmentation map according to ``results['pad_shape']``.""" for key in results.get('seg_fields', []): results[key] = mmcv.impad( results[key], shape=results['pad_shape'][:2])
Example #24
Source File: structures.py From mmdetection with Apache License 2.0 | 5 votes |
def pad(self, out_shape, pad_val=0): """See :func:`BaseInstanceMasks.pad`.""" if len(self.masks) == 0: padded_masks = np.empty((0, *out_shape), dtype=np.uint8) else: padded_masks = np.stack([ mmcv.impad(mask, shape=out_shape, pad_val=pad_val) for mask in self.masks ]) return BitmapMasks(padded_masks, *out_shape)
Example #25
Source File: transforms.py From AerialDetection with Apache License 2.0 | 5 votes |
def __call__(self, masks, pad_shape, scale_factor, flip=False):
    masks = [
        mmcv.imrescale(mask, scale_factor, interpolation='nearest')
        for mask in masks
    ]
    if flip:
        masks = [mask[:, ::-1] for mask in masks]
    padded_masks = [
        mmcv.impad(mask, pad_shape[:2], pad_val=0) for mask in masks
    ]
    padded_masks = np.stack(padded_masks, axis=0)
    return padded_masks
Example #26
Source File: transforms.py From GCNet with Apache License 2.0 | 5 votes |
def __call__(self, masks, pad_shape, scale_factor, flip=False):
    masks = [
        mmcv.imrescale(mask, scale_factor, interpolation='nearest')
        for mask in masks
    ]
    if flip:
        masks = [mask[:, ::-1] for mask in masks]
    padded_masks = [
        mmcv.impad(mask, pad_shape[:2], pad_val=0) for mask in masks
    ]
    padded_masks = np.stack(padded_masks, axis=0)
    return padded_masks
Example #27
Source File: transforms.py From mmdetection-annotated with Apache License 2.0 | 5 votes |
def __call__(self, masks, pad_shape, scale_factor, flip=False):
    masks = [
        mmcv.imrescale(mask, scale_factor, interpolation='nearest')
        for mask in masks
    ]
    if flip:
        masks = [mask[:, ::-1] for mask in masks]
    padded_masks = [
        mmcv.impad(mask, pad_shape[:2], pad_val=0) for mask in masks
    ]
    padded_masks = np.stack(padded_masks, axis=0)
    return padded_masks
Example #28
Source File: transforms.py From mmdetection_with_SENet154 with Apache License 2.0 | 5 votes |
def __call__(self, masks, pad_shape, scale_factor, flip=False):
    masks = [
        mmcv.imrescale(mask, scale_factor, interpolation='nearest')
        for mask in masks
    ]
    if flip:
        masks = [mask[:, ::-1] for mask in masks]
    padded_masks = [
        mmcv.impad(mask, pad_shape[:2], pad_val=0) for mask in masks
    ]
    padded_masks = np.stack(padded_masks, axis=0)
    return padded_masks
Example #29
Source File: transforms.py From PolarMask with Apache License 2.0 | 5 votes |
def __call__(self, masks, pad_shape, scale_factor, flip=False):
    # aspect ratio unchanged
    if isinstance(scale_factor, float):
        masks = [
            mmcv.imrescale(mask, scale_factor, interpolation='nearest')
            for mask in masks
        ]
    # aspect ratio changed
    else:
        w_ratio, h_ratio = scale_factor[:2]
        if masks:
            h, w = masks[0].shape[:2]
            new_h = int(np.round(h * h_ratio))
            new_w = int(np.round(w * w_ratio))
            new_size = (new_w, new_h)
            masks = [
                mmcv.imresize(mask, new_size, interpolation='nearest')
                for mask in masks
            ]
    if flip:
        masks = [mask[:, ::-1] for mask in masks]
    padded_masks = [
        mmcv.impad(mask, pad_shape[:2], pad_val=0) for mask in masks
    ]
    padded_masks = np.stack(padded_masks, axis=0)
    return padded_masks
Example #30
Source File: transforms.py From kaggle-kuzushiji-recognition with MIT License | 5 votes |
def __call__(self, masks, pad_shape, scale_factor, flip=False):
    # aspect ratio unchanged
    if isinstance(scale_factor, float):
        masks = [
            mmcv.imrescale(mask, scale_factor, interpolation='nearest')
            for mask in masks
        ]
    # aspect ratio changed
    else:
        w_ratio, h_ratio = scale_factor[:2]
        if masks:
            h, w = masks[0].shape[:2]
            new_h = int(np.round(h * h_ratio))
            new_w = int(np.round(w * w_ratio))
            new_size = (new_w, new_h)
            masks = [
                mmcv.imresize(mask, new_size, interpolation='nearest')
                for mask in masks
            ]
    if flip:
        masks = [mask[:, ::-1] for mask in masks]
    padded_masks = [
        mmcv.impad(mask, pad_shape[:2], pad_val=0) for mask in masks
    ]
    padded_masks = np.stack(padded_masks, axis=0)
    return padded_masks