Python torchvision.transforms.functional.center_crop() Examples
The following are 30 code examples of torchvision.transforms.functional.center_crop().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module
torchvision.transforms.functional, or try the search function.
Example #1
Source File: transforms.py From ChaLearn_liveness_challenge with MIT License | 7 votes |
def __call__(self, img_dict):
    """Apply the crop selected by ``self.crop_index`` to the 'rgb', 'ir' and 'depth' images.

    Index 0 is a center crop; indices 1-4 select the top-left, top-right,
    bottom-left and bottom-right corner crops. Mutates and returns img_dict.
    Raises ValueError if the crop is larger than the image or the index is
    out of range.
    """
    for key in ('rgb', 'ir', 'depth'):
        img = img_dict[key]
        w, h = img.size
        crop_h, crop_w = self.size
        if crop_w > w or crop_h > h:
            raise ValueError("Requested crop size {} is bigger than input size {}".format(self.size, (h, w)))
        idx = self.crop_index
        if idx == 0:
            img_dict[key] = F.center_crop(img, (crop_h, crop_w))
        else:
            # PIL (left, upper, right, lower) boxes for the four corner crops.
            corner_boxes = {
                1: (0, 0, crop_w, crop_h),
                2: (w - crop_w, 0, w, crop_h),
                3: (0, h - crop_h, crop_w, h),
                4: (w - crop_w, h - crop_h, w, h),
            }
            if idx not in corner_boxes:
                raise ValueError("Requested crop index is not in range(5)")
            img_dict[key] = img.crop(corner_boxes[idx])
    return img_dict
Example #2
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #3
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #4
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #5
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #6
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #7
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #8
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #9
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #10
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #11
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #12
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #13
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #14
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #15
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #16
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #17
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #18
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #19
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #20
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #21
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #22
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #23
Source File: dataloader.py From Tag2Pix with MIT License | 6 votes |
def __call__(self, img1, img2):
    """Apply one identical augmentation chain to a pair of images.

    Both images are resized, then (each with 50% probability) horizontally
    flipped and rotated by a shared random angle with a center crop that
    removes the rotation borders; finally the same random resized crop is
    applied to both. Returns the transformed (img1, img2) tuple.
    """
    img1 = tvF.resize(img1, self.size, interpolation=Image.LANCZOS)
    img2 = tvF.resize(img2, self.size, interpolation=Image.LANCZOS)
    if random.random() < 0.5:
        img1 = tvF.hflip(img1)
        img2 = tvF.hflip(img2)
    if random.random() < 0.5:
        angle = random.uniform(-10, 10)
        # Fraction of the side length that stays free of rotation borders.
        keep_ratio = rot_crop(angle)
        img1 = tvF.rotate(img1, angle, resample=Image.BILINEAR)
        img2 = tvF.rotate(img2, angle, resample=Image.BILINEAR)
        img1 = tvF.center_crop(img1, int(img1.size[0] * keep_ratio))
        img2 = tvF.center_crop(img2, int(img2.size[0] * keep_ratio))
    # One set of crop parameters, reused so both images stay aligned.
    i, j, h, w = self.get_params(img1, self.scale, self.ratio)
    return (tvF.resized_crop(img1, i, j, h, w, self.size, self.interpolation),
            tvF.resized_crop(img2, i, j, h, w, self.size, self.interpolation))
Example #24
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #25
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #26
Source File: transforms.py From medicaltorch with Apache License 2.0 | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #27
Source File: transforms.py From ChaLearn_liveness_challenge with MIT License | 6 votes |
def __call__(self, img_dict):
    """Apply the crop selected by ``self.crop_index`` to the 'rgb', 'ir' and 'depth' images.

    Index 0 is a center crop; indices 1-4 select the top-left, top-right,
    bottom-left and bottom-right corner crops. Mutates and returns img_dict.
    Raises ValueError if the crop is larger than the image or the index is
    out of range.
    """
    for key in ('rgb', 'ir', 'depth'):
        img = img_dict[key]
        w, h = img.size
        crop_h, crop_w = self.size
        if crop_w > w or crop_h > h:
            raise ValueError("Requested crop size {} is bigger than input size {}".format(self.size, (h, w)))
        idx = self.crop_index
        if idx == 0:
            img_dict[key] = F.center_crop(img, (crop_h, crop_w))
        else:
            # PIL (left, upper, right, lower) boxes for the four corner crops.
            corner_boxes = {
                1: (0, 0, crop_w, crop_h),
                2: (w - crop_w, 0, w, crop_h),
                3: (0, h - crop_h, crop_w, h),
                4: (w - crop_w, h - crop_h, w, h),
            }
            if idx not in corner_boxes:
                raise ValueError("Requested crop index is not in range(5)")
            img_dict[key] = img.crop(corner_boxes[idx])
    return img_dict
Example #28
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def __call__(self, sample):
    """Center-crop the sample's input (and ground truth when labeled) to ``self.size``.

    Records the crop offsets and the original size via ``propagate_params``
    and, for labeled samples, under the ground-truth metadata key
    "__centercrop". Mutates and returns ``sample``.
    """
    img = sample['input']
    w, h = img.size
    th, tw = self.size
    # Top-left corner of the centered crop window, rounded to ints.
    fh = int(round((h - th) / 2.))
    fw = int(round((w - tw) / 2.))
    self.propagate_params(sample, (fh, fw, w, h))
    updates = {'input': F.center_crop(img, self.size)}
    if self.labeled:
        updates['gt'] = F.center_crop(sample['gt'], self.size)
        sample['gt_metadata']["__centercrop"] = (fh, fw, w, h)
    sample.update(updates)
    return sample
Example #29
Source File: cvfunctional.py From opencv_transforms_torchvision with MIT License | 6 votes |
def cv_transform(img):
    """Demo OpenCV-backed pipeline: add salt-and-pepper noise, then convert to a tensor.

    The commented calls below are alternative cv transforms kept as a menu
    for experimentation; uncomment to enable them.
    """
    # img = resize(img, size=(100, 300))
    # img = to_tensor(img)
    # img = normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    # img = pad(img, padding=(10, 10, 20, 20), fill=(255, 255, 255), padding_mode='constant')
    # img = pad(img, padding=(100, 100, 100, 100), fill=5, padding_mode='symmetric')
    # img = crop(img, -40, -20, 1000, 1000)
    # img = center_crop(img, (310, 300))
    # img = resized_crop(img, -10.3, -20, 330, 220, (500, 500))
    # img = hflip(img)
    # img = vflip(img)
    # tl, tr, bl, br, center = five_crop(img, 100)
    # img = adjust_brightness(img, 2.1)
    # img = adjust_contrast(img, 1.5)
    # img = adjust_saturation(img, 2.3)
    # img = adjust_hue(img, 0.5)
    # img = adjust_gamma(img, gamma=3, gain=0.1)
    # img = rotate(img, 10, resample='BILINEAR', expand=True, center=None)
    # img = to_grayscale(img, 3)
    # img = affine(img, 10, (0, 0), 1, 0, resample='BICUBIC', fillcolor=(255,255,0))
    # img = gaussion_noise(img)
    # img = poisson_noise(img)
    noisy = salt_and_pepper(img)
    return to_tensor(noisy)
Example #30
Source File: cvfunctional.py From opencv_transforms_torchvision with MIT License | 6 votes |
def pil_transform(img):
    """Demo PIL/torchvision pipeline: convert the image to a tensor.

    The commented calls below are the torchvision.functional counterparts of
    the cv transforms, kept as a menu for experimentation.
    """
    # img = functional.resize(img, size=(100, 300))
    # img = functional.to_tensor(img)
    # img = functional.normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    # img = functional.pad(img, padding=(10, 10, 20, 20), fill=(255, 255, 255), padding_mode='constant')
    # img = functional.pad(img, padding=(100, 100, 100, 100), padding_mode='symmetric')
    # img = functional.crop(img, -40, -20, 1000, 1000)
    # img = functional.center_crop(img, (310, 300))
    # img = functional.resized_crop(img, -10.3, -20, 330, 220, (500, 500))
    # img = functional.hflip(img)
    # img = functional.vflip(img)
    # tl, tr, bl, br, center = functional.five_crop(img, 100)
    # img = functional.adjust_brightness(img, 2.1)
    # img = functional.adjust_contrast(img, 1.5)
    # img = functional.adjust_saturation(img, 2.3)
    # img = functional.adjust_hue(img, 0.5)
    # img = functional.adjust_gamma(img, gamma=3, gain=0.1)
    # img = functional.rotate(img, 10, resample=PIL.Image.BILINEAR, expand=True, center=None)
    # img = functional.to_grayscale(img, 3)
    # img = functional.affine(img, 10, (0, 0), 1, 0, resample=PIL.Image.BICUBIC, fillcolor=(255,255,0))
    return functional.to_tensor(img)