Python torchvision.transforms.functional.to_grayscale() Examples

The following are 10 code examples of torchvision.transforms.functional.to_grayscale(). You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torchvision.transforms.functional, or try the search function.
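Before the examples, here is a minimal, self-contained sketch of how torchvision.transforms.functional.to_grayscale() is typically called on a PIL image (the solid-color test image is only a stand-in for real data):

from PIL import Image
import torchvision.transforms.functional as F

img = Image.new('RGB', (64, 64), color=(200, 120, 40))  # stand-in for a real photo

gray_1ch = F.to_grayscale(img)                           # mode 'L', single channel
gray_3ch = F.to_grayscale(img, num_output_channels=3)    # 3 channels with r == g == b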
Example #1
Source File: cvfunctional.py    From opencv_transforms_torchvision with MIT License    7 votes
def to_grayscale(img, num_output_channels=1):
    """Convert image to grayscale version of image.

    Args:
        img (np.ndarray): Image to be converted to grayscale.

    Returns:
        CV Image:  Grayscale version of the image.
                    if num_output_channels == 1 : returned image is single channel
                    if num_output_channels == 3 : returned image is 3 channel with r == g == b
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be CV Image. Got {}'.format(type(img)))

    if num_output_channels == 1:
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    elif num_output_channels == 3:
        img = cv2.cvtColor(cv2.cvtColor(img, cv2.COLOR_RGB2GRAY), cv2.COLOR_GRAY2RGB)
    else:
        raise ValueError('num_output_channels should be either 1 or 3')

    return img 
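A rough usage sketch for the OpenCV-based helper above, assuming cv2 is installed and the function is importable from the cvfunctional module; the image path is a placeholder:

import cv2
# from cvfunctional import to_grayscale  # import path assumed

img = cv2.imread('example.jpg')                  # placeholder path; loads BGR
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)       # the helper expects RGB input
gray = to_grayscale(img, num_output_channels=3)  # np.ndarray with r == g == b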
Example #2
Source File: cvfunctional.py    From opencv_transforms_torchvision with MIT License    6 votes
def cv_transform(img):
    # img = resize(img, size=(100, 300))
    # img = to_tensor(img)
    # img = normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    # img = pad(img, padding=(10, 10, 20, 20), fill=(255, 255, 255), padding_mode='constant')
    # img = pad(img, padding=(100, 100, 100, 100), fill=5, padding_mode='symmetric')
    # img = crop(img, -40, -20, 1000, 1000)
    # img = center_crop(img, (310, 300))
    # img = resized_crop(img, -10.3, -20, 330, 220, (500, 500))
    # img = hflip(img)
    # img = vflip(img)
    # tl, tr, bl, br, center = five_crop(img, 100)
    # img = adjust_brightness(img, 2.1)
    # img = adjust_contrast(img, 1.5)
    # img = adjust_saturation(img, 2.3)
    # img = adjust_hue(img, 0.5)
    # img = adjust_gamma(img, gamma=3, gain=0.1)
    # img = rotate(img, 10, resample='BILINEAR', expand=True, center=None)
    # img = to_grayscale(img, 3)
    # img = affine(img, 10, (0, 0), 1, 0, resample='BICUBIC', fillcolor=(255,255,0))
    # img = gaussion_noise(img)
    # img = poisson_noise(img)
    img = salt_and_pepper(img)
    return to_tensor(img) 
Example #3
Source File: cvfunctional.py    From opencv_transforms_torchvision with MIT License    6 votes
def pil_transform(img):
    # img = functional.resize(img, size=(100, 300))
    # img = functional.to_tensor(img)
    # img = functional.normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    # img = functional.pad(img, padding=(10, 10, 20, 20), fill=(255, 255, 255), padding_mode='constant')
    # img = functional.pad(img, padding=(100, 100, 100, 100), padding_mode='symmetric')
    # img = functional.crop(img, -40, -20, 1000, 1000)
    # img = functional.center_crop(img, (310, 300))
    # img = functional.resized_crop(img, -10.3, -20, 330, 220, (500, 500))
    # img = functional.hflip(img)
    # img = functional.vflip(img)
    # tl, tr, bl, br, center = functional.five_crop(img, 100)
    # img = functional.adjust_brightness(img, 2.1)
    # img = functional.adjust_contrast(img, 1.5)
    # img = functional.adjust_saturation(img, 2.3)
    # img = functional.adjust_hue(img, 0.5)
    # img = functional.adjust_gamma(img, gamma=3, gain=0.1)
    # img = functional.rotate(img, 10, resample=PIL.Image.BILINEAR, expand=True, center=None)
    # img = functional.to_grayscale(img, 3)
    # img = functional.affine(img, 10, (0, 0), 1, 0, resample=PIL.Image.BICUBIC, fillcolor=(255,255,0))

    return functional.to_tensor(img) 
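cv_transform (Example #2) and pil_transform (Example #3) mirror each other: the same pipeline expressed with OpenCV/numpy helpers on one side and torchvision's PIL-based functional API on the other. Below is a hedged sketch of how one might compare the two on the same image, taking the two functions as given and using a placeholder path:

import cv2
from PIL import Image

cv_img = cv2.cvtColor(cv2.imread('example.jpg'), cv2.COLOR_BGR2RGB)  # np.ndarray, RGB order
pil_img = Image.open('example.jpg').convert('RGB')

cv_tensor = cv_transform(cv_img)     # Example #2, returns a torch.Tensor
pil_tensor = pil_transform(pil_img)  # Example #3, returns a torch.Tensor

# With the same deterministic operations enabled in both functions, the two
# tensors should agree up to small interpolation/rounding differences.
max_diff = (cv_tensor - pil_tensor).abs().max()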
Example #4
Source File: video_transforms.py    From pvse with MIT License    6 votes
def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be converted to grayscale.

        Returns:
            PIL Image: Grayscale version of the image.
        """
        return F.to_grayscale(img, num_output_channels=self.num_output_channels) 
Example #5
Source File: video_transforms.py    From pvse with MIT License    6 votes
def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be converted to grayscale.

        Returns:
            PIL Image: Randomly grayscaled image.
        """
        num_output_channels = 1 if img.mode == 'L' else 3
        if random.random() < self.p:
            return F.to_grayscale(img, num_output_channels=num_output_channels)
        return img 
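Examples #4 and #5 show only the __call__ methods; the enclosing classes are not reproduced on this page. Here is a minimal sketch of a random-grayscale transform class along the lines of Example #5 (the class name and constructor are assumptions for illustration, not necessarily the pvse source):

import random
import torchvision.transforms.functional as F

class RandomGrayscale(object):
    """Sketch: convert a PIL image to grayscale with probability p."""

    def __init__(self, p=0.1):
        self.p = p

    def __call__(self, img):
        # Preserve the channel count of the input: 1 for 'L' images, 3 otherwise.
        num_output_channels = 1 if img.mode == 'L' else 3
        if random.random() < self.p:
            return F.to_grayscale(img, num_output_channels=num_output_channels)
        return img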
Example #6
Source File: transforms.py    From ChaLearn_liveness_challenge with MIT License    5 votes
def __call__(self, img_dict):
        if random.random() < self.p:
            keys = ['rgb']
            for key in keys:
                num_output_channels = 1 if img_dict[key].mode == 'L' else 3
                img_dict[key] = F.to_grayscale(img_dict[key], num_output_channels=num_output_channels)
        return img_dict 
Example #7
Source File: transforms.py    From ChaLearn_liveness_challenge with MIT License    5 votes
def __call__(self, img_dict):
        if random.random() < self.p:
            keys = ['rgb']
            for key in keys:
                num_output_channels = 1 if img_dict[key].mode == 'L' else 3
                img_dict[key] = F.to_grayscale(img_dict[key], num_output_channels=num_output_channels)
        return img_dict 
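Examples #6 and #7 operate on a dictionary of images rather than a single image, grayscaling only the 'rgb' entry. A self-contained sketch of such a transform and its use on a multi-modal sample follows; the class name and the sample construction are assumptions for illustration:

import random
from PIL import Image
import torchvision.transforms.functional as F

class RandomGrayscaleDict(object):
    """Sketch: grayscale only the 'rgb' entry of a sample dict with probability p."""

    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, img_dict):
        if random.random() < self.p:
            for key in ['rgb']:
                num_output_channels = 1 if img_dict[key].mode == 'L' else 3
                img_dict[key] = F.to_grayscale(img_dict[key], num_output_channels=num_output_channels)
        return img_dict

sample = {'rgb': Image.new('RGB', (64, 64)), 'depth': Image.new('L', (64, 64))}
sample = RandomGrayscaleDict(p=0.5)(sample)  # only sample['rgb'] may be converted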
Example #8
Source File: transforms.py    From NWPU-Crowd-Sample-Code with MIT License    5 votes
def __call__(self, img):
        if random.random() < 0.1:
            return TrF.to_grayscale(img, num_output_channels=3)
        else:
            return img
Example #9
Source File: benchmark.py    From albumentations with MIT License    5 votes
def torchvision_transform(self, img):
        # In the source file, `torchvision` is (presumably) an alias for
        # torchvision.transforms.functional rather than the top-level package.
        return torchvision.to_grayscale(img, num_output_channels=3)
Example #10
Source File: dataloader.py    From mobile-hair-segmentation-pytorch with MIT License    4 votes
def transform(image, mask, image_size=224):
    # Resize
    resized_num = int(random.random() * image_size)
    resize = transforms.Resize(size=(image_size + resized_num, image_size + resized_num))
    image = resize(image)
    mask = resize(mask)

    # num_pad = int(random.random() * image_size)
    # image = TF.pad(image, num_pad, padding_mode='edge')
    # mask = TF.pad(mask, num_pad)

    # # Random crop
    # i, j, h, w = transforms.RandomCrop.get_params(
    #     image, output_size=(image_size, image_size))
    # image = TF.crop(image, i, j, h, w)
    # mask = TF.crop(mask, i, j, h, w)


    # # Random horizontal flipping
    # if random.random() > 0.5:
    #     image = TF.hflip(image)
    #     mask = TF.hflip(mask)
    #
    # # Random vertical flipping
    # if random.random() > 0.5:
    #     image = TF.vflip(image)
    #     mask = TF.vflip(mask)

    resize = transforms.Resize(size=(image_size, image_size))
    image = resize(image)
    mask = resize(mask)

    # Make gray scale image
    gray_image = TF.to_grayscale(image)

    # Transform to tensor
    image = TF.to_tensor(image)
    mask = TF.to_tensor(mask)
    gray_image = TF.to_tensor(gray_image)

    # Normalize Data
    image = TF.normalize(image, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))

    return image, gray_image, mask
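Example #10's transform() returns an (image, gray_image, mask) triple. A rough sketch of how it might be wired into a torch Dataset; the class name, path lists, and convert() modes are assumptions for illustration:

from PIL import Image
from torch.utils.data import Dataset

class HairDataset(Dataset):
    """Sketch: image/mask path pairs run through transform() from Example #10."""

    def __init__(self, image_paths, mask_paths, image_size=224):
        self.image_paths = image_paths
        self.mask_paths = mask_paths
        self.image_size = image_size

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        image = Image.open(self.image_paths[idx]).convert('RGB')
        mask = Image.open(self.mask_paths[idx]).convert('L')
        # transform() resizes both, builds a grayscale copy of the image,
        # converts everything to tensors and normalizes the RGB image.
        return transform(image, mask, image_size=self.image_size)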