Python albumentations.RandomGamma() Examples

The following are 4 code examples of albumentations.RandomGamma(), each drawn from an open-source project. The source file, originating project, and license for each example are noted in its header. You may also want to check out the other available functions and classes of the albumentations module.
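Before the project examples, here is a minimal self-contained sketch of RandomGamma on its own; the image is a dummy array and the gamma_limit shown is the library's default range, both chosen purely for illustration.

import numpy as np
import albumentations as A

# Dummy RGB image standing in for real input data.
image = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)

# p=1.0 forces the transform to fire; gamma_limit=(80, 120) is the library default range.
transform = A.Compose([A.RandomGamma(gamma_limit=(80, 120), p=1.0)])
augmented = transform(image=image)['image']  # same shape and dtype, gamma-adjusted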
Example #1
Source File: dataset.py    From kaggle-kuzushiji-2019 with MIT License
from typing import Callable
import numpy as np
import albumentations as A
from albumentations.pytorch import ToTensor  # assumed import path (albumentations < 1.0 API)


def get_transform(train: bool) -> Callable:
    train_initial_size = 2048
    crop_min_max_height = (400, 533)
    crop_width = 512
    crop_height = 384
    if train:
        # Resize to a fixed longest side, take a scale-varying random crop, then apply light color jitter.
        transforms = [
            A.LongestMaxSize(max_size=train_initial_size),
            A.RandomSizedCrop(
                min_max_height=crop_min_max_height,
                width=crop_width,
                height=crop_height,
                w2h_ratio=crop_width / crop_height,
            ),
            A.HueSaturationValue(
                hue_shift_limit=7,
                sat_shift_limit=10,
                val_shift_limit=10,
            ),
            A.RandomBrightnessContrast(),
            A.RandomGamma(),
        ]
    else:
        # Pick a test size so full images roughly match the resolution seen by the training crops.
        test_size = int(train_initial_size *
                        crop_height / np.mean(crop_min_max_height))
        print(f'Test image max size {test_size} px')
        transforms = [
            A.LongestMaxSize(max_size=test_size),
        ]
    transforms.extend([
        ToTensor(),
    ])
    return A.Compose(
        transforms,
        bbox_params={
            'format': 'coco',
            'min_area': 0,
            'min_visibility': 0.5,
            'label_fields': ['labels'],
        },
    ) 
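A hypothetical call to get_transform above might look like the following; the image, box, and label values are dummies, and boxes use the COCO [x_min, y_min, width, height] convention declared in bbox_params.

import numpy as np

transform = get_transform(train=True)
sample = transform(
    image=np.zeros((1024, 768, 3), dtype=np.uint8),  # dummy image
    bboxes=[[50, 60, 200, 150]],                     # COCO format: x, y, w, h
    labels=[1],                                      # matches label_fields=['labels']
)
image_tensor, boxes = sample['image'], sample['bboxes']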
Example #2
Source File: augmentation.py    From EfficientDet.Pytorch with MIT License
import albumentations as albu
from albumentations.pytorch import ToTensor  # assumed import path (albumentations < 1.0 API)


def get_augumentation(phase, width=512, height=512, min_area=0., min_visibility=0.):
    list_transforms = []
    if phase == 'train':
        list_transforms.extend([
            albu.augmentations.transforms.LongestMaxSize(
                max_size=width, always_apply=True),
            albu.PadIfNeeded(min_height=height, min_width=width,
                             always_apply=True, border_mode=0, value=[0, 0, 0]),
            albu.augmentations.transforms.RandomResizedCrop(
                height=height,
                width=width, p=0.3),
            albu.augmentations.transforms.Flip(),
            albu.augmentations.transforms.Transpose(),
            # One of: brightness/contrast jitter, gamma jitter, or no change.
            albu.OneOf([
                albu.RandomBrightnessContrast(brightness_limit=0.5,
                                              contrast_limit=0.4),
                albu.RandomGamma(gamma_limit=(50, 150)),
                albu.NoOp()
            ]),
            # One of: RGB shift, hue/saturation shift, or no change.
            albu.OneOf([
                albu.RGBShift(r_shift_limit=20, b_shift_limit=15,
                              g_shift_limit=15),
                albu.HueSaturationValue(hue_shift_limit=5,
                                        sat_shift_limit=5),
                albu.NoOp()
            ]),
            albu.CLAHE(p=0.8),
            albu.HorizontalFlip(p=0.5),
            albu.VerticalFlip(p=0.5),
        ])
    if(phase == 'test' or phase == 'valid'):
        list_transforms.extend([
            albu.Resize(height=height, width=width)
        ])
    list_transforms.extend([
        albu.Normalize(mean=(0.485, 0.456, 0.406),
                       std=(0.229, 0.224, 0.225), p=1),
        ToTensor()
    ])
    if(phase == 'test'):
        return albu.Compose(list_transforms)
    return albu.Compose(list_transforms,
                        bbox_params=albu.BboxParams(format='pascal_voc',
                                                    min_area=min_area,
                                                    min_visibility=min_visibility,
                                                    label_fields=['category_id']))
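A hypothetical call for the training phase; the values below are illustrative, boxes use the pascal_voc [x_min, y_min, x_max, y_max] convention declared above, and the label field must be named category_id.

import numpy as np

train_tfms = get_augumentation('train', width=512, height=512)
sample = train_tfms(
    image=np.zeros((300, 400, 3), dtype=np.uint8),  # dummy image
    bboxes=[[30, 40, 200, 220]],                    # pascal_voc: x_min, y_min, x_max, y_max
    category_id=[3],                                # matches label_fields=['category_id']
)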
Example #3
Source File: dataset.py    From kaggle-kuzushiji-2019 with MIT License
from typing import Callable
import albumentations as A
from albumentations.pytorch import ToTensor  # assumed import path (albumentations < 1.0 API)
# LongestMaxSizeRandomSizedCrop is a project-specific transform, not a stock albumentations class.


def get_transform(
        *,
        train: bool,
        test_height: int,
        crop_width: int,
        crop_height: int,
        scale_aug: float,
        color_hue_aug: int,
        color_sat_aug: int,
        color_val_aug: int,
        normalize: bool = True,
        ) -> Callable:
    train_initial_size = 3072  # this value should not matter any more?
    crop_ratio = crop_height / test_height
    # Vary the crop height by +/- scale_aug around the nominal train-time crop size.
    crop_min_max_height = tuple(
        int(train_initial_size * crop_ratio * (1 + sign * scale_aug))
        for sign in [-1, 1])
    if train:
        transforms = [
            LongestMaxSizeRandomSizedCrop(
                max_size=train_initial_size,
                min_max_height=crop_min_max_height,
                width=crop_width,
                height=crop_height,
                w2h_ratio=crop_width / crop_height,
            ),
            A.HueSaturationValue(
                hue_shift_limit=color_hue_aug,
                sat_shift_limit=color_sat_aug,
                val_shift_limit=color_val_aug,
            ),
            A.RandomBrightnessContrast(),
            A.RandomGamma(),
        ]
    else:
        transforms = [
            A.LongestMaxSize(max_size=test_height),
        ]
    if normalize:
        transforms.append(A.Normalize())
    transforms.extend([
        ToTensor(),
    ])
    return A.Compose(
        transforms,
        bbox_params={
            'format': 'coco',
            'min_area': 0,
            'min_visibility': 0.99,
            'label_fields': ['labels'],
        },
    ) 
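All parameters of this version are keyword-only; a hypothetical call (every value below is illustrative, not taken from the original project) could look like:

transform = get_transform(
    train=True,
    test_height=2560,
    crop_width=512,
    crop_height=384,
    scale_aug=0.3,
    color_hue_aug=7,
    color_sat_aug=10,
    color_val_aug=10,
)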
Example #4
Source File: apolloscape.py    From pytorch-segmentation with MIT License
def __init__(self,
             base_dir='../../data/apolloscape',
             road_record_list=[{'road': 'road02_seg', 'record': [22, 23, 24, 25, 26]},
                               {'road': 'road03_seg', 'record': [7, 8, 9, 10, 11, 12]}],
             split='train',
             ignore_index=255,
             debug=False):
        self.debug = debug
        self.base_dir = Path(base_dir)
        self.ignore_index = ignore_index
        self.split = split
        self.img_paths = []
        self.lbl_paths = []

        # Collect image/label path pairs for every requested road and record id.
        for road_record in road_record_list:
          self.road_dir = self.base_dir / Path(road_record['road'])
          self.record_list = road_record['record']

          for record in self.record_list:
            img_paths_tmp = self.road_dir.glob(f'ColorImage/Record{record:03}/Camera 5/*.jpg')
            lbl_paths_tmp = self.road_dir.glob(f'Label/Record{record:03}/Camera 5/*.png')

            img_paths_basenames = {Path(img_path.name).stem for img_path in img_paths_tmp}
            lbl_paths_basenames = {Path(lbl_path.name).stem.replace('_bin', '') for lbl_path in lbl_paths_tmp}

            intersection_basenames = img_paths_basenames & lbl_paths_basenames

            img_paths_intersection = [self.road_dir / Path(f'ColorImage/Record{record:03}/Camera 5/{intersection_basename}.jpg')
                                      for intersection_basename in intersection_basenames]
            lbl_paths_intersection = [self.road_dir / Path(f'Label/Record{record:03}/Camera 5/{intersection_basename}_bin.png')
                                      for intersection_basename in intersection_basenames]

            self.img_paths += img_paths_intersection
            self.lbl_paths += lbl_paths_intersection

        self.img_paths.sort()
        self.lbl_paths.sort()
        print(len(self.img_paths), len(self.lbl_paths))
        assert len(self.img_paths) == len(self.lbl_paths)

        self.resizer = albu.Resize(height=512, width=1024)
        self.augmenter = albu.Compose([albu.HorizontalFlip(p=0.5),
                                       # albu.RandomRotate90(p=0.5),
                                       albu.Rotate(limit=10, p=0.5),
                                       # albu.CLAHE(p=0.2),
                                       # albu.RandomContrast(p=0.2),
                                       # albu.RandomBrightness(p=0.2),
                                       # albu.RandomGamma(p=0.2),
                                       # albu.GaussNoise(p=0.2),
                                       # albu.Cutout(p=0.2)
                                       ])
        self.img_transformer = transforms.Compose([transforms.ToTensor(),
                                                   transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                                        std=[0.229, 0.224, 0.225])])
        self.lbl_transformer = torch.LongTensor
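The excerpt shows only __init__; a hypothetical __getitem__ (not part of the original file) that chains the resizer, augmenter, and transformers could look like the sketch below. It assumes PIL's Image and numpy (as np) are imported at module level, and it skips any label-ID remapping the original dataset may perform.

def __getitem__(self, index):
    img = np.array(Image.open(self.img_paths[index]))  # H x W x 3 uint8 image
    lbl = np.array(Image.open(self.lbl_paths[index]))  # H x W label map

    # Resize image and mask together, then augment only during training.
    resized = self.resizer(image=img, mask=lbl)
    img, lbl = resized['image'], resized['mask']
    if self.split == 'train':
        augmented = self.augmenter(image=img, mask=lbl)
        img, lbl = augmented['image'], augmented['mask']

    return self.img_transformer(img), self.lbl_transformer(lbl)

def __len__(self):
    return len(self.img_paths)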