Python albumentations.Normalize() Examples
The following are 6 code examples of albumentations.Normalize().
You may also want to check out all available functions and classes of the albumentations module.
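Before the examples, a minimal sketch of what Normalize() computes. By default it uses the ImageNet channel statistics with max_pixel_value=255, so each pixel becomes (pixel - mean * 255) / (std * 255). The input image here is a hypothetical random array, just to exercise the transform:

import numpy as np
import albumentations as A

# Hypothetical uint8 RGB image.
image = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)

# Defaults: mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225),
# max_pixel_value=255.0, so output = (image - mean * 255) / (std * 255).
normalized = A.Normalize()(image=image)['image']
print(normalized.dtype, normalized.shape)  # float32 (224, 224, 3)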
Example #1
Source File: cloud_transform.py From kaggle-understanding-clouds with BSD 2-Clause "Simplified" License
import cv2
import albumentations as albu


def get_training_augmentation(resize_to=(320, 640), crop_size=(288, 576)):
    print('[get_training_augmentation] crop_size:', crop_size, ', resize_to:', resize_to)
    train_transform = [
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        albu.ShiftScaleRotate(scale_limit=0.20, rotate_limit=10, shift_limit=0.1,
                              p=0.5, border_mode=cv2.BORDER_CONSTANT, value=0),
        albu.GridDistortion(p=0.5),
        albu.Resize(*resize_to),
        albu.RandomCrop(*crop_size),
        albu.ChannelShuffle(),
        albu.InvertImg(),
        albu.ToGray(),
        albu.Normalize(),  # ImageNet mean/std with max_pixel_value=255 by default
    ]
    return albu.Compose(train_transform)
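A usage sketch for the pipeline above (the image and mask shapes are hypothetical): a Compose pipeline is called with keyword arguments and returns a dict, spatial transforms are applied to both image and mask, and Normalize() touches only the image.

import numpy as np

aug = get_training_augmentation()
image = np.random.randint(0, 256, size=(350, 525, 3), dtype=np.uint8)
mask = np.random.randint(0, 2, size=(350, 525), dtype=np.uint8)

sample = aug(image=image, mask=mask)
print(sample['image'].shape, sample['mask'].shape)  # (288, 576, 3) (288, 576)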
Example #2
Source File: experiment.py From mlcomp with Apache License 2.0
import albumentations as A


def get_test_transforms():
    # Single-element mean/std: normalization for one-channel input.
    return A.Compose([A.Normalize(mean=(0.485, ), std=(0.229, ))])
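The single-element mean and std above target one-channel input. A sketch with a hypothetical grayscale image:

import numpy as np

transforms = get_test_transforms()
gray = np.random.randint(0, 256, size=(256, 256), dtype=np.uint8)
out = transforms(image=gray)['image']  # (gray - 0.485 * 255) / (0.229 * 255)
print(out.dtype, out.shape)  # float32 (256, 256)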
Example #3
Source File: cloud_transform.py From kaggle-understanding-clouds with BSD 2-Clause "Simplified" License
import albumentations as albu


def get_test_augmentation(resize_to=(320, 640)):
    """Resize so the image shape is divisible by 32 (320 and 640 both are)."""
    test_transform = [
        albu.Resize(*resize_to),
        albu.Normalize(),
    ]
    return albu.Compose(test_transform)
Example #4
Source File: augmentation.py From EfficientDet.Pytorch with MIT License
import albumentations as albu
# ToTensor ships with older albumentations releases (replaced by ToTensorV2 in 1.0+).
from albumentations.pytorch import ToTensor


def get_augumentation(phase, width=512, height=512, min_area=0., min_visibility=0.):
    list_transforms = []
    if phase == 'train':
        list_transforms.extend([
            albu.LongestMaxSize(max_size=width, always_apply=True),
            albu.PadIfNeeded(min_height=height, min_width=width,
                             always_apply=True, border_mode=0, value=[0, 0, 0]),
            albu.RandomResizedCrop(height=height, width=width, p=0.3),
            albu.Flip(),
            albu.Transpose(),
            albu.OneOf([
                albu.RandomBrightnessContrast(brightness_limit=0.5, contrast_limit=0.4),
                albu.RandomGamma(gamma_limit=(50, 150)),
                albu.NoOp(),
            ]),
            albu.OneOf([
                albu.RGBShift(r_shift_limit=20, b_shift_limit=15, g_shift_limit=15),
                albu.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=5),
                albu.NoOp(),
            ]),
            albu.CLAHE(p=0.8),
            albu.HorizontalFlip(p=0.5),
            albu.VerticalFlip(p=0.5),
        ])
    if phase == 'test' or phase == 'valid':
        list_transforms.extend([
            albu.Resize(height=height, width=width),
        ])
    list_transforms.extend([
        # ImageNet statistics, applied after all pixel-level augmentations.
        albu.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), p=1),
        ToTensor(),
    ])
    if phase == 'test':
        return albu.Compose(list_transforms)
    return albu.Compose(list_transforms,
                        bbox_params=albu.BboxParams(format='pascal_voc',
                                                    min_area=min_area,
                                                    min_visibility=min_visibility,
                                                    label_fields=['category_id']))
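Because this Compose carries bbox_params with label_fields=['category_id'], the train and valid pipelines must be called with bounding boxes and matching labels. A sketch with hypothetical boxes:

import numpy as np

aug = get_augumentation('train')
image = np.random.randint(0, 256, size=(512, 512, 3), dtype=np.uint8)
bboxes = [[30, 40, 200, 220], [250, 260, 400, 480]]  # pascal_voc: x_min, y_min, x_max, y_max
category_id = [1, 2]  # one label per box, matching label_fields

sample = aug(image=image, bboxes=bboxes, category_id=category_id)
print(sample['image'].shape, len(sample['bboxes']))  # CHW tensor; boxes may be dropped by the crop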
Example #5
Source File: test.py From seismic-deeplearning with MIT License
import logging
import os
from os import path

import numpy as np
import torch
from albumentations import Compose, Normalize

# Project-specific helpers (config, update_config, load_log_configuration, models,
# runningScore, _write_section_file, _evaluate_split, _CLASS_NAMES) come from the
# seismic-deeplearning repository.


def test(*options, cfg=None, debug=False):
    update_config(config, options=options, config_file=cfg)
    n_classes = config.DATASET.NUM_CLASSES

    # Start logging
    load_log_configuration(config.LOG_CONFIG)
    logger = logging.getLogger(__name__)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    log_dir, _ = os.path.split(config.TEST.MODEL_PATH)

    # Load model and send it to the GPU if available
    model = getattr(models, config.MODEL.NAME).get_seg_model(config)
    model.load_state_dict(torch.load(config.TEST.MODEL_PATH), strict=False)
    model = model.to(device)

    running_metrics_overall = runningScore(n_classes)

    # Augmentation: max_pixel_value=1 makes Normalize compute (x - mean) / std
    # directly, since the sections are float data rather than 8-bit images.
    section_aug = Compose([Normalize(mean=(config.TRAIN.MEAN,), std=(config.TRAIN.STD,), max_pixel_value=1)])

    splits = ["test1", "test2"] if "Both" in config.TEST.SPLIT else [config.TEST.SPLIT]
    for sdx, split in enumerate(splits):
        labels = np.load(path.join(config.DATASET.ROOT, "test_once", split + "_labels.npy"))
        section_file = path.join(config.DATASET.ROOT, "splits", "section_" + split + ".txt")
        _write_section_file(labels, section_file)
        _evaluate_split(split, section_aug, model, device, running_metrics_overall, config, debug=debug)

    # Final test results
    score, class_iou = running_metrics_overall.get_scores()
    logger.info("--------------- FINAL RESULTS -----------------")
    logger.info(f'Pixel Acc: {score["Pixel Acc: "]:.3f}')
    for cdx, class_name in enumerate(_CLASS_NAMES):
        logger.info(f'  {class_name}_accuracy {score["Class Accuracy: "][cdx]:.3f}')
    logger.info(f'Mean Class Acc: {score["Mean Class Acc: "]:.3f}')
    logger.info(f'Freq Weighted IoU: {score["Freq Weighted IoU: "]:.3f}')
    logger.info(f'Mean IoU: {score["Mean IoU: "]:0.3f}')

    # Save the confusion matrix
    confusion = score["confusion_matrix"]
    np.savetxt(path.join(log_dir, "confusion.csv"), confusion, delimiter=" ")
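The max_pixel_value=1 argument is the notable detail here. A minimal sketch with made-up statistics standing in for config.TRAIN.MEAN and config.TRAIN.STD:

import numpy as np
from albumentations import Compose, Normalize

MEAN, STD = 0.0, 0.21  # hypothetical dataset statistics
section_aug = Compose([Normalize(mean=(MEAN,), std=(STD,), max_pixel_value=1)])

section = np.random.uniform(-1, 1, size=(255, 701)).astype(np.float32)
out = section_aug(image=section)['image']  # (section - MEAN) / STD
print(out.dtype, out.shape)  # float32 (255, 701)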
Example #6
Source File: dataset.py From kaggle-kuzushiji-2019 with MIT License
from typing import Callable

import albumentations as A
# ToTensor ships with older albumentations releases (replaced by ToTensorV2 in 1.0+).
from albumentations.pytorch import ToTensor

# LongestMaxSizeRandomSizedCrop is a custom transform defined elsewhere in the
# kaggle-kuzushiji-2019 repository.


def get_transform(
        *, train: bool,
        test_height: int, crop_width: int, crop_height: int,
        scale_aug: float,
        color_hue_aug: int, color_sat_aug: int, color_val_aug: int,
        normalize: bool = True,
) -> Callable:
    train_initial_size = 3072  # this value should not matter any more?
    crop_ratio = crop_height / test_height
    crop_min_max_height = tuple(
        int(train_initial_size * crop_ratio * (1 + sign * scale_aug))
        for sign in [-1, 1])
    if train:
        transforms = [
            LongestMaxSizeRandomSizedCrop(
                max_size=train_initial_size,
                min_max_height=crop_min_max_height,
                width=crop_width,
                height=crop_height,
                w2h_ratio=crop_width / crop_height,
            ),
            A.HueSaturationValue(
                hue_shift_limit=color_hue_aug,
                sat_shift_limit=color_sat_aug,
                val_shift_limit=color_val_aug,
            ),
            A.RandomBrightnessContrast(),
            A.RandomGamma(),
        ]
    else:
        transforms = [
            A.LongestMaxSize(max_size=test_height),
        ]
    if normalize:
        transforms.append(A.Normalize())
    transforms.append(ToTensor())
    return A.Compose(
        transforms,
        bbox_params={
            'format': 'coco',
            'min_area': 0,
            'min_visibility': 0.99,
            'label_fields': ['labels'],
        },
    )
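A note on the ToTensor() used in the last two examples: it comes from albumentations.pytorch and converts the HWC numpy image into a CHW torch tensor, but it was removed in albumentations 1.0 in favor of ToTensorV2. A sketch of the modern equivalent, assuming a current albumentations install:

import numpy as np
import albumentations as A
from albumentations.pytorch import ToTensorV2

pipeline = A.Compose([A.Normalize(), ToTensorV2()])
image = np.random.randint(0, 256, size=(640, 480, 3), dtype=np.uint8)
tensor = pipeline(image=image)['image']
print(type(tensor), tensor.shape)  # torch.Tensor, torch.Size([3, 640, 480])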