Python albumentations.ShiftScaleRotate() Examples
The following are 5 code examples of albumentations.ShiftScaleRotate().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module albumentations, or try the search function.
Example #1
Source File: cloud_transform.py From kaggle-understanding-clouds with BSD 2-Clause "Simplified" License | 7 votes |
def get_training_augmentation(resize_to=(320, 640), crop_size=(288, 576)):
    """Build the training-time augmentation pipeline.

    Args:
        resize_to: (height, width) the image is resized to before cropping.
        crop_size: (height, width) of the random crop taken after resizing.

    Returns:
        An ``albu.Compose`` wrapping the full list of training transforms.
    """
    print('[get_training_augmentation] crop_size:', crop_size, ', resize_to:', resize_to)
    pipeline = [
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        # Constant zero border so shifted/rotated edges come out black.
        albu.ShiftScaleRotate(
            scale_limit=0.20,
            rotate_limit=10,
            shift_limit=0.1,
            p=0.5,
            border_mode=cv2.BORDER_CONSTANT,
            value=0,
        ),
        albu.GridDistortion(p=0.5),
        albu.Resize(*resize_to),
        albu.RandomCrop(*crop_size),
        albu.ChannelShuffle(),
        albu.InvertImg(),
        albu.ToGray(),
        albu.Normalize(),
    ]
    return albu.Compose(pipeline)
Example #2
Source File: augmentations.py From yolov3-channel-pruning with MIT License | 5 votes |
def augment(image, boxes):
    """Apply albumentations augmentation to an image and its YOLO-style boxes.

    Args:
        image: HxWxC image array.
        boxes: array of rows [label, x_center, y_center, width, height],
            with the coordinate columns normalized (they are multiplied by the
            image height below — NOTE(review): this assumes square images;
            confirm with the caller).

    Returns:
        Tuple ``(image, boxes)``; the originals are returned unchanged when
        the augmentation removes every box.
    """
    h, w, _ = image.shape
    labels, boxes_coord = boxes[:, 0], boxes[:, 1:]
    labels = labels.tolist()

    # Scale normalized coordinates up to pixel units.
    boxes_coord = boxes_coord * h
    # Convert center-format to corner-format and keep x_min / y_min >= 0.
    boxes_coord[:, 0] = np.clip(boxes_coord[:, 0] - boxes_coord[:, 2] / 2, a_min=0, a_max=None)
    boxes_coord[:, 1] = np.clip(boxes_coord[:, 1] - boxes_coord[:, 3] / 2, a_min=0, a_max=None)
    boxes_coord = boxes_coord.tolist()  # [x_min, y_min, width, height]

    # Configure the augmentations here; extra transforms can be enabled below.
    aug = A.Compose([
        A.HorizontalFlip(p=0.5),
        # A.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=10, val_shift_limit=10, p=0.5),
        # A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=5, border_mode=0, p=0.5)
    ], bbox_params={'format': 'coco', 'label_fields': ['category_id']})
    augmented = aug(image=image, bboxes=boxes_coord, category_id=labels)

    # If augmentation removed every box, fall back to the original inputs.
    if augmented['bboxes']:
        image = augmented['image']
        boxes_coord = np.array(augmented['bboxes'])
        # Corner-format back to center-format: x_min, y_min, w, h -> x, y, w, h
        boxes_coord[:, 0] = boxes_coord[:, 0] + boxes_coord[:, 2] / 2
        boxes_coord[:, 1] = boxes_coord[:, 1] + boxes_coord[:, 3] / 2
        boxes_coord = boxes_coord / h
        labels = np.array(augmented['category_id'])[:, None]
        boxes = np.concatenate((labels, boxes_coord), 1)

    return image, boxes
Example #3
Source File: augmentations.py From YOLOv3-model-pruning with MIT License | 5 votes |
def augment(image, boxes):
    """Apply albumentations augmentation to an image and its YOLO-style boxes.

    Args:
        image: HxWxC image array.
        boxes: array of rows [label, x_center, y_center, width, height],
            with the coordinate columns normalized (they are multiplied by the
            image height below — NOTE(review): this assumes square images;
            confirm with the caller).

    Returns:
        Tuple ``(image, boxes)``; the originals are returned unchanged when
        the augmentation removes every box.
    """
    h, w, _ = image.shape
    labels, boxes_coord = boxes[:, 0], boxes[:, 1:]
    labels = labels.tolist()

    # Scale normalized coordinates up to pixel units.
    boxes_coord = boxes_coord * h
    # Convert center-format to corner-format and keep x_min / y_min >= 0.
    boxes_coord[:, 0] = np.clip(boxes_coord[:, 0] - boxes_coord[:, 2] / 2, a_min=0, a_max=None)
    boxes_coord[:, 1] = np.clip(boxes_coord[:, 1] - boxes_coord[:, 3] / 2, a_min=0, a_max=None)
    boxes_coord = boxes_coord.tolist()  # [x_min, y_min, width, height]

    # Configure the augmentations here; extra transforms can be enabled below.
    aug = A.Compose([
        A.HorizontalFlip(p=0.5),
        # A.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=10, val_shift_limit=10, p=0.5),
        # A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=5, border_mode=0, p=0.5)
    ], bbox_params={'format': 'coco', 'label_fields': ['category_id']})
    augmented = aug(image=image, bboxes=boxes_coord, category_id=labels)

    # If augmentation removed every box, fall back to the original inputs.
    if augmented['bboxes']:
        image = augmented['image']
        boxes_coord = np.array(augmented['bboxes'])
        # Corner-format back to center-format: x_min, y_min, w, h -> x, y, w, h
        boxes_coord[:, 0] = boxes_coord[:, 0] + boxes_coord[:, 2] / 2
        boxes_coord[:, 1] = boxes_coord[:, 1] + boxes_coord[:, 3] / 2
        boxes_coord = boxes_coord / h
        labels = np.array(augmented['category_id'])[:, None]
        boxes = np.concatenate((labels, boxes_coord), 1)

    return image, boxes
Example #4
Source File: example_bbox_keypoint_rotate.py From albumentations_examples with MIT License | 5 votes |
def main():
    """Demo: apply ShiftScaleRotate to an image together with its keypoints
    and bounding boxes, displaying the original and 10 augmented versions."""
    image = cv2.imread("images/image_1.jpg")  # cv2.imread returns BGR channel order
    # BUGFIX: the source image is BGR, so the grayscale conversion must use
    # COLOR_BGR2GRAY. The original COLOR_RGB2GRAY applies the luma weights to
    # the wrong channels, skewing the corner detector's response.
    keypoints = cv2.goodFeaturesToTrack(
        cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), maxCorners=100, qualityLevel=0.5, minDistance=5
    ).squeeze(1)

    # A 20x20 box centered on each keypoint, pascal_voc (x_min, y_min, x_max, y_max).
    bboxes = [(kp[0] - 10, kp[1] - 10, kp[0] + 10, kp[1] + 10) for kp in keypoints]

    disp_image = visualize(image, keypoints, bboxes)
    plt.figure(figsize=(10, 10))
    # COLOR_RGB2BGR and COLOR_BGR2RGB are the same channel swap, so this
    # correctly converts the BGR image for matplotlib display.
    plt.imshow(cv2.cvtColor(disp_image, cv2.COLOR_RGB2BGR))
    plt.tight_layout()
    plt.show()

    aug = A.Compose(
        [A.ShiftScaleRotate(scale_limit=0.1, shift_limit=0.2, rotate_limit=10, always_apply=True)],
        bbox_params=A.BboxParams(format="pascal_voc", label_fields=["bbox_labels"]),
        keypoint_params=A.KeypointParams(format="xy"),
    )

    for _i in range(10):
        data = aug(image=image, keypoints=keypoints, bboxes=bboxes, bbox_labels=np.ones(len(bboxes)))
        aug_image = data["image"]
        aug_image = visualize(aug_image, data["keypoints"], data["bboxes"])

        plt.figure(figsize=(10, 10))
        plt.imshow(cv2.cvtColor(aug_image, cv2.COLOR_RGB2BGR))
        plt.tight_layout()
        plt.show()
Example #5
Source File: test_transforms.py From albumentations with MIT License | 5 votes |
def test_shift_scale_rotate_interpolation(interpolation):
    """Check that ShiftScaleRotate applies the requested interpolation to the
    image while always using nearest-neighbour interpolation for the mask."""
    image = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)
    mask = np.random.randint(low=0, high=2, size=(100, 100), dtype=np.uint8)

    # Degenerate (min == max) limits pin the sampled parameters exactly:
    # dx = dy = 0.2, scale = 1 + 1.1 = 2.1, angle = 45.
    transform = A.ShiftScaleRotate(
        shift_limit=(0.2, 0.2),
        scale_limit=(1.1, 1.1),
        rotate_limit=(45, 45),
        interpolation=interpolation,
        p=1,
    )
    result = transform(image=image, mask=mask)

    common = dict(angle=45, scale=2.1, dx=0.2, dy=0.2, border_mode=cv2.BORDER_REFLECT_101)
    expected_image = F.shift_scale_rotate(image, interpolation=interpolation, **common)
    expected_mask = F.shift_scale_rotate(mask, interpolation=cv2.INTER_NEAREST, **common)

    assert np.array_equal(result["image"], expected_image)
    assert np.array_equal(result["mask"], expected_mask)