Python skimage.transform.rotate() Examples
The following are 30 code examples of skimage.transform.rotate(), drawn from open-source projects. The project, source file, and license are listed above each example. You may also want to check out all other available functions and classes of the skimage.transform module.
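For quick reference, skimage.transform.rotate(image, angle) rotates an image counter-clockwise by the given angle in degrees and returns a new float image scaled to [0, 1] unless preserve_range=True is passed. The snippet below is a minimal sketch of the call; the file names are placeholders:

import numpy as np
from skimage import io, transform

# Read an image and rotate it 45 degrees counter-clockwise.
# resize=True enlarges the output so the rotated corners are not clipped;
# mode/cval control how the newly exposed border pixels are filled.
image = io.imread('input.png')
rotated = transform.rotate(image, 45, resize=True, mode='constant', cval=0)

# rotate() returns floats in [0, 1], so rescale before saving as 8-bit.
io.imsave('rotated.png', (rotated * 255).astype(np.uint8))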
Example #1
Source File: deskew.py From Alyn with MIT License | 6 votes |
def deskew(self):
    img = io.imread(self.input_file)
    res = self.skew_obj.process_single_file()
    angle = res['Estimated Angle']

    # Map the estimated skew angle to a rotation that brings text lines back to horizontal.
    if angle >= 0 and angle <= 90:
        rot_angle = angle - 90 + self.r_angle
    if angle >= -45 and angle < 0:
        rot_angle = angle - 90 + self.r_angle
    if angle >= -90 and angle < -45:
        rot_angle = 90 + angle + self.r_angle

    rotated = rotate(img, rot_angle, resize=True)

    if self.display_image:
        self.display(rotated)

    if self.output_file:
        # rotate() returns a float image in [0, 1], so scale back to 0-255 before saving.
        self.saveImage(rotated * 255)
Example #2
Source File: JN721.py From facial_expressions with Apache License 2.0 | 6 votes |
def transform_img__(self, img, fn, emotion):
    self.images[fn] = {'Image': img, 'Emotion': emotion}  # Store original
    counter = 0

    self.images["Trans" + str(counter) + "_" + fn] = {'Image': np.fliplr(img), 'Emotion': emotion}  # FLIP the image
    counter += 1

    for deg in range(-10, 15, 5):  # ROTATE to be robust to camera orientation
        if deg == 0:
            continue
        self.images["Trans" + str(counter) + "_" + fn] = {'Image': tf.rotate(img, deg), 'Emotion': emotion}
        counter += 1

    lenX, lenY = img.shape

    # CROP based on rough heuristic (floor division keeps the slice indices integers in Python 3)
    for crop_size in range(8, 14, 2):
        cropped = img[lenX // crop_size: -lenX // crop_size, lenY // crop_size: -lenY // crop_size]
        self.images["Trans" + str(counter) + "_" + fn] = {'Image': cropped, 'Emotion': emotion}
        counter += 1

    for i in range(2):  # SCALE down images (random factor btw 1.1 to 1.21)
        scale_factor = math.sqrt(1.1) ** np.random.randint(2, 5)
        scaled_img = tf.warp(img, tf.AffineTransform(scale=(scale_factor, scale_factor)))
        self.images["Trans" + str(counter) + "_" + fn] = {'Image': scaled_img, 'Emotion': emotion}
        counter += 1
Example #3
Source File: submission.py From u-net with MIT License | 6 votes |
def augmentation(image, org_width=160, org_height=224, width=190, height=262):
    max_angle = 20
    image = resize(image, (width, height))

    angle = np.random.randint(max_angle)
    if np.random.randint(2):
        angle = -angle
    image = rotate(image, angle, resize=True)

    xstart = np.random.randint(width - org_width)
    ystart = np.random.randint(height - org_height)
    image = image[xstart:xstart + org_width, ystart:ystart + org_height]

    if np.random.randint(2):
        image = cv2.flip(image, 1)
    if np.random.randint(2):
        image = cv2.flip(image, 0)
    # image = resize(image, (org_width, org_height))
    print(image.shape)
    plt.imshow(image)
    plt.show()
Example #4
Source File: data_generator.py From pOSAL with MIT License | 6 votes |
def polar_transform(img, mask, img_size, mode='train'):
    random_rotate_seed = 3 * 90
    width_shift = 0
    heigth_shift = 0
    if mode == 'train':
        random_rotate_seed = random.randint(0, 3) * 90
        width_shift = random.randint(0, 40) - 20
        heigth_shift = random.randint(0, 40) - 20
    img = rotate(cv2.linearPolar(img, (img_size / 2 + width_shift, img_size / 2 + heigth_shift),
                                 img_size / 2 - 20, cv2.WARP_FILL_OUTLIERS), random_rotate_seed)
    mask = rotate(cv2.linearPolar(mask, (img_size / 2 + width_shift, img_size / 2 + heigth_shift),
                                  img_size / 2 - 20, cv2.WARP_FILL_OUTLIERS), random_rotate_seed)
    return img, mask
Example #5
Source File: rotate_images.py From AI_in_Medicine_Clinical_Imaging_Classification with MIT License | 6 votes |
def rotate_images(file_path, degrees_of_rotation, lst_imgs):
    '''
    Rotates image based on a specified amount of degrees

    INPUT
        file_path: file path to the folder containing images.
        degrees_of_rotation: Integer, specifying degrees to rotate the image.
            Set number from 1 to 360.
        lst_imgs: list of image strings.

    OUTPUT
        Images rotated by the degrees of rotation specified.
    '''
    for l in lst_imgs:
        img = io.imread(file_path + str(l) + '.jpeg')
        img = rotate(img, degrees_of_rotation)
        io.imsave(file_path + str(l) + '_' + str(degrees_of_rotation) + '.jpeg', img)
Example #6
Source File: rotate_images.py From eyenet with MIT License | 6 votes |
def rotate_images(file_path, degrees_of_rotation, lst_imgs):
    '''
    Rotates image based on a specified amount of degrees

    INPUT
        file_path: file path to the folder containing images.
        degrees_of_rotation: Integer, specifying degrees to rotate the image.
            Set number from 1 to 360.
        lst_imgs: list of image strings.

    OUTPUT
        Images rotated by the degrees of rotation specified.
    '''
    for l in lst_imgs:
        img = io.imread(file_path + str(l) + '.jpeg')
        img = rotate(img, degrees_of_rotation)
        io.imsave(file_path + str(l) + '_' + str(degrees_of_rotation) + '.jpeg', img)
Example #7
Source File: datagen_coco.py From Convolutional-Pose-Machine-tf with GNU Lesser General Public License v3.0 | 6 votes |
def _augment(self, img, hm, mask, max_rotation=30, angle=None):
    """ # TODO : IMPLEMENT DATA AUGMENTATION
    """
    if random.choice([0, 1]):
        if angle == None:
            r_angle = np.random.randint(-1 * max_rotation, max_rotation)
        else:
            r_angle = angle
        '''
        # old version
        img = transform.rotate(img, r_angle, preserve_range=True)
        hm = transform.rotate(hm, r_angle)
        rmask = transform.rotate(rmask, r_angle)
        '''
        img = self._rotate_img(img, r_angle)
        hm = self._rotate_hm(hm, r_angle)
        mask = self._rotate_mask(mask, r_angle)
    return img, hm, mask

# ----------------------- Batch Generator ----------------------------------
Example #8
Source File: datagen.py From Convolutional-Pose-Machine-tf with GNU Lesser General Public License v3.0 | 6 votes |
def _rotate_bboxes(self, bbox_list, r_angle, hm):
    """ rotate augmentation """
    # angle 2 radian
    radian = r_angle * math.pi / 180
    # rotation matrix
    rotation_mat = np.array([[np.cos(radian), np.sin(radian)],
                             [-np.sin(radian), np.cos(radian)]])
    # rotate the bounding box
    del_ind = []
    for ind in range(bbox_list.shape[0]):
        rel_pos = np.dot(
            rotation_mat,
            bbox_list[ind, :2] - np.round(np.array(hm.shape[:2]) / 2.0))
        bbox_list[ind, :2] = rel_pos + np.round(np.array(hm.shape[:2]) / 2.0)
        bbox_list[ind, 2:4] = np.dot(
            np.abs(rotation_mat), bbox_list[ind, 2:4])
        if (bbox_list[ind] < 0).any() or (bbox_list[ind, :2] > hm.shape[:2]).any():
            del_ind.append(ind)
    bbox_list = np.delete(bbox_list, del_ind, 0)
    return bbox_list
Example #9
Source File: prepare_omniglot.py From few-shot with MIT License | 6 votes |
def handle_alphabet(folder):
    print('{}...'.format(folder.split('/')[-1]))
    for rotate in [0, 90, 180, 270]:
        # Create new folders for each augmented alphabet
        mkdir(f'{folder}.{rotate}')

        for root, character_folders, _ in os.walk(folder):
            for character_folder in character_folders:
                # For each character folder in an alphabet rotate and resize all of the images and save
                # to the new folder
                handle_characters(folder, root + '/' + character_folder, rotate)
                # return

    # Delete original alphabet
    rmdir(folder)

# Clean up previous extraction
Example #10
Source File: image_folder.py From CGIntrinsics with MIT License | 5 votes |
def DA(self, img, mode, random_pos, random_filp):
    if random_filp > 0.5:
        img = np.fliplr(img)

    # img = rotate(img, random_angle, order=mode)
    img = img[random_pos[0]:random_pos[1], random_pos[2]:random_pos[3], :]
    img = resize(img, (self.height, self.width), order=mode)

    return img
Example #11
Source File: jhamski.py From facial_expressions with Apache License 2.0 | 5 votes |
def rotate120(img, name):
    img = tf.rotate(img, angle=120)
    save_image(img, name, 'rotate120')
Example #12
Source File: dataset.py From deeppose with GNU General Public License v2.0 | 5 votes |
def __init__(self, csv_fn, img_dir, im_size, fliplr, rotate, rotate_range,
             zoom, base_zoom, zoom_range, translate, translate_range, min_dim,
             coord_normalize, gcn, joint_num, fname_index, joint_index,
             symmetric_joints, ignore_label):
    for key, val in locals().items():
        setattr(self, key, val)
    self.symmetric_joints = json.loads(symmetric_joints)
    self.load_images()
    logging.info('{} is ready'.format(csv_fn))
Example #13
Source File: dataset.py From deeppose with GNU General Public License v2.0 | 5 votes |
def apply_rotate(self, image, joints, ignore_joints):
    available_joints = self.get_available_joints(joints, ignore_joints)
    joint_center = self.calc_joint_center(available_joints)
    angle = np.random.randint(0, self.rotate_range)
    image = transform.rotate(image, angle, center=joint_center)
    image = (image * 255).astype(np.uint8)
    theta = -np.radians(angle)
    c, s = np.cos(theta), np.sin(theta)
    rot_mat = np.matrix([[c, -s], [s, c]])
    joints = rot_mat.dot((joints - joint_center).T).T + joint_center
    return image, np.array(joints.tolist())
Example #14
Source File: dataset.py From deeppose with GNU General Public License v2.0 | 5 votes |
def get_example(self, i):
    img_id, joints = self.joints[i]
    image = self.images[img_id]
    ignore_joints, bbox_w, bbox_h, cx, cy = self.info[i]
    if self.rotate:
        image, joints = self.apply_rotate(image, joints, ignore_joints)
    if self.translate:
        image, joints = self.apply_translate(image, joints)
    if self.zoom:
        image, joints, cx, cy = self.apply_zoom(image, joints, cx, cy)
    image, joints = self.crop_reshape(
        image, joints, bbox_w, bbox_h, cx, cy)
    if self.fliplr and np.random.randint(0, 2) == 1:
        image, joints = self.apply_fliplr(image, joints)
    if self.coord_normalize:
        image, joints = self.apply_coord_normalize(image, joints)
    if self.gcn:
        image, joints = self.apply_gcn(image, joints)
    image = image.astype(np.float32).transpose(2, 0, 1)
    joints = joints.astype(np.float32).flatten()
    ignore_joints = np.array(ignore_joints, dtype=np.int32).flatten()
    return image, joints, ignore_joints
Example #15
Source File: train_res.py From u-net with MIT License | 5 votes |
def augmentation(image, imageB, org_width=160, org_height=224, width=190, height=262):
    max_angle = 20
    image = resize(image, (width, height))
    imageB = resize(imageB, (width, height))

    angle = np.random.randint(max_angle)
    if np.random.randint(2):
        angle = -angle
    image = rotate(image, angle, resize=True)
    imageB = rotate(imageB, angle, resize=True)

    xstart = np.random.randint(width - org_width)
    ystart = np.random.randint(height - org_height)
    image = image[xstart:xstart + org_width, ystart:ystart + org_height]
    imageB = imageB[xstart:xstart + org_width, ystart:ystart + org_height]

    if np.random.randint(2):
        image = cv2.flip(image, 1)
        imageB = cv2.flip(imageB, 1)
    if np.random.randint(2):
        imageB = cv2.flip(imageB, 0)
    # image = resize(image, (org_width, org_height))
    return image, imageB
    # print(image.shape)
    # plt.imshow(image)
    # plt.show()

# Helper to build a conv -> BN -> relu block
Example #16
Source File: square_crop.py From 3d-dl with MIT License | 5 votes |
def random_rot(image):
    angles = [0, 90, 180, 270]
    # np.random.randint's upper bound is exclusive, so use len(angles) to allow all four angles.
    i = np.random.randint(0, len(angles))
    angle = angles[i]
    return rotate(image, angle)
Example #17
Source File: train.py From u-net with MIT License | 5 votes |
def augmentation(image, imageB, org_width=160, org_height=224, width=190, height=262):
    max_angle = 20
    image = cv2.resize(image, (height, width))
    imageB = cv2.resize(imageB, (height, width))

    angle = np.random.randint(max_angle)
    if np.random.randint(2):
        angle = -angle
    image = rotate(image, angle, resize=True)
    imageB = rotate(imageB, angle, resize=True)

    xstart = np.random.randint(width - org_width)
    ystart = np.random.randint(height - org_height)
    image = image[xstart:xstart + org_width, ystart:ystart + org_height]
    imageB = imageB[xstart:xstart + org_width, ystart:ystart + org_height]

    if np.random.randint(2):
        image = cv2.flip(image, 1)
        imageB = cv2.flip(imageB, 1)
    if np.random.randint(2):
        image = cv2.flip(image, 0)
        imageB = cv2.flip(imageB, 0)

    image = cv2.resize(image, (org_height, org_width))
    imageB = cv2.resize(imageB, (org_height, org_width))
    return image, imageB
    # print(image.shape)
    # plt.imshow(image)
    # plt.show()
Example #18
Source File: image_folder.py From CGIntrinsics with MIT License | 5 votes |
def DA(self, img, mode, random_pos, random_filp):
    if random_filp > 0.5:
        img = np.fliplr(img)

    # img = rotate(img, random_angle, order=mode)
    img = img[random_pos[0]:random_pos[1], random_pos[2]:random_pos[3], :]
    img = resize(img, (self.height, self.width), order=mode)

    return img
Example #19
Source File: image_folder.py From CGIntrinsics with MIT License | 5 votes |
def DA(self, img, mode, random_pos, random_filp, h, w):
    if random_filp > 0.5:
        img = np.fliplr(img)

    # img = rotate(img, random_angle, order=mode)
    img = img[random_pos[0]:random_pos[1], random_pos[2]:random_pos[3], :]
    img = resize(img, (h, w), order=mode)

    return img
Example #20
Source File: image_tfs.py From tanda with MIT License | 5 votes |
def TF_rotate(x, angle=0.0, target=None):
    assert len(x.shape) == 3
    h, w, nc = x.shape

    # Rotate using edge fill mode
    return rotate(x, angle, mode='edge', order=1)
Example #21
Source File: transform.py From brain-segmentation-pytorch with MIT License | 5 votes |
def __call__(self, sample):
    image, mask = sample

    angle = np.random.uniform(low=-self.angle, high=self.angle)
    image = rotate(image, angle, resize=False, preserve_range=True, mode="constant")
    mask = rotate(
        mask, angle, resize=False, order=0, preserve_range=True, mode="constant"
    )
    return image, mask
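The design choice worth noting here is that the mask is rotated with order=0 (nearest-neighbor interpolation), so integer label values are never blended at region boundaries, while the image uses the default bi-linear interpolation. A small illustrative sketch (the toy label array is made up):

import numpy as np
from skimage.transform import rotate

labels = np.zeros((8, 8), dtype=np.uint8)
labels[2:6, 2:6] = 3  # a single region with label 3

blended = rotate(labels, 30, preserve_range=True)           # default order interpolates
nearest = rotate(labels, 30, order=0, preserve_range=True)  # nearest-neighbor keeps labels intact

print(np.unique(np.round(blended, 2)))  # intermediate values appear between 0 and 3
print(np.unique(nearest))               # only 0.0 and 3.0 remain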
Example #22
Source File: util.py From polar-transformer-networks with MIT License | 5 votes |
def predict_testaug(model, X, batchsize=None, angs=None):
    """ Run predictions w/ test time rotation augmentation by angs """
    preds = []
    for a in angs:
        print('rotating test set by angle: {:.2f}...'.format(a))
        rotX = np.stack(Parallel(n_jobs=-1)(delayed(rotate)(im, a, preserve_range=True)
                                            for im in X))
        preds.append(predict_batches(model, rotX, batchsize=batchsize))

    combined = sum([softmax(p, axis=1) for p in preds])

    return combined
Example #23
Source File: simplified_image_classifier.py From ramp-workflow with BSD 3-Clause "New" or "Revised" License | 5 votes |
def _image_transform(x, transforms):
    from skimage.transform import rotate
    for t in transforms:
        if t['name'] == 'rotate':
            angle = np.random.random() * (
                t['u_angle'] - t['l_angle']) + t['l_angle']
            # rotate() returns a new array, so keep the result.
            x = rotate(x, angle, preserve_range=True)
    return x
Example #24
Source File: augmentation.py From road_building_extraction with MIT License | 5 votes |
def __call__(self, sample):
    sat_img = transform.rotate(sample['sat_img'], self.angle, self.resize)
    map_img = transform.rotate(sample['map_img'], self.angle, self.resize)

    return {'sat_img': sat_img, 'map_img': map_img}
Example #25
Source File: augmentation.py From gconv_experiments with MIT License | 5 votes |
def rotate_transform_batch(x, rotation=None):
    r = np.random.uniform(-0.5, 0.5, size=x.shape[0]) * rotation

    # hack; skimage.transform wants float images to be in [-1, 1]
    factor = np.maximum(np.max(x), np.abs(np.min(x)))
    x = x / factor

    x_out = np.empty_like(x)
    for i in range(x.shape[0]):
        x_out[i, 0] = tf.rotate(x[i, 0], r[i])
    x_out *= factor

    return x_out
Example #26
Source File: test_main.py From pyelastix with MIT License | 5 votes |
def test_register_affine_gray():
    # Get fixed image
    image_fixed = imageio.imread('imageio:chelsea.png')
    image_fixed = color.rgb2gray(image_fixed)

    # Generate moving image
    image_moving = transform.rotate(image_fixed, angle=15, resize=True)

    # Convert both images to float32
    image_fixed = image_fixed.astype('float32')
    image_moving = image_moving.astype('float32')

    # Initialize and adjust the parameters
    params = pyelastix.get_default_params(type='AFFINE')
    params.FixedInternalImagePixelType = "float"
    params.MovingInternalImagePixelType = "float"
    params.ResultImagePixelType = "float"
    params.NumberOfResolutions = 3
    params.MaximumNumberOfIterations = 1000

    # Register
    image_registered, field = pyelastix.register(
        image_moving, image_fixed, params)

    # Check the results
    assert image_registered == pytest.approx(image_fixed, rel=1)
Example #27
Source File: crop.py From whale-2015 with Apache License 2.0 | 5 votes |
def crop(path, bonnet, blowhole):
    im = io.imread(path).astype(np.uint8)
    if doscale == 1:
        bonnet['y'] *= float(im.shape[0]) / imwidth
        bonnet['x'] *= float(im.shape[1]) / imwidth
        blowhole['y'] *= float(im.shape[0]) / imwidth
        blowhole['x'] *= float(im.shape[1]) / imwidth
    y = bonnet['y'] - blowhole['y']
    x = bonnet['x'] - blowhole['x']
    dist = math.hypot(x, y)
    minh = 10
    minw = 20
    croph = int((im.shape[0] - 1.0 * dist) // 2)
    cropw = int((im.shape[1] - 2.0 * dist) // 2)
    newh = im.shape[0] - 2 * croph
    neww = im.shape[1] - 2 * cropw
    if croph <= 0 or cropw <= 0 or newh < minh or neww < minw:
        print(' %s unchanged' % os.path.basename(path))
    else:
        angle = math.atan2(y, x) * 180 / math.pi
        centery = 0.4 * bonnet['y'] + 0.6 * blowhole['y']
        centerx = 0.4 * bonnet['x'] + 0.6 * blowhole['x']
        center = (centerx, centery)
        im = tf.rotate(im, angle, resize=False, center=center, preserve_range=True)
        imcenter = (im.shape[1] / 2, im.shape[0] / 2)
        trans = (center[0] - imcenter[0], center[1] - imcenter[1])
        tform = tf.SimilarityTransform(translation=trans)
        im = tf.warp(im, tform)
        im = im[croph:-croph, cropw:-cropw]
        path = os.path.join(dstdir, os.path.basename(path))
        io.imsave(path, im.astype(np.uint8))
    return im.shape[0], im.shape[1]
Example #28
Source File: loaders.py From open-solution-mapping-challenge with MIT License | 5 votes |
def per_channel_rotation(x, angle):
    x_ = x.copy()
    for i, channel in enumerate(x):
        x_[i, :, :] = rotate(channel, angle, preserve_range=True)
    return x_
Example #29
Source File: loaders.py From open-solution-mapping-challenge with MIT License | 5 votes |
def test_time_augmentation_transform(image, tta_parameters):
    if tta_parameters['ud_flip']:
        image = np.flipud(image)
    elif tta_parameters['lr_flip']:
        image = np.fliplr(image)
    elif tta_parameters['color_shift']:
        random_color_shift = reseed(color_seq, deterministic=False)
        image = random_color_shift.augment_image(image)
    image = rotate(image, tta_parameters['rotation'], preserve_range=True)
    return image
Example #30
Source File: run_skimage.py From recipy with Apache License 2.0 | 5 votes |
def io_imsave(self):
    """
    Use skimage.io.imread to read image.tiff and skimage.io.imsave
    to save out_image.tiff.
    """
    file_name = os.path.join(self.data_dir, "image.tiff")
    out_file_name = os.path.join(self.data_dir, "out_image.tiff")
    data = io.imread(file_name)
    data = transform.rotate(data, 90)
    io.imsave(out_file_name, data)
    os.remove(out_file_name)