Python PIL.ImageOps.mirror() Examples
The following are 30 code examples of PIL.ImageOps.mirror(). You can go to the original project or source file by following the links above each example, or check out all of the available functions and classes of the module PIL.ImageOps.
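ImageOps.mirror() returns a copy of the image flipped horizontally (left to right); it is equivalent to image.transpose(Image.FLIP_LEFT_RIGHT). A minimal usage sketch follows; the file names are placeholders:

from PIL import Image, ImageOps

# Open an image, mirror it left-to-right, and save the result.
# ImageOps.flip() is the top-to-bottom counterpart.
image = Image.open('input.jpg')
mirrored = ImageOps.mirror(image)
mirrored.save('output.jpg')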
Example #1
Source File: coco2017_animal.py From easy-faster-rcnn.pytorch with MIT License
def __getitem__(self, index: int) -> Tuple[str, Tensor, Tensor, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]

    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [COCO2017Animal.CATEGORY_TO_LABEL_DICT[COCO2017.LABEL_TO_CATEGORY_DICT[obj.label]]
              for obj in annotation.objects]  # mapping from original `COCO2017` dataset

    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)

    image = Image.open(annotation.filename).convert('RGB')  # for some grayscale images

    # random flip on only training mode
    if self._mode == COCO2017Animal.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # index 0 and 2 represent `left` and `right` respectively

    image, scale = COCO2017Animal.preprocess(image, self._image_min_side, self._image_max_side)
    scale = torch.tensor(scale, dtype=torch.float)
    bboxes *= scale

    return image_id, image, scale, bboxes, labels
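The box update inside the flip branch is the standard way to keep annotations aligned with a mirrored image: a point at x maps to width - x, so the new left edge is width - old_right and the new right edge is width - old_left, and the fancy-index assignment applies both swaps at once. A small sketch of the same arithmetic, with box values invented for illustration:

import torch

width = 100
bboxes = torch.tensor([[10., 20., 30., 40.]])  # [left, top, right, bottom]
bboxes[:, [0, 2]] = width - bboxes[:, [2, 0]]  # left' = w - right, right' = w - left
print(bboxes)  # tensor([[70., 20., 90., 40.]])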
Example #2
Source File: notsobot.py From Trusty-cogs with MIT License
def do_haah(self, b):
    f = BytesIO()
    f2 = BytesIO()
    with wand.image.Image(file=b) as img:
        h1 = img.clone()
        h1.transform("50%x100%")
        h2 = h1.clone()
        h2.rotate(degree=180)
        h2.flip()
        h1.save(file=f)
        h2.save(file=f2)
    f.seek(0)
    f2.seek(0)
    list_im = [f2, f]
    imgs = [ImageOps.mirror(Image.open(i).convert("RGBA")) for i in list_im]
    min_shape = sorted([(np.sum(i.size), i.size) for i in imgs])[0][1]
    imgs_comb = np.hstack([np.asarray(i.resize(min_shape)) for i in imgs])
    imgs_comb = Image.fromarray(imgs_comb)
    final = BytesIO()
    imgs_comb.save(final, "png")
    file_size = final.tell()
    final.seek(0)
    return final, file_size
Example #3
Source File: voc2007_cat_dog.py From easy-fpn.pytorch with MIT License
def __getitem__(self, index: int) -> Tuple[str, Tensor, float, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]

    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [VOC2007CatDog.CATEGORY_TO_LABEL_DICT[obj.name] for obj in annotation.objects]

    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)

    image = Image.open(os.path.join(self._path_to_jpeg_images_dir, annotation.filename))

    # random flip on only training mode
    if self._mode == VOC2007CatDog.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # index 0 and 2 represent `left` and `right` respectively

    image, scale = VOC2007CatDog.preprocess(image, self._image_min_side, self._image_max_side)
    bboxes *= scale

    return image_id, image, scale, bboxes, labels
Example #4
Source File: coco2017.py From easy-fpn.pytorch with MIT License
def __getitem__(self, index: int) -> Tuple[str, Tensor, float, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]

    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [obj.label for obj in annotation.objects]

    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)

    image = Image.open(annotation.filename).convert('RGB')  # for some grayscale images

    # random flip on only training mode
    if self._mode == COCO2017.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # index 0 and 2 represent `left` and `right` respectively

    image, scale = COCO2017.preprocess(image, self._image_min_side, self._image_max_side)
    bboxes *= scale

    return image_id, image, scale, bboxes, labels
Example #5
Source File: coco2017_person.py From easy-fpn.pytorch with MIT License
def __getitem__(self, index: int) -> Tuple[str, Tensor, float, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]

    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [COCO2017Person.CATEGORY_TO_LABEL_DICT[COCO2017.LABEL_TO_CATEGORY_DICT[obj.label]]
              for obj in annotation.objects]  # mapping from original `COCO2017` dataset

    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)

    image = Image.open(annotation.filename).convert('RGB')  # for some grayscale images

    # random flip on only training mode
    if self._mode == COCO2017Person.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # index 0 and 2 represent `left` and `right` respectively

    image, scale = COCO2017Person.preprocess(image, self._image_min_side, self._image_max_side)
    bboxes *= scale

    return image_id, image, scale, bboxes, labels
Example #6
Source File: coco2017_animal.py From easy-fpn.pytorch with MIT License
def __getitem__(self, index: int) -> Tuple[str, Tensor, float, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]

    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [COCO2017Animal.CATEGORY_TO_LABEL_DICT[COCO2017.LABEL_TO_CATEGORY_DICT[obj.label]]
              for obj in annotation.objects]  # mapping from original `COCO2017` dataset

    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)

    image = Image.open(annotation.filename).convert('RGB')  # for some grayscale images

    # random flip on only training mode
    if self._mode == COCO2017Animal.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # index 0 and 2 represent `left` and `right` respectively

    image, scale = COCO2017Animal.preprocess(image, self._image_min_side, self._image_max_side)
    bboxes *= scale

    return image_id, image, scale, bboxes, labels
Example #7
Source File: voc2007.py From easy-fpn.pytorch with MIT License
def __getitem__(self, index: int) -> Tuple[str, Tensor, float, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]

    bboxes = [obj.bbox.tolist() for obj in annotation.objects if not obj.difficult]
    labels = [VOC2007.CATEGORY_TO_LABEL_DICT[obj.name] for obj in annotation.objects if not obj.difficult]

    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)

    image = Image.open(os.path.join(self._path_to_jpeg_images_dir, annotation.filename))

    # random flip on only training mode
    if self._mode == VOC2007.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # index 0 and 2 represent `left` and `right` respectively

    image, scale = VOC2007.preprocess(image, self._image_min_side, self._image_max_side)
    bboxes *= scale

    return image_id, image, scale, bboxes, labels
Example #8
Source File: voc2007.py From easy-faster-rcnn.pytorch with MIT License
def __getitem__(self, index: int) -> Tuple[str, Tensor, Tensor, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]

    bboxes = [obj.bbox.tolist() for obj in annotation.objects if not obj.difficult]
    labels = [VOC2007.CATEGORY_TO_LABEL_DICT[obj.name] for obj in annotation.objects if not obj.difficult]

    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)

    image = Image.open(os.path.join(self._path_to_jpeg_images_dir, annotation.filename))

    # random flip on only training mode
    if self._mode == VOC2007.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # index 0 and 2 represent `left` and `right` respectively

    image, scale = VOC2007.preprocess(image, self._image_min_side, self._image_max_side)
    scale = torch.tensor(scale, dtype=torch.float)
    bboxes *= scale

    return image_id, image, scale, bboxes, labels
Example #9
Source File: augmentation.py From oft with MIT License
def random_flip(image, calib, objects):
    if random.random() < 0.5:
        return image, calib, objects

    # Flip image
    image = ImageOps.mirror(image)

    # Modify calibration matrix
    width, _ = image.size
    calib[0, 2] = width - calib[0, 2]                # cx' = w - cx
    calib[0, 3] = width * calib[2, 3] - calib[0, 3]  # tx' = w*tz - tx

    # Flip object x-positions
    flipped_objects = list()
    for obj in objects:
        position = [-obj.position[0]] + obj.position[1:]
        angle = math.atan2(math.sin(obj.angle), -math.cos(obj.angle))
        flipped_objects.append(utils.ObjectData(
            obj.classname, position, obj.dimensions, angle, obj.score))

    return image, calib, flipped_objects
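The yaw update above, atan2(sin(angle), -cos(angle)), reflects a heading across the mirror axis and equals pi - angle wrapped into (-pi, pi]. A quick numerical check, with the angle chosen arbitrarily:

import math

theta = 0.3
flipped = math.atan2(math.sin(theta), -math.cos(theta))
print(flipped, math.pi - theta)  # both print ~2.8416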
Example #10
Source File: dataset.py From DBPN-Pytorch with MIT License
def augment(img_in, img_tar, img_bic, flip_h=True, rot=True):
    # Note: 'flip_h' is set by ImageOps.flip() (a top-bottom flip) and
    # 'flip_v' by ImageOps.mirror() (a left-right flip).
    info_aug = {'flip_h': False, 'flip_v': False, 'trans': False}

    if random.random() < 0.5 and flip_h:
        img_in = ImageOps.flip(img_in)
        img_tar = ImageOps.flip(img_tar)
        img_bic = ImageOps.flip(img_bic)
        info_aug['flip_h'] = True

    if rot:
        if random.random() < 0.5:
            img_in = ImageOps.mirror(img_in)
            img_tar = ImageOps.mirror(img_tar)
            img_bic = ImageOps.mirror(img_bic)
            info_aug['flip_v'] = True
        if random.random() < 0.5:
            img_in = img_in.rotate(180)
            img_tar = img_tar.rotate(180)
            img_bic = img_bic.rotate(180)
            info_aug['trans'] = True

    return img_in, img_tar, img_bic, info_aug
Example #11
Source File: dataset.py From iSeeBetter with MIT License
def augment(img_in, img_tar, img_nn, flip_h=True, rot=True):
    info_aug = {'flip_h': False, 'flip_v': False, 'trans': False}

    if random.random() < 0.5 and flip_h:
        img_in = ImageOps.flip(img_in)
        img_tar = ImageOps.flip(img_tar)
        img_nn = [ImageOps.flip(j) for j in img_nn]
        info_aug['flip_h'] = True

    if rot:
        if random.random() < 0.5:
            img_in = ImageOps.mirror(img_in)
            img_tar = ImageOps.mirror(img_tar)
            img_nn = [ImageOps.mirror(j) for j in img_nn]
            info_aug['flip_v'] = True
        if random.random() < 0.5:
            img_in = img_in.rotate(180)
            img_tar = img_tar.rotate(180)
            img_nn = [j.rotate(180) for j in img_nn]
            info_aug['trans'] = True

    return img_in, img_tar, img_nn, info_aug
Example #12
Source File: dataset.py From STARnet with MIT License
def augment(img_in, img_tar, img_tar_l, flip_h=True, rot=True):
    info_aug = {'flip_h': False, 'flip_v': False, 'trans': False}

    if random.random() < 0.5 and flip_h:
        img_in = [ImageOps.flip(j) for j in img_in]
        img_tar = [ImageOps.flip(j) for j in img_tar]
        img_tar_l = ImageOps.flip(img_tar_l)
        info_aug['flip_h'] = True

    if rot:
        if random.random() < 0.5:
            img_in = [ImageOps.mirror(j) for j in img_in]
            img_tar = [ImageOps.mirror(j) for j in img_tar]
            img_tar_l = ImageOps.mirror(img_tar_l)
            info_aug['flip_v'] = True
        if random.random() < 0.5:
            img_in = [j.rotate(180) for j in img_in]
            img_tar = [j.rotate(180) for j in img_tar]
            img_tar_l = img_tar_l.rotate(180)
            info_aug['trans'] = True

    return img_in, img_tar, img_tar_l, info_aug
Example #13
Source File: coco2017_car.py From SlowFast-Network-pytorch with MIT License
def __getitem__(self, index: int) -> Tuple[str, Tensor, Tensor, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]

    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [COCO2017Car.CATEGORY_TO_LABEL_DICT[COCO2017.LABEL_TO_CATEGORY_DICT[obj.label]]
              for obj in annotation.objects]  # mapping from original `COCO2017` dataset

    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)

    image = Image.open(annotation.filename).convert('RGB')  # for some grayscale images

    # random flip on only training mode
    if self._mode == COCO2017Car.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # index 0 and 2 represent `left` and `right` respectively

    image, scale = COCO2017Car.preprocess(image, self._image_min_side, self._image_max_side)
    scale = torch.tensor(scale, dtype=torch.float)
    bboxes *= scale

    return image_id, image, scale, bboxes, labels
Example #14
Source File: voc2007_cat_dog.py From SlowFast-Network-pytorch with MIT License
def __getitem__(self, index: int) -> Tuple[str, Tensor, Tensor, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]

    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [VOC2007CatDog.CATEGORY_TO_LABEL_DICT[obj.name] for obj in annotation.objects]

    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)

    image = Image.open(os.path.join(self._path_to_jpeg_images_dir, annotation.filename))

    # random flip on only training mode
    if self._mode == VOC2007CatDog.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # index 0 and 2 represent `left` and `right` respectively

    image, scale = VOC2007CatDog.preprocess(image, self._image_min_side, self._image_max_side)
    scale = torch.tensor(scale, dtype=torch.float)
    bboxes *= scale

    return image_id, image, scale, bboxes, labels
Example #15
Source File: coco2017_person.py From SlowFast-Network-pytorch with MIT License
def __getitem__(self, index: int) -> Tuple[str, Tensor, Tensor, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]

    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [COCO2017Person.CATEGORY_TO_LABEL_DICT[COCO2017.LABEL_TO_CATEGORY_DICT[obj.label]]
              for obj in annotation.objects]  # mapping from original `COCO2017` dataset

    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)

    image = Image.open(annotation.filename).convert('RGB')  # for some grayscale images

    # random flip on only training mode
    if self._mode == COCO2017Person.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # index 0 and 2 represent `left` and `right` respectively

    image, scale = COCO2017Person.preprocess(image, self._image_min_side, self._image_max_side)
    scale = torch.tensor(scale, dtype=torch.float)
    bboxes *= scale

    return image_id, image, scale, bboxes, labels
Example #16
Source File: coco2017_animal.py From SlowFast-Network-pytorch with MIT License
def __getitem__(self, index: int) -> Tuple[str, Tensor, Tensor, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]

    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [COCO2017Animal.CATEGORY_TO_LABEL_DICT[COCO2017.LABEL_TO_CATEGORY_DICT[obj.label]]
              for obj in annotation.objects]  # mapping from original `COCO2017` dataset

    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)

    image = Image.open(annotation.filename).convert('RGB')  # for some grayscale images

    # random flip on only training mode
    if self._mode == COCO2017Animal.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # index 0 and 2 represent `left` and `right` respectively

    image, scale = COCO2017Animal.preprocess(image, self._image_min_side, self._image_max_side)
    scale = torch.tensor(scale, dtype=torch.float)
    bboxes *= scale

    return image_id, image, scale, bboxes, labels
Example #17
Source File: AVA.py From SlowFast-Network-pytorch with MIT License
def __getitem__(self, index: int) -> Tuple[str, Tensor, Tensor, Tensor, Tensor]:
    bboxes = torch.tensor(self.bboxes[index], dtype=torch.float)
    labels = torch.tensor(self.labels[index], dtype=torch.long)
    # print(int(self.image_position[index].split('/')[1]))
    # image = Image.open(self.path_to_keyframe + '/' + image_index[index].split('/')[0] + '/' +
    #                    str(int(image_index[index].split('/')[1])) + ".jpg")
    image = Image.open(self.path_to_keyframe + '/' + self.image_position[index] + ".jpg")

    # random flip on only training mode
    # if self._mode == VOC2007.Mode.TRAIN and random.random() > 0.5:
    #     image = ImageOps.mirror(image)
    #     bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # index 0 and 2 represent `left` and `right` respectively

    self._image_min_side = 600
    self._image_max_side = 1000
    image, scale = self.preprocess(image, self._image_min_side, self._image_max_side)
    scale = torch.tensor(scale, dtype=torch.float)
    bboxes *= scale

    return self.image_position[index], image, scale, bboxes, labels
Example #18
Source File: AVA.py From SlowFast-Network-pytorch with MIT License
def __getitem__(self, index: int) -> Tuple[str, Tensor, Tensor, Tensor, Tensor]:
    bboxes = torch.tensor(self.bboxes[index], dtype=torch.float)
    labels = torch.tensor(self.labels[index], dtype=torch.long)
    # print(int(self.image_position[index].split('/')[1]))
    # image = Image.open(self.path_to_keyframe + '/' + image_index[index].split('/')[0] + '/' +
    #                    str(int(image_index[index].split('/')[1])) + ".jpg")
    image = Image.open(self.path_to_keyframe + '/' + self.image_position[index] + ".jpg")

    # random flip on only training mode
    # if self._mode == VOC2007.Mode.TRAIN and random.random() > 0.5:
    #     image = ImageOps.mirror(image)
    #     bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # index 0 and 2 represent `left` and `right` respectively

    self._image_min_side = 600
    self._image_max_side = 1000
    image, scale = self.preprocess(image, self._image_min_side, self._image_max_side)
    scale = torch.tensor(scale, dtype=torch.float)
    bboxes *= scale

    return self.image_position[index], image, scale, bboxes, labels
Example #19
Source File: voc2007.py From SlowFast-Network-pytorch with MIT License
def __getitem__(self, index: int) -> Tuple[str, Tensor, Tensor, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]

    bboxes = [obj.bbox.tolist() for obj in annotation.objects if not obj.difficult]
    labels = [VOC2007.CATEGORY_TO_LABEL_DICT[obj.name] for obj in annotation.objects if not obj.difficult]

    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)

    image = Image.open(os.path.join(self._path_to_jpeg_images_dir, annotation.filename))

    # random flip on only training mode
    if self._mode == VOC2007.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # index 0 and 2 represent `left` and `right` respectively

    image, scale = VOC2007.preprocess(image, self._image_min_side, self._image_max_side)
    scale = torch.tensor(scale, dtype=torch.float)
    bboxes *= scale

    return image_id, image, scale, bboxes, labels
Example #20
Source File: voc2007_cat_dog.py From easy-faster-rcnn.pytorch with MIT License
def __getitem__(self, index: int) -> Tuple[str, Tensor, Tensor, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]

    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [VOC2007CatDog.CATEGORY_TO_LABEL_DICT[obj.name] for obj in annotation.objects]

    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)

    image = Image.open(os.path.join(self._path_to_jpeg_images_dir, annotation.filename))

    # random flip on only training mode
    if self._mode == VOC2007CatDog.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # index 0 and 2 represent `left` and `right` respectively

    image, scale = VOC2007CatDog.preprocess(image, self._image_min_side, self._image_max_side)
    scale = torch.tensor(scale, dtype=torch.float)
    bboxes *= scale

    return image_id, image, scale, bboxes, labels
Example #21
Source File: coco2017.py From easy-faster-rcnn.pytorch with MIT License
def __getitem__(self, index: int) -> Tuple[str, Tensor, Tensor, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]

    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [obj.label for obj in annotation.objects]

    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)

    image = Image.open(annotation.filename).convert('RGB')  # for some grayscale images

    # random flip on only training mode
    if self._mode == COCO2017.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # index 0 and 2 represent `left` and `right` respectively

    image, scale = COCO2017.preprocess(image, self._image_min_side, self._image_max_side)
    scale = torch.tensor(scale, dtype=torch.float)
    bboxes *= scale

    return image_id, image, scale, bboxes, labels
Example #22
Source File: coco2017_person.py From easy-faster-rcnn.pytorch with MIT License
def __getitem__(self, index: int) -> Tuple[str, Tensor, Tensor, Tensor, Tensor]:
    image_id = self._image_ids[index]
    annotation = self._image_id_to_annotation_dict[image_id]

    bboxes = [obj.bbox.tolist() for obj in annotation.objects]
    labels = [COCO2017Person.CATEGORY_TO_LABEL_DICT[COCO2017.LABEL_TO_CATEGORY_DICT[obj.label]]
              for obj in annotation.objects]  # mapping from original `COCO2017` dataset

    bboxes = torch.tensor(bboxes, dtype=torch.float)
    labels = torch.tensor(labels, dtype=torch.long)

    image = Image.open(annotation.filename).convert('RGB')  # for some grayscale images

    # random flip on only training mode
    if self._mode == COCO2017Person.Mode.TRAIN and random.random() > 0.5:
        image = ImageOps.mirror(image)
        bboxes[:, [0, 2]] = image.width - bboxes[:, [2, 0]]  # index 0 and 2 represent `left` and `right` respectively

    image, scale = COCO2017Person.preprocess(image, self._image_min_side, self._image_max_side)
    scale = torch.tensor(scale, dtype=torch.float)
    bboxes *= scale

    return image_id, image, scale, bboxes, labels
Example #23
Source File: visual_search.py From visual-search with MIT License
def mirror(np_img):
    image = Image.fromarray(np_img)
    image = ImageOps.mirror(image)
    data = np.asarray(image)
    return data
Example #24
Source File: image_mirror.py From visual-search with MIT License
def main():
    image = Image.open(args.input_path)
    image = ImageOps.mirror(image)
    # data = np.asarray(image)
    # print(type(data))
    # print(type(data[0, 0, 0]))
    # print(data.shape)
    image.save(args.output_path)
Example #25
Source File: notsobot.py From Trusty-cogs with MIT License
async def flop(self, ctx, urls: ImageFinder = None):
    """Flip an image"""
    # `async` restored: the body awaits coroutines, so this must be a coroutine function.
    if urls is None:
        urls = await ImageFinder().search_for_images(ctx)
    url = urls[0]
    async with ctx.typing():
        b, mime = await self.bytes_download(url)
        if b is False:
            await ctx.send(":warning: **Command download function failed...**")
            return

        def flop_img(b):
            img = Image.open(b)
            img = ImageOps.mirror(img)
            final = BytesIO()
            img.save(final, "png")
            file_size = final.tell()
            final.seek(0)
            return discord.File(final, filename="flop.png"), file_size

        task = ctx.bot.loop.run_in_executor(None, flop_img, b)
        try:
            file, file_size = await asyncio.wait_for(task, timeout=60)
        except asyncio.TimeoutError:
            return await ctx.send("That image is too large.")
        await self.safe_send(ctx, None, file, file_size)
Example #26
Source File: __init__.py From photobooth with GNU Affero General Public License v3.0
def capturePreview(self):
    if self._is_preview:
        while self._comm.empty(Workers.CAMERA):
            picture = self._cap.getPreview()
            if self._rotation is not None:
                picture = picture.transpose(self._rotation)
            picture = picture.resize(self._pic_dims.previewSize)
            picture = ImageOps.mirror(picture)
            byte_data = BytesIO()
            picture.save(byte_data, format='jpeg')
            self._comm.send(Workers.GUI,
                            StateMachine.CameraEvent('preview', byte_data))
Example #27
Source File: notsobot.py From Trusty-cogs with MIT License
def do_waaw(self, b):
    f = BytesIO()
    f2 = BytesIO()
    with wand.image.Image(file=b) as img:
        h1 = img.clone()
        width = int(img.width / 2) if int(img.width / 2) > 0 else 1
        h1.crop(width=width, height=int(img.height), gravity="east")
        h2 = h1.clone()
        h1.rotate(degree=180)
        h1.flip()
        h1.save(file=f)
        h2.save(file=f2)
    f.seek(0)
    f2.seek(0)
    list_im = [f2, f]
    imgs = [ImageOps.mirror(Image.open(i).convert("RGBA")) for i in list_im]
    min_shape = sorted([(np.sum(i.size), i.size) for i in imgs])[0][1]
    imgs_comb = np.hstack([np.asarray(i.resize(min_shape)) for i in imgs])
    imgs_comb = Image.fromarray(imgs_comb)
    final = BytesIO()
    imgs_comb.save(final, "png")
    file_size = final.tell()
    final.seek(0)
    return final, file_size

# Thanks to Iguniisu#9746 for the idea
Example #28
Source File: __init__.py From Semi-supervised-segmentation-cycleGAN with MIT License
def PILaugment(img, mask):
    if random.random() > 0.2:
        (w, h) = img.size
        (w_, h_) = mask.size
        assert (w == w_ and h == h_), 'The size should be the same.'

        crop = random.uniform(0.45, 0.75)
        W = int(crop * w)
        H = int(crop * h)
        start_x = w - W
        start_y = h - H
        x_pos = int(random.uniform(0, start_x))
        y_pos = int(random.uniform(0, start_y))
        img = img.crop((x_pos, y_pos, x_pos + W, y_pos + H))
        mask = mask.crop((x_pos, y_pos, x_pos + W, y_pos + H))

    if random.random() > 0.2:
        img = ImageOps.flip(img)
        mask = ImageOps.flip(mask)

    if random.random() > 0.2:
        img = ImageOps.mirror(img)
        mask = ImageOps.mirror(mask)

    if random.random() > 0.2:
        angle = random.random() * 90 - 45
        img = img.rotate(angle)
        mask = mask.rotate(angle)

    if random.random() > 0.95:
        img = img.filter(ImageFilter.GaussianBlur(2))

    if random.random() > 0.95:
        img = ImageEnhance.Contrast(img).enhance(1)  # enhance(1) is an identity transform

    if random.random() > 0.95:
        img = ImageEnhance.Brightness(img).enhance(1)  # enhance(1) is an identity transform

    return img, mask
Example #29
Source File: dataset.py From IntroVAE with MIT License
def load_image(file_path, input_height=128, input_width=None, output_height=128, output_width=None,
               crop_height=None, crop_width=None, is_random_crop=True, is_mirror=True, is_gray=False):
    if input_width is None:
        input_width = input_height
    if output_width is None:
        output_width = output_height
    if crop_width is None:
        crop_width = crop_height

    img = Image.open(file_path)
    # Equality comparisons used here: the original compared string and int
    # literals with `is`, which is unreliable in Python.
    if not is_gray and img.mode != 'RGB':
        img = img.convert('RGB')
    if is_gray and img.mode != 'L':
        img = img.convert('L')

    if is_mirror and random.randint(0, 1) == 0:
        img = ImageOps.mirror(img)

    if input_height is not None:
        img = img.resize((input_width, input_height), Image.BICUBIC)

    if crop_height is not None:
        [w, h] = img.size
        if is_random_crop:
            # print([w, cropSize])
            cx1 = random.randint(0, w - crop_width)
            cx2 = w - crop_width - cx1
            cy1 = random.randint(0, h - crop_height)
            cy2 = h - crop_height - cy1
        else:
            cx2 = cx1 = int(round((w - crop_width) / 2.))
            cy2 = cy1 = int(round((h - crop_height) / 2.))
        img = ImageOps.crop(img, (cx1, cy1, cx2, cy2))

    img = img.resize((output_width, output_height), Image.BICUBIC)
    return img
Example #30
Source File: image_processing.py From ICCV2019-Horde with MIT License
def compute(self, x: Image):
    return ImageOps.mirror(x)