Python PIL.ImageOps.expand() Examples
The following are 30 code examples of PIL.ImageOps.expand(). You can go to the original project or source file by following the reference above each example, and you may also want to check out the other available functions and classes of the PIL.ImageOps module.
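As a quick orientation before the project examples: ImageOps.expand(image, border, fill) returns a copy of the image with a border added around it. The border argument may be a single integer (same width on every side) or a per-side tuple, and fill sets the border color. The short sketch below uses a hypothetical input file name purely for illustration.

from PIL import Image, ImageOps

# Hypothetical input path; any RGB image works.
img = Image.open('example.jpg')

# Uniform 10-pixel black border on every side.
padded = ImageOps.expand(img, border=10, fill=0)

# Per-side border (left, top, right, bottom), filled with white.
padded_bottom = ImageOps.expand(img, border=(0, 0, 0, 20), fill=(255, 255, 255))

print(img.size, padded.size, padded_bottom.size)

Most of the examples below wrap exactly this call to pad an image (and often a segmentation mask) before cropping or resizing it.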
Example #1
Source File: joint_transforms.py From cross-season-segmentation with MIT License | 7 votes |
def __call__(self, img, mask):
    if self.padding > 0:
        img = ImageOps.expand(img, border=self.padding, fill=0)
        mask = ImageOps.expand(mask, border=self.padding, fill=0)

    assert img.size == mask.size
    w, h = img.size
    th, tw = self.size
    if w == tw and h == th:
        return img, mask
    if w < tw or h < th:
        return (img.resize((tw, th), Image.BILINEAR),
                mask.resize((tw, th), Image.NEAREST))

    x1 = random.randint(0, w - tw)
    y1 = random.randint(0, h - th)
    return (img.crop((x1, y1, x1 + tw, y1 + th)),
            mask.crop((x1, y1, x1 + tw, y1 + th)))
Example #2
Source File: ppm_utils.py From avocado-vt with GNU General Public License v2.0 | 7 votes |
def add_timestamp(image, timestamp, margin=2):
    """
    Return an image object with timestamp bar added at the bottom.

    :param image: pillow image object
    :param timestamp: timestamp in seconds since the Epoch
    :param margin: timestamp margin, default is 2
    """
    width, height = image.size
    font = ImageFont.load_default()
    watermark = time.strftime('%c', time.localtime(timestamp))
    # bar height = text height + top margin + bottom margin
    bar_height = font.getsize(watermark)[1] + 2 * margin
    # place bar at the bottom
    new_image = ImageOps.expand(image, border=(0, 0, 0, bar_height),
                                fill='lightgrey')
    draw = ImageDraw.Draw(new_image)
    # place timestamp at the left side of the bar
    x, y = margin, height + margin
    draw.text((x, y), watermark, font=font, fill='black')
    return new_image
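On newer Pillow releases this example needs a small adjustment: ImageFont.getsize() was deprecated and later removed (Pillow 10+). A hedged sketch of an equivalent bar-height computation using getbbox(), assuming a Pillow version that provides it:

# Replacement for the bar_height line above on Pillow versions without getsize().
left, top, right, bottom = font.getbbox(watermark)
bar_height = (bottom - top) + 2 * margin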
Example #3
Source File: augmentations.py From pytorch-semseg with MIT License | 6 votes |
def __call__(self, img, mask):
    if self.padding > 0:
        img = ImageOps.expand(img, border=self.padding, fill=0)
        mask = ImageOps.expand(mask, border=self.padding, fill=0)

    assert img.size == mask.size
    w, h = img.size
    th, tw = self.size
    if w == tw and h == th:
        return img, mask
    if w < tw or h < th:
        return (img.resize((tw, th), Image.BILINEAR),
                mask.resize((tw, th), Image.NEAREST))

    x1 = random.randint(0, w - tw)
    y1 = random.randint(0, h - th)
    return (img.crop((x1, y1, x1 + tw, y1 + th)),
            mask.crop((x1, y1, x1 + tw, y1 + th)))
Example #4
Source File: joint_transforms.py From pytorch-semantic-segmentation with MIT License | 6 votes |
def __call__(self, img, mask):
    if self.padding > 0:
        img = ImageOps.expand(img, border=self.padding, fill=0)
        mask = ImageOps.expand(mask, border=self.padding, fill=0)

    assert img.size == mask.size
    w, h = img.size
    th, tw = self.size
    if w == tw and h == th:
        return img, mask
    if w < tw or h < th:
        return (img.resize((tw, th), Image.BILINEAR),
                mask.resize((tw, th), Image.NEAREST))

    x1 = random.randint(0, w - tw)
    y1 = random.randint(0, h - th)
    return (img.crop((x1, y1, x1 + tw, y1 + th)),
            mask.crop((x1, y1, x1 + tw, y1 + th)))
Example #5
Source File: augmentations.py From PLARD with MIT License | 6 votes |
def __call__(self, img, mask):
    if self.padding > 0:
        img = ImageOps.expand(img, border=self.padding, fill=0)
        mask = ImageOps.expand(mask, border=self.padding, fill=0)

    assert img.size == mask.size
    w, h = img.size
    th, tw = self.size
    if w == tw and h == th:
        return img, mask
    if w < tw or h < th:
        return (img.resize((tw, th), Image.BILINEAR),
                mask.resize((tw, th), Image.NEAREST))

    x1 = random.randint(0, w - tw)
    y1 = random.randint(0, h - th)
    return (img.crop((x1, y1, x1 + tw, y1 + th)),
            mask.crop((x1, y1, x1 + tw, y1 + th)))
Example #6
Source File: data_utils.py From conditional-motion-propagation with MIT License | 6 votes |
def image_flow_crop(img1, img2, flow, crop_size, phase):
    assert len(crop_size) == 2
    pad_h = max(crop_size[0] - img1.height, 0)
    pad_w = max(crop_size[1] - img1.width, 0)
    pad_h_half = int(pad_h / 2)
    pad_w_half = int(pad_w / 2)
    if pad_h > 0 or pad_w > 0:
        flow_expand = np.zeros((img1.height + pad_h, img1.width + pad_w, 2),
                               dtype=np.float32)
        flow_expand[pad_h_half:pad_h_half + img1.height,
                    pad_w_half:pad_w_half + img1.width, :] = flow
        flow = flow_expand
        border = (pad_w_half, pad_h_half, pad_w - pad_w_half, pad_h - pad_h_half)
        img1 = ImageOps.expand(img1, border=border, fill=(0, 0, 0))
        img2 = ImageOps.expand(img2, border=border, fill=(0, 0, 0))
    if phase == 'train':
        hoff = int(np.random.rand() * (img1.height - crop_size[0]))
        woff = int(np.random.rand() * (img1.width - crop_size[1]))
    else:
        hoff = (img1.height - crop_size[0]) // 2
        woff = (img1.width - crop_size[1]) // 2
    img1 = img1.crop((woff, hoff, woff + crop_size[1], hoff + crop_size[0]))
    img2 = img2.crop((woff, hoff, woff + crop_size[1], hoff + crop_size[0]))
    flow = flow[hoff:hoff + crop_size[0], woff:woff + crop_size[1], :]
    offset = (hoff, woff)
    return img1, img2, flow, offset
Example #7
Source File: general.py From mxbox with BSD 3-Clause "New" or "Revised" License | 6 votes |
def __call__(self, img): """ Args: img (PIL.Image): Image to be cropped. Returns: PIL.Image: Cropped image. """ if self.padding > 0: img = ImageOps.expand(img, border=self.padding, fill=0) w, h = img.size th, tw = self.size if w == tw and h == th: return img x1 = random.randint(0, w - tw) y1 = random.randint(0, h - th) return img.crop((x1, y1, x1 + tw, y1 + th))
Example #8
Source File: pil_aug_transforms.py From openseg.pytorch with MIT License | 6 votes |
def __call__(self, img, labelmap=None, maskmap=None):
    assert isinstance(img, Image.Image)
    assert labelmap is None or isinstance(labelmap, Image.Image)
    assert maskmap is None or isinstance(maskmap, Image.Image)

    if random.random() > self.ratio:
        return img, labelmap, maskmap

    width, height = img.size
    left_pad, up_pad, right_pad, down_pad = self.pad
    target_size = [width + left_pad + right_pad, height + up_pad + down_pad]
    offset_left = -left_pad
    offset_up = -up_pad

    img = ImageOps.expand(img, border=tuple(self.pad), fill=tuple(self.mean))
    if maskmap is not None:
        maskmap = ImageOps.expand(maskmap, border=tuple(self.pad), fill=1)
    if labelmap is not None:
        labelmap = ImageOps.expand(labelmap, border=tuple(self.pad), fill=255)

    return img, labelmap, maskmap
Example #9
Source File: augmentations.py From seismic-deeplearning with MIT License | 6 votes |
def __call__(self, img, mask):
    if self.padding > 0:
        img = ImageOps.expand(img, border=self.padding, fill=0)
        mask = ImageOps.expand(mask, border=self.padding, fill=0)

    assert img.size == mask.size
    w, h = img.size
    th, tw = self.size
    if w == tw and h == th:
        return img, mask
    if w < tw or h < th:
        return (
            img.resize((tw, th), Image.BILINEAR),
            mask.resize((tw, th), Image.NEAREST),
        )

    x1 = random.randint(0, w - tw)
    y1 = random.randint(0, h - th)
    return (
        img.crop((x1, y1, x1 + tw, y1 + th)),
        mask.crop((x1, y1, x1 + tw, y1 + th)),
    )
Example #10
Source File: functional.py From Deep-Exemplar-based-Colorization with MIT License | 6 votes |
def rotate(img, angle, resample=False, expand=False, center=None):
    """Rotate the image by angle and then (optionally) translate it by (n_columns, n_rows).

    Args:
        img (PIL Image): PIL Image to be rotated.
        angle ({float, int}): Rotation angle in degrees, counter-clockwise.
        resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
            An optional resampling filter.
            See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
            If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
        expand (bool, optional): Optional expansion flag.
            If true, expands the output image to make it large enough to hold the entire
            rotated image. If false or omitted, make the output image the same size as the
            input image. Note that the expand flag assumes rotation around the center and
            no translation.
        center (2-tuple, optional): Optional center of rotation.
            Origin is the upper left corner. Default is the center of the image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    return img.rotate(angle, resample, expand, center)
Example #11
Source File: transforms.py From binseg_pytoch with Apache License 2.0 | 6 votes |
def __call__(self, img): """ Args: img (PIL.Image): Image to be cropped. Returns: PIL.Image: Cropped image. """ if self.padding > 0: img = ImageOps.expand(img, border=self.padding, fill=0) w, h = img.size th, tw = self.size if w == tw and h == th: return img x1 = random.randint(0, w - tw) y1 = random.randint(0, h - th) return img.crop((x1, y1, x1 + tw, y1 + th))
Example #12
Source File: joint_transforms.py From pytorch-hair-segmentation with MIT License | 6 votes |
def __call__(self, img, mask):
    if self.padding > 0:
        img = ImageOps.expand(img, border=self.padding, fill=0)
        mask = ImageOps.expand(mask, border=self.padding, fill=0)

    assert img.size == mask.size
    w, h = img.size
    th, tw = self.size
    if w == tw and h == th:
        return img, mask
    if w < tw or h < th:
        return (img.resize((tw, th), Image.BILINEAR),
                mask.resize((tw, th), Image.NEAREST))

    x1 = random.randint(0, w - tw)
    y1 = random.randint(0, h - th)
    return (img.crop((x1, y1, x1 + tw, y1 + th)),
            mask.crop((x1, y1, x1 + tw, y1 + th)))
Example #13
Source File: transforms.py From SceneChangeDet with MIT License | 6 votes |
def __call__(self, img): """ Args: img (PIL.Image): Image to be cropped. Returns: PIL.Image: Cropped image. """ if self.padding > 0: img = ImageOps.expand(img, border=self.padding, fill=0) w, h = img.size th, tw = self.size if w == tw and h == th: return img if w < tw or h < th: return img.resize((tw, th), Image.BILINEAR) x1 = random.randint(0, w - tw) y1 = random.randint(0, h - th) return img.crop((x1, y1, x1 + tw, y1 + th))
Example #14
Source File: joint_transforms.py From pytorch-hair-segmentation with MIT License | 6 votes |
def pad_to_target(img, target_height, target_width, label=0):
    # Pad image with zeros to the specified height and width if needed.
    # This op does nothing if the image already has size bigger than
    # target_height and target_width.
    w, h = img.size
    left = top = right = bottom = 0
    doit = False
    if target_width > w:
        delta = target_width - w
        left = delta // 2
        right = delta - left
        doit = True
    if target_height > h:
        delta = target_height - h
        top = delta // 2
        bottom = delta - top
        doit = True
    if doit:
        img = ImageOps.expand(img, border=(left, top, right, bottom), fill=label)
    assert img.size[0] >= target_width
    assert img.size[1] >= target_height
    return img
Example #15
Source File: transforms.py From cat-net with MIT License | 6 votes |
def __call__(self, img): """ Args: img (PIL.Image): Image to be cropped. Returns: PIL.Image: Cropped image. """ if self.padding > 0: img = ImageOps.expand(img, border=self.padding, fill=0) w, h = img.size th, tw = self.size if w == tw and h == th: return img if self.x1 is None: self.x1 = random.randint(0, w - tw) self.y1 = random.randint(0, h - th) return img.crop((self.x1, self.y1, self.x1 + tw, self.y1 + th))
Example #16
Source File: augmentations.py From CAG_UDA with MIT License | 6 votes |
def __call__(self, img, mask):
    if self.padding > 0:
        img = ImageOps.expand(img, border=self.padding, fill=0)
        mask = ImageOps.expand(mask, border=self.padding, fill=0)

    assert img.size == mask.size
    w, h = img.size
    tw, th = self.size
    if w == tw and h == th:
        return img, mask
    if w < tw or h < th:
        return (
            img.resize((tw, th), Image.BILINEAR),
            mask.resize((tw, th), Image.NEAREST),
        )

    x1 = random.randint(0, w - tw)
    y1 = random.randint(0, h - th)
    return (
        img.crop((x1, y1, x1 + tw, y1 + th)),
        mask.crop((x1, y1, x1 + tw, y1 + th)),
    )
Example #17
Source File: transforms.py From scalpel with BSD 3-Clause "New" or "Revised" License | 6 votes |
def __call__(self, img): """ Args: img (PIL.Image): Image to be cropped. Returns: PIL.Image: Cropped image. """ if self.padding > 0: img = ImageOps.expand(img, border=self.padding, fill=0) w, h = img.size th, tw = self.size if w == tw and h == th: return img x1 = random.randint(0, w - tw) y1 = random.randint(0, h - th) return img.crop((x1, y1, x1 + tw, y1 + th))
Example #18
Source File: functional.py From SPG with MIT License | 6 votes |
def rotate(img, angle, resample=False, expand=False, center=None):
    """Rotate the image by angle and then (optionally) translate it by (n_columns, n_rows).

    Args:
        img (PIL Image): PIL Image to be rotated.
        angle ({float, int}): Rotation angle in degrees, counter-clockwise.
        resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
            An optional resampling filter.
            See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
            If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
        expand (bool, optional): Optional expansion flag.
            If true, expands the output image to make it large enough to hold the entire
            rotated image. If false or omitted, make the output image the same size as the
            input image. Note that the expand flag assumes rotation around the center and
            no translation.
        center (2-tuple, optional): Optional center of rotation.
            Origin is the upper left corner. Default is the center of the image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    return img.rotate(angle, resample, expand, center)
Example #19
Source File: custom_transforms.py From RMI with MIT License | 6 votes |
def __call__(self, sample): """call method""" image, label = sample['image'], sample['label'] width, height = image.size pad_width, pad_height = max(width, self.crop_width), max(height, self.crop_height) pad_width = self.crop_width - width if width < self.crop_width else 0 pad_height = self.crop_height - height if height < self.crop_height else 0 # pad the image with constant image = ImageOps.expand(image, border=(0, 0, pad_width, pad_height), fill=self.mean) label = ImageOps.expand(label, border=(0, 0, pad_width, pad_height), fill=self.ignore_label) # random crop image to crop_size new_w, new_h = image.size x1 = random.randint(0, new_w - self.crop_width) y1 = random.randint(0, new_h - self.crop_height) image = image.crop((x1, y1, x1 + self.crop_width, y1 + self.crop_height)) label = label.crop((x1, y1, x1 + self.crop_width, y1 + self.crop_height)) return {'image': image, 'label': label}
Example #20
Source File: functional.py From ACoL with MIT License | 6 votes |
def rotate(img, angle, resample=False, expand=False, center=None):
    """Rotate the image by angle and then (optionally) translate it by (n_columns, n_rows).

    Args:
        img (PIL Image): PIL Image to be rotated.
        angle ({float, int}): Rotation angle in degrees, counter-clockwise.
        resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
            An optional resampling filter.
            See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
            If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
        expand (bool, optional): Optional expansion flag.
            If true, expands the output image to make it large enough to hold the entire
            rotated image. If false or omitted, make the output image the same size as the
            input image. Note that the expand flag assumes rotation around the center and
            no translation.
        center (2-tuple, optional): Optional center of rotation.
            Origin is the upper left corner. Default is the center of the image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    return img.rotate(angle, resample, expand, center)
Example #21
Source File: transforms.py From deep-image-retrieval with BSD 3-Clause "New" or "Revised" License | 6 votes |
def __call__(self, inp):
    img = F.grab_img(inp)

    padl = padt = 0
    if self.padding > 0:
        if F.is_pil_image(img):
            img = ImageOps.expand(img, border=self.padding, fill=0)
        else:
            assert isinstance(img, F.DummyImg)
            img = img.expand(border=self.padding)
        if isinstance(self.padding, int):
            padl = padt = self.padding
        else:
            padl, padt = self.padding[0:2]

    i, j, tw, th = self.get_params(img, self.size)
    img = img.crop((i, j, i + tw, j + th))

    return F.update_img_and_labels(inp, img, aff=(1, 0, padl - i, 0, 1, padt - j))
Example #22
Source File: transforms.py From pytorch_segmentation with MIT License | 6 votes |
def __call__(self, img, mask):
    if self.padding > 0:
        img = ImageOps.expand(img, border=self.padding, fill=0)
        mask = ImageOps.expand(mask, border=self.padding, fill=0)

    assert img.size == mask.size
    w, h = img.size
    th, tw = self.size
    if w == tw and h == th:
        return img, mask
    if w < tw or h < th:
        return (img.resize((tw, th), Image.BILINEAR),
                mask.resize((tw, th), Image.NEAREST))

    x1 = random.randint(0, w - tw)
    y1 = random.randint(0, h - th)
    return (img.crop((x1, y1, x1 + tw, y1 + th)),
            mask.crop((x1, y1, x1 + tw, y1 + th)))
Example #23
Source File: segmentation_augmentations.py From MultiObjectiveOptimization with MIT License | 6 votes |
def __call__(self, img, mask, ins, depth):
    if self.padding > 0:
        img = ImageOps.expand(img, border=self.padding, fill=0)
        mask = ImageOps.expand(mask, border=self.padding, fill=0)
        ins = ImageOps.expand(ins, border=self.padding, fill=0)
        depth = ImageOps.expand(depth, border=self.padding, fill=0)

    assert img.size == mask.size
    assert img.size == ins.size
    assert img.size == depth.size

    w, h = img.size
    th, tw = self.size
    if w == tw and h == th:
        return img, mask, ins, depth
    if w < tw or h < th:
        return (img.resize((tw, th), Image.BILINEAR),
                mask.resize((tw, th), Image.NEAREST),
                ins.resize((tw, th), Image.NEAREST),
                depth.resize((tw, th), Image.NEAREST))

    _sysrand = random.SystemRandom()
    x1 = _sysrand.randint(0, w - tw)
    y1 = _sysrand.randint(0, h - th)
    return (img.crop((x1, y1, x1 + tw, y1 + th)),
            mask.crop((x1, y1, x1 + tw, y1 + th)),
            ins.crop((x1, y1, x1 + tw, y1 + th)),
            depth.crop((x1, y1, x1 + tw, y1 + th)))
Example #24
Source File: augmentations.py From LightNet with MIT License | 6 votes |
def __call__(self, img, mask):
    if self.padding > 0:
        img = ImageOps.expand(img, border=self.padding, fill=0)
        mask = ImageOps.expand(mask, border=self.padding, fill=0)

    assert img.size == mask.size
    w, h = img.size
    th, tw = self.size
    if w == tw and h == th:
        return img, mask
    if w < tw or h < th:
        return (img.resize((tw, th), Image.BILINEAR),
                mask.resize((tw, th), Image.NEAREST))

    x1 = random.randint(0, w - tw)
    y1 = random.randint(0, h - th)
    return (img.crop((x1, y1, x1 + tw, y1 + th)),
            mask.crop((x1, y1, x1 + tw, y1 + th)))
Example #25
Source File: transforms.py From pnn.pytorch with MIT License | 5 votes |
def __call__(self, input):
    if self.padding > 0:
        # The original snippet referenced an undefined name `img` here;
        # padding the image stored in the input dict is presumably the intent.
        input['img'] = ImageOps.expand(input['img'], border=self.padding, fill=0)

    w, h = input['img'].size
    th, tw = self.size
    if w == tw and h == th:
        return input

    x1 = random.randint(0, w - tw)
    y1 = random.randint(0, h - th)
    input['img'] = input['img'].crop((x1, y1, x1 + tw, y1 + th))
    return input
Example #26
Source File: draw.py From ASKCOS with Mozilla Public License 2.0 | 5 votes |
def TrimImgByWhite(img, padding=0):
    '''This function takes a PIL image, img, and crops it to the minimum
    rectangle based on its whiteness/transparency. 5 pixel padding used
    automatically.'''

    # Convert to array
    as_array = np.array(img)  # N x N x (r, g, b, a)

    # Set previously-transparent pixels to white
    if as_array.shape[2] == 4:
        as_array[as_array[:, :, 3] == 0] = [255, 255, 255, 0]

    as_array = as_array[:, :, :3]

    # Content defined as non-white and non-transparent pixel
    has_content = np.sum(as_array, axis=2, dtype=np.uint32) != 255 * 3
    xs, ys = np.nonzero(has_content)

    # Crop down
    margin = 5
    x_range = max([min(xs) - margin, 0]), min([max(xs) + margin, as_array.shape[0]])
    y_range = max([min(ys) - margin, 0]), min([max(ys) + margin, as_array.shape[1]])
    as_array_cropped = as_array[x_range[0]:x_range[1], y_range[0]:y_range[1], 0:3]

    img = Image.fromarray(as_array_cropped, mode='RGB')

    return ImageOps.expand(img, border=padding, fill=(255, 255, 255))
Example #27
Source File: linegen.py From kraken with Apache License 2.0 | 5 votes |
def render_line(self, text):
    """
    Draws a line onto a Cairo surface which will be converted to a Pillow
    Image.

    Args:
        text (unicode): A string which will be rendered as a single line.

    Returns:
        PIL.Image of mode 'L'.

    Raises:
        KrakenCairoSurfaceException if the Cairo surface couldn't be created
        (usually caused by invalid dimensions).
    """
    logger.info('Rendering line \'{}\''.format(text))
    logger.debug('Creating temporary cairo surface')
    temp_surface = cairo.cairo_image_surface_create(0, 0, 0)
    width, height = _draw_on_surface(temp_surface, self.font, self.language, text)
    cairo.cairo_surface_destroy(temp_surface)
    if width == 0 or height == 0:
        logger.error('Surface for \'{}\' zero pixels in at least one dimension'.format(text))
        raise KrakenCairoSurfaceException('Surface zero pixels in at least one dimension',
                                          width, height)
    logger.debug('Creating sized cairo surface')
    real_surface = cairo.cairo_image_surface_create(0, width, height)
    _draw_on_surface(real_surface, self.font, self.language, text)
    logger.debug('Extracting data from real surface')
    data = cairo.cairo_image_surface_get_data(real_surface)
    size = int(4 * width * height)
    buffer = ctypes.create_string_buffer(size)
    ctypes.memmove(buffer, data, size)
    logger.debug('Loading data into PIL image')
    im = Image.frombuffer("RGBA", (width, height), buffer, "raw", "BGRA", 0, 1)
    cairo.cairo_surface_destroy(real_surface)
    logger.debug('Expand and grayscale image')
    im = im.convert('L')
    im = ImageOps.expand(im, 5, 255)
    return im
Example #28
Source File: warp.py From open-vot with MIT License | 5 votes |
def pad_pil(image, npad, padding='avg'):
    if npad == 0:
        return image

    if padding == 'avg':
        avg_chan = ImageStat.Stat(image).mean
        # PIL doesn't support float RGB image
        avg_chan = tuple(int(round(c)) for c in avg_chan)
        image = ImageOps.expand(image, border=npad, fill=avg_chan)
    else:
        image = ImageOps.expand(image, border=npad, fill=padding)

    return image
Example #29
Source File: warp.py From open-vot with MIT License | 5 votes |
def pad(image, npad, padding='avg'):
    if npad == 0:
        return image

    if padding == 'avg':
        avg_chan = ImageStat.Stat(image).mean
        # PIL doesn't support float RGB image
        avg_chan = tuple(int(round(c)) for c in avg_chan)
        image = ImageOps.expand(image, border=npad, fill=avg_chan)
    else:
        image = ImageOps.expand(image, border=npad, fill=padding)

    return image
Example #30
Source File: transforms.py From landmark-detection with MIT License | 5 votes |
def __call__(self, imgs, point_meta=None):
    ## AugCrop has something wrong... For unsupervised data
    point_meta = point_meta.copy()

    if isinstance(imgs, list):
        is_list = True
    else:
        is_list, imgs = False, [imgs]

    dice_x, dice_y = random.random(), random.random()
    x_offset = int((dice_x - 0.5) * 2 * self.center_perterb_max)
    y_offset = int((dice_y - 0.5) * 2 * self.center_perterb_max)

    x1 = int(round(point_meta.center[0] + x_offset - self.crop_x / 2.))
    y1 = int(round(point_meta.center[1] + y_offset - self.crop_y / 2.))
    x2 = x1 + self.crop_x
    y2 = y1 + self.crop_y

    w, h = imgs[0].size
    if x1 < 0 or y1 < 0 or x2 >= w or y2 >= h:
        pad = max(0 - x1, 0 - y1, x2 - w + 1, y2 - h + 1)
        assert pad > 0, 'padding operation in crop must be greater than 0'
        imgs = [ImageOps.expand(img, border=pad, fill=self.fill) for img in imgs]
        x1, x2, y1, y2 = x1 + pad, x2 + pad, y1 + pad, y2 + pad
        point_meta.apply_offset(pad, pad)
        point_meta.apply_bound(imgs[0].size[0], imgs[0].size[1])

    point_meta.apply_offset(-x1, -y1)
    imgs = [img.crop((x1, y1, x2, y2)) for img in imgs]
    point_meta.apply_bound(imgs[0].size[0], imgs[0].size[1])

    if is_list == False:
        imgs = imgs[0]

    return imgs, point_meta