Python PIL.Image.ADAPTIVE Examples
The following are 20 code examples of PIL.Image.ADAPTIVE. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module PIL.Image, or try the search function.
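Before the examples, it helps to see the recurring pattern: Image.ADAPTIVE is a palette-selection flag for Image.convert(). Passing it when converting to mode "P" asks Pillow to compute a palette optimized for that particular image (optionally capped via the colors argument) instead of using the default web palette. The snippet below is a minimal sketch of that call; the file names input.png and output.png are placeholders, not taken from any of the examples.

from PIL import Image

# Minimal sketch of the pattern used throughout the examples below:
# convert to indexed-color mode "P" with an adaptive (per-image) palette
# of at most 256 colors. File names here are placeholders.
im = Image.open("input.png").convert("RGB")
im_p = im.convert("P", palette=Image.ADAPTIVE, colors=256)
im_p.save("output.png")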
Example #1
Source File: extract_resnet_coco.py From show-adapt-and-tell with MIT License | 6 votes |
def extract_image(net, image_file):
    batch_size = 1
    transformer = set_transformer(net)
    if image_file.split('.')[-1] == 'gif':
        img = Image.open(image_file).convert("P", palette=Image.ADAPTIVE, colors=256)
        newfile = ''.join(image_file.split('.')[:-1]) + '.png'
        for i, frame in enumerate(iter_frames(img)):
            frame.save(newfile, **frame.info)
        image_file = newfile
    img = cv2.imread(image_file)
    img = img.astype('float') / 255
    net.blobs['data'].data[:] = transformer.preprocess('data', img)
    net.forward()
    blobs_out_pool5 = net.blobs['pool5'].data[0, :, 0, 0]
    return blobs_out_pool5
Example #2
Source File: test_image_convert.py From python3_ios with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_trns_l(self):
    im = hopper('L')
    im.info['transparency'] = 128

    f = self.tempfile('temp.png')

    im_rgb = im.convert('RGB')
    self.assertEqual(im_rgb.info['transparency'], (128, 128, 128))  # undone
    im_rgb.save(f)

    im_p = im.convert('P')
    self.assertIn('transparency', im_p.info)
    im_p.save(f)

    im_p = self.assert_warning(
        UserWarning,
        im.convert, 'P', palette=Image.ADAPTIVE)
    self.assertNotIn('transparency', im_p.info)
    im_p.save(f)
Example #3
Source File: test_image_convert.py From python3_ios with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_trns_RGB(self):
    im = hopper('RGB')
    im.info['transparency'] = im.getpixel((0, 0))

    f = self.tempfile('temp.png')

    im_l = im.convert('L')
    self.assertEqual(im_l.info['transparency'], im_l.getpixel((0, 0)))  # undone
    im_l.save(f)

    im_p = im.convert('P')
    self.assertIn('transparency', im_p.info)
    im_p.save(f)

    im_rgba = im.convert('RGBA')
    self.assertNotIn('transparency', im_rgba.info)
    im_rgba.save(f)

    im_p = self.assert_warning(
        UserWarning,
        im.convert, 'P', palette=Image.ADAPTIVE)
    self.assertNotIn('transparency', im_p.info)
    im_p.save(f)
Example #4
Source File: __init__.py From fontdiffenator with Apache License 2.0 | 6 votes |
def to_cbdt_gif(self, dst):
    font_a_images = read_cbdt(self._font_a.ttfont)
    font_b_images = read_cbdt(self._font_b.ttfont)

    for element in self._data:
        key_before = element["glyph before"]
        key_after = element["glyph after"]

        image_1 = font_a_images[key_before]
        image_1_gif = Image.new('RGBA', image_1.size, (255, 255, 255))
        image_1_gif.paste(image_1, image_1)
        image_1_gif = image_1_gif.convert('RGB').convert('P', palette=Image.ADAPTIVE)

        image_2 = font_b_images[key_after]
        image_2_gif = Image.new('RGBA', image_2.size, (255, 255, 255))
        image_2_gif.paste(image_2, image_2)
        image_2_gif = image_2_gif.convert('RGB').convert('P', palette=Image.ADAPTIVE)

        img_path = os.path.join(dst, f"{key_before}.gif")
        image_1_gif.save(
            img_path,
            save_all=True,
            append_images=[image_2_gif],
            duration=1000,
            loop=0
        )
Example #5
Source File: reduce_colors.py From DeepFaceLab with GNU General Public License v3.0 | 5 votes |
def reduce_colors(img_bgr, n_colors):
    img_rgb = (img_bgr[..., ::-1] * 255.0).astype(np.uint8)
    img_rgb_pil = Image.fromarray(img_rgb)
    img_rgb_pil_p = img_rgb_pil.convert('P', palette=Image.ADAPTIVE, colors=n_colors)

    img_rgb_p = img_rgb_pil_p.convert('RGB')
    img_bgr = cv2.cvtColor(np.array(img_rgb_p, dtype=np.float32) / 255.0, cv2.COLOR_RGB2BGR)
    return img_bgr
Example #6
Source File: img2xls.py From img2xls with MIT License | 5 votes |
def get_col_reduced_palette_image(img):
    """Return the image reduced to the number of colors allowed in Excel."""
    cust_col_num_range = (8, 64)
    col_cnt = cust_col_num_range[1] - cust_col_num_range[0]
    pal_img = img.convert('P', palette=Image.ADAPTIVE, colors=col_cnt)
    pal_pixels = pal_img.load()

    def add_col_offset(x_pos, y_pos):
        """Add minimum color number to a pixel in palette image."""
        pal_pixels[x_pos, y_pos] += cust_col_num_range[0]

    map2d(pal_img.size, add_col_offset)
    return pal_img
Example #7
Source File: qrinvite.py From Fox-V3 with GNU Affero General Public License v3.0 | 5 votes |
def convert_webp_to_png(path):
    im = Image.open(path)
    im.load()
    alpha = im.split()[-1]
    im = im.convert("RGB").convert("P", palette=Image.ADAPTIVE, colors=255)
    mask = Image.eval(alpha, lambda a: 255 if a <= 128 else 0)
    im.paste(255, mask)
    new_path = path.replace(".webp", ".png")
    im.save(new_path, transparency=255)
    return new_path
Example #8
Source File: renderPM.py From stdm with GNU General Public License v2.0 | 5 votes |
def drawToPILP(d, dpi=72, bg=0xffffff, configPIL=None, showBoundary=rl_config._unset_):
    Image = _getImage()
    im = drawToPIL(d, dpi=dpi, bg=bg, configPIL=configPIL, showBoundary=showBoundary)
    return im.convert("P", dither=Image.NONE, palette=Image.ADAPTIVE)
Example #9
Source File: renderPM.py From stdm with GNU General Public License v2.0 | 5 votes |
def _convert2pilp(im):
    Image = _getImage()
    return im.convert("P", dither=Image.NONE, palette=Image.ADAPTIVE)
Example #10
Source File: test_model_raster.py From aerial_mtl with BSD 3-Clause "New" or "Revised" License | 5 votes |
def save_raster_png(self, data, filename):
    if 'semantics' in filename:
        from util.util import labels_to_colors
        image_save = Image.fromarray(
            np.squeeze(labels_to_colors(data, self.opt.color_palette).astype(np.int8)),
            mode='RGB').convert('P', palette=Image.ADAPTIVE, colors=256)
        image_save.save(filename)
Example #11
Source File: test_model_raster_isprs.py From aerial_mtl with BSD 3-Clause "New" or "Revised" License | 5 votes |
def save_raster_png(self, data, filename):
    if 'semantics' in filename:
        from util.util import labels_to_colors
        image_save = Image.fromarray(
            np.squeeze(labels_to_colors(data, self.opt.color_palette).astype(np.int8)),
            mode='RGB').convert('P', palette=Image.ADAPTIVE, colors=256)
        image_save.save(filename)
Example #12
Source File: renderPM.py From Fluid-Designer with GNU General Public License v3.0 | 5 votes |
def drawToPILP(d, dpi=72, bg=0xffffff, configPIL=None, showBoundary=rl_config._unset_):
    Image = _getImage()
    im = drawToPIL(d, dpi=dpi, bg=bg, configPIL=configPIL, showBoundary=showBoundary)
    return im.convert("P", dither=Image.NONE, palette=Image.ADAPTIVE)
Example #13
Source File: renderPM.py From Fluid-Designer with GNU General Public License v3.0 | 5 votes |
def _convert2pilp(im):
    Image = _getImage()
    return im.convert("P", dither=Image.NONE, palette=Image.ADAPTIVE)
Example #14
Source File: test_file_gif.py From python3_ios with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_palette_handling(self):
    # see https://github.com/python-pillow/Pillow/issues/513
    im = Image.open(TEST_GIF)
    im = im.convert('RGB')

    im = im.resize((100, 100), Image.LANCZOS)
    im2 = im.convert('P', palette=Image.ADAPTIVE, colors=256)

    f = self.tempfile('temp.gif')
    im2.save(f, optimize=True)

    reloaded = Image.open(f)

    self.assert_image_similar(im, reloaded.convert('RGB'), 10)
Example #15
Source File: img_aux_processing.py From optimize-images with MIT License | 4 votes |
def do_reduce_colors(img: ImageType, max_colors: int) -> Tuple[ImageType, int, int]:
    """
    Reduce the number of colors of an Image object

    It takes a PIL image object and tries to reduce the total number of
    colors, converting it to an indexed color (mode P) image. If the input
    image is in mode 1, it cannot be further reduced, so it's returned back
    with no changes.

    :param img: a PIL image in color (modes P, RGBA, RGB, CMYK, YCbCr, LAB or HSV)
    :param max_colors: an integer indicating the maximum number of colors allowed.
    :return: a PIL image in mode P (or mode 1, as stated above), an integer
             indicating the original number of colors (0 if source is not a
             mode P or mode 1 image) and an integer stating the resulting
             number of colors.
    """
    orig_mode = img.mode
    if orig_mode == "1":
        return img, 2, 2

    colors = img.getcolors()
    if colors:
        orig_colors = len(colors)
    else:
        orig_colors = 0

    # Intermediate conversion steps when needed
    if orig_mode in ["CMYK", "YCbCr", "LAB", "HSV"]:
        img = img.convert("RGB")
    elif orig_mode == "LA":
        img = img.convert("RGBA")

    # Actual color reduction happening here
    if orig_mode in ["RGB", "L"]:
        palette = Image.ADAPTIVE
    elif orig_mode == "RGBA":
        palette = Image.ADAPTIVE
        transparent = Image.new("RGBA", img.size, (0, 0, 0, 0))
        # blend with transparent image using own alpha
        img = Image.composite(img, transparent, img)
    elif orig_mode == "P":
        palette = img.getpalette()
        img = img.convert("RGBA")
        w, h = img.size
        alpha_layer = Image.new("L", img.size)
        for x in range(w):
            for y in range(h):
                r, g, b, a = img.getpixel((x, y))
                alpha_layer.putpixel((x, y), a)
        img.putalpha(alpha_layer)
    else:
        return img, 0, 0

    img = img.convert("P", palette=palette, colors=max_colors)
    return img, orig_colors, len(img.getcolors())
Example #16
Source File: images2gif.py From Model-Free-Episodic-Control with MIT License | 4 votes |
def writeGif(filename, images, duration=0.1, loops=0, dither=1):
    """ writeGif(filename, images, duration=0.1, loops=0, dither=1)

    Write an animated gif from the specified images.
    images should be a list of numpy arrays of PIL images.
    Numpy images of type float should have pixels between 0 and 1.
    Numpy images of other types are expected to have values between 0 and 255.
    """

    if PIL is None:
        raise RuntimeError("Need PIL to write animated gif files.")

    AD = Image.ADAPTIVE

    images2 = []

    # convert to PIL
    for im in images:

        if isinstance(im, Image.Image):
            images2.append(im.convert('P', palette=AD, dither=dither))

        elif np and isinstance(im, np.ndarray):
            if im.dtype == np.uint8:
                pass
            elif im.dtype in [np.float32, np.float64]:
                im = (im * 255).astype(np.uint8)
            else:
                im = im.astype(np.uint8)
            # convert
            if len(im.shape) == 3 and im.shape[2] == 3:
                im = Image.fromarray(im, 'RGB').convert('P', palette=AD, dither=dither)
            elif len(im.shape) == 2:
                im = Image.fromarray(im, 'L').convert('P', palette=AD, dither=dither)
            else:
                raise ValueError("Array has invalid shape to be an image.")
            images2.append(im)

        else:
            raise ValueError("Unknown image type.")

    # check duration
    if hasattr(duration, '__len__'):
        if len(duration) == len(images2):
            durations = [d for d in duration]
        else:
            raise ValueError("len(duration) doesn't match amount of images.")
    else:
        durations = [duration for im in images2]

    # open file
    fp = open(filename, 'wb')

    # write
    try:
        n = _writeGifToFile(fp, images2, durations, loops)
        print n, 'frames written'
    finally:
        fp.close()
Example #17
Source File: image2gif.py From 3DSkit with GNU General Public License v3.0 | 4 votes |
def convertImagesToPIL(self, images, dither, nq=0, images_info=None):
    """ convertImagesToPIL(images, nq=0)

    Convert images to Paletted PIL images, which can then be
    written to a single animaged GIF.
    """

    # Convert to PIL images
    images2 = []
    for im in images:
        if isinstance(im, Image.Image):
            images2.append(im)
        elif np and isinstance(im, np.ndarray):
            if im.ndim == 3 and im.shape[2] == 3:
                im = Image.fromarray(im, 'RGB')
            elif im.ndim == 3 and im.shape[2] == 4:
                # im = Image.fromarray(im[:,:,:3],'RGB')
                self.transparency = True
                im = Image.fromarray(im[:, :, :4], 'RGBA')
            elif im.ndim == 2:
                im = Image.fromarray(im, 'L')
            images2.append(im)

    # Convert to paletted PIL images
    images, images2 = images2, []
    if nq >= 1:
        # NeuQuant algorithm
        for im in images:
            im = im.convert("RGBA")  # NQ assumes RGBA
            nqInstance = NeuQuant(im, int(nq))  # Learn colors from image
            if dither:
                im = im.convert("RGB").quantize(palette=nqInstance.paletteImage(), colors=255)
            else:
                im = nqInstance.quantize(im, colors=255)  # Use to quantize the image itself
            self.transparency = True  # since NQ assumes transparency
            if self.transparency:
                alpha = im.split()[3]
                mask = Image.eval(alpha, lambda a: 255 if a <= 128 else 0)
                im.paste(255, mask=mask)
            images2.append(im)
    else:
        # Adaptive PIL algorithm
        AD = Image.ADAPTIVE
        # for index,im in enumerate(images):
        for i in range(len(images)):
            im = images[i].convert('RGB').convert('P', palette=AD, dither=dither, colors=255)
            if self.transparency:
                alpha = images[i].split()[3]
                mask = Image.eval(alpha, lambda a: 255 if a <= 128 else 0)
                im.paste(255, mask=mask)
            images2.append(im)

    # Done
    return images2
Example #18
Source File: images2gif_py2.py From Legofy with MIT License | 4 votes |
def convertImagesToPIL(self, images, dither, nq=0, images_info=None):
    """ convertImagesToPIL(images, nq=0)

    Convert images to Paletted PIL images, which can then be
    written to a single animaged GIF.
    """

    # Convert to PIL images
    images2 = []
    for im in images:
        if isinstance(im, Image.Image):
            images2.append(im)
        elif np and isinstance(im, np.ndarray):
            if im.ndim == 3 and im.shape[2] == 3:
                im = Image.fromarray(im, 'RGB')
            elif im.ndim == 3 and im.shape[2] == 4:
                # im = Image.fromarray(im[:,:,:3],'RGB')
                self.transparency = True
                im = Image.fromarray(im[:, :, :4], 'RGBA')
            elif im.ndim == 2:
                im = Image.fromarray(im, 'L')
            images2.append(im)

    # Convert to paletted PIL images
    images, images2 = images2, []
    if nq >= 1:
        # NeuQuant algorithm
        for im in images:
            im = im.convert("RGBA")  # NQ assumes RGBA
            nqInstance = NeuQuant(im, int(nq))  # Learn colors from image
            if dither:
                im = im.convert("RGB").quantize(palette=nqInstance.paletteImage(), colors=255)
            else:
                im = nqInstance.quantize(im, colors=255)  # Use to quantize the image itself
            self.transparency = True  # since NQ assumes transparency
            if self.transparency:
                alpha = im.split()[3]
                mask = Image.eval(alpha, lambda a: 255 if a <= 128 else 0)
                im.paste(255, mask=mask)
            images2.append(im)
    else:
        # Adaptive PIL algorithm
        AD = Image.ADAPTIVE
        # for index,im in enumerate(images):
        for i in range(len(images)):
            im = images[i].convert('RGB').convert('P', palette=AD, dither=dither, colors=255)
            if self.transparency:
                alpha = images[i].split()[3]
                mask = Image.eval(alpha, lambda a: 255 if a <= 128 else 0)
                im.paste(255, mask=mask)
            images2.append(im)

    # Done
    return images2
Example #19
Source File: images2gif_py3.py From Legofy with MIT License | 4 votes |
def convertImagesToPIL(self, images, dither, nq=0, images_info=None):
    """ convertImagesToPIL(images, nq=0)

    Convert images to Paletted PIL images, which can then be
    written to a single animaged GIF.
    """

    # Convert to PIL images
    images2 = []
    for im in images:
        if isinstance(im, Image.Image):
            images2.append(im)
        elif np and isinstance(im, np.ndarray):
            if im.ndim == 3 and im.shape[2] == 3:
                im = Image.fromarray(im, 'RGB')
            elif im.ndim == 3 and im.shape[2] == 4:
                # im = Image.fromarray(im[:,:,:3],'RGB')
                self.transparency = True
                im = Image.fromarray(im[:, :, :4], 'RGBA')
            elif im.ndim == 2:
                im = Image.fromarray(im, 'L')
            images2.append(im)

    # Convert to paletted PIL images
    images, images2 = images2, []
    if nq >= 1:
        # NeuQuant algorithm
        for im in images:
            im = im.convert("RGBA")  # NQ assumes RGBA
            nqInstance = NeuQuant(im, int(nq))  # Learn colors from image
            if dither:
                im = im.convert("RGB").quantize(palette=nqInstance.paletteImage(), colors=255)
            else:
                im = nqInstance.quantize(im, colors=255)  # Use to quantize the image itself
            self.transparency = True  # since NQ assumes transparency
            if self.transparency:
                alpha = im.split()[3]
                mask = Image.eval(alpha, lambda a: 255 if a <= 128 else 0)
                im.paste(255, mask=mask)
            images2.append(im)
    else:
        # Adaptive PIL algorithm
        AD = Image.ADAPTIVE
        # for index,im in enumerate(images):
        for i in range(len(images)):
            im = images[i].convert('RGB').convert('P', palette=AD, dither=dither, colors=255)
            if self.transparency:
                alpha = images[i].split()[3]
                mask = Image.eval(alpha, lambda a: 255 if a <= 128 else 0)
                im.paste(255, mask=mask)
            images2.append(im)

    # Done
    return images2
Example #20
Source File: images2gif.py From music-lab-scripts with MIT License | 4 votes |
def convertImagesToPIL(self, images, dither, nq=0):
    """ convertImagesToPIL(images, nq=0)

    Convert images to Paletted PIL images, which can then be
    written to a single animaged GIF.
    """

    # Convert to PIL images
    images2 = []
    for im in images:
        if isinstance(im, Image.Image):
            images2.append(im)
        elif np and isinstance(im, np.ndarray):
            if im.ndim == 3 and im.shape[2] == 3:
                im = Image.fromarray(im, 'RGB')
            elif im.ndim == 3 and im.shape[2] == 4:
                im = Image.fromarray(im[:, :, :3], 'RGB')
            elif im.ndim == 2:
                im = Image.fromarray(im, 'L')
            images2.append(im)

    # Convert to paletted PIL images
    images, images2 = images2, []
    if nq >= 1:
        # NeuQuant algorithm
        for im in images:
            im = im.convert("RGBA")  # NQ assumes RGBA
            nqInstance = NeuQuant(im, int(nq))  # Learn colors from image
            if dither:
                im = im.convert("RGB").quantize(palette=nqInstance.paletteImage())
            else:
                im = nqInstance.quantize(im)  # Use to quantize the image itself
            images2.append(im)
    else:
        # Adaptive PIL algorithm
        AD = Image.ADAPTIVE
        for im in images:
            im = im.convert('P', palette=AD, dither=dither)
            images2.append(im)

    # Done
    return images2