Python Image.fromarray() Examples

The following are 29 code examples of Image.fromarray(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module Image, or try the search function.
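Before the project examples, here is a minimal sketch of the call itself (the array contents and file name are illustrative): Image.fromarray() turns a NumPy array into a PIL Image, inferring the mode from the array's shape and dtype unless a mode is passed explicitly.

import numpy as np
from PIL import Image

arr = np.zeros((64, 64, 3), dtype=np.uint8)   # height x width x channels, uint8
arr[:, :32] = [255, 0, 0]                     # paint the left half red
img = Image.fromarray(arr)                    # mode "RGB" is inferred from shape and dtype
img.save("red_half.png")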
Example #1
Source File: convert_to_lmdb.py    From tensorflow-recipes with Apache License 2.0
def get_data(self):
        lmdb = "/datasets/celebHQ/celeb_hq.lmdb"
        ds = LMDBDataPoint(lmdb, shuffle=True)
        ds = ImageDecode(ds, index=0)
        ds.reset_state()
        resample = Image.BICUBIC

        self.remainingImages = ds.size()

        for dp in ds.get_data():
            # read image
            bgr = dp[0]

            # convert to PIL Image and resize

            rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
            pil_im = Image.fromarray(rgb)
            pil_im = pil_im.resize((self.image_size, self.image_size), resample=resample)

            # convert back to OpenCV format
            resized = np.array(pil_im)
            resized = resized[:, :, ::-1].copy()

            # break early to process fewer images
            self.remainingImages -= 1

            print self.remainingImages
            # if (self.remainingImages < 29950):
            #     break
            yield [resized] 
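Stripped of the LMDB pipeline, the per-image round trip in this example reduces to the following sketch (image_size is a placeholder value, not taken from the project):

import cv2
import numpy as np
from PIL import Image

def resize_bgr(bgr, image_size=256):
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)               # OpenCV arrays are BGR; PIL expects RGB
    pil_im = Image.fromarray(rgb)
    pil_im = pil_im.resize((image_size, image_size), Image.BICUBIC)
    return np.array(pil_im)[:, :, ::-1].copy()               # back to BGR for OpenCV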
Example #2
Source File: ChangeHue.py    From pokemon with MIT License
def changeHue(filename, offset):
    image = Image.open(start_dir+filename)
    image = image.convert('RGBA')
    arr = np.array(np.asarray(image).astype('float'))
    new_img = Image.fromarray(shift_hue(arr, offset/360.).astype('uint8'), 'RGBA')
    new_img.save(end_dir+filename) 
Example #3
Source File: CloudJump2.py    From pythonista-scripts with MIT License
def crop_image(cls, img):
        image_data = numpy.asarray(img)
        image_data_bw = image_data.max(axis=2)
        non_empty_columns = numpy.where(image_data_bw.max(axis=0)>0)[0]
        non_empty_rows    = numpy.where(image_data_bw.max(axis=1)>0)[0]
        crop_box = (min(non_empty_rows),    max(non_empty_rows),
                    min(non_empty_columns), max(non_empty_columns))
        image_data_new = image_data[crop_box[0]:crop_box[1]+1,
                                    crop_box[2]:crop_box[3]+1, :]
        img = Image.fromarray(image_data_new)
        return img 
Example #4
Source File: util.py    From malmomo with MIT License
def rgb_to_png_bytes(rgb):
  img = Image.fromarray(rgb)
  sio = StringIO.StringIO()
  img.save(sio, format="png")
  return sio.getvalue() 
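The example above targets Python 2's StringIO; because PNG data is binary, a Python 3 equivalent would use io.BytesIO instead (a sketch, not from the malmomo project):

import io
from PIL import Image

def rgb_to_png_bytes(rgb):
    img = Image.fromarray(rgb)
    buf = io.BytesIO()            # binary buffer, since PNG is not text
    img.save(buf, format="png")
    return buf.getvalue()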
Example #5
Source File: transforms.py    From deep-learning-from-scratch-3 with MIT License
def __call__(self, array):
        data = array.transpose(1, 2, 0)  # CHW -> HWC, the layout PIL expects
        return Image.fromarray(data) 
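For RGB output, Image.fromarray() wants a height x width x channels array of uint8, so a CHW float tensor (the usual deep-learning layout) typically needs a transpose and a cast first. A minimal sketch with made-up data:

import numpy as np
from PIL import Image

chw = np.random.rand(3, 32, 32).astype(np.float32)        # CHW float tensor in [0, 1]
hwc = (chw.transpose(1, 2, 0) * 255).astype(np.uint8)     # HWC uint8 for PIL
img = Image.fromarray(hwc)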
Example #6
Source File: reader.py    From Cytomine-python-client with Apache License 2.0
def transform_rgb_to_bgr(image):
    sub = image.convert("RGB")
    data = np.array(sub)
    red, green, blue = data.T
    data = np.array([blue, green, red])
    data = data.transpose()
    return Image.fromarray(data) 
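The same channel swap can be written with a negative-stride slice; the following shorter variant is a sketch and not part of the Cytomine client:

import numpy as np
from PIL import Image

def transform_rgb_to_bgr_sliced(image):
    data = np.array(image.convert("RGB"))[:, :, ::-1].copy()   # reverse the channel axis; copy() keeps it contiguous
    return Image.fromarray(data)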
Example #7
Source File: __init__.py    From anna with BSD 2-Clause "Simplified" License
def _save(self):
        tt = datetime.now()
        time_string = tt.strftime('%mm-%dd-%Hh-%Mm-%Ss')

        W = self.model.conv1.W.get_value()
        W = W.transpose(1, 2, 0, 3)

        row_list = []
        img_list = []
        k = 0

        rows = W.shape[3] / 16
        bar = 0.5 * numpy.ones((W.shape[0], 1, 3))
        for i in range(rows):
            row_list.append(bar)
            for j in range(16):
                W0 = W[:, :, :, k]
                W0 -= W0.min()
                W0 /= W0.max()

                # W0[:, :,0] -= W0[:, :,0].min()
                # W0[:, :,0] /= W0[:, :,0].max()
                # W0[:, :,1] -= W0[:, :,1].min()
                # W0[:, :,1] /= W0[:, :,1].max()
                # W0[:, :,2] -= W0[:, :,2].min()
                # W0[:, :,2] /= W0[:, :,2].max()
                row_list.append(W0)
                row_list.append(bar)
                k += 1
            row_image = numpy.hstack(row_list)
            row_list = []
            bar_h = 0.5 * numpy.ones((1, row_image.shape[1], 3))
            img_list.append(bar_h)
            img_list.append(row_image)
        img_list.append(bar_h)
        img_image = numpy.vstack(img_list)

        to_save = Image.fromarray(numpy.uint8(255 * img_image))
        filename = 'filters_' + time_string + '.png'
        filepath = os.path.join(self.save_path, filename)
        to_save.save(filepath) 
Example #8
Source File: __init__.py    From anna with BSD 2-Clause "Simplified" License
def _save(self):
        tt = datetime.now()
        time_string = tt.strftime('%mm-%dd-%Hh-%Mm-%Ss')
        sub_path = os.path.join(self.save_path, time_string)

        if not os.path.exists(sub_path):
            os.makedirs(sub_path)

        prediction = self.model.prediction(self.batch)
        for i in range(128):
            image = self.batch[:, :, :, i]
            image = image.transpose(1, 2, 0)

            recon = numpy.array(prediction[:, :, :, i])
            recon = recon.transpose(1, 2, 0)

            image_array = numpy.uint8(rescale(numpy.hstack((image, recon))))

            to_save = Image.fromarray(image_array)
            filename = 'recon-%02d.jpeg' % i
            filepath = os.path.join(sub_path, filename)
            to_save.save(filepath) 
Example #9
Source File: generateSpectrograms.py    From CNNs-Speech-Music-Discrimination with MIT License
def createSpectrogramFile(x, Fs, fileName, stWin, stStep):
        specgramOr, TimeAxis, FreqAxis = aF.stSpectogram(x, Fs, round(Fs * stWin), round(Fs * stStep), False)            
        print specgramOr.shape
        if inputs[2]=='full':
            print specgramOr
            numpy.save(fileName.replace('.png','')+'_spectrogram', specgramOr)
        else:
            #specgram = scipy.misc.imresize(specgramOr, float(227.0) / float(specgramOr.shape[0]), interp='bilinear')
            specgram = cv2.resize(specgramOr,(227, 227), interpolation = cv2.INTER_LINEAR)
            im1 = Image.fromarray(numpy.uint8(matplotlib.cm.jet(specgram)*255))
            scipy.misc.imsave(fileName, im1)
Example #10
Source File: ClassifyWav.py    From CNNs-Speech-Music-Discrimination with MIT License
def mtCNN_classification(signal, Fs, mtWin, mtStep, RGB_singleFrame_net, SOUND_mean_RGB, transformer_RGB, classNamesCNN):
    mtWin2 = int(mtWin * Fs)
    mtStep2 = int(mtStep * Fs)
    stWin = 0.020
    stStep = 0.015    
    N = len(signal)
    curPos = 0
    count = 0
    fileNames = []
    flagsInd = []
    Ps = []
    randomString = (''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(5)))
    while (curPos < N):                 # for each mid-term segment
        N1 = curPos
        N2 = curPos + mtWin2 + stStep*Fs
        if N2 > N:
            N2 = N
        xtemp = signal[int(N1):int(N2)]                # get mid-term segment        

        specgram, TimeAxis, FreqAxis = aF.stSpectogram(xtemp, Fs, round(Fs * stWin), round(Fs * stStep), False)     # compute spectrogram
        if specgram.shape[0] != specgram.shape[1]:                                                                  # TODO (this must be dynamic!)
            break
        specgram = scipy.misc.imresize(specgram, float(227.0) / float(specgram.shape[0]), interp='bilinear')        # resize to 227 x 227
        
        imSpec = Image.fromarray(np.uint8(matplotlib.cm.jet(specgram)*255))                                         # create image
        curFileName = randomString + "temp_{0:d}.png".format(count)
        fileNames.append(curFileName)    
        scipy.misc.imsave(curFileName, imSpec)
        
        T1 = time.time()
        output_classes, outputP = singleFrame_classify_video(curFileName, RGB_singleFrame_net, transformer_RGB, False, classNamesCNN)        
        T2 = time.time()
        #print T2 - T1
        flagsInd.append(classNamesCNN.index(output_classes[0]))        
        Ps.append(outputP[0])
        #print flagsInd[-1]
        curPos += mtStep2               
        count += 1              
    return np.array(flagsInd), classNamesCNN, np.array(Ps) 
Example #11
Source File: transforms.py    From Qualia2.0 with MIT License
def __call__(self, tensor):
        data = tensor.asnumpy()
        data = data[0].transpose(1,2,0)  # drop the batch axis and go CHW -> HWC
        return Image.fromarray(data) 
Example #12
Source File: Window.py    From launcher with GNU General Public License v2.0
def get_open_windows():
	gtk.main_iteration_do(False)
	screen = wnck.screen_get_default()
	screen.force_update()
	win = screen.get_windows_stacked()
	windows=[]
	for w in win:
			if  'NORMAL' in str(w.get_window_type()):
				if "ducklauncher!!!"==w.get_name():
					pass		
				elif w.is_sticky()!=True and "ducklauncher!!"!=w.get_name():
					window={}	
					window['id']=w.get_xid()
					window['title'] =w.get_name()

					window['app']=w.get_application().get_name()
					
					#print w.get_class_group().get_name()
					ico = Apps.ico_from_app(w.get_application().get_icon_name())
					if ico==None:
						ico = Apps.ico_from_app(w.get_application().get_name())
					if ico==None:
						pix=w.get_icon()
						pix= pix.scale_simple(128,128,gtk.gdk.INTERP_HYPER)
						ico_data=  pix.get_pixels_array()
						img = Image.fromarray(ico_data, 'RGBA')
						home = os.path.expanduser("~")+"/.duck"
						try:
    							os.stat(home)
						except:
    							os.mkdir(home)
						#print window
						img_name=str(window["title"]).replace(" ","").replace(".","").lower()
						img_path="{0}/{1}.png".format(home,img_name)					
						img.save(img_path)
						ico=img_path
					window['icon']=ico


					windows.append(window)
	return windows 
Example #13
Source File: page_elements2.py    From namsel with MIT License
def draw_hough_outline(self, arr):
        
        arr = invert_bw(arr)
#         import Image
#         Image.fromarray(arr*255).show()
#        h = cv.HoughLinesP(arr, 2, np.pi/4, 5, minLineLength=arr.shape[0]*.10)
        h = cv.HoughLinesP(arr, 2, np.pi/4, 1, minLineLength=arr.shape[0]*.15, maxLineGap=5) #This
#         h = cv.HoughLinesP(arr, 2, np.pi/4, 1, minLineLength=arr.shape[0]*.15, maxLineGap=1)
#        h = cv.HoughLinesP(arr, 2, np.pi/4, 1, minLineLength=arr.shape[0]*.15)
        PI_O4 = np.pi/4
#        if h and h.any():
#        if self._page_type == 'pecha':
#            color = 1
#            thickness = 10
#        else: # Attempt to erase horizontal lines if page_type == book. 
#            # Why? Horizontal lines can break LineCluster if they are broken
#            # e.g. couldn't be filtered out prior to line_breaker.py
#            color = 0
#            thickness = 10
        if h is not None:
            for line in h[0]:
                new = (line[2]-line[0], line[3] - line[1])
                val = (new[0]/np.sqrt(np.dot(new, new)))
                theta = np.arccos(val)
                if theta >= PI_O4: # Vertical line
#                    print line[1] - line[3]
#                     cv.line(arr, (line[0], 0), (line[0], arr.shape[0]), 1, thickness=10)
                    if line[0] < .5*arr.shape[1]:
                        arr[:,:line[0]+12] = 0
                    else:
                        arr[:,line[0]-12:] = 0
                else: # horizontal line
                    if line[2] - line[0] >= .15 * arr.shape[1]:
#                         cv.line(arr, (0, line[1]), (arr.shape[1], line[1]), 1, thickness=50)
                        if line[1] < .5 *arr.shape[0]:
                            arr[:line[1]+17, :] = 0
                        else:
                            arr[line[1]-5:,:] = 0
        

        return ((arr*-1)+1).astype(np.uint8) 
Example #14
Source File: overfeat_wrapper.py    From sklearn-theano with BSD 3-Clause "New" or "Revised" License
def get_overfeat_output_raw(img_arr, layer_id, largenet, overfeatcmd=None,
                            net_weight_file=None, overfeat_dir=None,
                            architecture='linux_64'):

    if img_arr.dtype != np.uint8:
        raise ValueError('Please convert image to uint8')

    if img_arr.shape[2] != 3:
        raise ValueError('Last dimension must index color')

    overfeatcmd = get_overfeat_cmd(overfeatcmd, overfeat_dir, architecture)
    net_weight_file = get_net_weights(net_weight_file, largenet,
                                      overfeat_dir=overfeat_dir)

    image = Image.fromarray(img_arr)

    buf = StringIO.StringIO()
    image.save(buf, format='ppm')
    buf.seek(0)

    command = overfeatcmd + " " + net_weight_file + " -1 %d %d" % (
        int(largenet), layer_id)

    p = subprocess.Popen(
        command.split(' '), stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    output = p.communicate(input=buf.buf)[0]

    return output 
Example #15
Source File: train_MixModel_VAE-2StochLayers.py    From mixture_density_VAEs with MIT License
def sample_from_model(model, param_file_path, vae_hyperParams, image_file_path, nImages=100):

    # get op to load the model                                                                                                    
    persister = tf.train.Saver()

    with tf.Session() as s:
        persister.restore(s, param_file_path)
        sample_list = s.run(model.get_samples(nImages))

    for i, samples in enumerate(sample_list):
        image = Image.fromarray(tile_raster_images(X=samples, img_shape=(28, 28), tile_shape=(int(np.sqrt(nImages)), int(np.sqrt(nImages))), tile_spacing=(1, 1)))
        image.save(image_file_path+"_component"+str(i)+".png") 
Example #16
Source File: train_reg_gaussVAE.py    From mixture_density_VAEs with MIT License
def sample_from_model(model, param_file_path, vae_hyperParams, image_file_path, nImages=100):

    # get op to load the model                                                                                                    
    persister = tf.train.Saver()

    with tf.Session() as s:
        persister.restore(s, param_file_path)
        samples = s.run(model.get_samples(nImages))

    image = Image.fromarray(tile_raster_images(X=samples, img_shape=(28, 28), tile_shape=(int(np.sqrt(nImages)), int(np.sqrt(nImages))), tile_spacing=(1, 1)))
    image.save(image_file_path+".png") 
Example #17
Source File: train_DP_VAE.py    From mixture_density_VAEs with MIT License
def sample_from_model(model, param_file_path, vae_hyperParams, image_file_path, nImages=100):

    # get op to load the model                                                                                                    
    persister = tf.train.Saver()

    with tf.Session() as s:
        persister.restore(s, param_file_path)
        sample_list = s.run(model.get_samples(nImages))

    for i, samples in enumerate(sample_list):
        image = Image.fromarray(tile_raster_images(X=samples, img_shape=(28, 28), tile_shape=(int(np.sqrt(nImages)), int(np.sqrt(nImages))), tile_spacing=(1, 1)))
        image.save(image_file_path+"_component"+str(i)+".png") 
Example #18
Source File: train_MixModel_VAE_vCollapsed.py    From mixture_density_VAEs with MIT License
def sample_from_model(model, param_file_path, vae_hyperParams, image_file_path, nImages=100):

    # get op to load the model                                                                                                    
    persister = tf.train.Saver()

    with tf.Session() as s:
        persister.restore(s, param_file_path)
        sample_list = s.run(model.get_samples(nImages))

    for i, samples in enumerate(sample_list):
        image = Image.fromarray(tile_raster_images(X=samples, img_shape=(28, 28), tile_shape=(int(np.sqrt(nImages)), int(np.sqrt(nImages))), tile_spacing=(1, 1)))
        image.save(image_file_path+"_component"+str(i)+".png") 
Example #19
Source File: screenshot_image.py    From iris with Mozilla Public License 2.0
def get_gray_image(self):
        """Getter for the gray_image property."""
        return Image.fromarray(self._gray_array) 
Example #20
Source File: screenshot_image.py    From iris with Mozilla Public License 2.0
def get_raw_image(self):
        """Getter raw_image property."""
        return Image.fromarray(self._raw_image) 
Example #21
Source File: pattern.py    From iris with Mozilla Public License 2.0
def _get_image_from_array(scale: int, array) -> Image:
    """Converts a scaled array into Image."""
    if scale is None or array is None:
        return None
    return Image.fromarray(_apply_scale(scale, array)) 
Example #22
Source File: graphics.py    From spectral with MIT License
def make_pil_image(*args, **kwargs):
    '''Creates a PIL Image object.

    USAGE: make_pil_image(source [, bands] [stretch=True] [stretch_all=False],
                          [bounds = (lower, upper)] )

    See `get_rgb` for description of arguments.
    '''
    try:
        from PIL import Image, ImageDraw
    except ImportError:
        import Image
        import ImageDraw

    rgb = get_rgb(*args, **kwargs)
    rgb = (rgb * 255).astype(np.ubyte)
    img = Image.fromarray(rgb)
    return img 
Example #23
Source File: page_elements2.py    From namsel with MIT License
def _draw_new_page(self):
        self.page_array = np.ones_like(self.img_arr)
        
        self.tall = set([i for i in self.get_indices() if 
                         self.get_boxes()[i][3] > 3*self.char_mean])
        
#        cv.drawContours(self.page_array, [self.contours[i] for i in 
#                        self.get_indices() if self.get_boxes()[i][2] <= self.tsek_mean + 3*self.tsek_std], 
#                        -1,0, thickness = -1)
#        
#        
#        self.page_array = cv.medianBlur(self.page_array, 19)
#        
#        cv.drawContours(self.page_array, [self.contours[i] for i in 
#                        self.get_indices() if self.get_boxes()[i][2] <= self.tsek_mean + 3*self.tsek_std], 
#                        -1,0, thickness = -1)
        cv.drawContours(self.page_array, [self.contours[i] for i in 
                        range(len(self.contours)) if 
                        self.get_boxes()[i][2] > self.smlmean + 3*self.smstd], 
                        -1,0, thickness = -1)
#        cv.drawContours(self.page_array, [self.contours[i] for i in 
#                        self.get_indices() if self.get_boxes()[i][3] <= 2*self.char_mean], 
#                        -1,0, thickness = -1)
#        cv.erode(self.page_array, None, self.page_array, iterations=2)
#        self.page_array = cv.morphologyEx(self.page_array, cv.MORPH_CLOSE, None,iterations=2)
        import Image
        Image.fromarray(self.page_array*255).show()
#        raw_input()
#        cv.dilate(self.page_array, None, self.page_array, iterations=1) 
Example #24
Source File: kmeans.py    From python-dominant-image-colour with BSD 2-Clause "Simplified" License
def showClustering(self):

        localPixels = [None] * len(self.image.getdata())

        for idx, pixel in enumerate(self.pixels):
                shortest = float('Inf')
                for cluster in self.clusters:
                    distance = self.calcDistance(cluster.centroid, pixel)
                    if distance < shortest:
                        shortest = distance
                        nearest = cluster

                localPixels[idx] = nearest.centroid

        w, h = self.image.size
        localPixels = numpy.asarray(localPixels)\
            .astype('uint8')\
            .reshape((h, w, 3))

        colourMap = Image.fromarray(localPixels)
        colourMap.show() 
Example #25
Source File: __init__.py    From anna with BSD 2-Clause "Simplified" License
def _save(self):
        tt = datetime.now()
        time_string = tt.strftime('%mm-%dd-%Hh-%Mm-%Ss')
        sub_path = os.path.join(self.save_path, time_string)

        if not os.path.exists(sub_path):
            os.makedirs(sub_path)

        prediction = self.model.prediction(self.batch)
        for i in range(128):
            image = deepcopy(self.batch[:, :, :, i])
            image = image.transpose(1, 2, 0)
            image_min = image.min()
            image -= image_min
            image_max = image.max()
            image /= image_max
            image *= 255

            recon = numpy.array(prediction[:, :, :, i])
            recon = recon.transpose(1, 2, 0)
            recon2 = deepcopy(recon) * 1.0

            recon_mask = (numpy.sum(recon == 0.0, axis=2) < 3)
            # recon_mask = 255*(numpy.tile(recon_mask[:, :,None],(1,1,3)))
            recon_mask = 255 * (numpy.tile(recon_mask[:, :, None],
                                           (1, 1, image.shape[2])))

            recon -= image_min
            recon /= image_max
            recon *= 255

            recon2 -= recon2.min()
            recon2 /= recon2.max()
            recon2 *= 255

            image_array = numpy.uint8(numpy.hstack((image, recon, recon2,
                                                    recon_mask)))

            to_save = Image.fromarray(image_array)
            filename = 'recon-%02d.jpeg' % i
            filepath = os.path.join(sub_path, filename)
            to_save.save(filepath) 
Example #26
Source File: __init__.py    From anna with BSD 2-Clause "Simplified" License
def _save(self):
        tt = datetime.now()
        time_string = tt.strftime('%mm-%dd-%Hh-%Mm-%Ss')
        sub_path = os.path.join(self.save_path, time_string)

        if not os.path.exists(sub_path):
            os.makedirs(sub_path)

        prediction = self.model.prediction(self.batch)
        for i in range(self.batch.shape[3]):
            image = deepcopy(self.batch[:, :, :, i])
            image = image.transpose(1, 2, 0)
            image_min = image.min()
            image -= image_min
            image_max = image.max()
            image /= image_max
            image *= 255

            recon = numpy.array(prediction[:, :, :, i])
            recon = recon.transpose(1, 2, 0)
            recon2 = deepcopy(recon) * 1.0

            recon_mask = (numpy.sum(recon == 0.0, axis=2) < 3)
            recon_mask = 255 * (numpy.tile(recon_mask[:, :, None],
                                           (1, 1, image.shape[2])))

            recon -= image_min
            recon /= image_max
            recon *= 255

            recon2 -= recon2.min()
            recon2 /= recon2.max()
            recon2 *= 255

            image_array = numpy.uint8(numpy.hstack((image, recon, recon2,
                                                    recon_mask)))
            # Needed for grayscale images. If color, has no effect.
            image_array = numpy.tile(image_array, (1, 1, 3))

            to_save = Image.fromarray(image_array)
            filename = 'recon-%02d.jpeg' % i
            filepath = os.path.join(sub_path, filename)
            to_save.save(filepath) 
Example #27
Source File: __init__.py    From anna with BSD 2-Clause "Simplified" License
def _save(self):
        tt = datetime.now()
        time_string = tt.strftime('%mm-%dd-%Hh-%Mm-%Ss')

        W = self.model.conv1.W.get_value()
        W = W.transpose(1, 2, 0, 3)

        row_list = []
        img_list = []
        k = 0

        rows = W.shape[3] / 16
        bar = 0.5 * numpy.ones((W.shape[0], 1, 3))
        for i in range(rows):
            row_list.append(bar)
            for j in range(16):
                W0 = W[:, :, :, k]
                W0 -= W0.min()
                W0 /= W0.max()
                W0 = numpy.tile(W0, (1, 1, 3))

                # W0[:, :,0] -= W0[:, :,0].min()
                # W0[:, :,0] /= W0[:, :,0].max()
                # W0[:, :,1] -= W0[:, :,1].min()
                # W0[:, :,1] /= W0[:, :,1].max()
                # W0[:, :,2] -= W0[:, :,2].min()
                # W0[:, :,2] /= W0[:, :,2].max()
                row_list.append(W0)
                row_list.append(bar)
                k += 1
            row_image = numpy.hstack(row_list)
            row_list = []
            bar_h = 0.5 * numpy.ones((1, row_image.shape[1], 3))
            img_list.append(bar_h)
            img_list.append(row_image)
        img_list.append(bar_h)
        img_image = numpy.vstack(img_list)

        to_save = Image.fromarray(numpy.uint8(255 * img_image))
        filename = 'filters_' + time_string + '.png'
        filepath = os.path.join(self.save_path, filename)
        to_save.save(filepath) 
Example #28
Source File: vis.py    From retrieval-2016-deepvision with MIT License
def create_thumb(self,im):
    
        x = 800
        y = 800
        size = (y,x)
        image = Image.fromarray(im)
        
        image.thumbnail(size, Image.ANTIALIAS)
        background = Image.new('RGBA', size, "black")
        background.paste(image, ((size[0] - image.size[0]) / 2, (size[1] - image.size[1]) / 2))
        
        return np.array(background)[:,:,0:3] 
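This function is Python 2 code: on Python 3 the two divisions by 2 produce floats, which Image.paste() rejects, and Pillow 10 removed the Image.ANTIALIAS constant. A Python 3 adaptation might look like the following sketch (not from the original project):

import numpy as np
from PIL import Image

def create_thumb(im, size=(800, 800)):
    image = Image.fromarray(im)
    image.thumbnail(size, Image.LANCZOS)                      # LANCZOS replaces the removed ANTIALIAS
    background = Image.new('RGBA', size, "black")
    background.paste(image, ((size[0] - image.size[0]) // 2,  # floor division for integer coordinates
                             (size[1] - image.size[1]) // 2))
    return np.array(background)[:, :, 0:3]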
Example #29
Source File: dataset_tc.py    From DEMUD with Apache License 2.0
def  plot_item(self, m, ind, x, r, k, label, U, scores):
    """plot_item(self, m, ind, x, r, k, label, U, scores)

    Plot selection m (index ind, data in x) and its reconstruction r,
    with k and label to annotate the plot.

    U and scores are optional; ignored in this method, used in some
    classes' submethods.
    """

    if x == [] or r == []: 
      print "Error: No data in x and/or r."
      return
  
    im = Image.fromarray(x.reshape(self.winsize, self.winsize, 3))
    outdir  = os.path.join('results', self.name)
    if not os.path.exists(outdir):
      os.mkdir(outdir)
    figfile = os.path.join(outdir, '%s-sel-%d-k-%d.pdf' % (self.name, m, k))
    im.save(figfile)
    print 'Wrote plot to %s' % figfile

    # record the selections in order, at their x,y coords
    # subtract selection number from n so first sels have high values
    mywidth  = self.width - self.winsize
    myheight = self.height - self.winsize
    # set all unselected items to a value 1 less than the latest
    priority = mywidth*myheight - m
    if priority < 2:
      priority = 2
    self.selections[np.where(self.selections < priority)] = priority-2
    (y,x) = map(int, label.strip('()').split(','))
    #self.selections[ind/mywidth, ind%myheight] = priority
    qtrwin = self.winsize/8
    self.selections[y-qtrwin:y+qtrwin, x-qtrwin:x+qtrwin] = priority
    
    pylab.clf()
    pylab.imshow(self.image)
    pylab.hold(True)
    #pylab.imshow(self.selections)
    masked_sels = np.ma.masked_where(self.selections < priority, self.selections)
    pylab.imshow(masked_sels, interpolation='none', alpha=0.5)
    #figfile = '%s/%s-priority-%d-k-%d.pdf' % (outdir, self.name, m, k)
    # Has to be .png or the alpha transparency doesn't work! (pdf)
    figfile = os.path.join(outdir, '%s-priority-k-%d.png' % (self.name, k))
    pylab.savefig(figfile)
    print 'Wrote selection priority plot to %s' % figfile