Python matplotlib.cm.Greys_r() Examples
The following are 15 code examples of matplotlib.cm.Greys_r, the reversed greyscale colormap from the matplotlib.cm module. Each example identifies its original project and source file so you can follow it back to the full code. You may also want to check out the other functions and classes available in matplotlib.cm.
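Before the project examples, here is a minimal, self-contained sketch (not drawn from any of the projects below) of the typical usage pattern: Greys_r is a colormap object, so it is passed as the cmap argument of functions such as imshow or imsave rather than built per call.

# Minimal sketch of the common pattern in the examples below:
# pass cm.Greys_r as the `cmap` argument when plotting a 2-D array.
import numpy as np
import matplotlib
matplotlib.use('Agg')          # render off-screen, no display needed
import matplotlib.pyplot as plt
import matplotlib.cm as cm

img = np.random.rand(64, 64)   # dummy grayscale data in [0, 1]
plt.imshow(img, cmap=cm.Greys_r, interpolation='nearest')
plt.axis('off')
plt.savefig('greys_r_demo.png', bbox_inches='tight')
plt.close()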
Example #1
Source File: loading_utils.py From Dropout_BBalpha with MIT License
def plot_images(ax, images, shape, color=False):
    # finally save to file
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt

    # flip 0 to 1
    images = 1.0 - images
    images = reshape_and_tile_images(images, shape, n_cols=len(images))
    if color:
        from matplotlib import cm
        plt.imshow(images, cmap=cm.Greys_r, interpolation='nearest')
    else:
        plt.imshow(images, cmap='Greys')
    ax.axis('off')
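The reshape_and_tile_images helper is defined elsewhere in the project and is not shown on this page. Below is a hypothetical stand-in, assuming each row of images is one flattened grayscale image of size shape; the real implementation (and its default column count) may differ.

# Hypothetical stand-in for the project's reshape_and_tile_images helper,
# assuming each row of `images` is one flattened grayscale image.
import numpy as np

def reshape_and_tile_images(images, shape, n_cols=10):
    n_rows = int(np.ceil(len(images) / float(n_cols)))
    canvas = np.ones((n_rows * shape[0], n_cols * shape[1]))
    for idx, img in enumerate(images):
        r, c = divmod(idx, n_cols)
        canvas[r * shape[0]:(r + 1) * shape[0],
               c * shape[1]:(c + 1) * shape[1]] = img.reshape(shape)
    return canvas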
Example #2
Source File: med2image.py From med2image with MIT License
def slice_save(self, astr_outputFile):
    '''
    Saves a single slice.

    ARGS

    o astr_output
    The output filename to save the slice to.
    '''
    self.dp.qprint('Outputfile = %s' % astr_outputFile)
    fformat = astr_outputFile.split('.')[-1]
    if fformat == 'dcm':
        if self._dcm:
            self._dcm.pixel_array.flat = self._Mnp_2Dslice.flat
            self._dcm.PixelData = self._dcm.pixel_array.tostring()
            self._dcm.save_as(astr_outputFile)
        else:
            raise ValueError('dcm output format only available for DICOM files')
    else:
        pylab.imsave(astr_outputFile, self._Mnp_2Dslice, format=fformat, cmap=cm.Greys_r)
Example #3
Source File: visualisation.py From variational-continual-learning with Apache License 2.0
def plot_images(images, shape, path, filename, n_rows=10, color=True):
    # finally save to file
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt

    images = reshape_and_tile_images(images, shape, n_rows)
    if color:
        from matplotlib import cm
        plt.imsave(fname=path+filename+".png", arr=images, cmap=cm.Greys_r)
    else:
        plt.imsave(fname=path+filename+".png", arr=images, cmap='Greys')
    #plt.axis('off')
    #plt.tight_layout()
    #plt.savefig(path + filename + ".png", format="png")
    print("saving image to " + path + filename + ".png")  # print() for Python 3
    plt.close()
Example #4
Source File: util.py From aitom with GNU General Public License v3.0
def dsp_img(v, new_figure=True):
    import matplotlib.pyplot as plt

    if new_figure:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    else:
        ax = plt

    import matplotlib.cm as cm
    ax_u = ax.imshow(v, cmap=cm.Greys_r)
    ax.axis('off')  # clear x- and y-axes
    plt.pause(0.001)  # calling pause will display the figure without blocking the program, see segmentation.active_contour.morphsnakes.evolve_visual
Example #5
Source File: chapter_04_001.py From Python-Deep-Learning-SE with MIT License
def conv(image, im_filter):
    """
    :param image: grayscale image as a 2-dimensional numpy array
    :param im_filter: 2-dimensional numpy array
    """
    # input dimensions
    height = image.shape[0]
    width = image.shape[1]

    # output image with reduced dimensions
    im_c = np.zeros((height - len(im_filter) + 1,
                     width - len(im_filter) + 1))

    # iterate over all rows and columns
    for row in range(len(im_c)):
        for col in range(len(im_c[0])):
            # apply the filter
            for i in range(len(im_filter)):
                for j in range(len(im_filter[0])):
                    im_c[row, col] += image[row + i, col + j] * im_filter[i][j]

    # fix out-of-bounds values
    im_c[im_c > 255] = 255
    im_c[im_c < 0] = 0

    # plot images for comparison
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm

    plt.figure()
    plt.imshow(image, cmap=cm.Greys_r)
    plt.show()

    plt.imshow(im_c, cmap=cm.Greys_r)
    plt.show()
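The conv function above expects a 2-D grayscale array and a square filter, and displays both the input and the filtered result with cm.Greys_r. A possible driver, not part of the book's code, might look like this:

# Hypothetical driver for the conv() example above: a 3x3 box-blur filter
# applied to random grayscale data. Not part of the original chapter code.
import numpy as np

image = np.random.randint(0, 256, size=(28, 28)).astype(float)
box_blur = np.full((3, 3), 1.0 / 9)   # each output pixel averages a 3x3 patch
conv(image, box_blur)                 # shows the original and filtered images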
Example #6
Source File: caption.py From a-PyTorch-Tutorial-to-Image-Captioning with MIT License
def visualize_att(image_path, seq, alphas, rev_word_map, smooth=True):
    """
    Visualizes caption with weights at every word.

    Adapted from paper authors' repo: https://github.com/kelvinxu/arctic-captions/blob/master/alpha_visualization.ipynb

    :param image_path: path to image that has been captioned
    :param seq: caption
    :param alphas: weights
    :param rev_word_map: reverse word mapping, i.e. ix2word
    :param smooth: smooth weights?
    """
    image = Image.open(image_path)
    image = image.resize([14 * 24, 14 * 24], Image.LANCZOS)

    words = [rev_word_map[ind] for ind in seq]

    for t in range(len(words)):
        if t > 50:
            break
        plt.subplot(np.ceil(len(words) / 5.), 5, t + 1)

        plt.text(0, 1, '%s' % (words[t]), color='black', backgroundcolor='white', fontsize=12)
        plt.imshow(image)
        current_alpha = alphas[t, :]
        if smooth:
            alpha = skimage.transform.pyramid_expand(current_alpha.numpy(), upscale=24, sigma=8)
        else:
            alpha = skimage.transform.resize(current_alpha.numpy(), [14 * 24, 14 * 24])
        if t == 0:
            plt.imshow(alpha, alpha=0)
        else:
            plt.imshow(alpha, alpha=0.8)
        plt.set_cmap(cm.Greys_r)
        plt.axis('off')
    plt.show()
Example #7
Source File: viz.py From Diffusion-Probabilistic-Models with MIT License
def plot_parameter(theta_in, base_fname_part1, base_fname_part2="", title='', n_colors=None):
    """
    Save both a raw and receptive field style plot of the contents of theta_in.
    base_fname_part1 provides the mandatory root of the filename.
    """
    theta = np.array(theta_in.copy())  # in case it was a scalar
    print("%s min %g median %g mean %g max %g shape" % (
        title, np.min(theta), np.median(theta), np.mean(theta), np.max(theta)), theta.shape)  # print() for Python 3
    theta = np.squeeze(theta)
    if len(theta.shape) == 0:
        # it's a scalar -- make it a 1d array
        theta = np.array([theta])
    shp = theta.shape
    if len(shp) > 2:
        theta = theta.reshape((theta.shape[0], -1))
        shp = theta.shape

    ## display basic figure
    plt.figure(figsize=[8, 8])
    if len(shp) == 1:
        plt.plot(theta, '.', alpha=0.5)
    elif len(shp) == 2:
        plt.imshow(theta, interpolation='nearest', aspect='auto', cmap=cm.Greys_r)
        plt.colorbar()
    plt.title(title)
    plt.savefig(base_fname_part1 + '_raw_' + base_fname_part2 + '.pdf')
    plt.close()

    ## also display it in basis function view if it's a matrix, or
    ## if it's a bias with a square number of entries
    if len(shp) >= 2 or is_square(shp[0]):
        if len(shp) == 1:
            theta = theta.reshape((-1, 1))
        plt.figure(figsize=[8, 8])
        if show_receptive_fields(theta, n_colors=n_colors):
            plt.suptitle(title + "receptive fields")
            plt.savefig(base_fname_part1 + '_rf_' + base_fname_part2 + '.pdf')
        plt.close()
Example #8
Source File: io.py From aitom with GNU General Public License v3.0
def save_image_matplotlib(m, out_file, vmin=None, vmax=None):
    import matplotlib.pyplot as PLT
    import matplotlib.cm as CM

    if vmin is None:
        vmin = m.min()
    if vmax is None:
        vmax = m.max()

    ax_u = PLT.imshow(m, cmap=CM.Greys_r, vmin=vmin, vmax=vmax)
    PLT.axis('off')
    PLT.draw()
    PLT.savefig(out_file, bbox_inches='tight')
    PLT.close("all")
Example #9
Source File: chapter_04_001.py From Python-Deep-Learning-Second-Edition with MIT License
def conv(image, im_filter):
    """
    :param image: grayscale image as a 2-dimensional numpy array
    :param im_filter: 2-dimensional numpy array
    """
    # input dimensions
    height = image.shape[0]
    width = image.shape[1]

    # output image with reduced dimensions
    im_c = np.zeros((height - len(im_filter) + 1,
                     width - len(im_filter) + 1))

    # iterate over all rows and columns
    for row in range(len(im_c)):
        for col in range(len(im_c[0])):
            # apply the filter
            for i in range(len(im_filter)):
                for j in range(len(im_filter[0])):
                    im_c[row, col] += image[row + i, col + j] * im_filter[i][j]

    # fix out-of-bounds values
    im_c[im_c > 255] = 255
    im_c[im_c < 0] = 0

    # plot images for comparison
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm

    plt.figure()
    plt.imshow(image, cmap=cm.Greys_r)
    plt.show()

    plt.imshow(im_c, cmap=cm.Greys_r)
    plt.show()
Example #10
Source File: ouroboros_api.py From aggregation with Apache License 2.0
def __display_image__(self, subject_id, args_l, kwargs_l, block=True, title=None):
    """
    return the file names for all the images associated with a given subject_id
    also download them if necessary
    :param subject_id:
    :return:
    """
    subject = self.subject_collection.find_one({"zooniverse_id": subject_id})
    url = subject["location"]["standard"]
    slash_index = url.rfind("/")
    object_id = url[slash_index+1:]

    if not(os.path.isfile(self.base_directory+"/Databases/"+self.project+"/images/"+object_id)):
        # Python 2 urllib API; on Python 3 this is urllib.request.urlretrieve
        urllib.urlretrieve(url, self.base_directory+"/Databases/"+self.project+"/images/"+object_id)

    fname = self.base_directory+"/Databases/"+self.project+"/images/"+object_id
    image_file = cbook.get_sample_data(fname)
    image = plt.imread(image_file)

    fig, ax = plt.subplots()
    im = ax.imshow(image, cmap=cm.Greys_r)

    for args, kwargs in zip(args_l, kwargs_l):
        print(args, kwargs)  # print() for Python 3
        ax.plot(*args, **kwargs)

    if title is not None:
        ax.set_title(title)
    plt.show(block=block)
Example #11
Source File: rrt.py From grammar-activity-prediction with MIT License
def plan_trajectory_with_ui(img):
    fig = ppl.gcf()
    fig.clf()
    ax = fig.add_subplot(1, 1, 1)
    ax.imshow(img, cmap=cm.Greys_r)
    ax.axis('image')
    ppl.draw()
    print('Map is', len(img[0]), 'x', len(img))  # print() for Python 3
    start, goal = select_start_goal_points(ax, img)
    path = rrt(img, start, goal, ax)
    return path
Example #12
Source File: MorseDecoder.py From LSTM_morse with MIT License
def infer(model, fnImg):
    "recognize text in image provided by file path"
    img = create_image2(fnImg, model.imgSize)
    plt.imshow(img, cmap=cm.Greys_r)
    batch = Batch(None, [img])
    (recognized, probability) = model.inferBatch(batch, True)
    print('Recognized:', '"' + recognized[0] + '"')
    print('Probability:', probability[0])
    print(recognized)

#from pyAudioAnalysis.audioSegmentation import silence_removal
Example #13
Source File: demo.py From Image-Captioning-PyTorch with Apache License 2.0
def visualize_att(image_path, seq, alphas, rev_word_map, i, smooth=True):
    """
    Visualizes caption with weights at every word.

    Adapted from paper authors' repo: https://github.com/kelvinxu/arctic-captions/blob/master/alpha_visualization.ipynb

    :param image_path: path to image that has been captioned
    :param seq: caption
    :param alphas: weights
    :param rev_word_map: reverse word mapping, i.e. ix2word
    :param smooth: smooth weights?
    """
    image = Image.open(image_path)
    image = image.resize([14 * 24, 14 * 24], Image.LANCZOS)

    words = [rev_word_map[ind] for ind in seq]
    print(words)

    for t in range(len(words)):
        if t > 50:
            break
        plt.subplot(np.ceil(len(words) / 5.), 5, t + 1)

        plt.text(0, 1, '%s' % (words[t]), color='black', backgroundcolor='white', fontsize=12)
        plt.imshow(image)
        current_alpha = alphas[t, :]
        if smooth:
            alpha = skimage.transform.pyramid_expand(current_alpha.numpy(), upscale=24, sigma=8)
        else:
            alpha = skimage.transform.resize(current_alpha.numpy(), [14 * 24, 14 * 24])
        if t == 0:
            plt.imshow(alpha, alpha=0)
        else:
            plt.imshow(alpha, alpha=0.8)
        plt.set_cmap(cm.Greys_r)
        plt.axis('off')
    plt.savefig('images/out_{}.jpg'.format(i))
    plt.close()
Example #14
Source File: utils_visualise.py From DeepVis-PredDiff with MIT License
def plot_results(x_test, x_test_im, sensMap, predDiff, tarFunc, classnames, testIdx, save_path):
    '''
    Plot the results of the relevance estimation
    '''
    imsize = x_test.shape
    tarIdx = np.argmax(tarFunc(x_test)[-1])
    tarClass = classnames[tarIdx]
    #tarIdx = 287

    plt.figure()
    plt.subplot(2, 2, 1)
    plt.imshow(x_test_im, interpolation='nearest')
    plt.title('original')
    frame = pylab.gca()
    frame.axes.get_xaxis().set_ticks([])
    frame.axes.get_yaxis().set_ticks([])

    plt.subplot(2, 2, 2)
    plt.imshow(sensMap, cmap=cm.Greys_r, interpolation='nearest')
    plt.title('sensitivity map')
    frame = pylab.gca()
    frame.axes.get_xaxis().set_ticks([])
    frame.axes.get_yaxis().set_ticks([])

    plt.subplot(2, 2, 3)
    p = predDiff.reshape((imsize[1], imsize[2], -1))[:, :, tarIdx]
    plt.imshow(p, cmap=cm.seismic, vmin=-np.max(np.abs(p)), vmax=np.max(np.abs(p)), interpolation='nearest')
    plt.colorbar()
    #plt.imshow(np.abs(p), cmap=cm.Greys_r)
    plt.title('weight of evidence')
    frame = pylab.gca()
    frame.axes.get_xaxis().set_ticks([])
    frame.axes.get_yaxis().set_ticks([])

    plt.subplot(2, 2, 4)
    plt.title('class: {}'.format(tarClass))
    p = get_overlayed_image(x_test_im, p)
    #p = predDiff[0,:,:,np.argmax(netPred(net, x_test)[0]),1].reshape((224,224))
    plt.imshow(p, cmap=cm.seismic, vmin=-np.max(np.abs(p)), vmax=np.max(np.abs(p)), interpolation='nearest')
    #plt.title('class entropy')
    frame = pylab.gca()
    frame.axes.get_xaxis().set_ticks([])
    frame.axes.get_yaxis().set_ticks([])

    fig = plt.gcf()
    fig.set_size_inches(np.array([12, 12]), forward=True)
    plt.tight_layout()
    plt.tight_layout()
    plt.tight_layout()
    plt.savefig(save_path)
    plt.close()
Example #15
Source File: poe_fig.py From MJHMC with GNU General Public License v2.0
def plot_imgs(imgs, samp_names, step_nums, vmin=-2, vmax=2):
    plt.figure(figsize=(5.5, 3.6))
    nsamplers = len(samp_names)
    nsteps = len(step_nums)

    plt.subplot(nsamplers+1, nsteps+1, 1)
    plt.axis('off')
    plt.text(0.9, -0.1, "# grads", horizontalalignment='right', verticalalignment='bottom')

    for step_i in range(nsteps):
        plt.subplot(nsamplers+1, nsteps+1, 2 + step_i)
        plt.axis('off')
        plt.text(0.5, -0.1, "%d" % step_nums[step_i], horizontalalignment='center', verticalalignment='bottom')

    for samp_i in range(nsamplers):
        plt.subplot(nsamplers+1, nsteps+1, 1 + (samp_i+1)*(nsteps+1))
        plt.axis('off')
        plt.text(0.9, 0.5, samp_names[samp_i], horizontalalignment='right', verticalalignment='center')

    for samp_i in range(nsamplers):
        for step_i in range(nsteps):
            plt.subplot(nsamplers+1, nsteps+1, 2 + step_i + (samp_i+1)*(nsteps+1))
            ptch = imgs[samp_i][step_i].copy()
            img_w = int(np.sqrt(np.prod(ptch.shape)))  # cast to int so reshape accepts it
            ptch = ptch.reshape((img_w, img_w))
            ptch -= vmin
            ptch /= vmax - vmin
            plt.imshow(ptch, interpolation='nearest', cmap=cm.Greys_r)
            plt.axis('off')

    # plt.tight_layout()
    plt.savefig('poe_samples.pdf')
    plt.close()