Python matplotlib.pylab.scatter() Examples
The following are 17 code examples of matplotlib.pylab.scatter(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module matplotlib.pylab, or try the search function.
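As a quick orientation before the project examples, here is a minimal sketch of the call itself; the data and the styling parameters are illustrative and not taken from any of the projects below.

import numpy as np
import matplotlib.pylab as pylab

# Two small synthetic point clouds, drawn with separate scatter calls.
rng = np.random.default_rng(0)
x, y = rng.normal(size=50), rng.normal(size=50)
pylab.scatter(x, y, s=40, alpha=0.7, label='cloud A')
pylab.scatter(x + 3, y + 3, s=40, alpha=0.7, marker='^', label='cloud B')
pylab.legend()
pylab.show()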
Example #1
Source File: models.py From philo2vec with MIT License | 7 votes |
def plot(self, words, num_points=None):
    if not num_points:
        num_points = len(words)
    embeddings = self.get_words_embeddings(words)
    tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
    two_d_embeddings = tsne.fit_transform(embeddings[:num_points, :])
    assert two_d_embeddings.shape[0] >= len(words), 'More labels than embeddings'
    pylab.figure(figsize=(15, 15))  # in inches
    for i, label in enumerate(words[:num_points]):
        x, y = two_d_embeddings[i, :]
        pylab.scatter(x, y)
        pylab.annotate(label, xy=(x, y), xytext=(5, 2),
                       textcoords='offset points', ha='right', va='bottom')
    pylab.show()
Example #2
Source File: plot_kmeans_example.py From Building-Machine-Learning-Systems-With-Python-Second-Edition with MIT License | 6 votes |
def plot_clustering(x, y, title, mx=None, ymax=None, xmin=None, km=None):
    pylab.figure(num=None, figsize=(8, 6))
    if km:
        pylab.scatter(x, y, s=50, c=km.predict(list(zip(x, y))))
    else:
        pylab.scatter(x, y, s=50)
    pylab.title(title)
    pylab.xlabel("Occurrence word 1")
    pylab.ylabel("Occurrence word 2")
    pylab.autoscale(tight=True)
    pylab.ylim(ymin=0, ymax=1)
    pylab.xlim(xmin=0, xmax=1)
    pylab.grid(True, linestyle='-', color='0.75')
    return pylab
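A hypothetical call of plot_clustering might look like the following; the k-means setup and the random data are illustrative and not part of the snippet above.

import numpy as np
from sklearn.cluster import KMeans

# Random 2-D points in [0, 1] x [0, 1], matching the axis limits above.
x = np.random.rand(100)
y = np.random.rand(100)
km = KMeans(n_clusters=3).fit(np.column_stack([x, y]))
chart = plot_clustering(x, y, "k-means demo", km=km)
chart.show()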
Example #3
Source File: convolutional_sccs.py From tick with BSD 3-Clause "New" or "Revised" License | 6 votes |
def _plot_intensity(ax, coeffs, upper_bound, lower_bound):
    n_coeffs = len(coeffs)
    if n_coeffs > 1:
        x = np.arange(n_coeffs)
        ax.step(x, np.exp(coeffs), label="Estimated RI")
        if upper_bound is not None and lower_bound is not None:
            ax.fill_between(x, np.exp(lower_bound), np.exp(upper_bound),
                            alpha=.5, color='orange', step='pre',
                            label="95% bootstrap CI")
    elif n_coeffs == 1:
        if upper_bound is not None and lower_bound is not None:
            ax.errorbar(0, coeffs,
                        yerr=(np.exp(lower_bound), np.exp(upper_bound)),
                        fmt='o', ecolor='orange')
        else:
            ax.scatter([0], np.exp(coeffs), label="Estimated RI")
    return ax

# Internals
Example #4
Source File: portfolio.py From FinQuant with MIT License | 6 votes |
def plot_stocks(self, freq=252):
    """Plots the Expected annual Returns over annual Volatility of
    the stocks of the portfolio.

    :Input:
     :freq: ``int`` (default: ``252``), number of trading days, default
         value corresponds to trading days in a year.
    """
    # annual mean returns of all stocks
    stock_returns = self.comp_mean_returns(freq=freq)
    stock_volatility = self.comp_stock_volatility(freq=freq)
    # adding stocks of the portfolio to the plot
    # plot stocks individually:
    plt.scatter(stock_volatility, stock_returns,
                marker="o", s=100, label="Stocks")
    # adding text to stocks in plot:
    for i, txt in enumerate(stock_returns.index):
        plt.annotate(
            txt,
            (stock_volatility[i], stock_returns[i]),
            xytext=(10, 0),
            textcoords="offset points",
            label=i,
        )
    plt.legend()
Example #5
Source File: deepjdot_svhn_mnist.py From deepJDOT with MIT License | 5 votes |
def tsne_plot(xs, xt, xs_label, xt_label, subset=True, title=None, pname=None):
    num_test = 1000
    import matplotlib.cm as cm
    if subset:
        combined_imgs = np.vstack([xs[0:num_test, :], xt[0:num_test, :]])
        combined_labels = np.vstack([xs_label[0:num_test, :],
                                     xt_label[0:num_test, :]])
        combined_labels = combined_labels.astype('int')
        combined_domain = np.vstack([np.zeros((num_test, 1)),
                                     np.ones((num_test, 1))])
    from sklearn.manifold import TSNE
    tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=3000)
    source_only_tsne = tsne.fit_transform(combined_imgs)
    plt.figure(figsize=(15, 15))
    plt.scatter(source_only_tsne[:num_test, 0], source_only_tsne[:num_test, 1],
                c=combined_labels[:num_test].argmax(1), s=50, alpha=0.5,
                marker='o', cmap=cm.jet, label='source')
    plt.scatter(source_only_tsne[num_test:, 0], source_only_tsne[num_test:, 1],
                c=combined_labels[num_test:].argmax(1), s=50, alpha=0.5,
                marker='+', cmap=cm.jet, label='target')
    plt.axis('off')
    plt.legend(loc='best')
    plt.title(title)
    # note: `filesave` is not defined in this snippet; it is presumably a
    # module-level flag in the original source
    if filesave:
        plt.savefig(os.path.join(pname, title + '.png'),
                    bbox_inches='tight', pad_inches=0, format='png')
    else:
        plt.savefig(title + '.png')
    plt.close()

#%% source model
Example #6
Source File: 5_word2vec.py From udacity-deep-learning with GNU General Public License v3.0 | 5 votes |
def plot(embeddings, labels):
    assert embeddings.shape[0] >= len(labels), 'More labels than embeddings'
    pylab.figure(figsize=(15, 15))  # in inches
    for i, label in enumerate(labels):
        x, y = embeddings[i, :]
        pylab.scatter(x, y)
        pylab.annotate(label, xy=(x, y), xytext=(5, 2),
                       textcoords='offset points', ha='right', va='bottom')
    pylab.show()
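A hypothetical call (the labels and the stand-in embedding matrix below are illustrative):

import numpy as np

labels = ['king', 'queen', 'man', 'woman']
embeddings = np.random.rand(len(labels), 2)  # stand-in for real 2-D t-SNE output
plot(embeddings, labels)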
Example #7
Source File: assign5_word2vec.py From deep-learning-samples with The Unlicense | 5 votes |
def plot(embeddings, labels):
    assert embeddings.shape[0] >= len(labels), 'More labels than embeddings'
    pylab.figure(figsize=(15, 15))  # in inches
    for i, label in enumerate(labels):
        x, y = embeddings[i, :]
        pylab.scatter(x, y)
        pylab.annotate(label, xy=(x, y), xytext=(5, 2),
                       textcoords='offset points', ha='right', va='bottom')
    pylab.show()
Example #8
Source File: convolutional_sccs.py From tick with BSD 3-Clause "New" or "Revised" License | 5 votes |
def plot_learning_curves(self, hyperparameter):
    if hyperparameter == "TV":
        C = self.C_tv_history
    elif hyperparameter == "Group L1":
        C = self.C_group_l1_history
    else:
        raise ValueError("hyperparameter value should be either `TV` or"
                         " `Group L1`")
    x = np.log10(C)
    order = np.argsort(x)
    m = np.array(self.kfold_mean_train_scores)[order]
    sd = np.array(self.kfold_sd_train_scores)[order]
    fig = plt.figure()
    ax = plt.gca()
    p1 = ax.plot(x[order], m)
    p2 = ax.fill_between(x[order], m - sd, m + sd, alpha=.3)
    min_point_train = np.min(m - sd)
    m = np.array(self.kfold_mean_test_scores)[order]
    sd = np.array(self.kfold_sd_test_scores)[order]
    p3 = ax.plot(x[order], m)
    p4 = ax.fill_between(x[order], m - sd, m + sd, alpha=.3)
    min_point_test = np.min(m - sd)
    min_point = min(min_point_train, min_point_test)
    p5 = plt.scatter(np.log10(C), min_point * np.ones_like(C))
    ax.legend([(p1[0], p2), (p3[0], p4), p5],
              ['train score', 'test score', 'tested hyperparameters'],
              loc='lower right')
    ax.set_title('Learning curves')
    ax.set_xlabel('C %s (log scale)' % hyperparameter)
    ax.set_ylabel('Loss')
    return fig, ax
Example #9
Source File: efficient_frontier.py From FinQuant with MIT License | 5 votes |
def plot_optimal_portfolios(self):
    """Plots markers of the optimised portfolios for
    - minimum Volatility, and
    - maximum Sharpe Ratio.
    """
    # compute optimal portfolios
    min_vol_weights = self.minimum_volatility(save_weights=False)
    max_sharpe_weights = self.maximum_sharpe_ratio(save_weights=False)
    # compute return and volatility for each portfolio
    min_vol_vals = list(
        annualised_portfolio_quantities(
            min_vol_weights, self.mean_returns, self.cov_matrix, freq=self.freq
        )
    )[0:2]
    min_vol_vals.reverse()
    max_sharpe_vals = list(
        annualised_portfolio_quantities(
            max_sharpe_weights, self.mean_returns, self.cov_matrix, freq=self.freq
        )
    )[0:2]
    max_sharpe_vals.reverse()
    plt.scatter(
        min_vol_vals[0],
        min_vol_vals[1],
        marker="X",
        color="g",
        s=150,
        label="EF min Volatility",
    )
    plt.scatter(
        max_sharpe_vals[0],
        max_sharpe_vals[1],
        marker="X",
        color="r",
        s=150,
        label="EF max Sharpe Ratio",
    )
    plt.legend()
Example #10
Source File: demo_mi.py From Building-Machine-Learning-Systems-With-Python-Second-Edition with MIT License | 5 votes |
def _plot_mi_func(x, y):
    mi = mutual_info(x, y)
    title = "NI($X_1$, $X_2$) = %.3f" % mi
    pylab.scatter(x, y)
    pylab.title(title)
    pylab.xlabel("$X_1$")
    pylab.ylabel("$X_2$")
Example #11
Source File: kNN.py From statistical-learning-methods-note with Apache License 2.0 | 4 votes |
def plotScatter(self, xList, yList, saveFigPath):
    '''
    Draw a scatter plot of the feature data xList coloured by the
    class labels yList, and save the figure to saveFigPath.
    :param xList: sample features
    :param yList: sample class labels
    :param saveFigPath: path under which the scatter plot is saved
    :return:
    '''
    # Check whether the features have at least two dimensions.
    # If so, only the first two dimensions are visualised.
    if len(xList[0]) >= 2:
        x1List = [x[0] for x in xList]
        x2List = [x[1] for x in xList]
    else:
        # visualise 1-dimensional data in 2-D as well
        x1List = x2List = [x[0] for x in xList]
    # create a new figure
    scatterFig = plt.figure(saveFigPath)
    # predefined colour for each class label
    colorDict = {-1: 'm', 1: 'r', 2: 'b', 3: 'pink', 4: 'orange'}
    # plot every point (rewritten as a for loop over range(); the original
    # used Python 2's eagerly evaluated map() over xrange())
    for idx in range(len(x1List)):
        plt.scatter(x1List[idx],
                    x2List[idx],
                    marker='o',
                    color=colorDict[yList[idx]],
                    label=yList[idx])
    # a per-class legend was left commented out in the source:
    # ySet = set(yList)
    # map(lambda y: plt.legend(str(y), loc='best'), ySet)
    # set the remaining properties, save the figure, then show it
    plt.title(saveFigPath)
    plt.xlabel(r'$x^1$')
    plt.ylabel(r'$x^2$')
    plt.grid(True)
    plt.savefig(saveFigPath)
    plt.show()
Example #12
Source File: demo_pca.py From Building-Machine-Learning-Systems-With-Python-Second-Edition with MIT License | 4 votes |
def plot_simple_demo_lda():
    pylab.clf()
    fig = pylab.figure(num=None, figsize=(10, 4))
    pylab.subplot(121)

    title = "Original feature space"
    pylab.title(title)
    pylab.xlabel("$X_1$")
    pylab.ylabel("$X_2$")

    good = x1 > x2
    bad = ~good

    x1g = x1[good]
    x2g = x2[good]
    pylab.scatter(x1g, x2g, edgecolor="blue", facecolor="blue")

    x1b = x1[bad]
    x2b = x2[bad]
    pylab.scatter(x1b, x2b, edgecolor="red", facecolor="white")

    pylab.grid(True)

    pylab.subplot(122)

    X = np.c_[(x1, x2)]

    lda_inst = lda.LDA(n_components=1)
    Xtrans = lda_inst.fit_transform(X, good)

    Xg = Xtrans[good]
    Xb = Xtrans[bad]

    pylab.scatter(
        Xg[:, 0], np.zeros(len(Xg)), edgecolor="blue", facecolor="blue")
    pylab.scatter(
        Xb[:, 0], np.zeros(len(Xb)), edgecolor="red", facecolor="white")
    title = "Transformed feature space"
    pylab.title(title)
    pylab.xlabel("$X'$")
    fig.axes[1].get_yaxis().set_visible(False)

    pylab.grid(True)

    pylab.autoscale(tight=True)
    filename = "lda_demo.png"
    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
Example #13
Source File: demo_pca.py From Building-Machine-Learning-Systems-With-Python-Second-Edition with MIT License | 4 votes |
def plot_simple_demo_2():
    pylab.clf()
    fig = pylab.figure(num=None, figsize=(10, 4))
    pylab.subplot(121)

    title = "Original feature space"
    pylab.title(title)
    pylab.xlabel("$X_1$")
    pylab.ylabel("$X_2$")

    x1 = np.arange(0, 10, .2)
    x2 = x1 + np.random.normal(scale=1, size=len(x1))

    good = x1 > x2
    bad = ~good

    x1g = x1[good]
    x2g = x2[good]
    pylab.scatter(x1g, x2g, edgecolor="blue", facecolor="blue")

    x1b = x1[bad]
    x2b = x2[bad]
    pylab.scatter(x1b, x2b, edgecolor="red", facecolor="white")

    pylab.grid(True)

    pylab.subplot(122)

    X = np.c_[(x1, x2)]

    pca = decomposition.PCA(n_components=1)
    Xtrans = pca.fit_transform(X)

    Xg = Xtrans[good]
    Xb = Xtrans[bad]

    pylab.scatter(
        Xg[:, 0], np.zeros(len(Xg)), edgecolor="blue", facecolor="blue")
    pylab.scatter(
        Xb[:, 0], np.zeros(len(Xb)), edgecolor="red", facecolor="white")
    title = "Transformed feature space"
    pylab.title(title)
    pylab.xlabel("$X'$")
    fig.axes[1].get_yaxis().set_visible(False)

    print(pca.explained_variance_ratio_)

    pylab.grid(True)

    pylab.autoscale(tight=True)
    filename = "pca_demo_2.png"
    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
Example #14
Source File: demo_pca.py From Building-Machine-Learning-Systems-With-Python-Second-Edition with MIT License | 4 votes |
def plot_simple_demo_1():
    pylab.clf()
    fig = pylab.figure(num=None, figsize=(10, 4))
    pylab.subplot(121)

    title = "Original feature space"
    pylab.title(title)
    pylab.xlabel("$X_1$")
    pylab.ylabel("$X_2$")

    x1 = np.arange(0, 10, .2)
    x2 = x1 + np.random.normal(scale=1, size=len(x1))

    good = (x1 > 5) | (x2 > 5)
    bad = ~good

    x1g = x1[good]
    x2g = x2[good]
    pylab.scatter(x1g, x2g, edgecolor="blue", facecolor="blue")

    x1b = x1[bad]
    x2b = x2[bad]
    pylab.scatter(x1b, x2b, edgecolor="red", facecolor="white")

    pylab.grid(True)

    pylab.subplot(122)

    X = np.c_[(x1, x2)]

    pca = decomposition.PCA(n_components=1)
    Xtrans = pca.fit_transform(X)

    Xg = Xtrans[good]
    Xb = Xtrans[bad]

    pylab.scatter(
        Xg[:, 0], np.zeros(len(Xg)), edgecolor="blue", facecolor="blue")
    pylab.scatter(
        Xb[:, 0], np.zeros(len(Xb)), edgecolor="red", facecolor="white")
    title = "Transformed feature space"
    pylab.title(title)
    pylab.xlabel("$X'$")
    fig.axes[1].get_yaxis().set_visible(False)

    print(pca.explained_variance_ratio_)

    pylab.grid(True)

    pylab.autoscale(tight=True)
    filename = "pca_demo_1.png"
    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
Example #15
Source File: diagnostics.py From photometrypipeline with GNU General Public License v3.0 | 4 votes |
def fwhm_vs_time_plot(self, extraction, data):
    """create fwhm plot"""
    logging.info('create FWHM plot')

    fwhm_filename = os.path.join(self.conf.diagnostics_path,
                                 '.diagnostics',
                                 'fwhm.' + self.conf.image_file_format)
    frame_midtimes = np.array([frame['time'] for frame in extraction])
    fwhm = [np.median(frame['catalog_data']['FWHM_IMAGE'])
            for frame in extraction]
    fwhm_sig = [np.std(frame['catalog_data']['FWHM_IMAGE'])
                for frame in extraction]

    fig, ax = plt.subplots()
    ax.set_title('Median PSF FWHM per Frame')
    ax.set_xlabel('Minutes after {:s} UT'.format(
        Time(frame_midtimes.min(), format='jd',
             out_subfmt='date_hm').iso))
    ax.set_ylabel('Point Source FWHM (px)')
    ax.scatter((frame_midtimes - frame_midtimes.min()) * 1440, fwhm,
               marker='o', color='black')
    xrange = [plt.xlim()[0], plt.xlim()[1]]
    ax.plot(xrange, [data['optimum_aprad'] * 2, data['optimum_aprad'] * 2],
            color='blue')
    ax.set_xlim(xrange)
    ax.set_ylim([0, max([data['optimum_aprad'] * 2 + 1, max(fwhm)])])
    ax.grid()
    fig.savefig(fwhm_filename, dpi=self.conf.plot_dpi,
                format=self.conf.image_file_format)
    data['fwhm_filename'] = fwhm_filename

    # create html map
    if self.conf.individual_frame_pages:
        data['fwhm_map'] = ""
        for i in range(len(extraction)):
            x, y = ax.transData.transform_point(
                [((frame_midtimes - frame_midtimes.min()) * 1440)[i],
                 fwhm[i]])
            filename = extraction[i]['fits_filename']
            data['fwhm_map'] += (
                '<area shape="circle" coords="{:.1f},{:.1f},{:.1f}" '
                'href="{:s}#{:s}" alt="{:s}" title="{:s}">\n').format(
                    x, fig.bbox.height - y, 5,
                    os.path.join(self.conf.diagnostics_path,
                                 '.diagnostics', filename + '.html'),
                    '', filename, filename)

    logging.info('FWHM plot created')
Example #16
Source File: data_utils.py From DeepLearningImplementations with MIT License | 4 votes |
def plot_generated_toy_batch(X_real, generator_model, discriminator_model,
                             noise_dim, gen_iter, noise_scale=0.5):
    # Generate images
    X_gen = sample_noise(noise_scale, 10000, noise_dim)
    X_gen = generator_model.predict(X_gen)

    # Get some toy data to plot KDE of real data
    data = load_toy(pts_per_mixture=200)
    x = data[:, 0]
    y = data[:, 1]
    xmin, xmax = -1.5, 1.5
    ymin, ymax = -1.5, 1.5

    # Perform the kernel density estimate
    xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
    positions = np.vstack([xx.ravel(), yy.ravel()])
    values = np.vstack([x, y])
    kernel = stats.gaussian_kde(values)
    f = np.reshape(kernel(positions).T, xx.shape)

    # Plot the contour
    fig = plt.figure(figsize=(10, 10))
    plt.suptitle("Generator iteration %s" % gen_iter,
                 fontweight="bold", fontsize=22)
    ax = fig.gca()
    ax.contourf(xx, yy, f, cmap='Blues', vmin=np.percentile(f, 80),
                vmax=np.max(f), levels=np.linspace(0.25, 0.85, 30))

    # Also plot the contour of the discriminator
    delta = 0.025
    xmin, xmax = -1.5, 1.5
    ymin, ymax = -1.5, 1.5
    # Create mesh
    XX, YY = np.meshgrid(np.arange(xmin, xmax, delta),
                         np.arange(ymin, ymax, delta))
    arr_pos = np.vstack((np.ravel(XX), np.ravel(YY))).T
    # Get Z = predictions
    ZZ = discriminator_model.predict(arr_pos)
    ZZ = ZZ.reshape(XX.shape)

    # Plot contour
    ax.contour(XX, YY, ZZ, cmap="Blues", levels=np.linspace(0.25, 0.85, 10))
    dy, dx = np.gradient(ZZ)

    # Add streamlines
    # plt.streamplot(XX, YY, dx, dy, linewidth=0.5, cmap="magma",
    #                density=1, arrowsize=1)

    # Scatter generated data
    plt.scatter(X_gen[:1000, 0], X_gen[:1000, 1], s=20,
                color="coral", marker="o")

    l_gen = plt.Line2D((0, 1), (0, 0), color='coral', marker='o',
                       linestyle='', markersize=20)
    l_D = plt.Line2D((0, 1), (0, 0), color='steelblue', linewidth=3)
    l_real = plt.Rectangle((0, 0), 1, 1, fc="steelblue")

    # Create legend from custom artist/label lists
    # bbox_to_anchor = (0.4, 1)
    ax.legend([l_real, l_D, l_gen],
              ['Real data KDE', 'Discriminator contour', 'Generated data'],
              fontsize=18, loc="upper left")
    ax.set_xlim(xmin, xmax)
    ax.set_ylim(ymin, ymax + 0.8)
    plt.savefig("../../figures/toy_dataset_iter%s.jpg" % gen_iter)
    plt.clf()
    plt.close()
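The KDE-contour-plus-scatter technique in this example also works on its own; below is a minimal sketch on synthetic data (every name and value is illustrative, none comes from the repository above).

import numpy as np
import matplotlib.pyplot as plt
from scipy import stats

rng = np.random.default_rng(0)
pts = rng.normal(0, 0.4, size=(500, 2))  # toy 2-D samples

# Evaluate a Gaussian KDE of the samples on a regular grid.
xx, yy = np.mgrid[-1.5:1.5:100j, -1.5:1.5:100j]
kernel = stats.gaussian_kde(pts.T)
f = kernel(np.vstack([xx.ravel(), yy.ravel()])).reshape(xx.shape)

plt.contourf(xx, yy, f, cmap='Blues')  # density contours
plt.scatter(pts[:100, 0], pts[:100, 1], s=10, color='coral')  # overlay points
plt.show()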
Example #17
Source File: GeospatIal Distribution DYnamics.py From python-urbanPlanning with MIT License | 4 votes |
def dirAnalyofDynLISAs(data_df, zipPolygon):
    caseWeekly_unstack = data_df['CasesWeekly'].unstack(level=0)
    zip_codes = gpd.read_file(zipPolygon)
    data_df_zipGPD = zip_codes.merge(caseWeekly_unstack, left_on='zip',
                                     right_on=caseWeekly_unstack.index)
    # print(data_df_zipGPD)
    W = ps.lib.weights.Queen(data_df_zipGPD.geometry)
    W.transform = 'R'
    weeks = idx_weekNumber = data_df.index.get_level_values('Week Number')
    weeks = np.unique(weeks)
    valArray = data_df_zipGPD[weeks].to_numpy()
    valArray_fillNan = bfill(valArray).T
    valArray_fillNan[np.isnan(valArray_fillNan)] = 0
    # print(valArray_fillNan, valArray_fillNan.shape)
    rvalArray = (valArray_fillNan.T / valArray_fillNan.mean(axis=1))
    # print(rvalArray.shape)
    Y = rvalArray[:, [0, -1]]
    # print(Y.shape)
    np.random.seed(100)
    r4 = Rose(Y, W, k=4)
    plt.figure()
    r4.plot()
    # plt.scatter(Y[:, 0], r4.lag[:, 0]) -- each point is placed at the
    # starting relative income (x) and the spatial lag of the starting
    # relative value (y)
    r4.plot(Y[:, 0])  # condition on starting relative income
    r4.plot(attribute=r4.lag[:, 0])  # condition on the spatial lag of
                                     # starting relative income
    r4.plot_vectors()  # LISA vectors
    r4.plot_vectors(arrows=False)
    r4.plot_origin()  # origin standardized

    # did not understand the following part
    print("cuts:", r4.cuts)
    print("counts:", r4.counts)
    np.random.seed(1234)
    r4.permute(permutations=999)
    print("p:", r4.p)
    r4.permute(alternative='positive', permutations=999)
    print("alter-positive:", r4.p)
    print("expected-positive:", r4.expected_perm)
    r4.permute(alternative='negative', permutations=999)
    print("alter-negative:", r4.p)
    # help(r4)
    # print(help(r4.plot()))

# population quantile