Python seaborn.regplot() Examples
The following are 30 code examples of seaborn.regplot(), drawn from open-source projects.
You can go to the original project or source file by following the link above each example,
or browse the other available functions and classes of the seaborn module.
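Before the project code below, here is a minimal, self-contained sketch of a typical regplot() call. The synthetic data and variable names are illustrative only and are not taken from any of the examples on this page:

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# illustrative synthetic data: a noisy linear relationship
rng = np.random.default_rng(0)
df = pd.DataFrame({"x": rng.uniform(0, 10, 100)})
df["y"] = 2.0 * df["x"] + rng.normal(0, 1.5, 100)

# scatter plot plus a fitted regression line with a bootstrapped confidence band
ax = sns.regplot(x="x", y="y", data=df, scatter_kws={"s": 10})
ax.set(xlabel="x", ylabel="y", title="seaborn.regplot() basic usage")
plt.show()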
Example #1
Source File: nanoplotter_main.py From NanoPlot with GNU General Public License v3.0 | 6 votes |
def yield_by_minimal_length_plot(array, name, path, title=None, color="#4CB391", figformat="png"):
    df = pd.DataFrame(data={"lengths": np.sort(array)[::-1]})
    df["cumyield_gb"] = df["lengths"].cumsum() / 10**9
    yield_by_length = Plot(
        path=path + "Yield_By_Length." + figformat,
        title="Yield by length")
    ax = sns.regplot(
        x='lengths',
        y="cumyield_gb",
        data=df,
        x_ci=None,
        fit_reg=False,
        color=color,
        scatter_kws={"s": 3})
    ax.set(
        xlabel='Read length',
        ylabel='Cumulative yield for minimal length',
        title=title or yield_by_length.title)
    yield_by_length.fig = ax.get_figure()
    yield_by_length.save(format=figformat)
    plt.close("all")
    return yield_by_length
Example #2
Source File: business_case_solver_without_classes.py From themarketingtechnologist with Apache License 2.0 | 6 votes |
def visualize_results(df):
    # Visualize logistic curve using seaborn
    sns.set(style="darkgrid")
    sns.regplot(x="pageviews_cumsum",
                y="is_conversion",
                data=df,
                logistic=True,
                n_boot=500,
                y_jitter=.01,
                scatter_kws={"s": 60})
    sns.set(font_scale=1.3)
    sns.plt.title('Logistic Regression Curve')
    sns.plt.ylabel('Conversion probability')
    sns.plt.xlabel('Cumulative sum of pageviews')
    sns.plt.subplots_adjust(right=0.93, top=0.90, left=0.10, bottom=0.10)
    sns.plt.show()

# Run the final program
Example #3
Source File: business_case_solver.py From themarketingtechnologist with Apache License 2.0 | 6 votes |
def visualize_results(self):
    # Visualize logistic curve using seaborn
    sns.set(style="darkgrid")
    sns.regplot(x="pageviews_cumsum",
                y="is_conversion",
                data=self.df,
                logistic=True,
                n_boot=500,
                y_jitter=.01,
                scatter_kws={"s": 60})
    sns.set(font_scale=1.3)
    sns.plt.title('Logistic Regression Curve')
    sns.plt.ylabel('Conversion probability')
    sns.plt.xlabel('Cumulative sum of pageviews')
    sns.plt.subplots_adjust(right=0.93, top=0.90, left=0.10, bottom=0.10)
    sns.plt.show()
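Note that Examples #2 and #3 rely on sns.plt, an undocumented alias that older seaborn versions happened to expose and that later releases removed. Assuming that is the case for your installation, an equivalent with current seaborn imports matplotlib.pyplot directly (a sketch only, reusing the df dataframe and column names from the example above):

import matplotlib.pyplot as plt
import seaborn as sns

sns.set(style="darkgrid", font_scale=1.3)
sns.regplot(x="pageviews_cumsum", y="is_conversion", data=df,
            logistic=True, n_boot=500, y_jitter=.01, scatter_kws={"s": 60})
plt.title('Logistic Regression Curve')
plt.ylabel('Conversion probability')
plt.xlabel('Cumulative sum of pageviews')
plt.subplots_adjust(right=0.93, top=0.90, left=0.10, bottom=0.10)
plt.show()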
Example #4
Source File: ComparativeAnalyzer.py From faas-profiler with MIT License | 6 votes |
def RelativeDegradation(combined_stat_df):
    """
    This function analyzes the relative degradation of one or more functions.
    """
    # print(combined_stat_df)
    fig, axs = plt.subplots(ncols=1, sharex=True)
    # combined_stat_df.plot(kind='scatter', x='rate', y='rel_stress',
    #                       alpha=0.5, marker='o', ax=axs[0])
    # combined_stat_df.plot(kind='line', x='rate', y='throughput',
    #                       alpha=0.5, marker='o', ax=axs[1])
    # sns.relplot(data=combined_stat_df, x='rate', y='throughput', ax=axs[1], kind='line')
    function_of_interest = 'rand_vector_loop_d'
    test_cats = set(combined_stat_df['Test Category'])
    for test_cat in test_cats:
        df = combined_stat_df[combined_stat_df['Test Category'] == test_cat]
        df = df[df['func_name'] == function_of_interest]
        sns.regplot(data=df, x='rate', y='throughput', ax=axs, order=2, truncate=True)
    plt.xlabel('test')
    plt.ylabel('Function Throughput')
    plt.show()
    plt.close()
Example #5
Source File: base.py From auxiliary-deep-generative-models with MIT License | 6 votes |
def plot_eval(self, eval_dict, labels, path_extension=""):
    """
    Plot the loss function in a overall plot and a zoomed plot.
    :param path_extension: If the plot should be saved in an incremental way.
    """
    def plot(x, y, fit, label):
        sns.regplot(np.array(x), np.array(y), fit_reg=fit, label=label, scatter_kws={"s": 5})

    plt.clf()
    plt.subplot(211)
    idx = np.array(eval_dict.values()[0]).shape[0]
    x = np.array(eval_dict.values())
    for i in range(idx):
        plot(eval_dict.keys(), x[:, i], False, labels[i])
    plt.legend()
    plt.subplot(212)
    for i in range(idx):
        plot(eval_dict.keys()[-int(len(x) * 0.25):], x[-int(len(x) * 0.25):][:, i], True, labels[i])
    plt.xlabel('Epochs')
    plt.savefig(paths.get_plot_evaluation_path_for_model(self.model.get_root_path(), path_extension + ".png"))
Example #6
Source File: timeplots.py From NanoPlot with GNU General Public License v3.0 | 5 votes |
def plot_over_time(dfs, path, figformat, title, color):
    num_reads = Plot(path=path + "NumberOfReads_Over_Time." + figformat,
                     title="Number of reads over time")
    s = dfs.loc[:, "lengths"].resample('10T').count()
    ax = sns.regplot(x=s.index.total_seconds() / 3600,
                     y=s,
                     x_ci=None,
                     fit_reg=False,
                     color=color,
                     scatter_kws={"s": 3})
    ax.set(xlabel='Run time (hours)',
           ylabel='Number of reads per 10 minutes',
           title=title or num_reads.title)
    num_reads.fig = ax.get_figure()
    num_reads.save(format=figformat)
    plt.close("all")

    plots = [num_reads]
    if "channelIDs" in dfs:
        pores_over_time = Plot(path=path + "ActivePores_Over_Time." + figformat,
                               title="Number of active pores over time")
        s = dfs.loc[:, "channelIDs"].resample('10T').nunique()
        ax = sns.regplot(x=s.index.total_seconds() / 3600,
                         y=s,
                         x_ci=None,
                         fit_reg=False,
                         color=color,
                         scatter_kws={"s": 3})
        ax.set(xlabel='Run time (hours)',
               ylabel='Active pores per 10 minutes',
               title=title or pores_over_time.title)
        pores_over_time.fig = ax.get_figure()
        pores_over_time.save(format=figformat)
        plt.close("all")
        plots.append(pores_over_time)
    return plots
Example #7
Source File: typeI_analysis.py From SAMPL6 with MIT License | 5 votes |
def plot_correlation_with_SEM(x_lab, y_lab, x_err_lab, y_err_lab, data, title=None, color=None, ax=None):
    # Extract only pKa values.
    x_error = data.loc[:, x_err_lab]
    y_error = data.loc[:, y_err_lab]
    x_values = data.loc[:, x_lab]
    y_values = data.loc[:, y_lab]
    data = data[[x_lab, y_lab]]

    # Find extreme values to make axes equal.
    min_limit = np.ceil(min(data.min()) - 2)
    max_limit = np.floor(max(data.max()) + 2)
    axes_limits = np.array([min_limit, max_limit])

    # Color
    current_palette = sns.color_palette()
    sns_blue = current_palette[0]

    # Plot
    plt.figure(figsize=(6, 6))
    grid = sns.regplot(x=x_values, y=y_values, data=data, color=color, ci=None)
    plt.errorbar(x=x_values, y=y_values, xerr=x_error, yerr=y_error, fmt="o",
                 ecolor=sns_blue, capthick='2', label='SEM', alpha=0.75)
    plt.axis("equal")

    if len(title) > 70:
        plt.title(title[:70] + "...")
    else:
        plt.title(title)

    # Add diagonal line.
    grid.plot(axes_limits, axes_limits, ls='--', c='black', alpha=0.8, lw=0.7)

    # Add shaded area for 0.5-1 pKa error.
    palette = sns.color_palette('BuGn_r')
    grid.fill_between(axes_limits, axes_limits - 0.5, axes_limits + 0.5, alpha=0.2, color=palette[2])
    grid.fill_between(axes_limits, axes_limits - 1, axes_limits + 1, alpha=0.2, color=palette[3])

    plt.xlim(axes_limits)
    plt.ylim(axes_limits)
Example #8
Source File: residuals.py From bartpy with MIT License | 5 votes |
def plot_homoskedasity_diagnostics(model: SklearnModel, ax=None):
    if ax is None:
        _, ax = plt.subplots(1, 1, figsize=(5, 5))
    sns.regplot(model.predict(model.data.X.values), model.residuals(model.data.X.values), ax=ax)
    ax.set_title("Fitted Values V Residuals")
    ax.set_xlabel("Fitted Value")
    ax.set_ylabel("Residual")
    return ax
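Several examples on this page, including Example #8 above, pass the x and y vectors to regplot() positionally. Newer seaborn releases expect these as keyword arguments, so a version-tolerant variant of the call above would look like the following sketch (model is the fitted SklearnModel from the example; this is an assumption about recent seaborn behavior, not part of the original project):

sns.regplot(x=model.predict(model.data.X.values),
            y=model.residuals(model.data.X.values),
            ax=ax)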
Example #9
Source File: sim_eval.py From Conditional_Density_Estimation with MIT License | 5 votes |
def eval1():
    n_observations = 2000  # number of data points
    n_features = 1  # number of features

    X_train, X_test, y_train, y_test = build_econ1_dataset(n_observations)
    print("Size of features in training data: {}".format(X_train.shape))
    print("Size of output in training data: {}".format(y_train.shape))
    print("Size of features in test data: {}".format(X_test.shape))
    print("Size of output in test data: {}".format(y_test.shape))
    fig, ax = plt.subplots()
    fig.set_size_inches(10, 8)
    sns.regplot(X_train, y_train, fit_reg=False)
    # plt.savefig('toydata.png')
    # plt.show()
    # plot.figure.size = 100
    # plt.show()

    kmn = KernelMixtureNetwork(train_scales=True, n_centers=20)
    kmn.fit(X_train, y_train, n_epoch=300, eval_set=(X_test, y_test))
    kmn.plot_loss()
    # plt.savefig('trainplot.png')
    samples = kmn.sample(X_test)
    print(X_test.shape, samples.shape)
    jp = sns.jointplot(X_test.ravel(), samples, kind="hex", stat_func=None, size=10)
    jp.ax_joint.add_line(Line2D([X_test[0][0], X_test[0][0]], [-40, 40], linewidth=3))
    jp.ax_joint.add_line(Line2D([X_test[1][0], X_test[1][0]], [-40, 40], color='g', linewidth=3))
    jp.ax_joint.add_line(Line2D([X_test[2][0], X_test[2][0]], [-40, 40], color='r', linewidth=3))
    plt.savefig('hexplot.png')
    plt.show()

    d = kmn.predict_density(X_test[0:3, :].reshape(-1, 1), resolution=1000)
    df = pd.DataFrame(d).transpose()
    df.index = np.linspace(kmn.y_min, kmn.y_max, num=1000)
    df.plot(legend=False, linewidth=3, figsize=(12.2, 8))
    plt.savefig('conditional_density.png')
Example #10
Source File: brute_force_plotter.py From brute-force-plotter with MIT License | 5 votes |
def scatter_plot(data, col1, col2, file_name=None):
    sns.regplot(x=col1, y=col2, data=data, fit_reg=False)
    sns.despine(left=True)
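Example #10 disables the regression fit entirely (fit_reg=False), so regplot() is used purely as a scatter plot. With a recent seaborn, sns.scatterplot() is a more direct way to draw the same picture; this equivalent is illustrative only and is not part of the original project:

sns.scatterplot(x=col1, y=col2, data=data)
sns.despine(left=True)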
Example #11
Source File: plot.py From pyem with GNU General Public License v3.0 | 5 votes |
def plot_angle_comparison(df1, df2, lgdtext=None, fname=None, maxrot=90):
    # if fname is not None:
    #     mpl.rc("savefig", dpi=300)
    if lgdtext is None:
        lgdtext = [u"Second (deg)", u"First (deg)"]
    sns.set(font_scale=3)
    f, ax = plt.subplots(1, 3, figsize=(30, 10))
    sns.regplot(df2["rlnAngleRot"], df1["rlnAngleRot"], fit_reg=False, scatter_kws={"s": 16}, ax=ax[0])
    ax[0].set_xlim((-maxrot, maxrot))
    ax[0].set_ylim((-maxrot, maxrot))
    ax[0].set_xticks(np.arange(-maxrot, maxrot+1, 15))
    ax[0].set_yticks(np.arange(-maxrot, maxrot+1, 15))
    ax[0].xaxis.label.set_visible(False)
    ax[0].set_ylabel(lgdtext[0])
    ax[0].set_title(u"$\phi$ ( $Z$ )", y=1.01)
    sns.regplot(df2["rlnAngleTilt"], df1["rlnAngleTilt"], fit_reg=False, scatter_kws={"s": 16}, ax=ax[1])
    ax[1].set_xlim((0, 180))
    ax[1].set_ylim((0, 180))
    ax[1].set_xticks(np.arange(0, 181, 30))
    ax[1].set_yticks(np.arange(0, 181, 30))
    ax[1].xaxis.label.set_visible(False)
    ax[1].yaxis.label.set_visible(False)
    ax[1].set_title(u"$\\theta$ ( $Y'$ )", y=1.01)
    sns.regplot(df2["rlnAnglePsi"], df1["rlnAnglePsi"], fit_reg=False, scatter_kws={"s": 16}, ax=ax[2])
    ax[2].set_xlim((-180, 180))
    ax[2].set_ylim((-180, 180))
    ax[2].set_xticks(np.arange(-180, 181, 45))
    ax[2].set_yticks(np.arange(-180, 181, 45))
    ax[2].xaxis.label.set_visible(False)
    ax[2].yaxis.label.set_visible(False)
    ax[2].set_title(u"$\psi$ ( $Z''$ )", y=1.01)
    f.text(0.5, -0.05, lgdtext[1], ha='center', fontsize=36)
    f.tight_layout(pad=1., w_pad=-1.5, h_pad=0.5)
    if fname is not None:
        f.savefig(fname, dpi=300)
    # mpl.rc("savefig", dpi=80)
    return f, ax
Example #12
Source File: create_graph_appendix.py From experiment-impact-tracker with MIT License | 5 votes |
def create_scatterplot_from_df(
    df, x: str, y: str, output_path: str = ".", fig_x: int = 16, fig_y: int = 8
):
    """Loads an executive summary df and creates a scatterplot from some pre-specified variables.

    Args:
        df ([type]): [description]
        x (str): [description]
        y (str): [description]
        output_path (str, optional): [description]. Defaults to '.'.
        fig_x (int, optional): [description]. Defaults to 16.
        fig_y (int, optional): [description]. Defaults to 8.
    """
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    # create graph dirs
    graph_dir = str(fig_x) + "_" + str(fig_y)
    out_dir = os.path.join(output_path, graph_dir)
    df[x] = df[x].astype(float)
    df[y] = df[y].astype(float)
    os.makedirs(out_dir, exist_ok=True)

    a4_dims = (14, 9)
    fig, ax = plt.subplots(figsize=a4_dims)
    graph = sns.scatterplot(
        ax=ax, x=x, y=y, data=df, s=325, alpha=0.5, hue="Experiment", legend="brief"
    )  # , palette="Set1")
    box = ax.get_position()
    plt.legend(markerscale=2)
    # ax.set_position([box.x0,box.y0,box.width*0.83,box.height])
    # plt.legend(loc='upper left',bbox_to_anchor=(1,1.15))
    # plt.ylim(bottom=0.0)
    # plt.legend(loc='lower right')
    # Use regplot to plot the regression line for the whole points
    # sns.regplot(x="FPOs", y=args.y_axis_var, data=df, sizes=(250, 500), alpha=.5, scatter=False, ax=graph.axes[2])
    path_name = os.path.join(out_dir, "{}v{}.png".format(x, y))
    plt.savefig(path_name)
    plt.close("all")
    return path_name
Example #13
Source File: plot.py From scedar with MIT License | 5 votes |
def regression_scatter(x, y, title=None, xlab=None, ylab=None, figsize=(5, 5),
                       alpha=1, s=0.5, ax=None, **kwargs):
    """
    Paired vector scatter plot.
    """
    if xlab is not None:
        x = pd.Series(x, name=xlab)
    if ylab is not None:
        y = pd.Series(y, name=ylab)

    # initialize a new figure
    if ax is None:
        _, ax = plt.subplots()

    ax = sns.regplot(x=x, y=y, ax=ax, **kwargs)
    fig = ax.get_figure()

    if title is not None:
        ax.set_title(title)
    if xlab is not None:
        ax.set_xlabel(xlab)
    if ylab is not None:
        ax.set_ylabel(ylab)

    fig.set_size_inches(*figsize)
    plt.close()
    return fig
Example #14
Source File: ABuMetricsFutures.py From abu with GNU General Public License v3.0 | 5 votes |
def plot_returns_cmp(self, only_show_returns=False, only_info=False):
    """Capital-aware metrics: visualize the return trend and capital changes and report other summary metrics; no benchmark comparison is involved"""
    self.log_func('Number of trades bought and then sold: {}'.format(self.order_has_ret.shape[0]))
    self.log_func('Win rate: {:.4f}%'.format(self.win_rate * 100))
    self.log_func('Average expected gain: {:.4f}%'.format(self.gains_mean * 100))
    self.log_func('Average expected loss: {:.4f}%'.format(self.losses_mean * 100))
    self.log_func('Profit/loss ratio: {:.4f}'.format(self.win_loss_profit_rate))
    self.log_func('Strategy return: {:.4f}%'.format(self.algorithm_period_returns * 100))
    self.log_func('Strategy annualized return: {:.4f}%'.format(self.algorithm_annualized_returns * 100))
    self.log_func('Strategy buy-order fill ratio: {:.4f}%'.format(self.buy_deal_rate * 100))
    self.log_func('Strategy capital utilization ratio: {:.4f}%'.format(self.cash_utilization * 100))
    self.log_func('Strategy ran for {} trading days'.format(self.num_trading_days))

    if only_info:
        return

    self.algorithm_cum_returns.plot()
    plt.legend(['algorithm returns'], loc='best')
    plt.show()

    if only_show_returns:
        return
    sns.regplot(x=np.arange(0, len(self.algorithm_cum_returns)), y=self.algorithm_cum_returns.values)
    plt.show()
    sns.distplot(self.capital.capital_pd['capital_blance'],
                 kde_kws={"lw": 3, "label": "capital blance kde"})
    plt.show()
Example #15
Source File: ABuMetricsBase.py From abu with GNU General Public License v3.0 | 5 votes |
def plot_returns_cmp(self, only_show_returns=False, only_info=False):
    """Capital-aware metrics: compare returns against the benchmark, visualize the return trend and capital changes, and report other summary metrics"""
    self.log_func('Number of trades bought and then sold: {}'.format(self.order_has_ret.shape[0]))
    self.log_func('Number of trades bought but not yet sold: {}'.format(self.order_keep.shape[0]))
    self.log_func('Win rate: {:.4f}%'.format(self.win_rate * 100))
    self.log_func('Average expected gain: {:.4f}%'.format(self.gains_mean * 100))
    self.log_func('Average expected loss: {:.4f}%'.format(self.losses_mean * 100))
    self.log_func('Profit/loss ratio: {:.4f}'.format(self.win_loss_profit_rate))
    self.log_func('Strategy return: {:.4f}%'.format(self.algorithm_period_returns * 100))
    self.log_func('Benchmark return: {:.4f}%'.format(self.benchmark_period_returns * 100))
    self.log_func('Strategy annualized return: {:.4f}%'.format(self.algorithm_annualized_returns * 100))
    self.log_func('Benchmark annualized return: {:.4f}%'.format(self.benchmark_annualized_returns * 100))
    self.log_func('Strategy buy-order fill ratio: {:.4f}%'.format(self.buy_deal_rate * 100))
    self.log_func('Strategy capital utilization ratio: {:.4f}%'.format(self.cash_utilization * 100))
    self.log_func('Strategy ran for {} trading days'.format(self.num_trading_days))

    if only_info:
        return

    self.benchmark_cum_returns.plot()
    self.algorithm_cum_returns.plot()
    plt.legend(['benchmark returns', 'algorithm returns'], loc='best')
    plt.show()

    if only_show_returns:
        return
    sns.regplot(x=np.arange(0, len(self.algorithm_cum_returns)), y=self.algorithm_cum_returns.values)
    plt.show()
    sns.distplot(self.capital.capital_pd['capital_blance'],
                 kde_kws={"lw": 3, "label": "capital blance kde"})
    plt.show()
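Examples #14 and #15 finish with sns.distplot(), which has since been deprecated in seaborn in favor of histplot()/displot(). A rough modern equivalent of that final capital-balance plot, written inside the same method and assuming the same capital_pd dataframe (the kde line-width styling from kde_kws is omitted), might be:

import matplotlib.pyplot as plt
import seaborn as sns

# histogram with an overlaid kernel density estimate of the capital balance
sns.histplot(self.capital.capital_pd['capital_blance'], kde=True)
plt.show()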
Example #16
Source File: c7.py From abu with GNU General Public License v3.0 | 5 votes |
def sample_711():
    """
    7.1.1 Cycle overlap between trend following and mean reversion
    :return:
    """
    sns.set_context(rc={'figure.figsize': (14, 7)})
    sns.regplot(x=np.arange(0, kl_pd.shape[0]), y=kl_pd.close.values, marker='+')
    plt.show()

    from abupy import ABuRegUtil
    deg = ABuRegUtil.calc_regress_deg(kl_pd.close.values)
    plt.show()
    print('Trend angle: ' + str(deg))

    start = 0
    # the first 1/4 of the data
    end = int(kl_pd.shape[0] / 4)
    # slice x with arange as well
    x = np.arange(start, end)
    # slice y according to start and end
    y = kl_pd.close.values[start:end]
    sns.regplot(x=x, y=y, marker='+')
    plt.show()

    start = int(kl_pd.shape[0] / 4)
    # move forward by another 1/4 of the time span
    end = start + int(kl_pd.shape[0] / 4)
    sns.regplot(x=np.arange(start, end), y=kl_pd.close.values[start:end], marker='+')
    plt.show()
Example #17
Source File: TargetAnalysisContinuous.py From exploripy with MIT License | 5 votes |
def RegPlot(self, feature, target):
    fig, ax = plt.subplots()
    # color = self.SelectedColors[random.sample(range(len(self.SelectedColors)), 1)]
    ax = sns.regplot(x=feature, y=target, data=self.df, ax=ax, color=random.choice(self.SelectedColors))
    sns.despine(offset=10, trim=True)
    this_dir, this_filename = os.path.split(__file__)
    OutFileName = os.path.join(this_dir, 'HTMLTemplate/dist/output/' + feature + '_regPlot.png')
    if platform.system() == 'Linux':
        OutFileName = os.path.join(this_dir, 'HTMLTemplate/dist/output/' + feature + '_regPlot.png')
    plt.savefig(OutFileName)
    return OutFileName
Example #18
Source File: utils.py From dl-eeg-review with MIT License | 5 votes |
def run_spearmanr(df, condition_col, value_col='acc_diff', log=False, plot=False):
    """Run Spearman's rank correlation analysis.

    Args:
        df (pd.DataFrame): dataframe where each row is a paper.
        condition_col (str): name of column to use as condition.

    Keyword Args:
        value_col (str): name of column to use as the numerical value to run
            the test on.
        log (bool): if True, use log of `condition_col` before computing the
            correlation.

    Returns:
        (float): U statistic
        (float): p-value
    """
    data1 = np.log10(df[condition_col]) if log else df[condition_col]
    data2 = df[value_col]
    corr, p = spearmanr(data1, data2)

    if plot:
        log_condition_col = 'log_' + condition_col
        df[log_condition_col] = np.log10(df[condition_col])
        fig, ax = plt.subplots()
        sns.regplot(data=df, x=log_condition_col, y=value_col, robust=True, ax=ax)
        ax.set_title('Spearman Rho for {} vs. {}\n(pvalue={:0.4f}, ρ={:0.4f})'.format(
            log_condition_col, value_col, p, corr))
    else:
        fig = None

    return {'test': 'spearmanr', 'pvalue': p, 'stat': corr, 'fig': fig}
Example #19
Source File: timeplots.py From NanoPlot with GNU General Public License v3.0 | 5 votes |
def cumulative_yield(dfs, path, figformat, title, color):
    cum_yield_gb = Plot(path=path + "CumulativeYieldPlot_Gigabases." + figformat,
                        title="Cumulative yield")
    s = dfs.loc[:, "lengths"].cumsum().resample('1T').max() / 1e9
    ax = sns.regplot(x=s.index.total_seconds() / 3600,
                     y=s,
                     x_ci=None,
                     fit_reg=False,
                     color=color,
                     scatter_kws={"s": 3})
    ax.set(xlabel='Run time (hours)',
           ylabel='Cumulative yield in gigabase',
           title=title or cum_yield_gb.title)
    cum_yield_gb.fig = ax.get_figure()
    cum_yield_gb.save(format=figformat)
    plt.close("all")

    cum_yield_reads = Plot(path=path + "CumulativeYieldPlot_NumberOfReads." + figformat,
                           title="Cumulative yield")
    s = dfs.loc[:, "lengths"].resample('10T').count().cumsum()
    ax = sns.regplot(x=s.index.total_seconds() / 3600,
                     y=s,
                     x_ci=None,
                     fit_reg=False,
                     color=color,
                     scatter_kws={"s": 3})
    ax.set(xlabel='Run time (hours)',
           ylabel='Cumulative yield in number of reads',
           title=title or cum_yield_reads.title)
    cum_yield_reads.fig = ax.get_figure()
    cum_yield_reads.save(format=figformat)
    plt.close("all")
    return [cum_yield_gb, cum_yield_reads]
Example #20
Source File: basenji_hidden.py From basenji with Apache License 2.0 | 5 votes |
def regplot(vals1, vals2, out_pdf, alpha=0.5, x_label=None, y_label=None):
    plt.figure()

    gold = sns.color_palette('husl', 8)[1]
    ax = sns.regplot(
        vals1,
        vals2,
        color='black',
        lowess=True,
        scatter_kws={'color': 'black', 's': 4, 'alpha': alpha},
        line_kws={'color': gold})

    xmin, xmax = plots.scatter_lims(vals1)
    ymin, ymax = plots.scatter_lims(vals2)
    ax.set_xlim(xmin, xmax)
    if x_label is not None:
        ax.set_xlabel(x_label)
    ax.set_ylim(ymin, ymax)
    if y_label is not None:
        ax.set_ylabel(y_label)

    ax.grid(True, linestyle=':')

    plt.savefig(out_pdf)
    plt.close()


################################################################################
# __main__
################################################################################
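Several fitting options used across these examples (lowess=True in Example #20, logistic=True in Examples #2 and #3, robust=True in Example #18) make regplot() delegate the regression to statsmodels, which therefore needs to be installed; the default linear and polynomial (order=N) fits are computed with numpy alone. A hedged sketch of guarding that optional dependency, reusing the vals1/vals2 arrays from the example above, could be:

# fall back to a plain linear fit if statsmodels is not available
try:
    import statsmodels.api  # noqa: F401
    have_statsmodels = True
except ImportError:
    have_statsmodels = False

ax = sns.regplot(x=vals1, y=vals2, lowess=have_statsmodels,
                 scatter_kws={'s': 4, 'alpha': 0.5})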
Example #21
Source File: bam_cov.py From basenji with Apache License 2.0 | 5 votes |
def regplot_gc(vals1, vals2, model, out_pdf):
    gold = sns.color_palette('husl', 8)[1]

    plt.figure(figsize=(6, 6))

    # plot data and seaborn model
    ax = sns.regplot(
        vals1,
        vals2,
        color='black',
        order=3,
        scatter_kws={'color': 'black', 's': 4, 'alpha': 0.5},
        line_kws={'color': gold})

    # plot my model predictions
    svals1 = np.sort(vals1)
    preds2 = model.predict(svals1[:, np.newaxis])
    ax.plot(svals1, preds2)

    # adjust axis
    ymin, ymax = scatter_lims(vals2)
    ax.set_xlim(0.2, 0.8)
    ax.set_xlabel('GC%')
    ax.set_ylim(ymin, ymax)
    ax.set_ylabel('Coverage')

    ax.grid(True, linestyle=':')

    plt.savefig(out_pdf)
    plt.close()
Example #22
Source File: bam_cov.py From basenji with Apache License 2.0 | 5 votes |
def regplot_shift(vals1, vals2, preds2, out_pdf):
    gold = sns.color_palette('husl', 8)[1]

    plt.figure(figsize=(6, 6))

    # plot data and seaborn model
    ax = sns.regplot(
        vals1,
        vals2,
        color='black',
        order=3,
        scatter_kws={'color': 'black', 's': 4, 'alpha': 0.5},
        line_kws={'color': gold})

    # plot my model predictions
    ax.plot(vals1, preds2)

    # adjust axis
    ymin, ymax = scatter_lims(vals2)
    ax.set_xlabel('Shift')
    ax.set_ylim(ymin, ymax)
    ax.set_ylabel('Covariance')

    ax.grid(True, linestyle=':')

    plt.savefig(out_pdf)
    plt.close()
Example #23
Source File: typeI_analysis.py From SAMPL6 with MIT License | 5 votes |
def plot_correlation(x, y, data, title=None, color=None, kind='joint', ax=None):
    # Extract only pKa values.
    data = data[[x, y]]

    # Find extreme values to make axes equal.
    min_limit = np.ceil(min(data.min()) - 2)
    max_limit = np.floor(max(data.max()) + 2)
    axes_limits = np.array([min_limit, max_limit])

    if kind == 'joint':
        grid = sns.jointplot(x=x, y=y, data=data,
                             kind='reg', joint_kws={'ci': None}, stat_func=None,
                             xlim=axes_limits, ylim=axes_limits, color=color)
        ax = grid.ax_joint
        grid.fig.subplots_adjust(top=0.95)
        grid.fig.suptitle(title)
    elif kind == 'reg':
        ax = sns.regplot(x=x, y=y, data=data, color=color, ax=ax)
        ax.set_title(title)

    # Add diagonal line.
    ax.plot(axes_limits, axes_limits, ls='--', c='black', alpha=0.8, lw=0.7)

    # Add shaded area for 0.5-1 pKa error.
    palette = sns.color_palette('BuGn_r')
    ax.fill_between(axes_limits, axes_limits - 0.5, axes_limits + 0.5, alpha=0.2, color=palette[2])
    ax.fill_between(axes_limits, axes_limits - 1, axes_limits + 1, alpha=0.2, color=palette[3])
Example #24
Source File: logP_analysis.py From SAMPL6 with MIT License | 5 votes |
def plot_correlation(x, y, data, title=None, color=None, kind='joint', ax=None):
    # Extract only logP values.
    data = data[[x, y]]

    # Find extreme values to make axes equal.
    min_limit = np.ceil(min(data.min()) - 1)
    max_limit = np.floor(max(data.max()) + 1)
    axes_limits = np.array([min_limit, max_limit])

    if kind == 'joint':
        grid = sns.jointplot(x=x, y=y, data=data,
                             kind='reg', joint_kws={'ci': None}, stat_func=None,
                             xlim=axes_limits, ylim=axes_limits, color=color)
        ax = grid.ax_joint
        grid.fig.subplots_adjust(top=0.95)
        grid.fig.suptitle(title)
    elif kind == 'reg':
        ax = sns.regplot(x=x, y=y, data=data, color=color, ax=ax)
        ax.set_title(title)

    # Add diagonal line.
    ax.plot(axes_limits, axes_limits, ls='--', c='black', alpha=0.8, lw=0.7)

    # Add shaded area for 0.5-1 logP error.
    palette = sns.color_palette('BuGn_r')
    ax.fill_between(axes_limits, axes_limits - 0.5, axes_limits + 0.5, alpha=0.2, color=palette[2])
    ax.fill_between(axes_limits, axes_limits - 1, axes_limits + 1, alpha=0.2, color=palette[3])
Example #25
Source File: logP_analysis.py From SAMPL6 with MIT License | 5 votes |
def plot_correlation_with_SEM(x_lab, y_lab, x_err_lab, y_err_lab, data, title=None, color=None, ax=None):
    # Extract only logP values.
    x_error = data.loc[:, x_err_lab]
    y_error = data.loc[:, y_err_lab]
    x_values = data.loc[:, x_lab]
    y_values = data.loc[:, y_lab]
    data = data[[x_lab, y_lab]]

    # Find extreme values to make axes equal.
    min_limit = np.ceil(min(data.min()) - 1)
    max_limit = np.floor(max(data.max()) + 1)
    axes_limits = np.array([min_limit, max_limit])

    # Color
    current_palette = sns.color_palette()
    sns_blue = current_palette[0]

    # Plot
    plt.figure(figsize=(6, 6))
    grid = sns.regplot(x=x_values, y=y_values, data=data, color=color, ci=None)
    plt.errorbar(x=x_values, y=y_values, xerr=x_error, yerr=y_error, fmt="o",
                 ecolor=sns_blue, capthick='2', label='SEM', alpha=0.75)
    plt.axis("equal")

    if len(title) > 70:
        plt.title(title[:70] + "...")
    else:
        plt.title(title)

    # Add diagonal line.
    grid.plot(axes_limits, axes_limits, ls='--', c='black', alpha=0.8, lw=0.7)

    # Add shaded area for 0.5-1 logP error.
    palette = sns.color_palette('BuGn_r')
    grid.fill_between(axes_limits, axes_limits - 0.5, axes_limits + 0.5, alpha=0.2, color=palette[2])
    grid.fill_between(axes_limits, axes_limits - 1, axes_limits + 1, alpha=0.2, color=palette[3])

    plt.xlim(axes_limits)
    plt.ylim(axes_limits)
Example #26
Source File: logP_analysis.py From SAMPL6 with MIT License | 5 votes |
def plot_correlation(x, y, data, title=None, color=None, kind='joint', ax=None):
    # Extract only logP values.
    data = data[[x, y]]

    # Find extreme values to make axes equal.
    min_limit = np.ceil(min(data.min()) - 1)
    max_limit = np.floor(max(data.max()) + 1)
    axes_limits = np.array([min_limit, max_limit])

    if kind == 'joint':
        grid = sns.jointplot(x=x, y=y, data=data,
                             kind='reg', joint_kws={'ci': None}, stat_func=None,
                             xlim=axes_limits, ylim=axes_limits, color=color)
        ax = grid.ax_joint
        grid.fig.subplots_adjust(top=0.95)
        grid.fig.suptitle(title)
    elif kind == 'reg':
        ax = sns.regplot(x=x, y=y, data=data, color=color, ax=ax)
        ax.set_title(title)

    # Add diagonal line.
    ax.plot(axes_limits, axes_limits, ls='--', c='black', alpha=0.8, lw=0.7)

    # Add shaded area for 0.5-1 logP error.
    palette = sns.color_palette('BuGn_r')
    ax.fill_between(axes_limits, axes_limits - 0.5, axes_limits + 0.5, alpha=0.2, color=palette[2])
    ax.fill_between(axes_limits, axes_limits - 1, axes_limits + 1, alpha=0.2, color=palette[3])
Example #27
Source File: logP_analysis.py From SAMPL6 with MIT License | 5 votes |
def plot_correlation_with_SEM(x_lab, y_lab, x_err_lab, y_err_lab, data, title=None, color=None, ax=None):
    # Extract only logP values.
    x_error = data.loc[:, x_err_lab]
    y_error = data.loc[:, y_err_lab]
    x_values = data.loc[:, x_lab]
    y_values = data.loc[:, y_lab]
    data = data[[x_lab, y_lab]]

    # Find extreme values to make axes equal.
    min_limit = np.ceil(min(data.min()) - 1)
    max_limit = np.floor(max(data.max()) + 1)
    axes_limits = np.array([min_limit, max_limit])

    # Color
    current_palette = sns.color_palette()
    sns_blue = current_palette[0]

    # Plot
    plt.figure(figsize=(6, 6))
    grid = sns.regplot(x=x_values, y=y_values, data=data, color=color, ci=None)
    plt.errorbar(x=x_values, y=y_values, xerr=x_error, yerr=y_error, fmt="o",
                 ecolor=sns_blue, capthick='2', label='SEM', alpha=0.75)
    plt.axis("equal")

    if len(title) > 70:
        plt.title(title[:70] + "...")
    else:
        plt.title(title)

    # Add diagonal line.
    grid.plot(axes_limits, axes_limits, ls='--', c='black', alpha=0.8, lw=0.7)

    # Add shaded area for 0.5-1 logP error.
    palette = sns.color_palette('BuGn_r')
    grid.fill_between(axes_limits, axes_limits - 0.5, axes_limits + 0.5, alpha=0.2, color=palette[2])
    grid.fill_between(axes_limits, axes_limits - 1, axes_limits + 1, alpha=0.2, color=palette[3])

    plt.xlim(axes_limits)
    plt.ylim(axes_limits)
Example #28
Source File: typeIII_analysis.py From SAMPL6 with MIT License | 5 votes |
def plot_correlation(x, y, data, title=None, color=None, kind='joint', ax=None):
    # Extract only pKa values.
    data = data[[x, y]]

    # Find extreme values to make axes equal.
    min_limit = np.ceil(min(data.min()) - 2)
    max_limit = np.floor(max(data.max()) + 2)
    axes_limits = np.array([min_limit, max_limit])

    if kind == 'joint':
        grid = sns.jointplot(x=x, y=y, data=data,
                             kind='reg', joint_kws={'ci': None}, stat_func=None,
                             xlim=axes_limits, ylim=axes_limits, color=color)
        ax = grid.ax_joint
        grid.fig.subplots_adjust(top=0.95)
        grid.fig.suptitle(title)
    elif kind == 'reg':
        ax = sns.regplot(x=x, y=y, data=data, color=color, ax=ax)
        ax.set_title(title)

    # Add diagonal line.
    ax.plot(axes_limits, axes_limits, ls='--', c='black', alpha=0.8, lw=0.7)

    # Add shaded area for 0.5-1 pKa error.
    palette = sns.color_palette('BuGn_r')
    ax.fill_between(axes_limits, axes_limits - 0.5, axes_limits + 0.5, alpha=0.2, color=palette[2])
    ax.fill_between(axes_limits, axes_limits - 1, axes_limits + 1, alpha=0.2, color=palette[3])
Example #29
Source File: yes_no.py From guesswhat with Apache License 2.0 | 4 votes |
def __init__(self, path, games, logger, suffix):
    super(YesNo, self).__init__(path, self.__class__.__name__, suffix)

    # basic storage for statistics
    yes_no = collections.defaultdict(list)
    number_yesno = collections.defaultdict(int)
    MAX = 15

    for i, game in enumerate(games):
        if game.status == "incomplete":
            continue
        yn = []
        for a in game.answers:
            a = a.lower()
            if a == "yes":
                number_yesno["yes"] += 1
                yn.append(1)
            elif a == "no":
                number_yesno["no"] += 1
                yn.append(0)
            else:
                number_yesno["n/a"] += 1
                yn.append(0.5)
        no_question = len(game.answers)
        yes_no[no_question].append(yn)

    sns.set(style="whitegrid")
    max_no_question = min(MAX, max(yes_no.keys())) + 1

    fig = None
    for key, yn in yes_no.items():
        no_question = int(key)
        yn_mean = np.array(yn).mean(axis=0)
        if no_question < max_no_question:
            fig = sns.regplot(x=np.arange(1, no_question + 1, 1), y=yn_mean, lowess=True, scatter=False)

    # dummy legend
    sns.regplot(x=np.array([-1]), y=np.array([-1]),
                scatter=False,
                line_kws={'linestyle': '-'},
                label="Ratio yes-no", ci=None, color="g")

    fig.legend(loc="best", fontsize='x-large')
    fig.set_xlim(1, max_no_question)
    fig.set_ylim(0.1, 1)
    fig.set_xlabel("Number of questions", {'size': '14'})
    fig.set_ylabel('Ratio yes-no', {'size': '14'})
Example #30
Source File: question_object.py From guesswhat with Apache License 2.0 | 4 votes |
def __init__(self, path, games, logger, suffix):
    super(QuestionVsObject, self).__init__(path, self.__class__.__name__, suffix)

    ratio_q_object = []
    for game in games:
        no_object = len(game.objects)
        no_question = len(game.questions)
        ratio_q_object.append([no_object, no_question])
    ratio_q_object = np.array(ratio_q_object)

    sns.set(style="white")

    x = np.linspace(3, 20, 80)

    counter = collections.defaultdict(list)
    for k, val in ratio_q_object:
        counter[k] += [val]

    arr = np.zeros([4, 21])
    for k, val in counter.items():
        if len(val) > 0:
            arr[0, k] = k
            arr[1, k] = np.mean(val)
            # Std
            arr[2, k] = np.std(val)
            # confidence interval 95%
            arr[3, k] = 1.95 * np.std(val) / np.sqrt(len(val))

    # plt.plot(arr[0,:], arr[1,:], 'b.', label="Human behavior")
    sns.regplot(x=ratio_q_object[:, 0], y=ratio_q_object[:, 1],
                x_ci=None, x_bins=20, order=4,
                label="Human behavior", marker="o", line_kws={'linestyle': '-'})
    plt.fill_between(x=arr[0, :], y1=arr[1, :] - arr[2, :], y2=arr[1, :] + arr[2, :], alpha=0.2)

    sns.regplot(x=x, y=np.log2(x), order=6, scatter=False, label="y = log2(x)", line_kws={'linestyle': '--'})
    f = sns.regplot(x=x, y=x, order=1, scatter=False, label="y = x", line_kws={'linestyle': '--'})

    f.legend(loc="best", fontsize='x-large')
    f.set_xlim(3, 20)
    f.set_ylim(0, 20)
    f.set_xlabel("Number of objects", {'size': '14'})
    f.set_ylabel("Number of questions", {'size': '14'})