Python matplotlib.pylab.ylim() Examples
The following are 30 code examples of matplotlib.pylab.ylim(), collected from open-source projects and ordered by user votes. The originating project, source file, and license are noted above each example. You may also want to look at the other available functions and classes of the matplotlib.pylab module.
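Before the examples, here is a minimal sketch (not taken from any of the projects below) of the two basic ways ylim() is used: called with arguments it sets the y-axis limits, and called with no arguments it returns the current (bottom, top) pair.

from matplotlib import pylab

pylab.plot([0, 1, 2, 3], [0.2, 0.9, 0.4, 0.7])   # arbitrary illustrative data
pylab.ylim(0.0, 1.0)         # set the y-axis limits
bottom, top = pylab.ylim()   # with no arguments, ylim() returns the current limits
print(bottom, top)           # 0.0 1.0
pylab.show()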
Example #1
Source File: plot_kmeans_example.py From Building-Machine-Learning-Systems-With-Python-Second-Edition with MIT License | 6 votes |
def plot_clustering(x, y, title, mx=None, ymax=None, xmin=None, km=None):
    pylab.figure(num=None, figsize=(8, 6))
    if km:
        pylab.scatter(x, y, s=50, c=km.predict(list(zip(x, y))))
    else:
        pylab.scatter(x, y, s=50)

    pylab.title(title)
    pylab.xlabel("Occurrence word 1")
    pylab.ylabel("Occurrence word 2")
    pylab.autoscale(tight=True)
    pylab.ylim(ymin=0, ymax=1)
    pylab.xlim(xmin=0, xmax=1)
    pylab.grid(True, linestyle='-', color='0.75')
    return pylab
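A side note on the keyword form used above: recent matplotlib documentation names the ylim()/set_ylim() keywords bottom and top, with ymin/ymax kept as aliases. If the ymin=/ymax= spelling is rejected by your matplotlib version, the following equivalent calls (a sketch, not part of the project above) should work:

pylab.ylim(0, 1)              # positional (bottom, top)
pylab.ylim(bottom=0, top=1)   # keyword names used in current matplotlib docs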
Example #2
Source File: util.py From Azimuth with BSD 3-Clause "New" or "Revised" License | 6 votes |
def addqqplotinfo(qnull, M, xl='-log10(P) observed', yl='-log10(P) expected',
                  xlim=None, ylim=None, alphalevel=0.05, legendlist=None, fixaxes=False):
    distr = 'log10'
    pl.plot([0, qnull.max()], [0, qnull.max()], 'k')
    pl.ylabel(xl)
    pl.xlabel(yl)
    if xlim is not None:
        pl.xlim(xlim)
    if ylim is not None:
        pl.ylim(ylim)
    if alphalevel is not None:
        if distr == 'log10':
            betaUp, betaDown, theoreticalPvals = _qqplot_bar(M=M, alphalevel=alphalevel, distr=distr)
            lower = -sp.log10(theoreticalPvals - betaDown)
            upper = -sp.log10(theoreticalPvals + betaUp)
            pl.fill_between(-sp.log10(theoreticalPvals), lower, upper, color="grey", alpha=0.5)
            # pl.plot(-sp.log10(theoreticalPvals), lower, 'g-.')
            # pl.plot(-sp.log10(theoreticalPvals), upper, 'g-.')
    if legendlist is not None:
        leg = pl.legend(legendlist, loc=4, numpoints=1)
        # set the markersize for the legend
        for lo in leg.legendHandles:
            lo.set_markersize(10)
    if fixaxes:
        fix_axes()
Example #3
Source File: benchmark.py From osqp_benchmarks with Apache License 2.0 | 6 votes |
def plot_performance_profiles(problems, solvers):
    """
    Plot performance profiles in matplotlib for specified problems and solvers
    """
    # Remove OSQP polish solver
    solvers = solvers.copy()
    for s in solvers:
        if "polish" in s:
            solvers.remove(s)

    df = pd.read_csv('./results/%s/performance_profiles.csv' % problems)
    plt.figure(0)
    for solver in solvers:
        plt.plot(df["tau"], df[solver], label=solver)
    plt.xlim(1., 10000.)
    plt.ylim(0., 1.)
    plt.xlabel(r'Performance ratio $\tau$')
    plt.ylabel('Ratio of problems solved')
    plt.xscale('log')
    plt.legend()
    plt.grid()
    plt.show(block=False)
    results_file = './results/%s/%s.png' % (problems, problems)
    print("Saving plots to %s" % results_file)
    plt.savefig(results_file)
Example #4
Source File: utils.py From Building-Machine-Learning-Systems-With-Python-Second-Edition with MIT License | 6 votes |
def plot_roc(auc_score, name, tpr, fpr, label=None):
    pylab.clf()
    pylab.figure(num=None, figsize=(5, 4))
    pylab.grid(True)
    pylab.plot([0, 1], [0, 1], 'k--')
    pylab.plot(fpr, tpr)
    pylab.fill_between(fpr, tpr, alpha=0.5)
    pylab.xlim([0.0, 1.0])
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('False Positive Rate')
    pylab.ylabel('True Positive Rate')
    pylab.title('ROC curve (AUC = %0.2f) / %s' % (auc_score, label),
                verticalalignment="bottom")
    pylab.legend(loc="lower right")
    filename = name.replace(" ", "_")
    pylab.savefig(
        os.path.join(CHART_DIR, "roc_" + filename + ".png"), bbox_inches="tight")
Example #5
Source File: spectroscopy.py From qkit with GNU General Public License v2.0 | 6 votes |
def plot_xz_landscape(self):
    """
    plots the xz landscape, i.e., how your vna frequency span changes with respect to the x vector
    :return: None
    """
    if not qkit.module_available("matplotlib"):
        raise ImportError("matplotlib not found.")

    if self.xzlandscape_func:
        y_values = self.xzlandscape_func(self.spec.x_vec)
        plt.plot(self.spec.x_vec, y_values, 'C1')
        plt.fill_between(self.spec.x_vec, y_values + self.z_span / 2.,
                         y_values - self.z_span / 2., color='C0', alpha=0.5)
        plt.xlim((self.spec.x_vec[0], self.spec.x_vec[-1]))
        plt.ylim((self.xz_freqpoints[0], self.xz_freqpoints[-1]))
        plt.show()
    else:
        print('No xz function generated. Use landscape.generate_xz_function')
Example #6
Source File: utils.py From ndvr-dml with Apache License 2.0 | 6 votes |
def plot_pr_curve(pr_curve_dml, pr_curve_base, title):
    """
      Function that plots the PR-curve.

      Args:
        pr_curve: the values of precision for each recall value
        title: the title of the plot
    """
    plt.figure(figsize=(16, 9))
    plt.plot(np.arange(0.0, 1.05, 0.05), pr_curve_base,
             color='r', marker='o', linewidth=3, markersize=10)
    plt.plot(np.arange(0.0, 1.05, 0.05), pr_curve_dml,
             color='b', marker='o', linewidth=3, markersize=10)
    plt.grid(True, linestyle='dotted')
    plt.xlabel('Recall', color='k', fontsize=27)
    plt.ylabel('Precision', color='k', fontsize=27)
    plt.yticks(color='k', fontsize=20)
    plt.xticks(color='k', fontsize=20)
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.0])
    plt.title(title, color='k', fontsize=27)
    plt.tight_layout()
    plt.show()
Example #7
Source File: evaluate.py From text-classifier with Apache License 2.0 | 6 votes |
def plot_pr(auc_score, precision, recall, label=None, figure_path=None):
    """Plot the precision/recall (P/R) curve."""
    try:
        from matplotlib import pylab
        pylab.figure(num=None, figsize=(6, 5))
        pylab.xlim([0.0, 1.0])
        pylab.ylim([0.0, 1.0])
        pylab.xlabel('Recall')
        pylab.ylabel('Precision')
        pylab.title('P/R (AUC=%0.2f) / %s' % (auc_score, label))
        pylab.fill_between(recall, precision, alpha=0.5)
        pylab.grid(True, linestyle='-', color='0.75')
        pylab.plot(recall, precision, lw=1)
        pylab.savefig(figure_path)
    except Exception as e:
        print("save image error with matplotlib")
        pass
Example #8
Source File: climatology3Spark.py From incubator-sdap-nexus with Apache License 2.0 | 5 votes |
def histogram(vals, variable, n, outFile):
    figFile = os.path.splitext(outFile)[0] + '_hist.png'
    M.clf()
    # M.hist(vals, 47, (-2., 45.))
    M.hist(vals, 94)
    M.xlim(-5, 45)
    M.xlabel('SST in degrees Celsius')
    M.ylim(0, 300000)
    M.ylabel('Count')
    M.title('Histogram of %s %d-day Mean from %s' % (variable.upper(), n, outFile))
    M.show()
    print >>sys.stderr, 'Writing histogram plot to %s' % figFile
    M.savefig(figFile)
    return figFile
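Note that print >>sys.stderr, ... is Python 2 syntax, so this example (and the near-identical histogram() functions in Examples #11, #12, and #14) will not run under Python 3 as written. A minimal adaptation of that single line, assuming the same figFile variable and leaving the rest of the function unchanged, would be:

print('Writing histogram plot to %s' % figFile, file=sys.stderr)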
Example #9
Source File: util.py From Azimuth with BSD 3-Clause "New" or "Revised" License | 5 votes |
def qqplotp(pv, fileout=None, alphalevel=0.05, legend=None, xlim=None, ylim=None,
            ycoord=10, plotsize="652x526", title=None, dohist=True, numbins=50,
            figsize=[5, 5], markersize=2):
    '''
    Read in p-values from filein and make a qqplot and histogram.
    If fileout is provided, saves the qqplot only at present.
    Searches through p until one is found.
    '''
    import pylab as pl

    pl.ion()

    fs = 8
    h1 = qqplot(pv, fileout, alphalevel, legend, xlim, ylim, addlambda=True,
                figsize=figsize, markersize=markersize)
    # lambda_gc = estimate_lambda(pv)
    # pl.legend(["gc=" + '%1.3f' % lambda_gc], loc=2)
    pl.title(title, fontsize=fs)

    wm = pl.get_current_fig_manager()
    # e.g. "652x526+100+10
    xcoord = 100
    # wm.window.wm_geometry(plotsize + "+" + str(xcoord) + "+" + str(ycoord))

    if dohist:
        h2 = pvalhist(pv, numbins=numbins, figsize=figsize)
        pl.title(title, fontsize=fs)
        # wm = pl.get_current_fig_manager()
        width_height = plotsize.split("x")
        buffer = 10
        xcoord = int(xcoord + float(width_height[0]) + buffer)
        # wm.window.wm_geometry(plotsize + "+" + str(xcoord) + "+" + str(ycoord))
    else:
        h2 = None

    return h1, h2
Example #10
Source File: util.py From Azimuth with BSD 3-Clause "New" or "Revised" License | 5 votes |
def fix_axes(buffer=0.1):
    '''
    Makes x and y max the same, and the lower limits 0.
    '''
    maxlim = max(pl.xlim()[1], pl.ylim()[1])
    pl.xlim([0 - buffer, maxlim + buffer])
    pl.ylim([0 - buffer, maxlim + buffer])
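fix_axes() relies on the getter form of xlim()/ylim(): called with no arguments each returns the current (low, high) pair, so indexing with [1] picks out the upper limit. A small illustrative sketch with made-up data (not from the Azimuth project):

import pylab as pl

pl.plot([0.1, 0.5, 2.0], [0.3, 1.7, 0.9], '.')  # arbitrary points
print(pl.xlim(), pl.ylim())   # each call returns the current (low, high) tuple
fix_axes(buffer=0.1)          # square the axes up as defined above
print(pl.xlim(), pl.ylim())   # both now span (-0.1, shared_max + 0.1)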
Example #11
Source File: ClimatologySpark2.py From incubator-sdap-nexus with Apache License 2.0 | 5 votes |
def histogram(vals, variable, n, outFile):
    figFile = os.path.splitext(outFile)[0] + '_hist.png'
    M.clf()
    # M.hist(vals, 47, (-2., 45.))
    M.hist(vals, 94)
    M.xlim(-5, 45)
    M.xlabel('SST in degrees Celsius')
    M.ylim(0, 300000)
    M.ylabel('Count')
    M.title('Histogram of %s %d-day Mean from %s' % (variable.upper(), n, outFile))
    M.show()
    print >> sys.stderr, 'Writing histogram plot to %s' % figFile
    M.savefig(figFile)
    return figFile
Example #12
Source File: ClimatologySpark.py From incubator-sdap-nexus with Apache License 2.0 | 5 votes |
def histogram(vals, variable, n, outFile):
    figFile = os.path.splitext(outFile)[0] + '_hist.png'
    M.clf()
    # M.hist(vals, 47, (-2., 45.))
    M.hist(vals, 94)
    M.xlim(-5, 45)
    M.xlabel('SST in degrees Celsius')
    M.ylim(0, 300000)
    M.ylabel('Count')
    M.title('Histogram of %s %d-day Mean from %s' % (variable.upper(), n, outFile))
    M.show()
    print >>sys.stderr, 'Writing histogram plot to %s' % figFile
    M.savefig(figFile)
    return figFile
Example #13
Source File: io_methods.py From signaltrain with GNU General Public License v3.0 | 5 votes |
def plot_valdata(x_val_cuda, knobs_val_cuda, y_val_cuda, y_val_hat_cuda, effect, \
        epoch, loss_val, file_prefix='val_data', num_plots=50, target_size=None):
    x_size = len(x_val_cuda.data.cpu().numpy()[0])
    if target_size is None:
        y_size = len(y_val_cuda.data.cpu().numpy()[0])
    else:
        y_size = target_size
    t_small = range(x_size - y_size, x_size)
    for plot_i in range(0, num_plots):
        x_val = x_val_cuda.data.cpu().numpy()
        knobs_w = effect.knobs_wc(knobs_val_cuda.data.cpu().numpy()[plot_i, :])
        plt.figure(plot_i, figsize=(6, 8))
        titlestr = f'{effect.name} Val data, epoch {epoch+1}, loss_val = {loss_val.item():.3e}\n'
        for i in range(len(effect.knob_names)):
            titlestr += f'{effect.knob_names[i]} = {knobs_w[i]:.2f}'
            if i < len(effect.knob_names) - 1:
                titlestr += ', '
        plt.suptitle(titlestr)

        plt.subplot(3, 1, 1)
        plt.plot(x_val[plot_i, :], 'b', label='Input')
        plt.ylim(-1, 1)
        plt.xlim(0, x_size)
        plt.legend()

        plt.subplot(3, 1, 2)
        y_val = y_val_cuda.data.cpu().numpy()
        plt.plot(t_small, y_val[plot_i, -y_size:], 'r', label='Target')
        plt.xlim(0, x_size)
        plt.ylim(-1, 1)
        plt.legend()

        plt.subplot(3, 1, 3)
        plt.plot(t_small, y_val[plot_i, -y_size:], 'r', label='Target')
        y_val_hat = y_val_hat_cuda.data.cpu().numpy()
        plt.plot(t_small, y_val_hat[plot_i, -y_size:], c=(0, 0.5, 0, 0.85), label='Predicted')
        plt.ylim(-1, 1)
        plt.xlim(0, x_size)
        plt.legend()

        filename = file_prefix + '_' + str(plot_i) + '.png'
        savefig(filename)
    return
Example #14
Source File: climatology2.py From incubator-sdap-nexus with Apache License 2.0 | 5 votes |
def histogram(vals, variable, n, outFile):
    figFile = os.path.splitext(outFile)[0] + '_hist.png'
    M.clf()
    # M.hist(vals, 47, (-2., 45.))
    M.hist(vals, 94)
    M.xlim(-5, 45)
    M.xlabel('SST in degrees Celsius')
    M.ylim(0, 300000)
    M.ylabel('Count')
    M.title('Histogram of %s %d-day Mean from %s' % (variable.upper(), n, outFile))
    M.show()
    print >>sys.stderr, 'Writing histogram plot to %s' % figFile
    M.savefig(figFile)
    return figFile
Example #15
Source File: plotting.py From melgan with BSD 3-Clause "New" or "Revised" License | 5 votes |
def plot_waveform_to_numpy(waveform):
    fig, ax = plt.subplots(figsize=(12, 3))
    ax.plot()
    ax.plot(range(len(waveform)), waveform,
            linewidth=0.1, alpha=0.7, color='blue')

    plt.xlabel("Samples")
    plt.ylabel("Amplitude")
    plt.ylim(-1, 1)
    plt.tight_layout()

    fig.canvas.draw()
    data = save_figure_to_numpy(fig)
    plt.close()
    return data
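Because the waveform here is assumed to be normalized to [-1, 1], fixing plt.ylim(-1, 1) keeps every logged image on the same amplitude scale instead of letting autoscaling change it from call to call. A self-contained sketch of the same idea with synthetic data (the waveform below is made up):

import numpy as np
import matplotlib.pyplot as plt

waveform = 0.8 * np.sin(np.linspace(0, 20 * np.pi, 4000))  # synthetic audio frame
plt.plot(waveform, linewidth=0.1)
plt.ylim(-1, 1)   # fixed amplitude range, independent of the data
plt.show()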
Example #16
Source File: process_mmds.py From widedeepnetworks with Apache License 2.0 | 5 votes |
def process_mmd_experiment(width_class):
    results_file_name = mmd_experiment.results_file_stub + "_" + width_class + ".pickle"
    results = pickle.load(open(results_file_name, 'rb'))
    callibration_mmds = np.loadtxt('results/callibration_mmds.csv')
    mean_callibration = np.mean(callibration_mmds)

    mmd_squareds = results['mmd_squareds']
    hidden_layer_numbers = results['hidden_layer_numbers']
    hidden_unit_numbers = results['hidden_unit_numbers']
    num_repeats = mmd_squareds.shape[2]

    mean_mmds = np.mean(mmd_squareds, axis=2)
    std_mmds = np.std(mmd_squareds, axis=2) / np.sqrt(num_repeats)

    plt.figure()
    for hidden_layer_number, index in zip(hidden_layer_numbers, range(len(hidden_layer_numbers))):
        if hidden_layer_number == 1:
            layer_string = ' hidden layer'
        else:
            layer_string = ' hidden layers'
        line_name = str(hidden_layer_number) + layer_string
        plt.errorbar(hidden_unit_numbers, mean_mmds[:, index],
                     yerr=2. * std_mmds[:, index], label=line_name)
    plt.xlabel('Number of hidden units per layer')
    plt.xlim([0, 60])
    plt.ylabel('MMD SQUARED(GP, NN)')
    plt.ylim([0., 0.02])
    plt.axhline(y=mean_callibration, color='r', linestyle='--')
    plt.legend()
    output_file_name = "../figures/mmds_" + width_class + ".pdf"
    plt.savefig(output_file_name)
    embed()
    plt.show()
Example #17
Source File: eval.py From DeepLearningImplementations with MIT License | 5 votes |
def plot_results(list_log, to_plot="losses"):

    list_color = [u'#E24A33', u'#348ABD', u'#FBC15E', u'#777777',
                  u'#988ED5', u'#8EBA42', u'#FFB5B8']

    plt.figure()
    for idx, log in enumerate(list_log):
        with open(log, "r") as f:
            d = json.load(f)

            experiment_name = d["experiment_name"]
            color = list_color[idx]

            plt.plot(d["train_%s" % to_plot],
                     color=color,
                     linewidth=3,
                     label="Train %s" % experiment_name)
            plt.plot(d["val_%s" % to_plot],
                     color=color,
                     linestyle="--",
                     linewidth=3,)
    plt.ylabel(to_plot, fontsize=20)
    if to_plot == "losses":
        plt.yscale("log")
    if to_plot == "accs":
        plt.ylim([0, 1.1])
    plt.xlabel("Number of epochs", fontsize=20)
    plt.title("%s experiment" % dataset, fontsize=22)
    plt.legend(loc="best")
    plt.tight_layout()
    plt.savefig("./figures/%s_results_%s.png" % (dataset, to_plot))
    plt.show()
Example #18
Source File: utils.py From Building-Machine-Learning-Systems-With-Python-Second-Edition with MIT License | 5 votes |
def plot_bias_variance(data_sizes, train_errors, test_errors, name, title):
    pylab.figure(num=None, figsize=(6, 5))
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('Data set size')
    pylab.ylabel('Error')
    pylab.title("Bias-Variance for '%s'" % name)
    pylab.plot(
        data_sizes, test_errors, "--", data_sizes, train_errors, "b-", lw=1)
    pylab.legend(["test error", "train error"], loc="upper right")
    pylab.grid(True, linestyle='-', color='0.75')
    pylab.savefig(
        os.path.join(CHART_DIR, "bv_" + name.replace(" ", "_") + ".png"), bbox_inches="tight")
Example #19
Source File: dispersion.py From razzy-spinner with GNU General Public License v3.0 | 5 votes |
def dispersion_plot(text, words, ignore_case=False, title="Lexical Dispersion Plot"):
    """
    Generate a lexical dispersion plot.

    :param text: The source text
    :type text: list(str) or enum(str)
    :param words: The target words
    :type words: list of str
    :param ignore_case: flag to set if case should be ignored when searching text
    :type ignore_case: bool
    """

    try:
        from matplotlib import pylab
    except ImportError:
        raise ValueError('The plot function requires matplotlib to be installed.'
                         'See http://matplotlib.org/')

    text = list(text)
    words.reverse()

    if ignore_case:
        words_to_comp = list(map(str.lower, words))
        text_to_comp = list(map(str.lower, text))
    else:
        words_to_comp = words
        text_to_comp = text

    points = [(x, y) for x in range(len(text_to_comp))
              for y in range(len(words_to_comp))
              if text_to_comp[x] == words_to_comp[y]]
    if points:
        x, y = list(zip(*points))
    else:
        x = y = ()

    pylab.plot(x, y, "b|", scalex=.1)
    pylab.yticks(list(range(len(words))), words, color="b")
    pylab.ylim(-1, len(words))
    pylab.title(title)
    pylab.xlabel("Word Offset")
    pylab.show()
Example #20
Source File: utils.py From Building-Machine-Learning-Systems-With-Python-Second-Edition with MIT License | 5 votes |
def plot_pr(auc_score, name, phase, precision, recall, label=None):
    pylab.clf()
    pylab.figure(num=None, figsize=(5, 4))
    pylab.grid(True)
    pylab.fill_between(recall, precision, alpha=0.5)
    pylab.plot(recall, precision, lw=1)
    pylab.xlim([0.0, 1.0])
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('Recall')
    pylab.ylabel('Precision')
    pylab.title('P/R curve (AUC=%0.2f) / %s' % (auc_score, label))
    filename = name.replace(" ", "_")
    pylab.savefig(os.path.join(CHART_DIR, "pr_%s_%s.png" % (filename, phase)),
                  bbox_inches="tight")
Example #21
Source File: utils.py From Building-Machine-Learning-Systems-With-Python-Second-Edition with MIT License | 5 votes |
def plot_bias_variance(data_sizes, train_errors, test_errors, name):
    pylab.clf()
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('Data set size')
    pylab.ylabel('Error')
    pylab.title("Bias-Variance for '%s'" % name)
    pylab.plot(
        data_sizes, train_errors, "-", data_sizes, test_errors, "--", lw=1)
    pylab.legend(["train error", "test error"], loc="upper right")
    pylab.grid()
    pylab.savefig(os.path.join(CHART_DIR, "bv_" + name + ".png"))
Example #22
Source File: utils.py From Building-Machine-Learning-Systems-With-Python-Second-Edition with MIT License | 5 votes |
def plot_pr(auc_score, name, precision, recall, label=None):
    pylab.clf()
    pylab.figure(num=None, figsize=(5, 4))
    pylab.grid(True)
    pylab.fill_between(recall, precision, alpha=0.5)
    pylab.plot(recall, precision, lw=1)
    pylab.xlim([0.0, 1.0])
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('Recall')
    pylab.ylabel('Precision')
    pylab.title('P/R curve (AUC = %0.2f) / %s' % (auc_score, label))
    filename = name.replace(" ", "_")
    pylab.savefig(
        os.path.join(CHART_DIR, "pr_" + filename + ".png"), bbox_inches="tight")
Example #23
Source File: utils.py From Building-Machine-Learning-Systems-With-Python-Second-Edition with MIT License | 5 votes |
def plot_roc(auc_score, name, fpr, tpr):
    pylab.figure(num=None, figsize=(6, 5))
    pylab.plot([0, 1], [0, 1], 'k--')
    pylab.xlim([0.0, 1.0])
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('False Positive Rate')
    pylab.ylabel('True Positive Rate')
    pylab.title('Receiver operating characteristic (AUC=%0.2f)\n%s' % (
        auc_score, name))
    pylab.legend(loc="lower right")
    pylab.grid(True, linestyle='-', color='0.75')
    pylab.fill_between(tpr, fpr, alpha=0.5)
    pylab.plot(fpr, tpr, lw=1)
    pylab.savefig(
        os.path.join(CHART_DIR, "roc_" + name.replace(" ", "_") + ".png"))
Example #24
Source File: utils.py From Building-Machine-Learning-Systems-With-Python-Second-Edition with MIT License | 5 votes |
def plot_pr(auc_score, name, precision, recall, label=None):
    pylab.figure(num=None, figsize=(6, 5))
    pylab.xlim([0.0, 1.0])
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('Recall')
    pylab.ylabel('Precision')
    pylab.title('P/R (AUC=%0.2f) / %s' % (auc_score, label))
    pylab.fill_between(recall, precision, alpha=0.5)
    pylab.grid(True, linestyle='-', color='0.75')
    pylab.plot(recall, precision, lw=1)
    filename = name.replace(" ", "_")
    pylab.savefig(os.path.join(CHART_DIR, "pr_" + filename + ".png"))
Example #25
Source File: utils.py From Building-Machine-Learning-Systems-With-Python-Second-Edition with MIT License | 5 votes |
def plot_k_complexity(ks, train_errors, test_errors):
    pylab.figure(num=None, figsize=(6, 5))
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('k')
    pylab.ylabel('Error')
    pylab.title('Errors for different values of $k$')
    pylab.plot(
        ks, test_errors, "--", ks, train_errors, "-", lw=1)
    pylab.legend(["test error", "train error"], loc="upper right")
    pylab.grid(True, linestyle='-', color='0.75')
    pylab.savefig(
        os.path.join(CHART_DIR, "kcomplexity.png"), bbox_inches="tight")
Example #26
Source File: multirate_helper.py From scikit-dsp-comm with BSD 2-Clause "Simplified" License | 5 votes |
def freq_resp(self, mode='dB', fs=8000, ylim=[-100, 2]):
    """ """
    fir_d.freqz_resp_list([self.b], [1], mode, fs=fs, Npts=1024)
    pylab.grid()
    pylab.ylim(ylim)
Example #27
Source File: multirate_helper.py From scikit-dsp-comm with BSD 2-Clause "Simplified" License | 5 votes |
def freq_resp(self, mode='dB', fs=8000, ylim=[-100, 2]):
    """
    Frequency response plot
    """
    iir_d.freqz_resp_cas_list([self.sos], mode, fs=fs)
    pylab.grid()
    pylab.ylim(ylim)
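Both multirate_helper examples pass a single list to pylab.ylim(): ylim() accepts either two scalars or one (bottom, top) sequence, which is why the default ylim=[-100, 2] can be forwarded directly to set the dB range. The two call forms below are equivalent (a sketch, not part of scikit-dsp-comm):

pylab.ylim([-100, 2])   # single sequence argument: (bottom, top)
pylab.ylim(-100, 2)     # equivalent two-scalar form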
Example #28
Source File: D_collocation_accuracy_check.py From pySDC with BSD 2-Clause "Simplified" License | 4 votes |
def plot_accuracy(results):
    """
    Routine to visualize the errors as well as the expected errors

    Args:
        results: the dictionary containing the errors
    """

    # retrieve the list of nvars from results
    assert 'dt_list' in results, 'ERROR: expecting the list of dts in the results dictionary'
    dt_list = sorted(results['dt_list'])

    # Set up plotting parameters
    params = {'legend.fontsize': 20,
              'figure.figsize': (12, 8),
              'axes.labelsize': 20,
              'axes.titlesize': 20,
              'xtick.labelsize': 16,
              'ytick.labelsize': 16,
              'lines.linewidth': 3
              }
    plt.rcParams.update(params)

    # create new figure
    plt.figure()
    # take x-axis limits from nvars_list + some spacning left and right
    plt.xlim([min(dt_list) / 2, max(dt_list) * 2])
    plt.xlabel('dt')
    plt.ylabel('abs. error')
    plt.grid()

    # get guide for the order of accuracy, i.e. the errors to expect
    # get error for first entry in nvars_list
    id = ID(dt=dt_list[0])
    base_error = results[id]
    # assemble optimal errors for 5th order method and plot
    order_guide_space = [base_error * (2 ** (5 * i)) for i in range(0, len(dt_list))]
    plt.loglog(dt_list, order_guide_space, color='k', ls='--', label='5th order')

    min_err = 1E99
    max_err = 0E00
    err_list = []
    # loop over nvars, get errors and find min/max error for y-axis limits
    for dt in dt_list:
        id = ID(dt=dt)
        err = results[id]
        min_err = min(err, min_err)
        max_err = max(err, max_err)
        err_list.append(err)
    plt.loglog(dt_list, err_list, ls=' ', marker='o', markersize=10, label='experiment')

    # adjust y-axis limits, add legend
    plt.ylim([min_err / 10, max_err * 10])
    plt.legend(loc=2, ncol=1, numpoints=1)

    # save plot as PDF, beautify
    fname = 'step_1_accuracy_test_coll.png'
    plt.savefig(fname, rasterized=True, bbox_inches='tight')

    return None
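On the log-scaled y-axis used here, padding the limits multiplicatively (a factor of 10 below the smallest and above the largest error) leaves one clear decade of margin on each side, which additive padding would not. A minimal sketch of the same pattern with hypothetical error values:

import matplotlib.pyplot as plt

errs = [1e-3, 4e-5, 2e-6]                     # hypothetical errors
plt.loglog([1, 2, 3], errs, ls=' ', marker='o')
plt.ylim([min(errs) / 10, max(errs) * 10])    # one decade of padding on each side
plt.show()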
Example #29
Source File: PlotELBO.py From refinery with MIT License | 4 votes |
def plot_all_tasks_for_job(jobpath, args, jobname=None, color=None):
    ''' Create line plot in current matplotlib figure
        for each task/run of the designated jobpath
    '''
    if not os.path.exists(jobpath):
        raise ValueError("No such path: %s" % (jobpath))
    taskids = BNPYArgParser.parse_task_ids(jobpath, args.taskids)

    xAll = list()
    yAll = list()
    xLocs = list()
    yLocs = list()
    for tt, taskid in enumerate(taskids):
        xs = np.loadtxt(os.path.join(jobpath, taskid, args.xvar + '.txt'))
        ys = np.loadtxt(os.path.join(jobpath, taskid, 'evidence.txt'))

        # remove first-lap of moVB, since ELBO is not accurate
        if jobpath.count('moVB') > 0 and args.xvar == 'laps':
            mask = xs >= 1.0
            xs = xs[mask]
            ys = ys[mask]

        if args.traceEvery is not None:
            mask = bnpy.util.isEvenlyDivisibleFloat(xs, args.traceEvery)
            xs = xs[mask]
            ys = ys[mask]

        plotargs = dict(markersize=10, linewidth=2, label=None,
                        color=color, markeredgecolor=color)
        if tt == 0:
            plotargs['label'] = jobname
        pylab.plot(xs, ys, '.-', **plotargs)

        if len(ys) > 0:
            xLocs.append(xs[-1])
            yLocs.append(ys[-1])
            yAll.extend(ys[1:])
            xAll.extend(xs[1:])

    # Zoom in to the useful part of the ELBO trace
    if len(yAll) > 0:
        global YMin, YMax
        ymin = np.percentile(yAll, 1)
        ymax = np.max(yAll)
        if YMin is None:
            YMin = ymin
            YMax = ymax
        else:
            YMin = np.minimum(ymin, YMin)
            YMax = np.maximum(YMax, ymax)
        blankmargin = 0.08 * (YMax - YMin)
        pylab.ylim([YMin, YMax + blankmargin])

    pylab.xlabel(XLabelMap[args.xvar])
    pylab.ylabel('log evidence')
Example #30
Source File: PlotK.py From refinery with MIT License | 4 votes |
def plot_all_tasks_for_job(jobpath, args, jobname=None, color=None):
    ''' Create line plot in current matplotlib figure
        for each task/run of the designated jobpath
    '''
    if not os.path.exists(jobpath):
        raise ValueError("No such path: %s" % (jobpath))
    taskids = BNPYArgParser.parse_task_ids(jobpath, args.taskids)

    xAll = list()
    yAll = list()
    xLocs = list()
    yLocs = list()
    for tt, taskid in enumerate(taskids):
        xs = np.loadtxt(os.path.join(jobpath, taskid, args.xvar + '.txt'))
        try:
            ys = np.loadtxt(os.path.join(jobpath, taskid, 'K.txt'))
        except IOError:
            MatDict = scipy.io.loadmat(os.path.join(jobpath, taskid, 'AllocPrior.mat'))
            Kfixed = int(MatDict['K'])
            ys = Kfixed * np.ones(len(xs))

        if args.traceEvery is not None:
            mask = bnpy.util.isEvenlyDivisibleFloat(xs, args.traceEvery)
            xs = xs[mask]
            ys = ys[mask]

        plotargs = dict(markersize=10, linewidth=2, label=None,
                        color=color, markeredgecolor=color)
        if tt == 0:
            plotargs['label'] = jobname
        pylab.plot(xs, ys, '.-', **plotargs)

        if len(ys) > 0:
            xLocs.append(xs[-1])
            yLocs.append(ys[-1])
            yAll.extend(ys[1:])
            xAll.extend(xs[1:])

    # Zoom in to the useful part of the ELBO trace
    if len(yAll) > 0:
        global YMax
        ymax = np.max(yAll)
        if YMax is None:
            YMax = ymax
        else:
            YMax = np.maximum(YMax, ymax)
        blankmargin = 0.05 * (YMax)
        pylab.ylim([0, YMax + blankmargin])

    pylab.xlabel(XLabelMap[args.xvar])
    pylab.ylabel('K')