Python matplotlib.pyplot.subplot() Examples
The following are 30 code examples of matplotlib.pyplot.subplot(). Each example is taken from an open-source project; the source file and project are listed above each example. You may also want to check out the other available functions and classes of the matplotlib.pyplot module.
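Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the listed projects) of the two calling conventions that recur throughout them: the three-argument form plt.subplot(nrows, ncols, index) and the three-digit shorthand such as plt.subplot(121).

import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 2 * np.pi, 200)

# Three-argument form: a 1x2 grid, selecting panel 1 then panel 2.
plt.subplot(1, 2, 1)          # equivalent shorthand: plt.subplot(121)
plt.plot(x, np.sin(x))
plt.title('sin(x)')

plt.subplot(1, 2, 2)          # equivalent shorthand: plt.subplot(122)
plt.plot(x, np.cos(x))
plt.title('cos(x)')

plt.tight_layout()
plt.show()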
Example #1
Source File: data_augmentation.py From Sound-Recognition-Tutorial with Apache License 2.0

def demo_plot():
    audio = './data/esc10/audio/Dog/1-30226-A.ogg'
    y, sr = librosa.load(audio, sr=44100)
    y_ps = librosa.effects.pitch_shift(y, sr, n_steps=6)  # n_steps controls the size of the pitch shift
    y_ts = librosa.effects.time_stretch(y, rate=1.2)  # rate controls the time-stretch factor
    plt.subplot(311)
    plt.plot(y)
    plt.title('Original waveform')
    plt.axis([0, 200000, -0.4, 0.4])
    # plt.axis([88000, 94000, -0.4, 0.4])
    plt.subplot(312)
    plt.plot(y_ts)
    plt.title('Time Stretch transformed waveform')
    plt.axis([0, 200000, -0.4, 0.4])
    plt.subplot(313)
    plt.plot(y_ps)
    plt.title('Pitch Shift transformed waveform')
    plt.axis([0, 200000, -0.4, 0.4])
    # plt.axis([88000, 94000, -0.4, 0.4])
    plt.tight_layout()
    plt.show()
Example #2
Source File: visualise_att_maps_epoch.py From Attention-Gated-Networks with MIT License

def plotNNFilter(units, figure_id, interp='bilinear', colormap=cm.jet, colormap_lim=None):
    plt.ion()
    filters = units.shape[2]
    n_columns = round(math.sqrt(filters))
    n_rows = math.ceil(filters / n_columns) + 1
    fig = plt.figure(figure_id, figsize=(n_rows*3, n_columns*3))
    fig.clf()
    for i in range(filters):
        ax1 = plt.subplot(n_rows, n_columns, i+1)
        plt.imshow(units[:, :, i].T, interpolation=interp, cmap=colormap)
        plt.axis('on')
        ax1.set_xticklabels([])
        ax1.set_yticklabels([])
        plt.colorbar()
        if colormap_lim:
            plt.clim(colormap_lim[0], colormap_lim[1])
    plt.subplots_adjust(wspace=0, hspace=0)
    plt.tight_layout()

# Epochs
Example #3
Source File: utils.py From pruning_yolov3 with GNU General Public License v3.0

def plot_evolution_results(hyp):  # from utils.utils import *; plot_evolution_results(hyp)
    # Plot hyperparameter evolution results in evolve.txt
    x = np.loadtxt('evolve.txt', ndmin=2)
    f = fitness(x)
    weights = (f - f.min()) ** 2  # for weighted results
    fig = plt.figure(figsize=(12, 10))
    matplotlib.rc('font', **{'size': 8})
    for i, (k, v) in enumerate(hyp.items()):
        y = x[:, i + 5]
        # mu = (y * weights).sum() / weights.sum()  # best weighted result
        mu = y[f.argmax()]  # best single result
        plt.subplot(4, 5, i + 1)
        plt.plot(mu, f.max(), 'o', markersize=10)
        plt.plot(y, f, '.')
        plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9})  # limit to 40 characters
        print('%15s: %.3g' % (k, mu))
    fig.tight_layout()
    plt.savefig('evolve.png', dpi=200)
Example #4
Source File: utils.py From AL with GNU General Public License v2.0

def draw_plots(strategy, accu_x, accu_y, auc_x, auc_y):
    """Draws the plot

    **Parameters**

    * strategy
    * accu_x (*list*)
    * accu_y (*list*)
    * auc_x (*list*)
    * auc_y (*list*)
    """
    plt.figure(1)
    plt.subplot(211)
    plt.plot(accu_x, accu_y, '-', label=strategy)
    plt.legend(loc='best')
    plt.title('Accuracy')
    plt.subplot(212)
    plt.plot(auc_x, auc_y, '-', label=strategy)
    plt.legend(loc='best')
    plt.title('AUC')
Example #5
Source File: visualize.py From dataiku-contrib with Apache License 2.0

def display_images(images, titles=None, cols=4, cmap=None, norm=None, interpolation=None):
    """Display the given set of images, optionally with titles.

    images: list or array of image tensors in HWC format.
    titles: optional. A list of titles to display with each image.
    cols: number of images per row
    cmap: Optional. Color map to use. For example, "Blues".
    norm: Optional. A Normalize instance to map values to colors.
    interpolation: Optional. Image interpolation to use for display.
    """
    titles = titles if titles is not None else [""] * len(images)
    rows = len(images) // cols + 1
    plt.figure(figsize=(14, 14 * rows // cols))
    i = 1
    for image, title in zip(images, titles):
        plt.subplot(rows, cols, i)
        plt.title(title, fontsize=9)
        plt.axis('off')
        plt.imshow(image.astype(np.uint8), cmap=cmap, norm=norm, interpolation=interpolation)
        i += 1
    plt.show()
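A hypothetical usage sketch for display_images as defined above; the random images and titles are illustrative only and do not come from the dataiku-contrib project.

import numpy as np
import matplotlib.pyplot as plt

# Eight synthetic 64x64 RGB images in HWC format, as the docstring expects.
images = [np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8) for _ in range(8)]
titles = ["image {}".format(i) for i in range(8)]
display_images(images, titles=titles, cols=4)  # assumes display_images from the example above is in scope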
Example #6
Source File: test.py From MomentumContrast.pytorch with MIT License

def show(mnist, targets, ret):
    target_ids = range(len(set(targets)))
    colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k', 'violet', 'orange', 'purple']
    plt.figure(figsize=(12, 10))
    ax = plt.subplot(aspect='equal')
    for label in set(targets):
        idx = np.where(np.array(targets) == label)[0]
        plt.scatter(ret[idx, 0], ret[idx, 1], c=colors[label], label=label)
    for i in range(0, len(targets), 250):
        img = (mnist[i][0] * 0.3081 + 0.1307).numpy()[0]
        img = OffsetImage(img, cmap=plt.cm.gray_r, zoom=0.5)
        ax.add_artist(AnnotationBbox(img, ret[i]))
    plt.legend()
    plt.show()
Example #7
Source File: visualise_fmaps.py From Attention-Gated-Networks with MIT License

def plotNNFilter(units, figure_id, interp='bilinear', colormap=cm.jet, colormap_lim=None):
    plt.ion()
    filters = units.shape[2]
    n_columns = round(math.sqrt(filters))
    n_rows = math.ceil(filters / n_columns) + 1
    fig = plt.figure(figure_id, figsize=(n_rows*3, n_columns*3))
    fig.clf()
    for i in range(filters):
        ax1 = plt.subplot(n_rows, n_columns, i+1)
        plt.imshow(units[:, :, i].T, interpolation=interp, cmap=colormap)
        plt.axis('on')
        ax1.set_xticklabels([])
        ax1.set_yticklabels([])
        plt.colorbar()
        if colormap_lim:
            plt.clim(colormap_lim[0], colormap_lim[1])
    plt.subplots_adjust(wspace=0, hspace=0)
    plt.tight_layout()

# Load options
Example #8
Source File: visualise_attention.py From Attention-Gated-Networks with MIT License

def plotNNFilter(units, figure_id, interp='bilinear', colormap=cm.jet, colormap_lim=None, title=''):
    plt.ion()
    filters = units.shape[2]
    n_columns = round(math.sqrt(filters))
    n_rows = math.ceil(filters / n_columns) + 1
    fig = plt.figure(figure_id, figsize=(n_rows*3, n_columns*3))
    fig.clf()
    for i in range(filters):
        ax1 = plt.subplot(n_rows, n_columns, i+1)
        plt.imshow(units[:, :, i].T, interpolation=interp, cmap=colormap)
        plt.axis('on')
        ax1.set_xticklabels([])
        ax1.set_yticklabels([])
        plt.colorbar()
        if colormap_lim:
            plt.clim(colormap_lim[0], colormap_lim[1])
    plt.subplots_adjust(wspace=0, hspace=0)
    plt.tight_layout()
    plt.suptitle(title)
Example #9
Source File: demos.py From bayesian_bootstrap with MIT License

def plot_mean_bootstrap_exponential_readme():
    X = np.random.exponential(7, 4)
    classical_samples = [np.mean(resample(X)) for _ in range(10000)]
    posterior_samples = mean(X, 10000)
    l, r = highest_density_interval(posterior_samples)
    classical_l, classical_r = highest_density_interval(classical_samples)
    plt.subplot(2, 1, 1)
    plt.title('Bayesian Bootstrap of mean')
    sns.distplot(posterior_samples, label='Bayesian Bootstrap Samples')
    plt.plot([l, r], [0, 0], linewidth=5.0, marker='o', label='95% HDI')
    plt.xlim(-1, 18)
    plt.legend()
    plt.subplot(2, 1, 2)
    plt.title('Classical Bootstrap of mean')
    sns.distplot(classical_samples, label='Classical Bootstrap Samples')
    plt.plot([classical_l, classical_r], [0, 0], linewidth=5.0, marker='o', label='95% HDI')
    plt.xlim(-1, 18)
    plt.legend()
    plt.savefig('readme_exponential.png', bbox_inches='tight')
Example #10
Source File: utils.py From pruning_yolov3 with GNU General Public License v3.0

def plot_images(imgs, targets, paths=None, fname='images.jpg'):
    # Plots training images overlaid with targets
    imgs = imgs.cpu().numpy()
    targets = targets.cpu().numpy()
    # targets = targets[targets[:, 1] == 21]  # plot only one class
    fig = plt.figure(figsize=(10, 10))
    bs, _, h, w = imgs.shape  # batch size, _, height, width
    bs = min(bs, 16)  # limit plot to 16 images
    ns = np.ceil(bs ** 0.5)  # number of subplots
    for i in range(bs):
        boxes = xywh2xyxy(targets[targets[:, 0] == i, 2:6]).T
        boxes[[0, 2]] *= w
        boxes[[1, 3]] *= h
        plt.subplot(ns, ns, i + 1).imshow(imgs[i].transpose(1, 2, 0))
        plt.plot(boxes[[0, 2, 2, 0, 0]], boxes[[1, 1, 3, 3, 1]], '.-')
        plt.axis('off')
        if paths is not None:
            s = Path(paths[i]).name
            plt.title(s[:min(len(s), 40)], fontdict={'size': 8})  # limit to 40 characters
    fig.tight_layout()
    fig.savefig(fname, dpi=200)
    plt.close()
Example #11
Source File: massachusetts_road_segm.py From Recipes with MIT License

def plot_some_results(pred_fn, test_generator, n_images=10):
    fig_ctr = 0
    for data, seg in test_generator:
        res = pred_fn(data)
        for d, s, r in zip(data, seg, res):
            plt.figure(figsize=(12, 6))
            plt.subplot(1, 3, 1)
            plt.imshow(d.transpose(1, 2, 0))
            plt.title("input patch")
            plt.subplot(1, 3, 2)
            plt.imshow(s[0])
            plt.title("ground truth")
            plt.subplot(1, 3, 3)
            plt.imshow(r)
            plt.title("segmentation")
            plt.savefig("road_segmentation_result_%03.0f.png" % fig_ctr)
            plt.close()
            fig_ctr += 1
            if fig_ctr > n_images:
                break
Example #12
Source File: plotting.py From medicaldetectiontoolkit with Apache License 2.0

def __init__(self, cf):
    self.file_name = cf.plot_dir + '/monitor_{}'.format(cf.fold)
    self.exp_name = cf.fold_dir
    self.do_validation = cf.do_validation
    self.separate_values_dict = cf.assign_values_to_extra_figure
    self.figure_list = []
    for n in range(cf.n_monitoring_figures):
        self.figure_list.append(plt.figure(figsize=(10, 6)))
        self.figure_list[-1].ax1 = plt.subplot(111)
        self.figure_list[-1].ax1.set_xlabel('epochs')
        self.figure_list[-1].ax1.set_ylabel('loss / metrics')
        self.figure_list[-1].ax1.set_xlim(0, cf.num_epochs)
        self.figure_list[-1].ax1.grid()
    self.figure_list[0].ax1.set_ylim(0, 1.5)
    self.color_palette = ['b', 'c', 'r', 'purple', 'm', 'y', 'k', 'tab:gray']
Example #13
Source File: analyze_log.py From spinn with MIT License

def ShowPlots(subplot=False):
    for log_ind, path in enumerate(FLAGS.path.split(":")):
        log = Log(path)
        if subplot:
            plt.subplot(len(FLAGS.path.split(":")), 1, log_ind + 1)
        for index in FLAGS.index.split(","):
            index = int(index)
            for attr in ["pred_acc", "parse_acc", "total_cost", "xent_cost", "l2_cost", "action_cost"]:
                if getattr(FLAGS, attr):
                    if "cost" in attr:
                        assert index == 0, "costs only associated with training log"
                    steps, val = zip(*[(l.step, getattr(l, attr)) for l in log.corpus[index] if l.step < FLAGS.iters])
                    dct = {}
                    for k, v in zip(steps, val):
                        dct[k] = max(v, dct[k]) if k in dct else v
                    steps, val = zip(*sorted(dct.iteritems()))
                    plt.plot(steps, val, label="Log%d:%s-%d" % (log_ind, attr, index))
    plt.xlabel("No. of training iteration")
    plt.ylabel(FLAGS.ylabel)
    if FLAGS.legend:
        plt.legend()
    plt.show()
Example #14
Source File: visualize_flow.py From residual-flows with MIT License

def visualize_transform(
    potential_or_samples, prior_sample, prior_density, transform=None, inverse_transform=None,
    samples=True, npts=100, memory=100, device="cpu"
):
    """Produces visualization for the model density and samples from the model."""
    plt.clf()
    ax = plt.subplot(1, 3, 1, aspect="equal")
    if samples:
        plt_samples(potential_or_samples, ax, npts=npts)
    else:
        plt_potential_func(potential_or_samples, ax, npts=npts)
    ax = plt.subplot(1, 3, 2, aspect="equal")
    if inverse_transform is None:
        plt_flow(prior_density, transform, ax, npts=npts, device=device)
    else:
        plt_flow_density(prior_density, inverse_transform, ax, npts=npts, memory=memory, device=device)
    ax = plt.subplot(1, 3, 3, aspect="equal")
    if transform is not None:
        plt_flow_samples(prior_sample, transform, ax, npts=npts, memory=memory, device=device)
Example #15
Source File: utils.py From DSMnet with Apache License 2.0

def imsplot_tensor(*imgs_tensor):
    """Plot multiple tensor images with matplotlib.pyplot.

    Each image should have shape (bn, c, h, w), or the arguments may be a
    sequence of single images of shape (1, c, h, w).
    """
    count = min(8, len(imgs_tensor))
    if count == 0:
        return
    col = min(2, count)
    row = count // col
    if count % col > 0:
        row = row + 1
    for i in range(count):
        plt.subplot(row, col, i+1)
        imshow_tensor(imgs_tensor[i])

# Compute and store the current and average values of parameters
Example #16
Source File: test_frame.py From recruit with Apache License 2.0

def _generate_4_axes_via_gridspec():
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    import matplotlib.gridspec  # noqa

    gs = mpl.gridspec.GridSpec(2, 2)
    ax_tl = plt.subplot(gs[0, 0])
    ax_ll = plt.subplot(gs[1, 0])
    ax_tr = plt.subplot(gs[0, 1])
    ax_lr = plt.subplot(gs[1, 1])

    return gs, [ax_tl, ax_ll, ax_tr, ax_lr]
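A hypothetical usage sketch for the GridSpec-based helper above (assuming _generate_4_axes_via_gridspec is in scope); the random data is illustrative only, not how the test suite calls it.

import numpy as np
import matplotlib.pyplot as plt

gs, axes = _generate_4_axes_via_gridspec()
# Axes are returned in the order top-left, lower-left, top-right, lower-right.
for ax, label in zip(axes, ["top-left", "lower-left", "top-right", "lower-right"]):
    ax.plot(np.random.randn(50).cumsum())
    ax.set_title(label)
plt.tight_layout()
plt.show()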
Example #17
Source File: utils.py From adagan with BSD 3-Clause "New" or "Revised" License

def debug_updated_weights(opts, steps, weights, data):
    """Various debug plots for updated weights of training points."""
    assert data.num_points == len(weights), 'Length mismatch'
    ws_and_ids = sorted(zip(weights, range(len(weights))))
    num_plot = 20 * 16
    if num_plot > len(weights):
        return
    ids = [_id for w, _id in ws_and_ids[:num_plot]]
    plot_points = data.data[ids]
    metrics = metrics_lib.Metrics()
    metrics.make_plots(opts, steps, None, plot_points, prefix='d_least_')
    ids = [_id for w, _id in ws_and_ids[-num_plot:]]
    plot_points = data.data[ids]
    metrics = metrics_lib.Metrics()
    metrics.make_plots(opts, steps, None, plot_points, prefix='d_most_')
    plt.clf()
    ax1 = plt.subplot(211)
    ax1.set_title('Weights over data points')
    plt.plot(range(len(weights)), sorted(weights))
    plt.axis([0, len(weights), 0., 2. * np.max(weights)])
    if data.labels is not None:
        all_labels = np.unique(data.labels)
        w_per_label = -1. * np.ones(len(all_labels))
        for _id, y in enumerate(all_labels):
            w_per_label[_id] = np.sum(weights[np.where(data.labels == y)[0]])
        ax2 = plt.subplot(212)
        ax2.set_title('Weights over labels')
        plt.scatter(range(len(all_labels)), w_per_label, s=30)
    filename = 'data_w{:02d}.png'.format(steps)
    create_dir(opts['work_dir'])
    plt.savefig(o_gfile((opts["work_dir"], filename), 'wb'))
Example #18
Source File: adaboostNB.py From weiboanalysis with Apache License 2.0

def print_error_Rate(error_Rate):
    x = [t for t in range(len(error_Rate))]
    error_Rate = [t for t in error_Rate]
    fig = plt.figure()
    fig.clf()
    ax = plt.subplot(111)
    ax.plot(x, error_Rate)
    # plt.xlabel('False positive rate')
    # plt.ylabel('True positive rate')
    # plt.title('ROC curve for AdaBoost horse colic detection system')
    # ax.axis([0, 50])
    plt.show()
Example #19
Source File: exp_synphge.py From connecting_the_dots with MIT License

def write_img(self, out_path, es, gt, im, ma):
    logging.info(f'write img {out_path}')
    u_pos, _ = np.meshgrid(range(es.shape[1]), range(es.shape[0]))

    diff = np.abs(es - gt)

    vmin, vmax = np.nanmin(gt), np.nanmax(gt)
    vmin = vmin - 0.2 * (vmax - vmin)
    vmax = vmax + 0.2 * (vmax - vmin)

    pattern_proj = self.pattern_proj.to('cpu').numpy()[0, 0]
    im_orig = self.data['im0'].detach().to('cpu').numpy()[0, 0, 0]
    pattern_diff = np.abs(im_orig - pattern_proj)

    fig = plt.figure(figsize=(16, 16))
    es0 = co.cmap.color_depth_map(es[0], scale=vmax)
    gt0 = co.cmap.color_depth_map(gt[0], scale=vmax)
    diff0 = co.cmap.color_error_image(diff[0], BGR=True)

    # plot disparities, ground truth disparity is shown only for reference
    ax = plt.subplot(3, 3, 1); plt.imshow(es0[..., [2, 1, 0]]); plt.xticks([]); plt.yticks([])
    ax.set_title(f'F0 Disparity Est. {es0.min():.4f}/{es0.max():.4f}')
    ax = plt.subplot(3, 3, 2); plt.imshow(gt0[..., [2, 1, 0]]); plt.xticks([]); plt.yticks([])
    ax.set_title(f'F0 Disparity GT {np.nanmin(gt0):.4f}/{np.nanmax(gt0):.4f}')
    ax = plt.subplot(3, 3, 3); plt.imshow(diff0[..., [2, 1, 0]]); plt.xticks([]); plt.yticks([])
    ax.set_title(f'F0 Disparity Err. {diff0.mean():.5f}')

    # plot disparities of the second frame in the track if exists
    if es.shape[0] >= 2:
        es1 = co.cmap.color_depth_map(es[1], scale=vmax)
        gt1 = co.cmap.color_depth_map(gt[1], scale=vmax)
        diff1 = co.cmap.color_error_image(diff[1], BGR=True)
        ax = plt.subplot(3, 3, 4); plt.imshow(es1[..., [2, 1, 0]]); plt.xticks([]); plt.yticks([])
        ax.set_title(f'F1 Disparity Est. {es1.min():.4f}/{es1.max():.4f}')
        ax = plt.subplot(3, 3, 5); plt.imshow(gt1[..., [2, 1, 0]]); plt.xticks([]); plt.yticks([])
        ax.set_title(f'F1 Disparity GT {np.nanmin(gt1):.4f}/{np.nanmax(gt1):.4f}')
        ax = plt.subplot(3, 3, 6); plt.imshow(diff1[..., [2, 1, 0]]); plt.xticks([]); plt.yticks([])
        ax.set_title(f'F1 Disparity Err. {diff1.mean():.5f}')

    # plot normalized IR inputs
    ax = plt.subplot(3, 3, 7); plt.imshow(im[0], vmin=im.min(), vmax=im.max(), cmap='gray'); plt.xticks([]); plt.yticks([])
    ax.set_title(f'F0 IR input {im[0].mean():.5f}/{im[0].std():.5f}')
    if es.shape[0] >= 2:
        ax = plt.subplot(3, 3, 8); plt.imshow(im[1], vmin=im.min(), vmax=im.max(), cmap='gray'); plt.xticks([]); plt.yticks([])
        ax.set_title(f'F1 IR input {im[1].mean():.5f}/{im[1].std():.5f}')

    plt.tight_layout()
    plt.savefig(str(out_path))
    plt.close(fig)
Example #20
Source File: residual_plotter.py From gmpe-smtk with GNU Affero General Public License v3.0

def create_plot(self):
    """
    Creates a residual plot
    """
    data = self.get_plot_data()
    # statistics = self.residuals.get_residual_statistics()
    fig = plt.figure(figsize=self.figure_size)
    fig.set_tight_layout(True)
    nrow, ncol = self.get_subplots_rowcols()
    for tloc, res_type in enumerate(data.keys(), 1):
        self._residual_plot(plt.subplot(nrow, ncol, tloc), data[res_type], res_type)
    _save_image(self.filename, self.filetype, self.dpi)
    if self.show:
        plt.show()
Example #21
Source File: response_spectrum.py From gmpe-smtk with GNU Affero General Public License v3.0

def plot_response_spectra(spectra, axis_type="loglog", figure_size=(8, 6),
                          filename=None, filetype="png", dpi=300):
    """
    Creates a plot of the suite of response spectra (Acceleration,
    Velocity, Displacement, Pseudo-Acceleration, Pseudo-Velocity) derived
    from a particular ground motion record
    """
    fig = plt.figure(figsize=figure_size)
    fig.set_tight_layout(True)
    ax = plt.subplot(2, 2, 1)
    # Acceleration
    PLOT_TYPE[axis_type](ax, spectra["Period"], spectra["Acceleration"])
    PLOT_TYPE[axis_type](ax, spectra["Period"], spectra["Pseudo-Acceleration"])
    ax.set_xlabel("Periods (s)", fontsize=12)
    ax.set_ylabel("Acceleration (cm/s/s)", fontsize=12)
    ax.set_xlim(np.min(spectra["Period"]), np.max(spectra["Period"]))
    ax.grid()
    ax.legend(("Acceleration", "PSA"), loc=0)
    ax = plt.subplot(2, 2, 2)
    # Velocity
    PLOT_TYPE[axis_type](ax, spectra["Period"], spectra["Velocity"])
    PLOT_TYPE[axis_type](ax, spectra["Period"], spectra["Pseudo-Velocity"])
    ax.set_xlabel("Periods (s)", fontsize=12)
    ax.set_ylabel("Velocity (cm/s)", fontsize=12)
    ax.set_xlim(np.min(spectra["Period"]), np.max(spectra["Period"]))
    ax.grid()
    ax.legend(("Velocity", "PSV"), loc=0)
    ax = plt.subplot(2, 2, 3)
    # Displacement
    PLOT_TYPE[axis_type](ax, spectra["Period"], spectra["Displacement"])
    ax.set_xlabel("Periods (s)", fontsize=12)
    ax.set_ylabel("Displacement (cm)", fontsize=12)
    ax.set_xlim(np.min(spectra["Period"]), np.max(spectra["Period"]))
    ax.grid()
    _save_image(filename, filetype, dpi)
    plt.show()
Example #22
Source File: run_tests.py From LearningX with MIT License

def plot_model_tree_fit(model, X, y):
    output_filename = os.path.join("output", "test_{}_fit.png".format(model.__class__.__name__))
    print("Saving model tree predictions plot y vs x to '{}'...".format(output_filename))

    plt.figure(figsize=(20, 10))
    figure_str = "23"
    for depth in range(6):
        # Form model tree
        print(" -> training model tree depth={}...".format(depth))
        model_tree = ModelTree(model, max_depth=depth, min_samples_leaf=10,
                               search_type="greedy", n_search_grid=100)
        # Train model tree
        model_tree.fit(X, y, verbose=False)
        y_pred = model_tree.predict(X)
        # Plot predictions
        plt.subplot(int(figure_str + str(depth + 1)))
        plt.plot(X[:, 0], y, '.', markersize=5, color='k')
        plt.plot(X[:, 0], y_pred, '.', markersize=5, color='r')
        plt.legend(['data', 'fit'])
        plt.title("depth = {}".format(depth))
        plt.xlabel("x", fontsize=15)
        plt.ylabel("y", fontsize=15)
        plt.grid()
    plt.suptitle('Model tree (model = {}) fits for different depths'.format(model.__class__.__name__), fontsize=25)
    plt.savefig(output_filename, bbox_inches='tight')
    plt.close()
Example #23
Source File: smooth.py From ibllib with MIT License

def smooth_demo():
    t = np.linspace(-4, 4, 100)
    x = np.sin(t)
    xn = x + np.random.randn(len(t)) * 0.1
    ws = 31

    plt.subplot(211)
    plt.plot(np.ones(ws))

    windows = ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']
    for w in windows[1:]:
        eval('plt.plot(np.' + w + '(ws) )')
    plt.axis([0, 30, 0, 1.1])

    plt.legend(windows)
    plt.title("The smoothing windows")

    plt.subplot(212)
    plt.plot(x)
    plt.plot(xn)
    for w in windows:
        plt.plot(rolling_window(xn, 10, w))

    lst = ['original signal', 'signal with noise']
    lst.extend(windows)
    plt.legend(lst)
    plt.title("Smoothing a noisy signal")
    plt.ion()
Example #24
Source File: match.py From kog-money with MIT License

def match_template1(template, img, plot=False, method=cv2.TM_SQDIFF_NORMED):
    img = cv2.imread(img, 0).copy()
    template = cv2.imread(template, 0)
    w, h = template.shape[::-1]
    if lib == OPENCV:
        res = cv2.matchTemplate(img, template, method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc
    else:
        result = match_template(img, template)
        ij = np.unravel_index(np.argmax(result), result.shape)
        top_left = ij[::-1]
    bottom_right = (top_left[0] + w, top_left[1] + h)
    if plot:
        cv2.rectangle(img, top_left, bottom_right, 255, 5)
        plt.subplot(121)
        plt.imshow(img)
        plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
        plt.subplot(122)
        plt.imshow(template)
        plt.show()
    return top_left, bottom_right
Example #25
Source File: weights.py From vadnet with GNU Lesser General Public License v3.0

def plot_weights(weights):
    n_filter = weights.shape[-1]
    n_rowcol = int(math.ceil(math.sqrt(n_filter)))
    for i in range(n_filter):
        filter = weights[:, i]
        plt.subplot(n_rowcol, n_rowcol, i+1)
        plt.plot(filter)
    plt.show()
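A hypothetical usage sketch for plot_weights above; the random weight matrix is illustrative only, not from vadnet.

import numpy as np

# A made-up weight matrix: 64 taps per filter, 9 filters, so plot_weights
# lays the columns out on a 3x3 subplot grid.
weights = np.random.randn(64, 9)
plot_weights(weights)  # assumes plot_weights from the example above is in scope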
Example #26
Source File: pytorch_MountainCar-v0.py From Deep-reinforcement-learning-with-pytorch with MIT License

def plot(steps):
    ax = plt.subplot(111)
    ax.cla()
    ax.set_title('Training')
    ax.set_xlabel('Episode')
    ax.set_ylabel('Run Time')
    ax.plot(steps)
    RunTime = len(steps)

    path = './PG_MountainCar-v0/' + 'RunTime' + str(RunTime) + '.jpg'
    if len(steps) % 100 == 0:
        plt.savefig(path)
    plt.pause(0.0000001)
Example #27
Source File: plot_tsne.py From GroundedTranslation with BSD 3-Clause "New" or "Revised" License

def ab_plotter(xcoords, ycoords, images, labels):
    ax = plt.subplot(111)
    ax.set_xlim([-30, 30])
    ax.set_ylim([-30, 30])
    for x, y, i, l in zip(xcoords, ycoords, images, labels):
        arr_hand = i
        imagebox = OffsetImage(arr_hand, zoom=.1)
        xy = [x, y]  # coordinates to position this image
        ab = AnnotationBbox(imagebox, xy,
                            xybox=(10., -10.),
                            xycoords='data',
                            boxcoords="offset points",
                            pad=0.0)
        ax.annotate(ab, xy=xy)
    # rest is just standard matplotlib boilerplate
    ax.grid(True)
    plt.show()
Example #28
Source File: test_ssim.py From DSMnet with Apache License 2.0

def implot(im1, im2, im3, im4, im5, im6, im7, im8):
    m = 4
    n = 2
    ims = [im1, im2, im3, im4, im5, im6, im7, im8]
    for i in range(m*n):
        ax = plt.subplot(m, n, i+1)
        plt.sca(ax)
        plt.imshow(ims[i])
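A hypothetical usage sketch for implot above, with eight random grayscale images (illustrative only, not from the DSMnet tests).

import numpy as np
import matplotlib.pyplot as plt

ims = [np.random.rand(32, 32) for _ in range(8)]
implot(*ims)  # fills the 4x2 grid of subplots; assumes implot from the example above is in scope
plt.show()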
Example #29
Source File: Run_Model.py From Deep-reinforcement-learning-with-pytorch with MIT License

def plot(steps):
    ax = plt.subplot(111)
    ax.cla()
    ax.set_title('Training')
    ax.set_xlabel('Episode')
    ax.set_ylabel('Run Time')
    ax.plot(steps)
    RunTime = len(steps)

    path = './PG_MountainCar-v0/' + 'RunTime' + str(RunTime) + '.jpg'
    if len(steps) % 100 == 0:
        # plt.savefig(path)
        pass
    plt.pause(0.0000001)
Example #30
Source File: compute-vad.py From pykaldi with Apache License 2.0

def show_plot(key, segment_times, sample_freqs, spec, duration, wav_data, vad_feat):
    """This function plots the vad against the signal and the spectrogram.

    Args:
        segment_times: the time intervals acting as the x axis
        sample_freqs: the frequency bins acting as the y axis
        spec: the spectrogram
        duration: duration of the wave file
        wav_data: the wave data
        vad_feat: VAD features
    """
    import matplotlib.pyplot as plt
    import matplotlib.mlab as mlb

    plt.subplot(3, 1, 1)
    plt.pcolormesh(segment_times, sample_freqs, 10 * np.log10(spec), cmap="jet")
    plt.ylabel("Frequency [Hz]")
    plt.xlabel("Time [sec]")

    plt.subplot(3, 1, 2)
    axes = plt.gca()
    axes.set_xlim([0, duration])
    tmp_axis = np.linspace(0, duration, wav_data.shape[0])
    plt.plot(tmp_axis, wav_data / np.abs(np.max(wav_data)))
    plt.xlabel("Time [sec]")

    plt.subplot(3, 1, 3)
    axes = plt.gca()
    axes.set_xlim([0, duration])
    tmp_axis = np.linspace(0, duration, vad_feat.shape[0])
    plt.plot(tmp_axis, vad_feat)
    plt.xlabel("Time [sec]")

    plt.savefig("plots/" + key, bbox_inches="tight")