Python torch.logspace() Examples
The following are 8 code examples of torch.logspace(), drawn from open-source projects. The source file, project, and license are noted above each example.
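As a quick orientation before the project examples: torch.logspace(start, end, steps) returns steps values evenly spaced on a logarithmic scale, running from 10**start to 10**end (a different base can be passed via the base argument). A minimal sketch:

import torch

# Five points spaced evenly in log10, from 10**-2 to 10**2 inclusive.
points = torch.logspace(-2, 2, steps=5)
print(points)  # tensor([1.0000e-02, 1.0000e-01, 1.0000e+00, 1.0000e+01, 1.0000e+02])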
Example #1
Source File: epsilon_greedy.py From rlpyt with MIT License
def make_vec_eps(self, global_B, env_ranks):
    """Construct log-spaced epsilon values and select local assignments
    from the global number of sampler environment instances (for SyncRl
    and AsyncRl)."""
    if (self.eps_final_min is not None and
            self.eps_final_min != self._eps_final_scalar):  # Vector epsilon.
        if self.alternating:  # In FF case, sampler sets agent.alternating.
            assert global_B % 2 == 0
            global_B = global_B // 2  # Env pairs will share epsilon.
            env_ranks = list(set([i // 2 for i in env_ranks]))
        self.eps_init = self._eps_init_scalar * torch.ones(len(env_ranks))
        global_eps_final = torch.logspace(
            torch.log10(torch.tensor(self.eps_final_min)),
            torch.log10(torch.tensor(self._eps_final_scalar)),
            global_B)
        self.eps_final = global_eps_final[env_ranks]
    self.eps_sample = self.eps_init
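The interesting part here is the epsilon schedule: one final epsilon per environment, spaced geometrically between eps_final_min and the scalar final epsilon. A standalone sketch of just that schedule, with hypothetical values (not rlpyt code):

import math
import torch

eps_final_min, eps_final, global_B = 0.01, 1.0, 8  # hypothetical values
global_eps_final = torch.logspace(math.log10(eps_final_min),
                                  math.log10(eps_final), global_B)
# Eight epsilons from 0.01 up to 1.0, geometrically spaced; each sampler
# environment rank then indexes its own value from this vector.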
Example #2
Source File: dissection.py From gandissect with MIT License
def collect_maxiou(outdir, model, segloader, segrunner):
    '''
    Returns maxiou and maxiou_level across the data set, one per layer.
    This is a performance-sensitive function.  Best performance is
    achieved with a counting scheme which assumes a segloader with
    batch_size 1.
    '''
    device = next(model.parameters()).device
    conditional_quantiles, label_fracs = collect_cond_quantiles(
        outdir, model, segloader, segrunner)
    labelcat, categories = segrunner.get_label_and_category_names()
    label_category = [categories.index(c) if c in categories else 0
                      for l, c in labelcat]
    num_labels, num_categories = (len(n) for n in [labelcat, categories])
    label_list = [('label', i) for i in range(num_labels)]
    category_list = [('all',)] if num_categories <= 1 else (
        [('cat', i) for i in range(num_categories)])
    max_iou, max_iou_level, max_iou_quantile = {}, {}, {}
    fracs = torch.logspace(-3, 0, 100)
    progress = default_progress()
    for layer, cq in progress(conditional_quantiles.items(), desc='Maxiou'):
        levels = cq.conditional(('all',)).quantiles(1 - fracs)
        denoms = 1 - cq.collected_normalize(category_list, levels)
        isects = (1 - cq.collected_normalize(label_list, levels)) * label_fracs
        unions = label_fracs + denoms[label_category, :, :] - isects
        iou = isects / unions
        # TODO: erase any for which threshold is bad
        max_iou[layer], level_bucket = iou.max(2)
        max_iou_level[layer] = levels[
            torch.arange(levels.shape[0])[None, :], level_bucket]
        max_iou_quantile[layer] = fracs[level_bucket]
    for layer in model.retained_features():
        numpy.savez(os.path.join(outdir, safe_dir_name(layer), 'max_iou.npz'),
                    max_iou=max_iou[layer].cpu().numpy(),
                    max_iou_level=max_iou_level[layer].cpu().numpy(),
                    max_iou_quantile=max_iou_quantile[layer].cpu().numpy())
    return (max_iou, max_iou_level, max_iou_quantile)
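Here torch.logspace(-3, 0, 100) produces the quantile fractions for the IoU sweep: 100 fractions between 1e-3 and 1, so the grid is much denser near small fractions than a linear grid would be. A hedged standalone illustration (not gandissect code):

import torch

fracs = torch.logspace(-3, 0, 100)        # 0.001 ... 1.0, log-spaced
quantile_levels = 1 - fracs               # as used in cq.quantiles(1 - fracs)
print(fracs[0].item(), fracs[-1].item())  # ~0.001 1.0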
Example #3
Source File: loss.py From oft with MIT License
def log_ap_loss(logvar, sqr_dists, num_thresh=10):
    print('dists', float(sqr_dists.min()), float(sqr_dists.max()))
    print('logvar', float(logvar.min()), float(logvar.max()))

    def hook(grad):
        print('grad', float(grad.min()), float(grad.max()), float(grad.sum()))
    logvar.register_hook(hook)

    variance = torch.exp(logvar).view(-1, 1)
    stdev = torch.sqrt(variance)
    print('stdev', float(stdev.min()), float(stdev.max()))

    max_dist = math.sqrt(float(sqr_dists.max()))
    minvar, maxvar = float(stdev.min()), float(stdev.max())
    thresholds = torch.logspace(
        math.log10(1 / maxvar), math.log10(max_dist / minvar),
        num_thresh).type_as(stdev)
    print('maxdist: {:.2e} minvar: {:.2e} maxvar: {:.2e}'.format(
        max_dist, minvar, maxvar))
    print('thresholds {:.2e} - {:.2e}'.format(
        thresholds.min(), thresholds.max()))

    k_sigma = stdev * thresholds
    k_sigma_sqr = variance * thresholds ** 2

    mask = (sqr_dists.view(-1, 1) < k_sigma_sqr).float()
    erf = torch.erf(k_sigma)
    masked_erf = erf * mask
    masked_exp = stdev * torch.exp(-k_sigma_sqr) * mask

    loss = masked_exp.sum(0) * masked_erf.sum(0) / erf.sum(0)
    loss = (loss[0] + loss[-1]) / 2. + loss[1:-1].sum()
    return -torch.log(loss * CONST / len(variance))
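The threshold grid spans from 1/maxvar (the smallest useful multiple of sigma) up to max_dist/minvar (large enough that k*sigma covers the largest observed distance). A standalone sketch of just that grid, with hypothetical numbers:

import math
import torch

max_dist, minvar, maxvar, num_thresh = 4.0, 0.1, 2.0, 10  # hypothetical
thresholds = torch.logspace(math.log10(1 / maxvar),
                            math.log10(max_dist / minvar), num_thresh)
# Multipliers k such that k * sigma sweeps the full range of distances.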
Example #4
Source File: gelpaths.py From torch-gel with MIT License
def compute_ls_grid(As, y, sns_vec, m, ks, n_ls, l_eps, dtype):
    """Compute l values for each given k and return a dictionary mapping k
    to a list (in decreasing order) of lambda values.

    Arguments have the same meaning as in gel_paths2. sns_vec is a vector
    of sns_j values as opposed to the matrix computed in gel_paths2.
    """
    ls_grid = {}
    # The bound is given by max{||A_j'@(y - b_0)||/(m*sqrt{n_j}*k)} where
    # b_0 = 1'@y/m. So most things can be precomputed.
    l_max_b_0 = y.mean()
    l_max_unscaled = max(
        (A_j.t() @ (y - l_max_b_0)).norm(p=2) / (m * sns_j)
        for A_j, sns_j in zip(As, sns_vec)
    )
    for k in ks:
        l_max = l_max_unscaled / k
        if n_ls == 1:
            ls_grid[k] = [l_max]
        else:
            l_min = l_max * l_eps
            ls = torch.logspace(
                math.log10(l_min), math.log10(l_max), steps=n_ls, dtype=dtype
            )
            ls = sorted([l.item() for l in ls], reverse=True)
            ls_grid[k] = ls
    return ls_grid
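For each k, the lambda grid runs from the bound l_max down to l_max * l_eps, log-spaced and returned largest first. A hedged sketch with hypothetical values:

import math
import torch

l_max, l_eps, n_ls = 10.0, 1e-3, 5  # hypothetical values
ls = torch.logspace(math.log10(l_max * l_eps), math.log10(l_max), steps=n_ls)
ls = sorted([l.item() for l in ls], reverse=True)
# [10.0, ~1.78, ~0.316, ~0.056, 0.01] -- decreasing regularization strengths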
Example #5
Source File: create.py From tntorch with GNU Lesser General Public License v3.0
def logspace(*args, **kwargs):
    """
    Creates a 1D :class:`Tensor` with logarithmically spaced values (see
    PyTorch's `logspace`).

    :param args:
    :param kwargs:

    :return: a 1D :class:`Tensor`
    """
    return tn.Tensor([torch.logspace(*args, **kwargs)[None, :, None]])
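Because the wrapper forwards *args and **kwargs straight to torch.logspace, it is called the same way. A hedged usage sketch, assuming tntorch is imported as tn:

import tntorch as tn

t = tn.logspace(-2, 2, steps=5)  # 1D tntorch Tensor holding 10**-2 ... 10**2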
Example #6
Source File: Conceptor.py From EchoTorch with GNU General Public License v3.0
def plot_delta_measure(self, start, end, steps=50):
    """
    Plot delta measure
    :param start:
    :param end:
    :return:
    """
    # Gamma values
    gamma_values = torch.logspace(start=start, end=end, steps=steps)

    # Log10 of gamma values
    gamma_log_values = torch.log10(gamma_values)

    # Delta measures
    C_norms = torch.zeros(steps)
    delta_scores = torch.zeros(steps)

    # For each gamma measure
    for i, gamma in enumerate(gamma_values):
        delta_scores[i], C_norms[i] = self.delta_measure(float(gamma), epsilon=0.1)
    # end for

    # Plot
    plt.plot(gamma_log_values.numpy(), delta_scores.numpy())
    plt.plot(gamma_log_values.numpy(), C_norms.numpy())
    plt.show()
# end plot_delta_measure
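Note that start and end here are base-10 exponents, so a call such as plot_delta_measure(-4, 4) sweeps gamma from 1e-4 to 1e4 over 50 log-spaced points. A minimal sketch of just the sweep:

import torch

gammas = torch.logspace(start=-4, end=4, steps=50)  # 1e-4 ... 1e4
log_gammas = torch.log10(gammas)  # evenly spaced exponents for the x-axis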
Example #7
Source File: algorithms.py From DeeperInverseCompositionalAlgorithm with MIT License
def __regularize_residual_volume(self, JtJ, Jt, JtR, weights, pose,
                                 invD0, invD1, x0, x1, K, sample_range):
    """ Regularize the approximated Hessian with a residual volume.

    :param JtJ, the approximated Hessian JtJ
    :param Jt, the transposed Jacobian
    :param JtR, the right-hand side residual
    :param weights, the weight matrix
    :param pose, the initial estimated pose
    :param invD0, the template inverse depth map
    :param invD1, the image inverse depth map
    :param K, the intrinsic parameters
    :param x0, the template feature map
    :param x1, the image feature map
    :param sample_range, the number of samples

    ---------------
    :return the damped Hessian matrix
    """
    # the following currently supports only a single scale
    JtR_volumes = []

    B, C, H, W = x0.shape
    px, py = geometry.generate_xy_grid(B, H, W, K)

    diag_mask = torch.eye(6).view(1, 6, 6).type_as(JtJ)
    diagJtJ = diag_mask * JtJ
    traceJtJ = torch.sum(diagJtJ, (2, 1))
    epsilon = (traceJtJ * 1e-6).view(B, 1, 1) * diag_mask
    n = sample_range

    lambdas = torch.logspace(-5, 5, n).type_as(JtJ)

    for s in range(n):
        # the epsilon is to prevent the matrix from becoming too ill-conditioned
        D = lambdas[s] * diagJtJ + epsilon
        Hessian = JtJ + D
        pose_s = inverse_update_pose(Hessian, JtR, pose)
        res_s, _ = compute_warped_residual(pose_s, invD0, invD1,
                                           x0, x1, px, py, K)
        JtR_s = torch.bmm(Jt, (weights * res_s).view(B, -1, 1))
        JtR_volumes.append(JtR_s)

    JtR_flat = torch.cat(tuple(JtR_volumes), dim=2).view(B, -1)
    JtJ_flat = JtJ.view(B, -1)

    damp_est = self.net(torch.cat((JtR_flat, JtJ_flat), dim=1))
    R = diag_mask * damp_est.view(B, 6, 1) + epsilon  # also lift-up

    return JtJ + R
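The logspace call builds the candidate Levenberg-Marquardt damping factors: sample_range multipliers covering ten orders of magnitude, one damped linear system per sample. A standalone sketch of just that grid:

import torch

n = 11  # hypothetical sample_range
lambdas = torch.logspace(-5, 5, n)  # 1e-5 ... 1e5, one LM damping per sample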
Example #8
Source File: anomalyDetector.py From RNN-Time-series-Anomaly-Detection with Apache License 2.0
def get_precision_recall(args, score, label, num_samples, beta=1.0,
                         sampling='log', predicted_score=None):
    '''
    :param args:
    :param score: anomaly scores
    :param label: anomaly labels
    :param num_samples: the number of threshold samples
    :param beta:
    :param sampling:
    :return:
    '''
    if predicted_score is not None:
        score = score - torch.FloatTensor(predicted_score).squeeze().to(args.device)

    maximum = score.max()
    if sampling == 'log':
        # Sample thresholds logarithmically: the sampled thresholds are
        # logarithmically spaced between :math:`10^{start}` and :math:`10^{end}`.
        th = torch.logspace(0, torch.log10(torch.tensor(maximum)),
                            num_samples).to(args.device)
    else:
        # Sample thresholds equally: the sampled thresholds are equally
        # spaced points between :attr:`start` and :attr:`end`.
        th = torch.linspace(0, maximum, num_samples).to(args.device)

    precision = []
    recall = []

    for i in range(len(th)):
        anomaly = (score > th[i]).float()
        idx = anomaly * 2 + label
        tn = (idx == 0.0).sum().item()  # true negative
        fn = (idx == 1.0).sum().item()  # false negative
        fp = (idx == 2.0).sum().item()  # false positive
        tp = (idx == 3.0).sum().item()  # true positive

        p = tp / (tp + fp + 1e-7)
        r = tp / (tp + fn + 1e-7)

        if p != 0 and r != 0:
            precision.append(p)
            recall.append(r)

    precision = torch.FloatTensor(precision)
    recall = torch.FloatTensor(recall)

    f1 = (1 + beta ** 2) * (precision * recall).div(beta ** 2 * precision
                                                    + recall + 1e-7)

    return precision, recall, f1
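The two sampling modes differ only in how the thresholds are spaced. A hedged standalone comparison with a hypothetical maximum score:

import math
import torch

maximum = 100.0  # hypothetical maximum anomaly score
th_log = torch.logspace(0, math.log10(maximum), 5)  # 1.0, ~3.16, 10.0, ~31.6, 100.0
th_lin = torch.linspace(0, maximum, 5)              # 0.0, 25.0, 50.0, 75.0, 100.0
# Log sampling concentrates thresholds near the small scores, where most of
# the precision/recall trade-off happens.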