Python torch.randperm() Examples
The following are 30 code examples of torch.randperm(), drawn from open-source projects. The source file, project, and license are noted above each example.
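As a quick refresher before the examples: torch.randperm(n) returns a random permutation of the integers from 0 to n - 1, and slicing that permutation is the standard idiom for sampling without replacement. A minimal sketch (tensor shapes are illustrative):

import torch

# A random permutation of 0..9; every value appears exactly once.
perm = torch.randperm(10)

# Shuffle the rows of a batch by indexing with the permutation.
x = torch.randn(10, 4)
shuffled = x[perm]

# Sample 3 distinct indices, i.e. sampling without replacement.
sample = torch.randperm(10)[:3]

# A reproducible permutation via an explicitly seeded generator.
g = torch.Generator()
g.manual_seed(0)
perm_fixed = torch.randperm(10, generator=g)

Both idioms recur throughout the examples below.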
Example #1
Source File: sampler.py From seamseg with BSD 3-Clause "New" or "Revised" License
def __iter__(self):
    batches = self._generate_batches()

    g = torch.Generator()
    g.manual_seed(self._epoch)
    indices = list(torch.randperm(len(batches), generator=g))

    # add extra samples to make it evenly divisible
    indices += indices[:(self.num_batches * self.num_replicas - len(indices))]
    assert len(indices) == self.num_batches * self.num_replicas

    # subsample
    offset = self.num_batches * self.rank
    indices = indices[offset:offset + self.num_batches]
    assert len(indices) == self.num_batches

    for idx in indices:
        batch = sorted(batches[idx], key=lambda i: i["ar"])
        batch = [i["id"] for i in batch]
        yield batch
Example #2
Source File: random_sampler.py From mmdetection with Apache License 2.0
def random_choice(self, gallery, num):
    """Randomly select some elements from the gallery.

    If `gallery` is a Tensor, the returned indices will be a Tensor;
    if `gallery` is an ndarray or list, the returned indices will be an ndarray.

    Args:
        gallery (Tensor | ndarray | list): indices pool.
        num (int): expected sample num.

    Returns:
        Tensor or ndarray: sampled indices.
    """
    assert len(gallery) >= num

    is_tensor = isinstance(gallery, torch.Tensor)
    if not is_tensor:
        gallery = torch.tensor(
            gallery, dtype=torch.long, device=torch.cuda.current_device())
    perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]
    rand_inds = gallery[perm]
    if not is_tensor:
        rand_inds = rand_inds.cpu().numpy()
    return rand_inds
Example #3
Source File: random.py From pytorch_geometric with MIT License
def barabasi_albert_graph(num_nodes, num_edges):
    r"""Returns the :obj:`edge_index` of a Barabasi-Albert preferential
    attachment model, where a graph of :obj:`num_nodes` nodes grows by
    attaching new nodes with :obj:`num_edges` edges that are preferentially
    attached to existing nodes with high degree.

    Args:
        num_nodes (int): The number of nodes.
        num_edges (int): The number of edges from a new node to existing nodes.
    """
    assert num_edges > 0 and num_edges < num_nodes

    row, col = torch.arange(num_edges), torch.randperm(num_edges)

    for i in range(num_edges, num_nodes):
        row = torch.cat([row, torch.full((num_edges, ), i, dtype=torch.long)])
        choice = np.random.choice(torch.cat([row, col]).numpy(), num_edges)
        col = torch.cat([col, torch.from_numpy(choice)])

    edge_index = torch.stack([row, col], dim=0)
    edge_index, _ = remove_self_loops(edge_index)
    edge_index = to_undirected(edge_index, num_nodes)

    return edge_index
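A minimal usage sketch for the function above, assuming the imports its body relies on (numpy as np, plus remove_self_loops and to_undirected from torch_geometric.utils) are in scope:

# Grow a 50-node graph, each new node attaching to 3 existing nodes.
edge_index = barabasi_albert_graph(num_nodes=50, num_edges=3)
print(edge_index.size())  # torch.Size([2, E]) for the undirected edge list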
Example #4
Source File: test_glob.py From pytorch_geometric with MIT License
def test_permuted_global_pool():
    N_1, N_2 = 4, 6
    x = torch.randn(N_1 + N_2, 4)
    batch = torch.cat([torch.zeros(N_1), torch.ones(N_2)]).to(torch.long)
    perm = torch.randperm(N_1 + N_2)

    px = x[perm]
    pbatch = batch[perm]
    px1 = px[pbatch == 0]
    px2 = px[pbatch == 1]

    out = global_add_pool(px, pbatch)
    assert out.size() == (2, 4)
    assert torch.allclose(out[0], px1.sum(dim=0))
    assert torch.allclose(out[1], px2.sum(dim=0))

    out = global_mean_pool(px, pbatch)
    assert out.size() == (2, 4)
    assert torch.allclose(out[0], px1.mean(dim=0))
    assert torch.allclose(out[1], px2.mean(dim=0))

    out = global_max_pool(px, pbatch)
    assert out.size() == (2, 4)
    assert torch.allclose(out[0], px1.max(dim=0)[0])
    assert torch.allclose(out[1], px2.max(dim=0)[0])
Example #5
Source File: model_utils.py From medicaldetectiontoolkit with Apache License 2.0
def shem(roi_probs_neg, negative_count, ohem_poolsize):
    """
    Stochastic hard example mining: from a list of indices (referring to non-matched predictions),
    determine a pool of highest scoring (worst false positives) of size negative_count * ohem_poolsize.
    Then, sample n (= negative_count) predictions from this pool as negative examples for the loss.
    :param roi_probs_neg: tensor of shape (n_predictions, n_classes).
    :param negative_count: int.
    :param ohem_poolsize: int.
    :return: (negative_count). Indices refer to the positions in roi_probs_neg. If the pool is
        smaller than expected due to limited negative proposals being available, this function
        returns fewer than negative_count sampled indices without throwing an error.
    """
    # sort according to highest foreground score.
    probs, order = roi_probs_neg[:, 1:].max(1)[0].sort(descending=True)
    select = torch.tensor((ohem_poolsize * int(negative_count), order.size()[0])).min().int()
    pool_indices = order[:select]
    rand_idx = torch.randperm(pool_indices.size()[0])
    return pool_indices[rand_idx[:negative_count].cuda()]
Example #6
Source File: distributed.py From SegmenTron with Apache License 2.0
def __iter__(self):
    if self.shuffle:
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)
        indices = torch.randperm(len(self.dataset), generator=g).tolist()
    else:
        indices = torch.arange(len(self.dataset)).tolist()

    # add extra samples to make it evenly divisible
    indices += indices[:(self.total_size - len(indices))]
    assert len(indices) == self.total_size

    # subsample
    offset = self.num_samples * self.rank
    indices = indices[offset:offset + self.num_samples]
    assert len(indices) == self.num_samples

    return iter(indices)
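This epoch-seeded shuffle recurs in several samplers below: because every replica seeds its own torch.Generator with the same epoch number, all replicas compute the identical permutation before slicing out their own shard. A standalone sketch of that property:

import torch

def epoch_permutation(epoch, n):
    # Every process calling this with the same epoch gets the same order.
    g = torch.Generator()
    g.manual_seed(epoch)
    return torch.randperm(n, generator=g)

assert torch.equal(epoch_permutation(3, 100), epoch_permutation(3, 100))
# A different epoch yields a different order (with overwhelming probability).
assert not torch.equal(epoch_permutation(3, 100), epoch_permutation(4, 100))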
Example #7
Source File: trainer.py From treelstm.pytorch with MIT License
def train(self, dataset):
    self.model.train()
    self.optimizer.zero_grad()
    total_loss = 0.0
    indices = torch.randperm(len(dataset), dtype=torch.long, device='cpu')
    for idx in tqdm(range(len(dataset)), desc='Training epoch ' + str(self.epoch + 1)):
        ltree, linput, rtree, rinput, label = dataset[indices[idx]]
        target = utils.map_label_to_target(label, dataset.num_classes)
        linput, rinput = linput.to(self.device), rinput.to(self.device)
        target = target.to(self.device)
        output = self.model(ltree, linput, rtree, rinput)
        loss = self.criterion(output, target)
        total_loss += loss.item()
        loss.backward()
        if idx % self.args.batchsize == 0 and idx > 0:
            self.optimizer.step()
            self.optimizer.zero_grad()
    self.epoch += 1
    return total_loss / len(dataset)

# helper function for testing
Example #8
Source File: score_hlr_sampler.py From mmdetection with Apache License 2.0
def random_choice(gallery, num):
    """Randomly select some elements from the gallery.

    If `gallery` is a Tensor, the returned indices will be a Tensor;
    if `gallery` is an ndarray or list, the returned indices will be an ndarray.

    Args:
        gallery (Tensor | ndarray | list): indices pool.
        num (int): expected sample num.

    Returns:
        Tensor or ndarray: sampled indices.
    """
    assert len(gallery) >= num

    is_tensor = isinstance(gallery, torch.Tensor)
    if not is_tensor:
        gallery = torch.tensor(
            gallery, dtype=torch.long, device=torch.cuda.current_device())
    perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]
    rand_inds = gallery[perm]
    if not is_tensor:
        rand_inds = rand_inds.cpu().numpy()
    return rand_inds
Example #9
Source File: utils.py From hidden-networks with Apache License 2.0
def one_batch_dataset(dataset, batch_size):
    print("==> Grabbing a single batch")

    perm = torch.randperm(len(dataset))

    one_batch = [dataset[idx.item()] for idx in perm[:batch_size]]

    class _OneBatchWrapper(Dataset):
        def __init__(self):
            self.batch = one_batch

        def __getitem__(self, index):
            return self.batch[index]

        def __len__(self):
            return len(self.batch)

    return _OneBatchWrapper()
Example #10
Source File: normalize_rotation.py From pytorch_geometric with MIT License
def __call__(self, data):
    pos = data.pos

    if self.max_points > 0 and pos.size(0) > self.max_points:
        perm = torch.randperm(pos.size(0))
        pos = pos[perm[:self.max_points]]

    pos = pos - pos.mean(dim=0, keepdim=True)
    C = torch.matmul(pos.t(), pos)
    # Note: torch.eig is deprecated in newer PyTorch; torch.linalg.eigh is the
    # modern equivalent for a symmetric matrix such as C.
    e, v = torch.eig(C, eigenvectors=True)  # v[:,j] is j-th eigenvector

    data.pos = torch.matmul(data.pos, v)

    if 'norm' in data:
        data.norm = F.normalize(torch.matmul(data.norm, v))

    return data
Example #11
Source File: utils.py From Semantic-Aware-Scene-Recognition with MIT License
def mixup_data(x, y, alpha=1.0, use_cuda=True):
    '''Returns mixed inputs, pairs of targets, and lambda'''
    if alpha > 0:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1

    batch_size = x.size()[0]
    if use_cuda:
        index = torch.randperm(batch_size).cuda()
    else:
        index = torch.randperm(batch_size)

    mixed_x = lam * x + (1 - lam) * x[index, :]
    y_a, y_b = y, y[index]
    return mixed_x, y_a, y_b, lam
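A usage sketch for mixup_data as defined above (batch contents are illustrative, and numpy must be imported as np, as the function body requires); the permutation pairs each sample with a random partner from the same batch:

import torch

x = torch.randn(8, 3, 32, 32)   # 8 RGB images
y = torch.randint(0, 10, (8,))  # integer class labels

mixed_x, y_a, y_b, lam = mixup_data(x, y, alpha=1.0, use_cuda=False)
# The training loss is then typically combined as:
#   loss = lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)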
Example #12
Source File: distributed.py From Res2Net-maskrcnn with MIT License
def __iter__(self):
    if self.shuffle:
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)
        indices = torch.randperm(len(self.dataset), generator=g).tolist()
    else:
        indices = torch.arange(len(self.dataset)).tolist()

    # add extra samples to make it evenly divisible
    indices += indices[:(self.total_size - len(indices))]
    assert len(indices) == self.total_size

    # subsample
    offset = self.num_samples * self.rank
    indices = indices[offset:offset + self.num_samples]
    assert len(indices) == self.num_samples

    return iter(indices)
Example #13
Source File: sampler.py From AerialDetection with Apache License 2.0
def __iter__(self):
    # deterministically shuffle based on epoch
    if self.shuffle:
        g = torch.Generator()
        g.manual_seed(self.epoch)
        indices = torch.randperm(len(self.dataset), generator=g).tolist()
    else:
        indices = torch.arange(len(self.dataset)).tolist()

    # add extra samples to make it evenly divisible
    indices += indices[:(self.total_size - len(indices))]
    assert len(indices) == self.total_size

    # subsample
    indices = indices[self.rank:self.total_size:self.num_replicas]
    assert len(indices) == self.num_samples

    return iter(indices)
Example #14
Source File: aug_color-t1.py From DSMnet with Apache License 2.0
def __call__(self, img):
    if self.transforms is None:
        return img
    group = min(self.group, img.shape[0] // 3)
    same_group = self.same_group and (group > 1)
    range_img = group * 3
    if same_group:
        self.order = torch.randperm(len(self.transforms))
        for i in self.order:
            img[:range_img] = self.transforms[i](img[:range_img])
    else:
        for grp in range(group):
            idx = 3 * grp
            self.order = torch.randperm(len(self.transforms))
            for i in self.order:
                img[idx:idx + 3] = self.transforms[i](img[idx:idx + 3])
    img[:range_img] = img[:range_img].clamp(0, 1)
    return img
Example #15
Source File: aug_color.py From DSMnet with Apache License 2.0
def __call__(self, img):
    if self.transforms is None:
        return img
    group = min(self.group, img.shape[0] // 3)
    same_group = self.same_group and (group > 1)
    range_img = group * 3
    if same_group:
        self.order = torch.randperm(len(self.transforms))
        for i in self.order:
            img[:range_img] = self.transforms[i](img[:range_img])
    else:
        for grp in range(group):
            idx = 3 * grp
            self.order = torch.randperm(len(self.transforms))
            for i in self.order:
                img[idx:idx + 3] = self.transforms[i](img[idx:idx + 3])
    img[:range_img] = img[:range_img].clamp(0, 1)
    return img
Example #16
Source File: repeat_factor.py From Parsing-R-CNN with MIT License
def __iter__(self):
    if self.shuffle:
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)
        indices = self._get_epoch_indices(g)
        randperm = torch.randperm(len(indices), generator=g).tolist()
        indices = indices[randperm]
    else:
        g = torch.Generator()
        g.manual_seed(self.epoch)
        indices = self._get_epoch_indices(g)
        # indices = torch.arange(len(self.dataset)).tolist()

    # with repeat-factor balancing, len(indices) may differ from the dataset image count
    self.total_size = len(indices)
    logging_rank('balance sample total_size: {}'.format(self.total_size),
                 distributed=1, local_rank=self.rank)

    # subsample
    self.num_samples = int(len(indices) / self.num_replicas)
    offset = self.num_samples * self.rank
    indices = indices[offset:offset + self.num_samples]
    assert len(indices) == self.num_samples

    return iter(indices)
Example #17
Source File: distributed.py From Parsing-R-CNN with MIT License
def __iter__(self):
    if self.shuffle:
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)
        indices = torch.randperm(len(self.dataset), generator=g).tolist()
    else:
        indices = torch.arange(len(self.dataset)).tolist()

    # add extra samples to make it evenly divisible
    indices += indices[:(self.total_size - len(indices))]
    assert len(indices) == self.total_size

    # subsample
    offset = self.num_samples * self.rank
    indices = indices[offset:offset + self.num_samples]
    assert len(indices) == self.num_samples

    return iter(indices)
Example #18
Source File: sampler.py From LEDNet with MIT License
def __iter__(self):
    if self.shuffle:
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)
        indices = torch.randperm(len(self.dataset), generator=g).tolist()
    else:
        indices = torch.arange(len(self.dataset)).tolist()

    # add extra samples to make it evenly divisible
    indices += indices[:(self.total_size - len(indices))]
    assert len(indices) == self.total_size

    # subsample
    offset = self.num_samples * self.rank
    indices = indices[offset:offset + self.num_samples]
    assert len(indices) == self.num_samples

    return iter(indices)
Example #19
Source File: distributed.py From EMANet with GNU General Public License v3.0
def __iter__(self):
    # deterministically shuffle based on epoch
    g = torch.Generator()
    g.manual_seed(self.epoch)
    indices = list(torch.randperm(len(self.dataset), generator=g))

    # add extra samples to make it evenly divisible
    indices += indices[:(self.total_size - len(indices))]
    assert len(indices) == self.total_size

    # subsample
    offset = self.num_samples * self.rank
    indices = indices[offset:offset + self.num_samples]
    assert len(indices) == self.num_samples

    return iter(indices)
Example #20
Source File: fixed_points.py From pytorch_geometric with MIT License
def __call__(self, data):
    num_nodes = data.num_nodes

    if self.replace:
        choice = np.random.choice(num_nodes, self.num, replace=True)
        choice = torch.from_numpy(choice).to(torch.long)
    elif not self.allow_duplicates:
        choice = torch.randperm(num_nodes)[:self.num]
    else:
        choice = torch.cat([
            torch.randperm(num_nodes)
            for _ in range(math.ceil(self.num / num_nodes))
        ], dim=0)[:self.num]

    for key, item in data:
        if bool(re.search('edge', key)):
            continue
        if torch.is_tensor(item) and item.size(0) == num_nodes:
            data[key] = item[choice]

    return data
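The randperm branches above are worth isolating: a single permutation cannot contain duplicates, so the allow_duplicates case concatenates several permutations until enough indices exist. A standalone sketch of the two branches:

import math
import torch

num_nodes, num = 5, 8

# Without duplicates (only possible while the sample size <= num_nodes).
choice = torch.randperm(num_nodes)[:3]

# With duplicates: concatenate permutations, then truncate to num indices.
choice = torch.cat([
    torch.randperm(num_nodes) for _ in range(math.ceil(num / num_nodes))
], dim=0)[:num]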
Example #21
Source File: distributed.py From R2CNN.pytorch with MIT License
def __iter__(self):
    if self.shuffle:
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)
        indices = torch.randperm(len(self.dataset), generator=g).tolist()
    else:
        indices = torch.arange(len(self.dataset)).tolist()

    # add extra samples to make it evenly divisible
    indices += indices[:(self.total_size - len(indices))]
    assert len(indices) == self.total_size

    # subsample
    offset = self.num_samples * self.rank
    indices = indices[offset:offset + self.num_samples]
    assert len(indices) == self.num_samples

    return iter(indices)
Example #22
Source File: distributed_sampler.py From mmdetection with Apache License 2.0
def __iter__(self):
    # deterministically shuffle based on epoch
    if self.shuffle:
        g = torch.Generator()
        g.manual_seed(self.epoch)
        indices = torch.randperm(len(self.dataset), generator=g).tolist()
    else:
        indices = torch.arange(len(self.dataset)).tolist()

    # add extra samples to make it evenly divisible
    indices += indices[:(self.total_size - len(indices))]
    assert len(indices) == self.total_size

    # subsample
    indices = indices[self.rank:self.total_size:self.num_replicas]
    assert len(indices) == self.num_samples

    return iter(indices)
Example #23
Source File: grid_roi_head.py From mmdetection with Apache License 2.0
def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
                        img_metas):
    """Run forward function and calculate loss for box head in training."""
    bbox_results = super(GridRoIHead,
                         self)._bbox_forward_train(x, sampling_results,
                                                   gt_bboxes, gt_labels,
                                                   img_metas)

    # Grid head forward and loss
    sampling_results = self._random_jitter(sampling_results, img_metas)
    pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])

    # GN in head does not support zero shape input
    if pos_rois.shape[0] == 0:
        return bbox_results

    grid_feats = self.grid_roi_extractor(
        x[:self.grid_roi_extractor.num_inputs], pos_rois)
    if self.with_shared_head:
        grid_feats = self.shared_head(grid_feats)
    # Accelerate training
    max_sample_num_grid = self.train_cfg.get('max_num_grid', 192)
    sample_idx = torch.randperm(
        grid_feats.shape[0])[:min(grid_feats.shape[0], max_sample_num_grid)]
    grid_feats = grid_feats[sample_idx]
    grid_pred = self.grid_head(grid_feats)

    grid_targets = self.grid_head.get_targets(sampling_results, self.train_cfg)
    grid_targets = grid_targets[sample_idx]

    loss_grid = self.grid_head.loss(grid_pred, grid_targets)

    bbox_results['loss_bbox'].update(loss_grid)
    return bbox_results
Example #24
Source File: sampler.py From EMANet with GNU General Public License v3.0
def __iter__(self):
    return iter(torch.randperm(len(self.data_source)).long())
Example #25
Source File: main.py From kaggle-rcic-1st with MIT License
def transform_input(args, X, S, Y):
    """Apply mixup, cutmix, and label-smoothing"""

    Y = smooth_label(args, Y)

    if args.mixup != 0 or args.cutmix != 0:
        perm = torch.randperm(args.batch_size).cuda()

    if args.mixup != 0:
        coeffs = torch.tensor(np.random.beta(args.mixup, args.mixup, args.batch_size),
                              dtype=torch.float32).cuda()
        X = coeffs.view(-1, 1, 1, 1) * X + (1 - coeffs.view(-1, 1, 1, 1)) * X[perm,]
        S = coeffs.view(-1, 1) * S + (1 - coeffs.view(-1, 1)) * S[perm,]
        Y = coeffs.view(-1, 1) * Y + (1 - coeffs.view(-1, 1)) * Y[perm,]

    if args.cutmix != 0:
        img_height, img_width = X.size()[2:]
        lambd = np.random.beta(args.cutmix, args.cutmix)
        column = np.random.uniform(0, img_width)
        row = np.random.uniform(0, img_height)
        height = (1 - lambd) ** 0.5 * img_height
        width = (1 - lambd) ** 0.5 * img_width
        r1 = round(max(0, row - height / 2))
        r2 = round(min(img_height, row + height / 2))
        c1 = round(max(0, column - width / 2))
        c2 = round(min(img_width, column + width / 2))
        if r1 < r2 and c1 < c2:
            X[:, :, r1:r2, c1:c2] = X[perm, :, r1:r2, c1:c2]

            lambd = 1 - (r2 - r1) * (c2 - c1) / (img_height * img_width)
            S = S * lambd + S[perm] * (1 - lambd)
            Y = Y * lambd + Y[perm] * (1 - lambd)

    return X, S, Y
Example #26
Source File: LSTM.py From Action-Recognition with MIT License
def train(model, num_epoch, num_iter, lr=1e-3, rec_interval=2, disp_interval=10):
    optimizer = optim.Adam(model.parameters(), lr)
    loss_values = []
    rec_step = 0
    for eph in range(num_epoch):
        print('epoch {} starting ...'.format(eph))
        avg_loss = 0
        n_samples = 0
        randpermed = torch.randperm(trainingData.size()[0])[:num_iter]
        for i in range(num_iter):
            model.hidden = (model.hidden[0].detach(), model.hidden[1].detach())
            model.zero_grad()
            j = randpermed[i]
            X, Y = trainingData[j, :, :, :].view(300, 1, 75), labels[j, :]
            n_samples += len(X)
            X = autograd.Variable(X)
            Y = autograd.Variable(Y.view(1))
            y_hat = model(X)
            loss = F.cross_entropy(y_hat, Y)
            # note: loss.data[0] and autograd.Variable are pre-0.4 PyTorch;
            # newer versions use loss.item() on plain tensors.
            avg_loss += loss.data[0]
            if i % disp_interval == 0:
                print('epoch: %d iterations: %d loss :%g' % (eph, i, loss.data[0]))
            if rec_step % rec_interval == 0:
                loss_values.append(loss.data[0])
            loss.backward()
            optimizer.step()
            rec_step += 1
        avg_loss /= n_samples
        # evaluating model accuracy
        # TrainAcc()
        print('epoch: {} <====train track===> avg_loss: {} \n'.format(eph, avg_loss))
    return loss_values

# l = train(model0, 10, 100, 2, 20)
Example #27
Source File: temporal_dim.py From atari-representation-learning with MIT License
def do_one_epoch(self, epoch, episodes):
    mode = "train" if self.encoder.training and self.classifier1.training else "val"
    epoch_loss, accuracy, steps = 0., 0., 0
    accuracy1 = 0.
    epoch_loss1 = 0.
    data_generator = self.generate_batch(episodes)
    for x_t, x_tprev, x_that, ts, thats in data_generator:
        f_t_maps, f_t_prev_maps = self.encoder(x_t, fmaps=True), self.encoder(x_tprev, fmaps=True)
        f_t_hat_maps = self.encoder(x_that, fmaps=True)

        # Loss 1: Global at time t, f5 patches at time t-1
        f_t, f_t_prev = f_t_maps['out'], f_t_prev_maps['f5']
        f_t_hat = f_t_hat_maps['f5']
        f_t = f_t.unsqueeze(1).unsqueeze(1).expand(-1, f_t_prev.size(1), f_t_prev.size(2),
                                                   self.encoder.hidden_size)

        target = torch.cat((torch.ones_like(f_t[:, :, :, 0]),
                            torch.zeros_like(f_t[:, :, :, 0])), dim=0).to(self.device)

        x1, x2 = torch.cat([f_t, f_t], dim=0), torch.cat([f_t_prev, f_t_hat], dim=0)
        shuffled_idxs = torch.randperm(len(target))
        x1, x2, target = x1[shuffled_idxs], x2[shuffled_idxs], target[shuffled_idxs]

        self.optimizer.zero_grad()
        loss1 = self.loss_fn(self.classifier1(x1, x2).squeeze(), target)
        if mode == "train":
            loss1.backward()
            self.optimizer.step()

        epoch_loss1 += loss1.detach().item()
        preds1 = torch.sigmoid(self.classifier1(x1, x2).squeeze())
        accuracy1 += calculate_accuracy(preds1, target)
        steps += 1

    self.log_results(epoch, epoch_loss1 / steps, accuracy1 / steps, prefix=mode)
    if mode == "val":
        self.early_stopper(accuracy1 / steps, self.encoder)
Example #28
Source File: LSTM2.py From Action-Recognition with MIT License
def train(model, num_epoch, num_iter, lr=1e-3, rec_interval=2, disp_interval=10):
    optimizer = optim.Adam(model.parameters(), lr)
    loss_values = []
    rec_step = 0
    for eph in range(num_epoch):
        print('epoch {} starting ...'.format(eph))
        avg_loss = 0
        n_samples = 0
        randpermed = torch.randperm(trainingData.size()[0])[:num_iter]
        for i in range(num_iter):
            model.hidden = (model.hidden[0].detach(), model.hidden[1].detach())
            model.zero_grad()
            j = randpermed[i]
            X, Y = trainingData[j, :, :, :].view(300, 1, 75), labels[j, :]
            n_samples += len(X)
            X = autograd.Variable(X)
            Y = autograd.Variable(Y.view(1))
            y_hat = model(X)
            loss = F.cross_entropy(y_hat, Y)
            # note: loss.data[0] and autograd.Variable are pre-0.4 PyTorch;
            # newer versions use loss.item() on plain tensors.
            avg_loss += loss.data[0]
            if i % disp_interval == 0:
                print('epoch: %d iterations: %d loss :%g' % (eph, i, loss.data[0]))
            if rec_step % rec_interval == 0:
                loss_values.append(loss.data[0])
            loss.backward()
            optimizer.step()
            rec_step += 1
        avg_loss /= n_samples
        # evaluating model accuracy
        # TrainAcc()
        print('epoch: {} <====train track===> avg_loss: {} \n'.format(eph, avg_loss))
    return loss_values

# l = train(model0, 10, 100, 2, 20)
Example #29
Source File: dataset.py From EMANet with GNU General Public License v3.0
def random_split(dataset, lengths):
    """
    Randomly split a dataset into non-overlapping new datasets of given lengths.

    Arguments:
        dataset (Dataset): Dataset to be split
        lengths (iterable): lengths of splits to be produced
    """
    if sum(lengths) != len(dataset):
        raise ValueError("Sum of input lengths does not equal the length of the input dataset!")

    indices = randperm(sum(lengths))
    return [Subset(dataset, indices[offset - length:offset])
            for offset, length in zip(_accumulate(lengths), lengths)]
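This matches the random_split utility that ships in torch.utils.data. A minimal usage sketch against the function above (TensorDataset is used purely for illustration):

import torch
from torch.utils.data import TensorDataset

full = TensorDataset(torch.randn(100, 8), torch.randint(0, 2, (100,)))
train_set, val_set = random_split(full, [80, 20])
assert len(train_set) == 80 and len(val_set) == 20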
Example #30
Source File: sampler.py From EMANet with GNU General Public License v3.0
def __iter__(self):
    return (self.indices[i] for i in torch.randperm(len(self.indices)))
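This is the same idea as torch.utils.data.SubsetRandomSampler: iterate over a fixed index list in a freshly permuted order on every pass. A minimal sketch of how such a sampler plugs into a DataLoader (the dataset and index list are illustrative):

import torch
from torch.utils.data import DataLoader, SubsetRandomSampler, TensorDataset

dataset = TensorDataset(torch.arange(10).float())
sampler = SubsetRandomSampler([0, 2, 4, 6, 8])
loader = DataLoader(dataset, batch_size=2, sampler=sampler)

for (batch,) in loader:
    print(batch)  # only the even indices, in a new random order each epoch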