Python torch.unique() Examples

The following are 25 code examples of torch.unique(), collected from open-source projects. Each example is preceded by a Source File line identifying the original project and file. You may also want to check out all available functions/classes of the module torch, or try the search function.
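As a quick reference before the examples: torch.unique() returns the unique elements of a tensor (sorted by default) and can optionally return the inverse mapping and per-element counts. A minimal sketch of the call signature these snippets rely on:

import torch

x = torch.tensor([1, 3, 2, 3, 1])
vals = torch.unique(x)                            # tensor([1, 2, 3])
vals, inv = torch.unique(x, return_inverse=True)  # inv: tensor([0, 2, 1, 2, 0]), index of each input in vals
vals, cnts = torch.unique(x, return_counts=True)  # cnts: tensor([2, 1, 2])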
Example #1
Source File: pointnet2.py    From dgl with Apache License 2.0
def forward(self, pos, centroids, feat=None):
        dev = pos.device
        group_idx = self.frnn(pos, centroids)
        B, N, _ = pos.shape
        glist = []
        for i in range(B):
            center = torch.zeros((N)).to(dev)
            center[centroids[i]] = 1
            src = group_idx[i].contiguous().view(-1)
            dst = centroids[i].view(-1, 1).repeat(1, self.n_neighbor).view(-1)

            unified = torch.cat([src, dst])
            uniq, inv_idx = torch.unique(unified, return_inverse=True)
            src_idx = inv_idx[:src.shape[0]]
            dst_idx = inv_idx[src.shape[0]:]

            g = dgl.DGLGraph((src_idx.cpu(), dst_idx.cpu()), readonly=True)
            g.ndata['pos'] = pos[i][uniq]
            g.ndata['center'] = center[uniq]
            if feat is not None:
                g.ndata['feat'] = feat[i][uniq]
            glist.append(g)
        bg = dgl.batch(glist)
        return bg 
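The torch.unique(..., return_inverse=True) call above relabels arbitrary point IDs into a dense 0..K-1 range so a compact per-sample graph can be built. A minimal standalone illustration of that relabeling trick (made-up IDs, no DGL required):

import torch

src = torch.tensor([10, 42, 10])
dst = torch.tensor([42, 99, 99])
uniq, inv = torch.unique(torch.cat([src, dst]), return_inverse=True)
# uniq: tensor([10, 42, 99])
src_idx, dst_idx = inv[:src.shape[0]], inv[src.shape[0]:]
# src_idx: tensor([0, 1, 0]); dst_idx: tensor([1, 2, 2])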
Example #2
Source File: cacd-ordinal.py    From coral-cnn with MIT License
def task_importance_weights(label_array):
    uniq = torch.unique(label_array)
    num_examples = label_array.size(0)

    m = torch.zeros(uniq.shape[0])

    for i, t in enumerate(torch.arange(torch.min(uniq), torch.max(uniq))):
        m_k = torch.max(torch.tensor([label_array[label_array > t].size(0), 
                                      num_examples - label_array[label_array > t].size(0)]))
        m[i] = torch.sqrt(m_k.float())

    imp = m/torch.max(m)
    return imp


# Data-specific scheme 
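A hypothetical call, to make the intent concrete (label values are made up):

import torch

labels = torch.tensor([0, 0, 1, 1, 1, 2])
imp = task_importance_weights(labels)
# imp has one entry per unique label; for consecutive integer labels,
# torch.arange(min, max) excludes the maximum, so only the first K-1
# entries (one per binary "label > t" threshold) are filled.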
Example #3
Source File: losses.py    From uois with GNU General Public License v3.0
def forward(self, x, target):
        """ Compute masked cosine similarity loss
            @param x: a [N x H x W] torch.FloatTensor of foreground logits
            @param target: a [N x H x W] torch.FloatTensor of values in [0, 1]
        """
        temp = self.BCEWithLogitsLoss(x, target) # Shape: [N x H x W]. values are in [0, 1]

        if self.weighted:
            # Compute pixel weights
            weight_mask = torch.zeros_like(target) # Shape: [N x H x W]. weighted mean over pixels
            unique_object_labels = torch.unique(target) # Should be {0, 1}
            for obj in unique_object_labels:
                num_pixels = torch.sum(target == obj, dtype=torch.float)
                weight_mask[target == obj] = 1 / num_pixels # inversely proportional to number of pixels
        else:
            weight_mask = torch.ones_like(target) # mean over observed pixels
        loss = torch.sum(temp * weight_mask) / torch.sum(weight_mask) 

        return loss 
Example #4
Source File: test_engine.py    From Context-aware-ZSR with MIT License
def Compute_AUSUC(dataset, all_scores, gt_classes, seen, unseen):
    cls_in_test = set(np.unique(gt_classes).tolist())
    seen = sorted(list(cls_in_test.intersection(set(seen))))
    unseen = sorted(list(cls_in_test.intersection(set(unseen))))
    score_S = all_scores[:, seen]
    score_U = all_scores[:, unseen]
    Y = gt_classes
    label_S = np.array(seen)
    label_U = np.array(unseen)

    AUC_val, AUC_record, acc_noBias, HM, fixed_bias = _Compute_AUSUC(
        torch.from_numpy(score_S),
        torch.from_numpy(score_U),
        torch.from_numpy(Y.astype(np.int64)),
        torch.from_numpy(label_S.astype(np.int64)),
        torch.from_numpy(label_U.astype(np.int64)))

    HM, fixed_bias = HM.item(), fixed_bias.item()
    print('AUC_val: {:.3f} HM: {:.3f} fixed_bias: {:.3f}'\
        .format(AUC_val, HM, fixed_bias))

    return {'AUC_val':AUC_val, 'AUC_record':AUC_record,\
        'acc_noBias': acc_noBias, 'HM': HM, 'fixed_bias': fixed_bias} 
Example #5
Source File: load_graph.py    From dgl with Apache License 2.0
def load_ogb(name):
    from ogb.nodeproppred import DglNodePropPredDataset

    data = DglNodePropPredDataset(name=name)
    splitted_idx = data.get_idx_split()
    graph, labels = data[0]
    labels = labels[:, 0]

    graph.ndata['features'] = graph.ndata['feat']
    graph.ndata['labels'] = labels
    in_feats = graph.ndata['features'].shape[1]
    num_labels = len(th.unique(labels))

    # Find the node IDs in the training, validation, and test set.
    train_nid, val_nid, test_nid = splitted_idx['train'], splitted_idx['valid'], splitted_idx['test']
    train_mask = th.zeros((graph.number_of_nodes(),), dtype=th.bool)
    train_mask[train_nid] = True
    val_mask = th.zeros((graph.number_of_nodes(),), dtype=th.bool)
    val_mask[val_nid] = True
    test_mask = th.zeros((graph.number_of_nodes(),), dtype=th.bool)
    test_mask[test_nid] = True
    graph.ndata['train_mask'] = train_mask
    graph.ndata['val_mask'] = val_mask
    graph.ndata['test_mask'] = test_mask
    return graph, len(th.unique(graph.ndata['labels'])) 
Example #6
Source File: train_dist.py    From dgl with Apache License 2.0
def main(args):
    th.distributed.init_process_group(backend='gloo')
    g = dgl.distributed.DistGraph(args.ip_config, args.graph_name)
    print('rank:', g.rank())

    train_nid = dgl.distributed.node_split(g.ndata['train_mask'], g.get_partition_book(), force_even=True)
    val_nid = dgl.distributed.node_split(g.ndata['val_mask'], g.get_partition_book(), force_even=True)
    test_nid = dgl.distributed.node_split(g.ndata['test_mask'], g.get_partition_book(), force_even=True)
    print('part {}, train: {}, val: {}, test: {}'.format(g.rank(), len(train_nid),
                                                         len(val_nid), len(test_nid)))
    device = th.device('cpu')
    n_classes = len(th.unique(g.ndata['labels'][np.arange(g.number_of_nodes())]))

    # Pack data
    in_feats = g.ndata['features'].shape[1]
    data = train_nid, val_nid, in_feats, n_classes, g
    run(args, device, data)
    print("parent ends") 
Example #7
Source File: data_parallel.py    From pytorch_geometric with MIT License
def scatter(self, data_list, device_ids):
        num_devices = min(len(device_ids), len(data_list))

        count = torch.tensor([data.num_nodes for data in data_list])
        cumsum = count.cumsum(0)
        cumsum = torch.cat([cumsum.new_zeros(1), cumsum], dim=0)
        device_id = num_devices * cumsum.to(torch.float) / cumsum[-1].item()
        device_id = (device_id[:-1] + device_id[1:]) / 2.0
        device_id = device_id.to(torch.long)  # round.
        split = device_id.bincount().cumsum(0)
        split = torch.cat([split.new_zeros(1), split], dim=0)
        split = torch.unique(split, sorted=True)
        split = split.tolist()

        return [
            Batch.from_data_list(data_list[split[i]:split[i + 1]]).to(
                torch.device('cuda:{}'.format(device_ids[i])))
            for i in range(len(split) - 1)
        ] 
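Here torch.unique(split, sorted=True) collapses repeated cumulative boundaries, which occur when a device ends up assigned zero graphs; a minimal sketch of just that step:

import torch

split = torch.tensor([0, 3, 3, 5])        # middle device was assigned no graphs
split = torch.unique(split, sorted=True)  # tensor([0, 3, 5]): the empty slice is dropped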
Example #8
Source File: relgraphconv.py    From dgl with Apache License 2.0
def bdd_message_func(self, edges):
        """Message function for block-diagonal-decomposition regularizer"""
        if edges.src['h'].dtype == th.int64 and len(edges.src['h'].shape) == 1:
            raise TypeError('Block decomposition does not allow integer ID feature.')

        # calculate msg @ W_r before put msg into edge
        if self.low_mem:
            etypes = th.unique(edges.data['type'])
            msg = th.empty((edges.src['h'].shape[0], self.out_feat),
                           device=edges.src['h'].device)
            for etype in etypes:
                loc = edges.data['type'] == etype
                w = self.weight[etype].view(self.num_bases, self.submat_in, self.submat_out)
                src = edges.src['h'][loc].view(-1, self.num_bases, self.submat_in)
                sub_msg = th.einsum('abc,bcd->abd', src, w)
                sub_msg = sub_msg.reshape(-1, self.out_feat)
                msg[loc] = sub_msg
        else:
            weight = self.weight.index_select(0, edges.data['type']).view(
                -1, self.submat_in, self.submat_out)
            node = edges.src['h'].view(-1, 1, self.submat_in)
            msg = th.bmm(node, weight).view(-1, self.out_feat)
        if 'norm' in edges.data:
            msg = msg * edges.data['norm']
        return {'msg': msg} 
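In the low_mem branch, torch.unique over the edge-type array lets the loop visit only the relation types that actually occur in the batch. A stripped-down sketch of that group-by-type pattern (hypothetical shapes, a placeholder transform instead of the real per-type weight multiply, no DGL):

import torch as th

etype = th.tensor([0, 2, 0, 1])     # per-edge type IDs, made up
feat = th.randn(4, 8)
out = th.empty_like(feat)
for t in th.unique(etype):          # only types present in the batch
    loc = etype == t
    out[loc] = feat[loc] * 2.0      # placeholder for the per-type transform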
Example #9
Source File: losses.py    From uois with GNU General Public License v3.0
def forward(self, x, target):
        """ Compute weighted cross entropy

            @param x: a [N x C x H x W] torch.FloatTensor of values
            @param target: a [N x H x W] torch.LongTensor of values
        """
        temp = self.CrossEntropyLoss(x, target) # Shape: [N x H x W]

        # Compute pixel weights
        weight_mask = torch.zeros_like(target).float() # Shape: [N x H x W]. weighted mean over pixels
        unique_object_labels = torch.unique(target)
        for obj in unique_object_labels:
            num_pixels = torch.sum(target == obj, dtype=torch.float)
            weight_mask[target == obj] = 1 / num_pixels # inversely proportional to number of pixels

        loss = torch.sum(temp * weight_mask) / torch.sum(weight_mask) 
        return loss 
Example #10
Source File: sparse_weights_test.py    From nupic.torch with GNU Affero General Public License v3.0
def test_rezero_2d(self):
        in_channels, kernel_size, out_channels = 64, (5, 5), 64
        input_size = in_channels * kernel_size[0] * kernel_size[1]

        with torch.no_grad():
            for sparsity in [0.1, 0.5, 0.9]:
                cnn = torch.nn.Conv2d(in_channels=in_channels,
                                      out_channels=out_channels,
                                      kernel_size=kernel_size)
                sparse = SparseWeights2d(cnn, sparsity=sparsity)

                # Ensure weights are not sparse
                sparse.module.weight.data.fill_(1.0)

                # Rezero, verify the weights become sparse
                sparse.rezero_weights()
                nonzeros = torch.nonzero(sparse.module.weight, as_tuple=True)[0]
                counts = torch.unique(nonzeros, return_counts=True)[1]
                expected = [round(input_size * (1.0 - sparsity))] * out_channels
                self.assertSequenceEqual(counts.numpy().tolist(), expected) 
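The nonzero/return_counts pairing used in these tests is a compact way to count non-zero weights per output channel; a minimal sketch:

import torch

w = torch.tensor([[0., 1., 1.],
                  [1., 0., 0.]])
rows = torch.nonzero(w, as_tuple=True)[0]           # tensor([0, 0, 1])
counts = torch.unique(rows, return_counts=True)[1]  # tensor([2, 1])
# caveat: a row with no non-zeros produces no entry in counts at all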
Example #11
Source File: sparse_weights_test.py    From nupic.torch with GNU Affero General Public License v3.0
def test_rezero_1d(self):
        in_features, out_features = 784, 10
        for sparsity in [0.1, 0.5, 0.9]:
            linear = torch.nn.Linear(in_features=in_features,
                                     out_features=out_features)
            sparse = SparseWeights(linear, sparsity=sparsity)

            # Ensure weights are not sparse
            sparse.module.weight.data.fill_(1.0)

            # Rezero, verify the weights become sparse
            sparse.rezero_weights()
            nonzeros = torch.nonzero(sparse.module.weight, as_tuple=True)[0]
            counts = torch.unique(nonzeros, return_counts=True)[1]
            expected = [round(in_features * (1.0 - sparsity))] * out_features
            self.assertSequenceEqual(counts.numpy().tolist(), expected) 
Example #12
Source File: morph-ordinal.py    From coral-cnn with MIT License
def task_importance_weights(label_array):
    uniq = torch.unique(label_array)
    num_examples = label_array.size(0)

    m = torch.zeros(uniq.shape[0])

    for i, t in enumerate(torch.arange(torch.min(uniq), torch.max(uniq))):
        m_k = torch.max(torch.tensor([label_array[label_array > t].size(0), 
                                      num_examples - label_array[label_array > t].size(0)]))
        m[i] = torch.sqrt(m_k.float())

    imp = m/torch.max(m)
    return imp


# Data-specific scheme 
Example #13
Source File: morph-coral.py    From coral-cnn with MIT License
def task_importance_weights(label_array):
    uniq = torch.unique(label_array)
    num_examples = label_array.size(0)

    m = torch.zeros(uniq.shape[0])

    for i, t in enumerate(torch.arange(torch.min(uniq), torch.max(uniq))):
        m_k = torch.max(torch.tensor([label_array[label_array > t].size(0), 
                                      num_examples - label_array[label_array > t].size(0)]))
        m[i] = torch.sqrt(m_k.float())

    imp = m/torch.max(m)
    return imp


# Data-specific scheme 
Example #14
Source File: afad-ordinal.py    From coral-cnn with MIT License
def task_importance_weights(label_array):
    uniq = torch.unique(label_array)
    num_examples = label_array.size(0)

    m = torch.zeros(uniq.shape[0])

    for i, t in enumerate(torch.arange(torch.min(uniq), torch.max(uniq))):
        m_k = torch.max(torch.tensor([label_array[label_array > t].size(0), 
                                      num_examples - label_array[label_array > t].size(0)]))
        m[i] = torch.sqrt(m_k.float())

    imp = m/torch.max(m)
    return imp


# Data-specific scheme 
Example #15
Source File: afad-coral.py    From coral-cnn with MIT License
def task_importance_weights(label_array):
    uniq = torch.unique(label_array)
    num_examples = label_array.size(0)

    m = torch.zeros(uniq.shape[0])

    for i, t in enumerate(torch.arange(torch.min(uniq), torch.max(uniq))):
        m_k = torch.max(torch.tensor([label_array[label_array > t].size(0), 
                                      num_examples - label_array[label_array > t].size(0)]))
        m[i] = torch.sqrt(m_k.float())

    imp = m/torch.max(m)
    return imp


# Data-specific scheme 
Example #16
Source File: sparse_weights_test.py    From nupic.torch with GNU Affero General Public License v3.0
def test_sparse_weights_2d(self):
        in_channels, kernel_size, out_channels = 64, (5, 5), 64
        input_size = in_channels * kernel_size[0] * kernel_size[1]

        with torch.no_grad():
            for sparsity in [0.1, 0.5, 0.9]:
                cnn = torch.nn.Conv2d(in_channels=in_channels,
                                      out_channels=out_channels,
                                      kernel_size=kernel_size)
                sparse = SparseWeights2d(cnn, sparsity=sparsity)
                nonzeros = torch.nonzero(sparse.module.weight, as_tuple=True)[0]
                counts = torch.unique(nonzeros, return_counts=True)[1]

                # Expected non-zeros per output channel
                expected = [round(input_size * (1.0 - sparsity))] * out_channels
                self.assertSequenceEqual(counts.numpy().tolist(), expected) 
Example #17
Source File: transforms.py    From mmdetection with Apache License 2.0
def roi2bbox(rois):
    """Convert rois to bounding box format.

    Args:
        rois (torch.Tensor): RoIs with the shape (n, 5) where the first
            column indicates batch id of each RoI.

    Returns:
        list[torch.Tensor]: Converted boxes of corresponding rois.
    """
    bbox_list = []
    img_ids = torch.unique(rois[:, 0].cpu(), sorted=True)
    for img_id in img_ids:
        inds = (rois[:, 0] == img_id.item())
        bbox = rois[inds, 1:]
        bbox_list.append(bbox)
    return bbox_list 
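A quick illustration with made-up RoIs (batch id in column 0):

rois = torch.tensor([[0., 1., 2., 3., 4.],
                     [1., 5., 6., 7., 8.],
                     [0., 9., 8., 7., 6.]])
boxes = roi2bbox(rois)
# boxes[0] holds the two rows for image 0, boxes[1] the single row for image 1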
Example #18
Source File: transforms.py    From IoU-Uniform-R-CNN with Apache License 2.0
def roi2bbox(rois):
    bbox_list = []
    img_ids = torch.unique(rois[:, 0].cpu(), sorted=True)
    for img_id in img_ids:
        inds = (rois[:, 0] == img_id.item())
        bbox = rois[inds, 1:]
        bbox_list.append(bbox)
    return bbox_list 
Example #19
Source File: grouped_batch_sampler.py    From sampling-free with MIT License
def __init__(self, sampler, group_ids, batch_size, drop_uneven=False):
        if not isinstance(sampler, Sampler):
            raise ValueError(
                "sampler should be an instance of "
                "torch.utils.data.Sampler, but got sampler={}".format(sampler)
            )
        self.sampler = sampler
        self.group_ids = torch.as_tensor(group_ids)
        assert self.group_ids.dim() == 1
        self.batch_size = batch_size
        self.drop_uneven = drop_uneven

        self.groups = torch.unique(self.group_ids).sort(0)[0]

        self._can_reuse_batches = False 
Example #20
Source File: grouped_batch_sampler.py    From maskrcnn-benchmark with MIT License
def __init__(self, sampler, group_ids, batch_size, drop_uneven=False):
        if not isinstance(sampler, Sampler):
            raise ValueError(
                "sampler should be an instance of "
                "torch.utils.data.Sampler, but got sampler={}".format(sampler)
            )
        self.sampler = sampler
        self.group_ids = torch.as_tensor(group_ids)
        assert self.group_ids.dim() == 1
        self.batch_size = batch_size
        self.drop_uneven = drop_uneven

        self.groups = torch.unique(self.group_ids).sort(0)[0]

        self._can_reuse_batches = False 
Example #21
Source File: grouped_batch_sampler.py    From Res2Net-maskrcnn with MIT License
def __init__(self, sampler, group_ids, batch_size, drop_uneven=False):
        if not isinstance(sampler, Sampler):
            raise ValueError(
                "sampler should be an instance of "
                "torch.utils.data.Sampler, but got sampler={}".format(sampler)
            )
        self.sampler = sampler
        self.group_ids = torch.as_tensor(group_ids)
        assert self.group_ids.dim() == 1
        self.batch_size = batch_size
        self.drop_uneven = drop_uneven

        self.groups = torch.unique(self.group_ids).sort(0)[0]

        self._can_reuse_batches = False 
Example #22
Source File: __init__.py    From occupancy_flow with MIT License
def return_time_steps(self, t):
        ''' Returns time steps for the ODE Solver.
        The time steps are ordered, duplicates are removed, and time 0
        is added for the start.

        Args:
            t (tensor): time values
        '''
        device = self.device
        t_steps_eval, t_order = torch.unique(
            torch.cat([torch.zeros(1).to(device), t]), sorted=True,
            return_inverse=True)
        return t_steps_eval, t_order[1:] 
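A numeric sketch of the unique/return_inverse trick above (made-up time values):

import torch

t = torch.tensor([0.5, 0.1, 0.5])
steps, order = torch.unique(torch.cat([torch.zeros(1), t]),
                            sorted=True, return_inverse=True)
# steps: tensor([0.0, 0.1, 0.5]); order: tensor([0, 2, 1, 2])
# order[1:] maps each original t back into steps: tensor([2, 1, 2])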
Example #23
Source File: filter.py    From spectre with Apache License 2.0
def compute(self, data: torch.Tensor) -> torch.Tensor:
        classes = torch.unique(data, sorted=False)
        classes = classes[~torch.isnan(classes)]
        one_hot = []
        if classes.shape[0] > 1000:
            warnings.warn("One hot encoding with too many features: ({}). "
                          .format(classes.shape[0]),
                          RuntimeWarning)
        for i in range(classes.shape[0]):
            one_hot.append((data == classes[i]).unsqueeze(-1))
        return torch.cat(one_hot, dim=-1) 
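A standalone rerun of the same logic on made-up data, to show the shape of the result:

import torch

data = torch.tensor([1., 2., float('nan'), 1.])
classes = torch.unique(data, sorted=False)
classes = classes[~torch.isnan(classes)]   # drop NaN before encoding
one_hot = torch.cat([(data == c).unsqueeze(-1) for c in classes], dim=-1)
# one_hot is a [4 x 2] boolean tensor; the NaN row is all False,
# since NaN compares unequal to everything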
Example #24
Source File: sampling_utils.py    From pt-ranking.github.io with MIT License
def uniform_rand_per_label(uni_cnts):
    """ can be compatible with batch """
    num_unis = uni_cnts.size(0)  # number of unique elements
    inner_rand_inds = (torch.rand(num_unis) * uni_cnts.type(tensor)).type(
        torch.LongTensor)  # random index w.r.t each interval
    begs = torch.cumsum(torch.cat([tensor([0.]).type(torch.LongTensor), uni_cnts[0:num_unis - 1]]),
                        dim=0)  # begin positions of each interval within the same vector
    # print('begin positions', begs)
    rand_inds_per_label = begs + inner_rand_inds
    # print('random index', rand_inds_per_label)  # random index tensor([ 0,  1,  3,  6, 10]) ([0, 2, 3, 5, 8])

    return rand_inds_per_label 
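uni_cnts here is expected to be the counts output of torch.unique(..., return_counts=True) over a sorted label vector, and the global tensor type in the original is project-specific. A hedged sketch of how the pieces fit together, using plain dtypes:

import torch

labels = torch.tensor([0, 0, 1, 2, 2, 2])               # sorted labels, made up
_, uni_cnts = torch.unique(labels, return_counts=True)  # tensor([2, 1, 3])
begs = torch.cumsum(torch.cat([torch.zeros(1, dtype=torch.long),
                               uni_cnts[:-1]]), dim=0)  # tensor([0, 2, 3])
inner = (torch.rand(uni_cnts.size(0)) * uni_cnts).long()
rand_inds = begs + inner   # one uniformly chosen index per label block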
Example #25
Source File: grouped_batch_sampler.py    From HRNet-MaskRCNN-Benchmark with MIT License
def __init__(self, sampler, group_ids, batch_size, drop_uneven=False):
        if not isinstance(sampler, Sampler):
            raise ValueError(
                "sampler should be an instance of "
                "torch.utils.data.Sampler, but got sampler={}".format(sampler)
            )
        self.sampler = sampler
        self.group_ids = torch.as_tensor(group_ids)
        assert self.group_ids.dim() == 1
        self.batch_size = batch_size
        self.drop_uneven = drop_uneven

        self.groups = torch.unique(self.group_ids).sort(0)[0]

        self._can_reuse_batches = False