Python torch.nn.functional.margin_ranking_loss() Examples

The following are 9 code examples of torch.nn.functional.margin_ranking_loss(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch.nn.functional, or try the search function.
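
Under the hood, F.margin_ranking_loss(input1, input2, target, margin=0.0, reduction='mean') computes max(0, -target * (input1 - input2) + margin) element-wise, where each target entry is +1 or -1, and then applies the chosen reduction. A minimal standalone sketch (the values are illustrative and not taken from any of the projects below):

import torch
import torch.nn.functional as F

x1 = torch.tensor([0.9, 0.2, 0.7])
x2 = torch.tensor([0.1, 0.8, 0.6])
target = torch.ones(3)          # +1: x1 should exceed x2 by at least `margin`

loss = F.margin_ranking_loss(x1, x2, target, margin=0.5, reduction='mean')
# element-wise: max(0, -target * (x1 - x2) + margin) -> [0.0, 1.1, 0.4]
print(loss)                     # tensor(0.5000), the mean of the three terms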
Example #1
Source File: rank_hinge_loss.py    From MatchZoo-py with Apache License 2.0
def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor):
        """
        Calculate rank hinge loss.

        :param y_pred: Predicted result.
        :param y_true: Label.
        :return: Hinge loss computed by user-defined margin.
        """
        y_pos = y_pred[::(self.num_neg + 1), :]
        y_neg = []
        for neg_idx in range(self.num_neg):
            neg = y_pred[(neg_idx + 1)::(self.num_neg + 1), :]
            y_neg.append(neg)
        y_neg = torch.cat(y_neg, dim=-1)
        y_neg = torch.mean(y_neg, dim=-1, keepdim=True)
        y_true = torch.ones_like(y_pos)
        return F.margin_ranking_loss(
            y_pos, y_neg, y_true,
            margin=self.margin,
            reduction=self.reduction
        ) 
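
As a quick illustration of the slicing in Example #1 (a sketch with made-up scores, not MatchZoo-py code): with num_neg = 2 the batch is assumed to interleave one positive row with its negatives as [pos, neg, neg, pos, neg, neg, ...].

import torch

num_neg = 2
y_pred = torch.tensor([[0.9], [0.1], [0.2],    # group 1: pos, neg, neg
                       [0.8], [0.4], [0.3]])   # group 2: pos, neg, neg

y_pos = y_pred[::(num_neg + 1), :]                       # rows 0 and 3 -> positives
y_neg = torch.cat([y_pred[(i + 1)::(num_neg + 1), :]     # rows 1,2 and 4,5 -> negatives
                   for i in range(num_neg)], dim=-1)
y_neg = y_neg.mean(dim=-1, keepdim=True)                 # mean negative score per group
print(y_pos.squeeze(-1), y_neg.squeeze(-1))              # [0.9, 0.8] and [0.15, 0.35]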
Example #2
Source File: base.py    From incremental_learning.pytorch with MIT License
def ucir_ranking(logits, targets, n_classes, task_size, nb_negatives=2, margin=0.2):
    return github_ucir_ranking_mr(logits, targets, n_classes, task_size, nb_negatives, margin)
    # NOTE: everything below the early return above is unreachable dead code;
    # the function delegates entirely to github_ucir_ranking_mr.
    # Ranking loss maximizing the inter-class separation between old & new:

    # 1. Fetching from the batch only samples from the batch that belongs
    #    to old classes:
    old_indexes = targets.lt(n_classes - 1)
    old_logits = logits[old_indexes]
    old_targets = targets[old_indexes]

    # 2. Getting positive values, aka ground-truth's logit predictions:
    old_values = old_logits[torch.arange(len(old_logits)), old_targets]
    old_values = old_values.repeat(nb_negatives, 1).t().contiguous().view(-1)

    # 3. Getting top-k negative values:
    nb_old_classes = n_classes - task_size
    negative_indexes = old_logits[..., nb_old_classes:].argsort(dim=1, descending=True)[
        ..., :nb_negatives] + nb_old_classes
    new_values = old_logits[torch.arange(len(old_logits)).view(-1, 1), negative_indexes].view(-1)

    return F.margin_ranking_loss(
        old_values, new_values, -torch.ones(len(old_values)).to(logits.device), margin=margin
    ) 
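
The sign of the target decides which input the loss pushes higher, which is worth keeping in mind when reading the -1 target above; a quick standalone check with toy values:

import torch
import torch.nn.functional as F

a = torch.tensor([0.2])
b = torch.tensor([0.5])

# target = +1: loss = max(0, -(a - b) + margin), zero once a >= b + margin
print(F.margin_ranking_loss(a, b, torch.tensor([1.0]), margin=0.2))   # tensor(0.5000)
# target = -1: loss = max(0, (a - b) + margin), zero once b >= a + margin
print(F.margin_ranking_loss(a, b, torch.tensor([-1.0]), margin=0.2))  # tensor(0.)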
Example #3
Source File: margin_ranking_loss.py    From mmfashion with Apache License 2.0
def forward(self, input1, input2, target):
        return self.loss_weight * F.margin_ranking_loss(
            input1,
            input2,
            target,
            margin=self.margin,
            reduction=self.reduction) 
Example #4
Source File: losses.py    From ViP with MIT License
def loss(self, predictions, data):
        """
        Args:
            predictions (List): 
                - output (Tensor, shape [2*T, 2]): Positive and negative attention weights for each sample
                - loss_weigh (Tensor, shape [2*T, 1]): Loss weighting applied to each sampled frame
            data        (None) 

            T: number of sampled frames from video (default: 5)
        Return:
            Frame-wise weighting loss 
        """
        output, loss_weigh = predictions

        if self.loss_weighting or self.obj_interact: 
            rank_batch = F.margin_ranking_loss(output[:,0:1], output[:,1:2], 
                torch.ones(output.size()).type(output.data.type()), margin=self.ranking_margin, reduction='none')
            if self.loss_weighting and self.obj_interact:
                loss_weigh = (output[:, 0:1]+loss_weigh)/2. # avg
            elif self.loss_weighting:
                loss_weigh = output[:,0:1]
            else:
                loss_weigh = loss_weigh.unsqueeze(1)
            # ranking loss
            cls_loss = self.loss_factor*(rank_batch*loss_weigh).mean()+ \
                        (1-self.loss_factor)*-torch.log(2*loss_weigh).mean()
        else:
            # ranking loss
            cls_loss = F.margin_ranking_loss(output[:,0:1], output[:,1:2],
                torch.Tensor([[1],[1]]).type(output.data.type()), margin=self.ranking_margin)


        return cls_loss 
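
With reduction='none', as in the first branch above, the functional returns one loss term per row so it can be reweighted before averaging; a minimal sketch of that pattern (the weights here are made up, not the ViP loss weighting):

import torch
import torch.nn.functional as F

pos = torch.tensor([[0.9], [0.2], [0.6]])
neg = torch.tensor([[0.1], [0.8], [0.5]])
per_sample = F.margin_ranking_loss(pos, neg, torch.ones_like(pos),
                                   margin=0.1, reduction='none')   # shape (3, 1)
weights = torch.tensor([[1.0], [0.5], [2.0]])                      # hypothetical per-sample weights
weighted_loss = (per_sample * weights).mean()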
Example #5
Source File: metric_loss.py    From fast-reid with Apache License 2.0
def __call__(self, _, global_features, targets):
        if self._normalize_feature:
            global_features = normalize(global_features, axis=-1)

        dist_mat = euclidean_dist(global_features, global_features)

        N = dist_mat.size(0)
        is_pos = targets.expand(N, N).eq(targets.expand(N, N).t())
        is_neg = targets.expand(N, N).ne(targets.expand(N, N).t())

        if self._hard_mining:
            dist_ap, dist_an = hard_example_mining(dist_mat, is_pos, is_neg)
        else:
            dist_ap, dist_an = weighted_example_mining(dist_mat, is_pos, is_neg)

        y = dist_an.new().resize_as_(dist_an).fill_(1)

        if self._margin > 0:
            loss = F.margin_ranking_loss(dist_an, dist_ap, y, margin=self._margin)
        else:
            loss = F.soft_margin_loss(dist_an - dist_ap, y)
            if loss == float('Inf'): loss = F.margin_ranking_loss(dist_an, dist_ap, y, margin=0.3)

        return {
            "loss_triplet": loss * self._scale,
        } 
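
Passing the anchor-negative distance as the first input and the anchor-positive distance as the second, with a target of +1, turns margin_ranking_loss into the familiar triplet hinge max(0, d_ap - d_an + margin); a small sketch with made-up distances:

import torch
import torch.nn.functional as F

dist_ap = torch.tensor([0.4, 1.2])   # anchor-positive distances (illustrative)
dist_an = torch.tensor([1.0, 0.9])   # anchor-negative distances (illustrative)
y = torch.ones_like(dist_an)

triplet = F.margin_ranking_loss(dist_an, dist_ap, y, margin=0.3)
# equivalent to torch.clamp(dist_ap - dist_an + 0.3, min=0).mean()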
Example #6
Source File: train.py    From PaperRobot with MIT License
def validate(epoch):
    t = time.time()
    model.eval()
    torch.set_grad_enabled(False)
    eloss = 0
    for batch_idx, instance in enumerate(valid_generator):
        pos, neg, pht_bef, ptt_bef, nht_bef, ntt_bef = instance
        pos = pos.to(device)
        neg = neg.to(device)
        # text information
        pht = list(map(lambda x:x.to(device),pht_bef[0:3]))
        ptt = list(map(lambda x:x.to(device),ptt_bef[0:3]))
        nht = list(map(lambda x:x.to(device),nht_bef[0:3]))
        ntt = list(map(lambda x:x.to(device),ntt_bef[0:3]))
        batch_nodes, batch_adj = get_subgraph(pos, train_triple_dict, graph)
        # get relative location according to the batch_nodes
        shifted_pos, shifted_neg = convert_index([pos, neg], batch_nodes)
        batch_nodes = torch.LongTensor(batch_nodes.tolist()).to(device)
        batch_adj = torch.from_numpy(batch_adj).to(device)
        shifted_pos = torch.LongTensor(shifted_pos).to(device)
        shifted_neg = torch.LongTensor(shifted_neg).to(device)
        score_pos = model(batch_nodes, batch_adj, pos, shifted_pos, pht[0], pht[1], pht[2],
                    ptt[0], ptt[1], ptt[2])
        score_neg = model(batch_nodes, batch_adj, neg, shifted_neg, nht[0], nht[1], nht[2],
                    ntt[0], ntt[1], ntt[2])
        loss_train = F.margin_ranking_loss(score_pos, score_neg, y, margin=args.margin)
        sys.stdout.write(
            '%d batches processed. current valid batch loss: %f\r' %
            (batch_idx, loss_train.item())
        )
        eloss += loss_train.item()
        del batch_nodes, batch_adj, shifted_pos, shifted_neg, pos, neg, pht_bef, ptt_bef, nht_bef, ntt_bef
        if batch_idx%500==0:
            gc.collect()
    print('Epoch: {:04d}'.format(epoch+1),
          'loss_valid: {:.4f}'.format(eloss/(batch_idx+1)),
          'time: {:.4f}s'.format(time.time() - t))

    return eloss 
Example #7
Source File: triplet.py    From reid-mgn with MIT License
def forward(self, input, target):
        y_true = target.int().unsqueeze(-1)
        same_id = torch.eq(y_true, y_true.t()).type_as(input)

        pos_mask = same_id
        neg_mask = 1 - same_id

        def _mask_max(input_tensor, mask, axis=None, keepdims=False):
            input_tensor = input_tensor - 1e6 * (1 - mask)
            _max, _idx = torch.max(input_tensor, dim=axis, keepdim=keepdims)
            return _max, _idx

        def _mask_min(input_tensor, mask, axis=None, keepdims=False):
            input_tensor = input_tensor + 1e6 * (1 - mask)
            _min, _idx = torch.min(input_tensor, dim=axis, keepdim=keepdims)
            return _min, _idx

        # output[i, j] = || feature[i, :] - feature[j, :] ||_2
        dist_squared = torch.sum(input ** 2, dim=1, keepdim=True) + \
                       torch.sum(input.t() ** 2, dim=0, keepdim=True) - \
                       2.0 * torch.matmul(input, input.t())
        dist = dist_squared.clamp(min=1e-16).sqrt()

        pos_max, pos_idx = _mask_max(dist, pos_mask, axis=-1)
        neg_min, neg_idx = _mask_min(dist, neg_mask, axis=-1)

        # loss(x, y) = max(0, -y * (x1 - x2) + margin)
        y = torch.ones(same_id.size()[0]).to(DEVICE)
        return F.margin_ranking_loss(neg_min.float(),
                                     pos_max.float(),
                                     y,
                                     self.margin,
                                     self.size_average) 
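
The squared-distance expansion in this snippet is the standard identity ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a·b; a quick sanity check against torch.cdist on random features (not reid-mgn code):

import torch

feats = torch.randn(6, 16)
dist_sq = (feats ** 2).sum(dim=1, keepdim=True) \
        + (feats ** 2).sum(dim=1, keepdim=True).t() \
        - 2.0 * feats @ feats.t()
dist = dist_sq.clamp(min=1e-16).sqrt()

print(torch.allclose(dist, torch.cdist(feats, feats), atol=1e-3))  # expect True up to float32 rounding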
Example #8
Source File: test_pyprof_nvtx.py    From apex with BSD 3-Clause "New" or "Revised" License
def test_margin_ranking_loss(self):
        inp1 = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=True)
        inp2 = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=True)
        target = (torch.randint(0, 1, (128,), device='cuda') - 1).type_as(inp1)
        output = F.margin_ranking_loss(inp1, inp2, target, margin=0, size_average=None, reduce=None, reduction='mean') 
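
size_average and reduce are legacy reduction flags that current PyTorch deprecates in favor of reduction; for the tensors built in the test above, passing reduction alone is the equivalent modern call:

output = F.margin_ranking_loss(inp1, inp2, target, margin=0.0, reduction='mean')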
Example #9
Source File: train.py    From PaperRobot with MIT License
def train(epoch):
    print("Epoch", epoch)
    t = time.time()
    model.train(True)
    torch.set_grad_enabled(True)
    eloss = 0
    for batch_idx, instance in enumerate(train_generator):
        pos, neg, pht_bef, ptt_bef, nht_bef, ntt_bef = instance
        pos = pos.to(device)
        neg = neg.to(device)
        # text information
        pht = list(map(lambda x:x.to(device),pht_bef[0:3]))
        ptt = list(map(lambda x:x.to(device),ptt_bef[0:3]))
        nht = list(map(lambda x:x.to(device),nht_bef[0:3]))
        ntt = list(map(lambda x:x.to(device),ntt_bef[0:3]))
        batch_nodes, batch_adj = get_subgraph(pos, train_triple_dict, graph)
        # get relative location according to the batch_nodes
        shifted_pos, shifted_neg = convert_index([pos, neg], batch_nodes)
        batch_nodes = torch.LongTensor(batch_nodes.tolist()).to(device)
        batch_adj = torch.from_numpy(batch_adj).to(device)
        shifted_pos = torch.LongTensor(shifted_pos).to(device)
        shifted_neg = torch.LongTensor(shifted_neg).to(device)
        score_pos = model(batch_nodes, batch_adj, pos, shifted_pos, pht[0], pht[1], pht[2],
                    ptt[0], ptt[1], ptt[2])
        score_neg = model(batch_nodes, batch_adj, neg, shifted_neg, nht[0], nht[1], nht[2],
                    ntt[0], ntt[1], ntt[2])
        loss_train = F.margin_ranking_loss(score_pos, score_neg, y, margin=args.margin)
        sys.stdout.write(
            '%d batches processed. current train batch loss: %f\r' %
            (batch_idx, loss_train.item())
        )
        eloss += loss_train.item()
        optimizer.zero_grad()   # clear gradients left over from the previous batch
        loss_train.backward()
        del batch_nodes, batch_adj, shifted_pos, shifted_neg, pos, neg, pht_bef, ptt_bef, nht_bef, ntt_bef
        optimizer.step()
        if batch_idx%500==0:
            gc.collect()
    print('\n')
    print('Epoch: {:04d}'.format(epoch+1),
          'loss_train: {:.4f}'.format(eloss/(batch_idx+1)),
          'time: {:.4f}s'.format(time.time() - t))

    return eloss

