Python torch.nn.functional.multi_margin_loss() Examples
The following are 5 code examples of torch.nn.functional.multi_margin_loss(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch.nn.functional, or try the search function.
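Before the project examples, here is a minimal, self-contained sketch of how torch.nn.functional.multi_margin_loss() is typically called; the tensor shapes and values are made up for illustration and are not taken from any of the projects below.

import torch
import torch.nn.functional as F

# A batch of 4 samples scored against 3 classes, and the correct class index for each sample.
scores = torch.randn(4, 3)            # input of shape (N, C)
target = torch.tensor([0, 2, 1, 2])   # class indices of shape (N,)

# Mean multi-class hinge (margin) loss over the batch; p defaults to 1 and margin to 1.0.
loss = F.multi_margin_loss(scores, target, p=1, margin=1.0, reduction='mean')
print(loss)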
Example #1
Source File: mulrel_ranker.py From mulrel-nel with Apache License 2.0 | 6 votes |
def loss(self, scores, true_pos, lamb=1e-7):
    # Ranking loss: the true entity must outscore the other candidates by self.margin.
    loss = F.multi_margin_loss(scores, true_pos, margin=self.margin)
    if self.use_local_only:
        return loss

    # Regularization: push L2-normalized relation embeddings apart whenever a pair
    # of them is closer than 1 (subtracting the pairwise distance from the loss
    # rewards larger distances).
    X = F.normalize(self.rel_embs)
    diff = (X.view(self.n_rels, 1, -1) - X.view(1, self.n_rels, -1)).pow(2).sum(dim=2).add_(1e-5).sqrt()
    diff = diff * (diff < 1).float()
    loss -= torch.sum(diff).mul(lamb)

    # Same regularization for the entity-word embeddings.
    X = F.normalize(self.ew_embs)
    diff = (X.view(self.n_rels, 1, -1) - X.view(1, self.n_rels, -1)).pow(2).sum(dim=2).add_(1e-5).sqrt()
    diff = diff * (diff < 1).float()
    loss -= torch.sum(diff).mul(lamb)

    return loss
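The regularization term above can be read in isolation: it computes all pairwise Euclidean distances between L2-normalized embedding rows, keeps only the pairs that are closer than 1, and subtracts their sum from the loss so that nearby embeddings are pushed apart. A standalone sketch of just that term; the embedding count, size, and lamb value are made up for illustration.

import torch
import torch.nn.functional as F

n_rels, dim, lamb = 6, 8, 1e-7
rel_embs = torch.randn(n_rels, dim, requires_grad=True)

X = F.normalize(rel_embs)                               # unit-norm rows
diff = (X.view(n_rels, 1, -1) - X.view(1, n_rels, -1)) \
           .pow(2).sum(dim=2).add(1e-5).sqrt()          # pairwise Euclidean distances
diff = diff * (diff < 1).float()                        # keep only pairs that are "too close"
reg = -lamb * torch.sum(diff)                           # added to the loss, this spreads the embeddings out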
Example #2
Source File: losses.py From coling2018-graph-neural-networks-question-answering with Apache License 2.0 | 6 votes |
def forward(self, predictions, target):
    loss = Variable(torch.zeros(1))
    target_index_var = Variable(torch.LongTensor([0]))
    if torch.cuda.is_available():
        loss = loss.cuda()
        target_index_var = target_index_var.cuda()

    # Sort the targets by relevance and reorder the predictions the same way,
    # so the most relevant candidate of each sample ends up in column 0.
    target_sorted, target_indices = torch.sort(target, dim=-1, descending=True)
    predictions = predictions.gather(1, target_indices)
    margins = DEFAULT_MARGIN * target_sorted.data
    # margins = margins.clamp(max=1.0, min=0.5)

    for sample_index in range(target_indices.size(0)):
        target_index = 0
        # Accumulate one margin loss per relevant candidate (at most the top 10),
        # each time treating the current candidate (column 0 of the remaining slice)
        # as the target class.
        while target_index < min(target_indices.size(1), 10) and \
                (target_sorted[sample_index, target_index].data[0] > MIN_TARGET_VALUE):
            loss += F.multi_margin_loss(predictions[sample_index, target_index:],
                                        target_index_var,
                                        margin=margins[sample_index, target_index],
                                        size_average=False)
            target_index += 1
    return loss
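The pattern worth noting in Example #2 is the constant target index of 0: because the scores were gathered in descending order of target relevance, the current positive candidate always sits in column 0 of the remaining slice. A reduced sketch of that idea in current PyTorch (the scores and margin are illustrative; size_average=False corresponds to reduction='sum'):

import torch
import torch.nn.functional as F

scores = torch.tensor([[3.1, 2.9, 0.4, 0.2]])   # positive candidate already moved to column 0
target = torch.tensor([0])                      # so the "correct class" is always index 0
loss = F.multi_margin_loss(scores, target, margin=0.5, reduction='sum')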
Example #3
Source File: local_ctx_att_ranker.py From mulrel-nel with Apache License 2.0 | 5 votes |
def loss(self, scores, true_pos):
    loss = F.multi_margin_loss(scores, true_pos, margin=self.margin)
    return loss
Example #4
Source File: batch_metrics.py From poutyne with GNU Lesser General Public License v3.0 | 5 votes |
def multi_margin(y_pred, y_true):
    return F.multi_margin_loss(y_pred, y_true)
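This wrapper exists so the loss can be reported as a per-batch metric during training. A minimal usage sketch, assuming multi_margin is defined as above; the toy network, optimizer, and the registration via poutyne Model's batch_metrics argument are assumptions about how such wrappers are typically hooked up, not code from the project.

import torch
import torch.nn as nn
import torch.optim as optim
from poutyne import Model

# Direct call: mean multi-class hinge loss for one batch of scores and labels.
y_pred = torch.randn(8, 5)
y_true = torch.randint(0, 5, (8,))
print(multi_margin(y_pred, y_true))

# Assumed registration as a per-batch metric alongside the training loss.
network = nn.Linear(10, 5)
optimizer = optim.SGD(network.parameters(), lr=0.01)
model = Model(network, optimizer, 'cross_entropy', batch_metrics=[multi_margin])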
Example #5
Source File: fast_rcnn_heads.py From Context-aware-ZSR with MIT License | 4 votes |
def fast_rcnn_losses(cls_score, bbox_pred, label_int32, bbox_targets,
                     bbox_inside_weights, bbox_outside_weights):
    device_id = cls_score.get_device()
    rois_label = Variable(torch.from_numpy(label_int32.astype('int64'))).cuda(device_id)

    if cfg.FAST_RCNN.LOSS_TYPE in ['cross_entropy', 'triplet_softmax']:
        if cfg.FAST_RCNN.LOSS_TYPE == 'triplet_softmax':
            cls_score = cls_score * 3  # This method is borrowed from ji zhang's large scale relationship detection
        if not cfg.MODEL.TAGGING:
            loss_cls = F.cross_entropy(cls_score, rois_label)
        else:
            loss_cls = F.cross_entropy(cls_score, rois_label, ignore_index=0)
        if cfg.FAST_RCNN.LOSS_TYPE == 'triplet_softmax':
            cls_score = cls_score / 3
    else:
        if cfg.FAST_RCNN.LOSS_TYPE == 'multi_margin':
            loss_cls = F.multi_margin_loss(cls_score, rois_label,
                                           margin=cfg.FAST_RCNN.MARGIN, reduction='none')
        elif cfg.FAST_RCNN.LOSS_TYPE == 'max_margin':
            cls_score_with_high_target = cls_score.clone()
            cls_score_with_high_target.scatter_(1, rois_label.view(-1, 1), 1e10)
            # This makes sure the following variable always has the target in the first column
            target_and_offender_index = cls_score_with_high_target.sort(1, True)[1][:, :2]  # Target and the largest score except target
            loss_cls = F.multi_margin_loss(cls_score.gather(1, target_and_offender_index),
                                           rois_label.data * 0,
                                           margin=cfg.FAST_RCNN.MARGIN, reduction='none')
        loss_cls = loss_cls[rois_label > 0]
        loss_cls = loss_cls.mean() if loss_cls.numel() > 0 else loss_cls.new_tensor(0)

    # Secretly log the mean similarity!
    if cfg.FAST_RCNN.LOSS_TYPE in ['triplet_softmax', 'max_margin', 'multi_margin']:
        loss_cls.mean_similarity = cls_score[rois_label > 0].gather(
            1, rois_label[rois_label > 0].unsqueeze(1)).mean().detach() / 3

    bbox_targets = Variable(torch.from_numpy(bbox_targets)).cuda(device_id)
    bbox_inside_weights = Variable(torch.from_numpy(bbox_inside_weights)).cuda(device_id)
    bbox_outside_weights = Variable(torch.from_numpy(bbox_outside_weights)).cuda(device_id)
    loss_bbox = net_utils.smooth_l1_loss(
        bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights)
    if cfg.MODEL.TAGGING:
        loss_bbox = torch.zeros_like(loss_bbox)

    # class accuracy
    cls_preds = cls_score.max(dim=1)[1].type_as(rois_label)
    if not cfg.MODEL.TAGGING:
        accuracy_cls = cls_preds.eq(rois_label).float().mean(dim=0)
    else:
        # Ignore index 0
        accuracy_cls = cls_preds[rois_label > 0].eq(rois_label[rois_label > 0]).float().mean(dim=0)

    return loss_cls, loss_bbox, accuracy_cls

# ---------------------------------------------------------------------------- #
# Box heads
# ---------------------------------------------------------------------------- #
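The 'max_margin' branch above is the most involved use of multi_margin_loss in these examples: it reduces the C-class problem to two columns, the target score and the hardest non-target score, and asks for a margin between just those two. A standalone sketch of that trick follows; the scores, labels, and margin value are made up for illustration.

import torch
import torch.nn.functional as F

cls_score = torch.tensor([[0.2, 0.9, 0.5],
                          [0.7, 0.1, 0.6]])      # (N, C) classification scores
rois_label = torch.tensor([2, 0])                # ground-truth class per row
margin = 0.3

# Overwrite the target column with a huge value so it always sorts first.
boosted = cls_score.clone()
boosted.scatter_(1, rois_label.view(-1, 1), 1e10)

# Column 0 is the target, column 1 the highest-scoring non-target ("offender").
target_and_offender = boosted.sort(1, descending=True)[1][:, :2]
pair_scores = cls_score.gather(1, target_and_offender)

# After the gather the target index is always 0, so the loss enforces
# score[target] >= score[hardest negative] + margin, one value per row.
loss = F.multi_margin_loss(pair_scores, rois_label * 0, margin=margin, reduction='none')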