Python torch.nn.functional.nll_loss() Examples
The following are 30 code examples of torch.nn.functional.nll_loss(), drawn from open-source projects. The header above each example names the original project and source file. You may also want to check out all available functions/classes of the module torch.nn.functional.
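Before the project examples, here is a minimal, self-contained sketch (the tensor names and shapes are illustrative only, not taken from any project below) of how F.nll_loss is typically used: it expects log-probabilities, so it is paired with F.log_softmax, and the combination is equivalent to F.cross_entropy applied to raw logits.

import torch
import torch.nn.functional as F

# Illustrative batch of 4 samples over 3 classes.
logits = torch.randn(4, 3)
targets = torch.tensor([0, 2, 1, 2])

# nll_loss expects log-probabilities, so apply log_softmax first.
log_probs = F.log_softmax(logits, dim=1)
loss = F.nll_loss(log_probs, targets)

# Equivalent computation directly from raw logits.
loss_ce = F.cross_entropy(logits, targets)
assert torch.allclose(loss, loss_ce)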
Example #1
Source File: train_eval.py From IGMC with MIT License
def train(model, optimizer, loader, device, regression=False, ARR=0):
    model.train()
    total_loss = 0
    for data in loader:
        optimizer.zero_grad()
        data = data.to(device)
        out = model(data)
        if regression:
            loss = F.mse_loss(out, data.y.view(-1))
        else:
            loss = F.nll_loss(out, data.y.view(-1))
        if ARR != 0:
            for gconv in model.convs:
                w = torch.matmul(
                    gconv.att,
                    gconv.basis.view(gconv.num_bases, -1)
                ).view(gconv.num_relations, gconv.in_channels, gconv.out_channels)
                reg_loss = torch.sum((w[1:, :, :] - w[:-1, :, :])**2)
                loss += ARR * reg_loss
        loss.backward()
        total_loss += loss.item() * num_graphs(data)
        optimizer.step()
        torch.cuda.empty_cache()
    return total_loss / len(loader.dataset)
Example #2
Source File: mlp.py From pytorch_structure2vec with MIT License
def forward(self, x, y=None):
    h1 = self.h1_weights(x)
    h1 = F.relu(h1)
    logits = self.h2_weights(h1)
    logits = F.log_softmax(logits, dim=1)

    if y is not None:
        y = Variable(y)
        loss = F.nll_loss(logits, y)

        pred = logits.data.max(1, keepdim=True)[1]
        acc = to_scalar(pred.eq(y.data.view_as(pred)).sum())
        acc = float(acc) / float(y.size()[0])
        return logits, loss, acc
    else:
        return logits
Example #3
Source File: dgcnn_segmentation.py From pytorch_geometric with MIT License
def train():
    model.train()

    total_loss = correct_nodes = total_nodes = 0
    for i, data in enumerate(train_loader):
        data = data.to(device)
        optimizer.zero_grad()
        out = model(data)
        loss = F.nll_loss(out, data.y)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        correct_nodes += out.argmax(dim=1).eq(data.y).sum().item()
        total_nodes += data.num_nodes

        if (i + 1) % 10 == 0:
            print(f'[{i+1}/{len(train_loader)}] Loss: {total_loss / 10:.4f} '
                  f'Train Acc: {correct_nodes / total_nodes:.4f}')
            total_loss = correct_nodes = total_nodes = 0
Example #4
Source File: signed_gcn.py From pytorch_geometric with MIT License
def nll_loss(self, z, pos_edge_index, neg_edge_index):
    """Computes the discriminator loss based on node embeddings :obj:`z`,
    and positive edges :obj:`pos_edge_index` and negative edges
    :obj:`neg_edge_index`.

    Args:
        z (Tensor): The node embeddings.
        pos_edge_index (LongTensor): The positive edge indices.
        neg_edge_index (LongTensor): The negative edge indices.
    """
    edge_index = torch.cat([pos_edge_index, neg_edge_index], dim=1)
    none_edge_index = negative_sampling(edge_index, z.size(0))

    nll_loss = 0
    nll_loss += F.nll_loss(
        self.discriminate(z, pos_edge_index),
        pos_edge_index.new_full((pos_edge_index.size(1), ), 0))
    nll_loss += F.nll_loss(
        self.discriminate(z, neg_edge_index),
        neg_edge_index.new_full((neg_edge_index.size(1), ), 1))
    nll_loss += F.nll_loss(
        self.discriminate(z, none_edge_index),
        none_edge_index.new_full((none_edge_index.size(1), ), 2))
    return nll_loss / 3.0
Example #5
Source File: MRAN.py From transferlearning with MIT License
def test(model, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            s_output, t_output = model(data, data, target)
            test_loss += F.nll_loss(F.log_softmax(s_output, dim=1), target, reduction='sum').item()  # sum up batch loss
            pred = s_output.data.max(1)[1]  # get the index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    test_loss /= len(test_loader.dataset)
    print(args.test_dir, '\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
    return correct
Example #6
Source File: mnist.py From dockerfiles with Apache License 2.0
def test():
    model.eval()
    test_loss = 0
    correct = 0
    for data, target in test_loader:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data, volatile=True), Variable(target)
        # resize data from (batch_size, 1, 28, 28) to (batch_size, 28*28)
        data = data.view(-1, 28*28)
        output = model(data)
        test_loss += F.nll_loss(output, target, size_average=False).data[0]  # sum up batch loss
        pred = output.data.max(1, keepdim=True)[1]  # get the index of the max log-probability
        correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
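Note that several of the MNIST-style examples on this page, including the one above, were written for pre-1.0 PyTorch and rely on since-deprecated idioms: Variable(..., volatile=True), size_average=False, and .data[0]. A rough modern equivalent of the loss accumulation, assuming PyTorch 1.0 or later, is sketched here:

with torch.no_grad():  # replaces Variable(data, volatile=True)
    output = model(data.view(-1, 28 * 28))
    # reduction='sum' replaces size_average=False; .item() replaces .data[0]
    test_loss += F.nll_loss(output, target, reduction='sum').item()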
Example #7
Source File: DAN.py From transferlearning with MIT License
def test(model):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for tgt_test_data, tgt_test_label in tgt_test_loader:
            if cuda:
                tgt_test_data, tgt_test_label = tgt_test_data.cuda(), tgt_test_label.cuda()
            tgt_test_data, tgt_test_label = Variable(tgt_test_data), Variable(tgt_test_label)
            tgt_pred, mmd_loss = model(tgt_test_data, tgt_test_data)
            test_loss += F.nll_loss(F.log_softmax(tgt_pred, dim=1), tgt_test_label, reduction='sum').item()  # sum up batch loss
            pred = tgt_pred.data.max(1)[1]  # get the index of the max log-probability
            correct += pred.eq(tgt_test_label.data.view_as(pred)).cpu().sum()

    test_loss /= tgt_dataset_len
    print('\n{} set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        tgt_name, test_loss, correct, tgt_dataset_len,
        100. * correct / tgt_dataset_len))
    return correct
Example #8
Source File: finetune.py From transferlearning with MIT License
def test(self):
    self.model.eval()
    test_loss = 0
    correct = 0
    for data, target in self.target_test_loader:
        data, target = data.cuda(), target.cuda()
        data, target = Variable(data, volatile=True), Variable(target)
        s_output, t_output = self.model(data, data)
        test_loss += F.nll_loss(F.log_softmax(s_output, dim=1), target, size_average=False).item()  # sum up batch loss
        pred = s_output.data.max(1)[1]  # get the index of the max log-probability
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    test_loss /= self.len_target_dataset
    print('\n{} set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        target_name, test_loss, correct, self.len_target_dataset,
        100. * correct / self.len_target_dataset))
    return correct
Example #9
Source File: train.py From transferlearning with MIT License
def test(model, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(DEVICE), target.to(DEVICE)
            out = model(data, data, target, DEVICE)
            s_output = out[0]
            test_loss += F.nll_loss(F.log_softmax(s_output, dim=1), target, size_average=False).item()  # sum up batch loss
            pred = s_output.data.max(1)[1]  # get the index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    test_loss /= len(test_loader.dataset)
    print(args.test_dir, '\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
    return correct
Example #10
Source File: mnist.py From Pytorch-Project-Template with MIT License
def validate(self):
    """
    One cycle of model validation
    :return:
    """
    self.model.eval()
    test_loss = 0
    correct = 0

    with torch.no_grad():
        for data, target in self.data_loader.test_loader:
            data, target = data.to(self.device), target.to(self.device)
            output = self.model(data)
            test_loss += F.nll_loss(output, target, size_average=False).item()  # sum up batch loss
            pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(self.data_loader.test_loader.dataset)
    self.logger.info('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(self.data_loader.test_loader.dataset),
        100. * correct / len(self.data_loader.test_loader.dataset)))
Example #11
Source File: mnist.py From Pytorch-Project-Template with MIT License
def train_one_epoch(self):
    """
    One epoch of training
    :return:
    """
    self.model.train()
    for batch_idx, (data, target) in enumerate(self.data_loader.train_loader):
        data, target = data.to(self.device), target.to(self.device)
        self.optimizer.zero_grad()
        output = self.model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        self.optimizer.step()
        if batch_idx % self.config.log_interval == 0:
            self.logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                self.current_epoch,
                batch_idx * len(data),
                len(self.data_loader.train_loader.dataset),
                100. * batch_idx / len(self.data_loader.train_loader),
                loss.item()))
        self.current_iteration += 1
Example #12
Source File: mnist.py From Pytorch-Project-Template with MIT License
def test(args, model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, size_average=False).item()  # sum up batch loss
            pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
Example #13
Source File: batch.py From comet-commonsense with Apache License 2.0
def mle_steps(key, model, input_, targets, attention_mask,
              loss_reduction="mean", i=None):
    word_acts = decode(model, input_.unsqueeze(1), attention_mask, i)

    word_dist = train_utils.modify_output_for_loss_fn(
        "nll", word_acts, dim=-1)

    # Compute losses
    loss = F.nll_loss(
        word_dist.view(-1, word_dist.size(-1)),
        targets, reduction=loss_reduction)

    if loss_reduction != "mean":
        return loss.view(word_dist.size(0), -1), word_dist
    else:
        return loss, word_dist
Example #14
Source File: DSAN.py From transferlearning with MIT License
def test(model):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in target_test_loader:
            if cuda:
                data, target = data.cuda(), target.cuda()
            data, target = Variable(data), Variable(target)
            s_output, t_output = model(data, data, target)
            test_loss += F.nll_loss(F.log_softmax(s_output, dim=1), target).item()  # sum up batch loss
            pred = s_output.data.max(1)[1]  # get the index of the max log-probability
            correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    test_loss /= len_target_dataset
    print('\n{} set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        target_name, test_loss, correct, len_target_dataset,
        100. * correct / len_target_dataset))
    return correct
Example #15
Source File: mnist.py From dockerfiles with Apache License 2.0
def test():
    model.eval()
    test_loss = 0
    correct = 0
    for data, target in test_loader:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data, volatile=True), Variable(target)
        # Resize data from (batch_size, 1, 28, 28) to (batch_size, 28*28)
        data = data.view(-1, 28*28)
        output = model(data)
        test_loss += F.nll_loss(output, target, size_average=False).item()  # Sum up batch loss
        pred = output.data.max(1, keepdim=True)[1]  # Get the index of the max log-probability
        correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
Example #16
Source File: mnist_nn_conv.py From pytorch_geometric with MIT License
def train(epoch):
    model.train()

    if epoch == 16:
        for param_group in optimizer.param_groups:
            param_group['lr'] = 0.001

    if epoch == 26:
        for param_group in optimizer.param_groups:
            param_group['lr'] = 0.0001

    for data in train_loader:
        data = data.to(device)
        optimizer.zero_grad()
        F.nll_loss(model(data), data.y).backward()
        optimizer.step()
Example #17
Source File: mnist.py From dockerfiles with Apache License 2.0
def test():
    model.eval()
    test_loss = 0
    correct = 0
    for data, target in test_loader:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)
        test_loss += F.nll_loss(output, target, size_average=False).data[0]  # sum up batch loss
        pred = output.data.max(1, keepdim=True)[1]  # get the index of the max log-probability
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
Example #18
Source File: mnist.py From dockerfiles with Apache License 2.0
def test():
    model.eval()
    test_loss = 0
    correct = 0
    for data, target in test_loader:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data, volatile=True), Variable(target)
        # Resize data from (batch_size, 1, 28, 28) to (batch_size, 28*28)
        data = data.view(-1, 28*28)
        output = model(data)
        test_loss += F.nll_loss(output, target, size_average=False).data[0]  # Sum up batch loss
        pred = output.data.max(1, keepdim=True)[1]  # Get the index of the max log-probability
        correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
Example #19
Source File: train.py From pytorch_geometric with MIT License
def train_runtime(model, data, epochs, device):
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    model = model.to(device)
    data = data.to(device)
    model.train()
    mask = data.train_mask if 'train_mask' in data else data.train_idx
    y = data.y[mask] if 'train_mask' in data else data.train_y

    if torch.cuda.is_available():
        torch.cuda.synchronize()
    t_start = time.perf_counter()

    for epoch in range(epochs):
        optimizer.zero_grad()
        out = model(data)
        loss = F.nll_loss(out[mask], y)
        loss.backward()
        optimizer.step()

    if torch.cuda.is_available():
        torch.cuda.synchronize()
    t_end = time.perf_counter()

    return t_end - t_start
Example #20
Source File: train_eval.py From IGMC with MIT License
def eval_loss(model, loader, device, regression=False, show_progress=False):
    model.eval()
    loss = 0
    if show_progress:
        print('Testing begins...')
        pbar = tqdm(loader)
    else:
        pbar = loader
    for data in pbar:
        data = data.to(device)
        with torch.no_grad():
            out = model(data)
        if regression:
            loss += F.mse_loss(out, data.y.view(-1), reduction='sum').item()
        else:
            loss += F.nll_loss(out, data.y.view(-1), reduction='sum').item()
        torch.cuda.empty_cache()
    return loss / len(loader.dataset)
Example #21
Source File: mutag_gin.py From pytorch_geometric with MIT License
def train(epoch):
    model.train()

    if epoch == 51:
        for param_group in optimizer.param_groups:
            param_group['lr'] = 0.5 * param_group['lr']

    loss_all = 0
    for data in train_loader:
        data = data.to(device)
        optimizer.zero_grad()
        output = model(data.x, data.edge_index, data.batch)
        loss = F.nll_loss(output, data.y)
        loss.backward()
        loss_all += loss.item() * data.num_graphs
        optimizer.step()
    return loss_all / len(train_dataset)
Example #22
Source File: sgcn.py From SGCN with GNU General Public License v3.0
def calculate_regression_loss(self, z, target):
    """
    Calculating the regression loss for all pairs of nodes.
    :param z: Hidden vertex representations.
    :param target: Target vector.
    :return loss_term: Regression loss.
    :return predictions_soft: Predictions for each vertex pair.
    """
    pos = torch.cat((self.positive_z_i, self.positive_z_j), 1)
    neg = torch.cat((self.negative_z_i, self.negative_z_j), 1)

    surr_neg_i = torch.cat((self.negative_z_i, self.negative_z_k), 1)
    surr_neg_j = torch.cat((self.negative_z_j, self.negative_z_k), 1)
    surr_pos_i = torch.cat((self.positive_z_i, self.positive_z_k), 1)
    surr_pos_j = torch.cat((self.positive_z_j, self.positive_z_k), 1)

    features = torch.cat((pos, neg, surr_neg_i, surr_neg_j, surr_pos_i, surr_pos_j))
    predictions = torch.mm(features, self.regression_weights)
    predictions_soft = F.log_softmax(predictions, dim=1)
    loss_term = F.nll_loss(predictions_soft, target)
    return loss_term, predictions_soft
Example #23
Source File: train_eval.py From pytorch_geometric with MIT License
def evaluate(model, data):
    model.eval()

    with torch.no_grad():
        logits = model(data)

    outs = {}
    for key in ['train', 'val', 'test']:
        mask = data['{}_mask'.format(key)]
        loss = F.nll_loss(logits[mask], data.y[mask]).item()
        pred = logits[mask].max(1)[1]
        acc = pred.eq(data.y[mask]).sum().item() / mask.sum().item()

        outs['{}_loss'.format(key)] = loss
        outs['{}_acc'.format(key)] = acc

    return outs
Example #24
Source File: mnist_graclus.py From pytorch_geometric with MIT License
def train(epoch):
    model.train()

    if epoch == 16:
        for param_group in optimizer.param_groups:
            param_group['lr'] = 0.001

    if epoch == 26:
        for param_group in optimizer.param_groups:
            param_group['lr'] = 0.0001

    for data in train_loader:
        data = data.to(device)
        optimizer.zero_grad()
        F.nll_loss(model(data), data.y).backward()
        optimizer.step()
Example #25
Source File: pointnet2_segmentation.py From pytorch_geometric with MIT License
def train():
    model.train()

    total_loss = correct_nodes = total_nodes = 0
    for i, data in enumerate(train_loader):
        data = data.to(device)
        optimizer.zero_grad()
        out = model(data)
        loss = F.nll_loss(out, data.y)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        correct_nodes += out.argmax(dim=1).eq(data.y).sum().item()
        total_nodes += data.num_nodes

        if (i + 1) % 10 == 0:
            print(f'[{i+1}/{len(train_loader)}] Loss: {total_loss / 10:.4f} '
                  f'Train Acc: {correct_nodes / total_nodes:.4f}')
            total_loss = correct_nodes = total_nodes = 0
Example #26
Source File: dee_model.py From Doc2EDAG with MIT License
def get_event_cls_info(self, sent_context_emb, doc_fea, train_flag=True):
    doc_event_logps = []
    for event_idx, event_label in enumerate(doc_fea.event_type_labels):
        event_table = self.event_tables[event_idx]
        cur_event_logp = event_table(sent_context_emb=sent_context_emb)  # [1, hidden_size]
        doc_event_logps.append(cur_event_logp)
    doc_event_logps = torch.cat(doc_event_logps, dim=0)  # [num_event_types, 2]

    if train_flag:
        device = doc_event_logps.device
        doc_event_labels = torch.tensor(
            doc_fea.event_type_labels, device=device, dtype=torch.long, requires_grad=False
        )  # [num_event_types]
        doc_event_cls_loss = F.nll_loss(doc_event_logps, doc_event_labels, reduction='sum')
        return doc_event_cls_loss
    else:
        doc_event_pred_list = doc_event_logps.argmax(dim=-1).tolist()
        return doc_event_pred_list
Example #27
Source File: criterions.py From ConvLab with MIT License
def forward(self, net_output, labels):
    batch_size = net_output.size(0)
    pred = net_output.view(-1, net_output.size(-1))
    target = labels.view(-1)

    if self.avg_type is None:
        loss = F.nll_loss(pred, target, size_average=False, ignore_index=self.padding_idx)
    elif self.avg_type == 'seq':
        loss = F.nll_loss(pred, target, size_average=False, ignore_index=self.padding_idx)
        loss = loss / batch_size
    elif self.avg_type == 'real_word':
        loss = F.nll_loss(pred, target, ignore_index=self.padding_idx, reduce=False)
        loss = loss.view(-1, net_output.size(1))
        loss = th.sum(loss, dim=1)
        word_cnt = th.sum(th.sign(labels), dim=1).float()
        loss = loss / word_cnt
        loss = th.mean(loss)
    elif self.avg_type == 'word':
        loss = F.nll_loss(pred, target, size_average=True, ignore_index=self.padding_idx)
    else:
        raise ValueError('Unknown average type')

    return loss
Example #28
Source File: models.py From cvpr2018-hnd with MIT License
def forward(self, input, target):  # input = Variable(logits), target = labels
    loss = Variable(torch.zeros(1).cuda()) if self.gpu else Variable(torch.zeros(1))

    # novel loss
    if self.loo > 0.:
        target_novel = self.labels_relevant[target]
        for i, rel in enumerate(self.relevant):
            if target_novel[:, i].any():
                relevant_loc = target_novel[:, i].nonzero().view(-1)
                loss += -F.log_softmax(input[relevant_loc][:, rel], dim=1)[:, 0].mean() * self.class_weight[i]
        loss *= self.loo

    # known loss
    log_probs = F.log_softmax(input, dim=1)
    loss += F.nll_loss(log_probs, Variable(target))

    # regularization
    if self.label_smooth > 0.:
        loss -= (log_probs.mean() + self.kld_u_const) * self.label_smooth

    return loss
Example #29
Source File: arma.py From pytorch_geometric with MIT License
def train():
    model.train()
    optimizer.zero_grad()
    F.nll_loss(model()[data.train_mask], data.y[data.train_mask]).backward()
    optimizer.step()
Example #30
Source File: proteins_diff_pool.py From pytorch_geometric with MIT License
def train(epoch):
    model.train()
    loss_all = 0

    for data in train_loader:
        data = data.to(device)
        optimizer.zero_grad()
        output, _, _ = model(data.x, data.adj, data.mask)
        loss = F.nll_loss(output, data.y.view(-1))
        loss.backward()
        loss_all += data.y.size(0) * loss.item()
        optimizer.step()
    return loss_all / len(train_dataset)