Python utils.AverageMeter() Examples
The following are 30 code examples of utils.AverageMeter(), collected from open-source projects. The project, source file, and license for each example are noted above it. You may also want to check out the other available functions and classes of the utils module.
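AverageMeter itself is a small bookkeeping class: it accumulates a running sum and count so that its .avg attribute is always the weighted mean of everything passed to update(). The exact definition varies slightly between projects (for instance, the RPN_KWS example below also reads a .cur attribute for the most recent value), but a minimal sketch along the lines of the widely copied PyTorch ImageNet version looks like this:

class AverageMeter(object):
    """Tracks the most recent value and a running (weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0    # most recent value passed to update()
        self.sum = 0    # weighted sum of all values seen so far
        self.count = 0  # total weight (e.g. number of samples)
        self.avg = 0    # running average: sum / count

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

Typical usage, as in the examples below: create a meter per metric, call meter.update(loss.item(), batch_size) inside the loop, and read meter.avg at the end for the per-sample average over the epoch.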
Example #1
Source File: test.py From DPC with MIT License
def validate(data_loader, model):
    losses = AverageMeter()
    accuracy = AverageMeter()
    model.eval()
    with torch.no_grad():
        for idx, (input_seq, target) in tqdm(enumerate(data_loader), total=len(data_loader)):
            input_seq = input_seq.to(cuda)
            target = target.to(cuda)
            B = input_seq.size(0)
            output, _ = model(input_seq)

            [_, N, D] = output.size()
            output = output.view(B*N, D)
            target = target.repeat(1, N).view(-1)

            loss = criterion(output, target)
            acc = calc_accuracy(output, target)

            losses.update(loss.item(), B)
            accuracy.update(acc.item(), B)

    print('Loss {loss.avg:.4f}\t'
          'Acc: {acc.avg:.4f} \t'.format(loss=losses, acc=accuracy))
    return losses.avg, accuracy.avg
Example #2
Source File: main.py From transferlearning with MIT License
def test(model, target_test_loader):
    model.eval()
    test_loss = utils.AverageMeter()
    correct = 0
    criterion = torch.nn.CrossEntropyLoss()
    len_target_dataset = len(target_test_loader.dataset)
    with torch.no_grad():
        for data, target in target_test_loader:
            data, target = data.to(DEVICE), target.to(DEVICE)
            s_output = model.predict(data)
            loss = criterion(s_output, target)
            test_loss.update(loss.item())
            pred = torch.max(s_output, 1)[1]
            correct += torch.sum(pred == target)

    print('{} --> {}: max correct: {}, accuracy: {:.2f}%\n'.format(
        source_name, target_name, correct, 100. * correct / len_target_dataset))
Example #3
Source File: main_DTN.py From DTN with MIT License
def base_val(base_train_loader, model_E, criterion, epoch):
    losses = AverageMeter()
    top1 = AverageMeter()
    model_E.eval()
    with torch.no_grad():
        for batch_idx, (input, target) in enumerate(base_train_loader):
            input = input.cuda()
            target = target.cuda(non_blocking=True)

            # compute output
            _, output = model_E(input)
            loss = criterion(output, target)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))

            model_E.weight_norm()
            if (batch_idx + 1) % 250 == 0:
                print('base_test:', batch_idx + 1, 'loss:', losses.avg, 'acc:', top1.avg)
    return (losses.avg, top1.avg)
Example #4
Source File: train_reinforce_model.py From latent-treelstm with MIT License
def validate(valid_data, model, epoch, device, logger, summary_writer):
    loading_time_meter = AverageMeter()
    batch_time_meter = AverageMeter()
    ce_loss_meter = AverageMeter()
    accuracy_meter = AverageMeter()
    entropy_meter = AverageMeter()
    n_entropy_meter = AverageMeter()

    model.eval()
    start = time.time()
    with torch.no_grad():
        for labels, tokens, mask in valid_data:
            labels = labels.to(device=device, non_blocking=True)
            tokens = tokens.to(device=device, non_blocking=True)
            mask = mask.to(device=device, non_blocking=True)
            loading_time_meter.update(time.time() - start)

            pred_labels, ce_loss, rewards, actions, actions_log_prob, entropy, normalized_entropy = \
                model(tokens, mask, labels)
            entropy = entropy.mean()
            normalized_entropy = normalized_entropy.mean()

            accuracy = (labels == pred_labels).to(dtype=torch.float32).mean()
            n = mask.shape[0]
            accuracy_meter.update(accuracy.item(), n)
            ce_loss_meter.update(ce_loss.item(), n)
            entropy_meter.update(entropy.item(), n)
            n_entropy_meter.update(normalized_entropy.item(), n)
            batch_time_meter.update(time.time() - start)
            start = time.time()

    logger.info(f"Valid: epoch: {epoch} ce_loss: {ce_loss_meter.avg:.4f} accuracy: {accuracy_meter.avg:.4f} "
                f"entropy: {entropy_meter.avg:.4f} n_entropy: {n_entropy_meter.avg:.4f} "
                f"loading_time: {loading_time_meter.avg:.4f} batch_time: {batch_time_meter.avg:.4f}")
    summary_writer["valid"].add_scalar(tag="ce", scalar_value=ce_loss_meter.avg, global_step=global_step)
    summary_writer["valid"].add_scalar(tag="accuracy", scalar_value=accuracy_meter.avg, global_step=global_step)
    summary_writer["valid"].add_scalar(tag="n_entropy", scalar_value=n_entropy_meter.avg, global_step=global_step)
    model.train()
    return accuracy_meter.avg
Example #5
Source File: test.py From DPC with MIT License
def test(data_loader, model):
    losses = AverageMeter()
    acc_top1 = AverageMeter()
    acc_top5 = AverageMeter()
    confusion_mat = ConfusionMeter(args.num_class)
    model.eval()
    with torch.no_grad():
        for idx, (input_seq, target) in tqdm(enumerate(data_loader), total=len(data_loader)):
            input_seq = input_seq.to(cuda)
            target = target.to(cuda)
            B = input_seq.size(0)
            input_seq = input_seq.squeeze(0)  # squeeze the '1' batch dim
            output, _ = model(input_seq)
            del input_seq

            top1, top5 = calc_topk_accuracy(
                torch.mean(torch.mean(nn.functional.softmax(output, 2), 0), 0, keepdim=True),
                target, (1, 5))
            acc_top1.update(top1.item(), B)
            acc_top5.update(top5.item(), B)
            del top1, top5

            output = torch.mean(torch.mean(output, 0), 0, keepdim=True)
            loss = criterion(output, target.squeeze(-1))
            losses.update(loss.item(), B)
            del loss

            _, pred = torch.max(output, 1)
            confusion_mat.update(pred, target.view(-1).byte())

    print('Loss {loss.avg:.4f}\t'
          'Acc top1: {top1.avg:.4f} Acc top5: {top5.avg:.4f} \t'.format(
              loss=losses, top1=acc_top1, top5=acc_top5))
    confusion_mat.plot_mat(args.test + '.svg')
    write_log(content='Loss {loss.avg:.4f}\t Acc top1: {top1.avg:.4f} Acc top5: {top5.avg:.4f} \t'.format(
                  loss=losses, top1=acc_top1, top5=acc_top5, args=args),
              epoch=num_epoch,
              filename=os.path.join(os.path.dirname(args.test), 'test_log.md'))
    import ipdb; ipdb.set_trace()
    return losses.avg, [acc_top1.avg, acc_top5.avg]
Example #6
Source File: train.py From MnemonicReader with BSD 3-Clause "New" or "Revised" License
def validate_unofficial(args, data_loader, model, global_stats, mode):
    """Run one full unofficial validation.

    Unofficial = doesn't use SQuAD script.
    """
    eval_time = utils.Timer()
    start_acc = utils.AverageMeter()
    end_acc = utils.AverageMeter()
    exact_match = utils.AverageMeter()

    # Make predictions
    examples = 0
    for ex in data_loader:
        batch_size = ex[0].size(0)
        pred_s, pred_e, _ = model.predict(ex)
        target_s, target_e = ex[-3:-1]

        # We get metrics for independent start/end and joint start/end
        accuracies = eval_accuracies(pred_s, target_s, pred_e, target_e)
        start_acc.update(accuracies[0], batch_size)
        end_acc.update(accuracies[1], batch_size)
        exact_match.update(accuracies[2], batch_size)

        # If getting train accuracies, sample max 10k
        examples += batch_size
        if mode == 'train' and examples >= 1e4:
            break

    logger.info('%s valid unofficial: Epoch = %d | start = %.2f | ' %
                (mode, global_stats['epoch'], start_acc.avg) +
                'end = %.2f | exact = %.2f | examples = %d | ' %
                (end_acc.avg, exact_match.avg, examples) +
                'valid time = %.2f (s)' % eval_time.time())

    return {'exact_match': exact_match.avg}
Example #7
Source File: train.py From MnemonicReader with BSD 3-Clause "New" or "Revised" License
def train(args, data_loader, model, global_stats):
    """Run through one epoch of model training with the provided data loader."""
    # Initialize meters + timers
    train_loss = utils.AverageMeter()
    epoch_time = utils.Timer()

    # Run one epoch
    for idx, ex in enumerate(data_loader):
        train_loss.update(*model.update(ex))

        if idx % args.display_iter == 0:
            logger.info('train: Epoch = %d | iter = %d/%d | ' %
                        (global_stats['epoch'], idx, len(data_loader)) +
                        'loss = %.2f | elapsed time = %.2f (s)' %
                        (train_loss.avg, global_stats['timer'].time()))
            train_loss.reset()

    logger.info('train: Epoch %d done. Time for epoch = %.2f (s)' %
                (global_stats['epoch'], epoch_time.time()))

    # Checkpoint
    if args.checkpoint:
        model.checkpoint(args.model_file + '.checkpoint',
                         global_stats['epoch'] + 1)

# ------------------------------------------------------------------------------
# Validation loops. Includes both "unofficial" and "official" functions that
# use different metrics and implementations.
# ------------------------------------------------------------------------------
Example #8
Source File: main_DTN.py From DTN with MIT License
def base_train(base_train_loader, model_E, criterion, optimizer, epoch):
    losses = AverageMeter()
    top1 = AverageMeter()
    model_E.train()
    # for param in model_E.parameters():
    #     param.requires_grad = True
    for batch_idx, (input, target) in enumerate(base_train_loader):
        # print(target)
        input = input.cuda()
        target = target.cuda(non_blocking=True)

        # compute output
        _, output = model_E(input)
        loss = criterion(output, target)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        model_E.weight_norm()
        if (batch_idx + 1) % 250 == 0:
            print('base_train:', batch_idx + 1, 'loss:', losses.avg, 'acc:', top1.avg)
    return (losses.avg, top1.avg)
Example #9
Source File: search.py From pt.darts with MIT License
def validate(valid_loader, model, epoch, cur_step):
    top1 = utils.AverageMeter()
    top5 = utils.AverageMeter()
    losses = utils.AverageMeter()

    model.eval()
    with torch.no_grad():
        for step, (X, y) in enumerate(valid_loader):
            X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True)
            N = X.size(0)

            logits = model(X)
            loss = model.criterion(logits, y)

            prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5))
            losses.update(loss.item(), N)
            top1.update(prec1.item(), N)
            top5.update(prec5.item(), N)

            if step % config.print_freq == 0 or step == len(valid_loader) - 1:
                logger.info(
                    "Valid: [{:2d}/{}] Step {:03d}/{:03d} Loss {losses.avg:.3f} "
                    "Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format(
                        epoch + 1, config.epochs, step, len(valid_loader) - 1,
                        losses=losses, top1=top1, top5=top5))

    writer.add_scalar('val/loss', losses.avg, cur_step)
    writer.add_scalar('val/top1', top1.avg, cur_step)
    writer.add_scalar('val/top5', top5.avg, cur_step)

    logger.info("Valid: [{:2d}/{}] Final Prec@1 {:.4%}".format(epoch + 1, config.epochs, top1.avg))

    return top1.avg
Example #10
Source File: main.py From transferlearning with MIT License
def train(source_loader, target_train_loader, target_test_loader, model, optimizer, CFG):
    len_source_loader = len(source_loader)
    len_target_loader = len(target_train_loader)
    for e in range(CFG['epoch']):
        train_loss_clf = utils.AverageMeter()
        train_loss_transfer = utils.AverageMeter()
        train_loss_total = utils.AverageMeter()
        model.train()
        iter_source, iter_target = iter(source_loader), iter(target_train_loader)
        n_batch = min(len_source_loader, len_target_loader)
        criterion = torch.nn.CrossEntropyLoss()
        for i in range(n_batch):
            data_source, label_source = next(iter_source)
            data_target, _ = next(iter_target)
            data_source, label_source = data_source.to(DEVICE), label_source.to(DEVICE)
            data_target = data_target.to(DEVICE)

            optimizer.zero_grad()
            label_source_pred, transfer_loss = model(data_source, data_target)
            clf_loss = criterion(label_source_pred, label_source)
            loss = clf_loss + CFG['lambda'] * transfer_loss
            loss.backward()
            optimizer.step()
            train_loss_clf.update(clf_loss.item())
            train_loss_transfer.update(transfer_loss.item())
            train_loss_total.update(loss.item())
            if i % CFG['log_interval'] == 0:
                print('Train Epoch: [{}/{} ({:02d}%)], cls_Loss: {:.6f}, transfer_loss: {:.6f}, total_Loss: {:.6f}'.format(
                    e + 1, CFG['epoch'], int(100. * i / n_batch),
                    train_loss_clf.avg, train_loss_transfer.avg, train_loss_total.avg))

        log.append([train_loss_clf.avg, train_loss_transfer.avg, train_loss_total.avg])
        np_log = np.array(log, dtype=float)
        np.savetxt('train_log.csv', np_log, delimiter=',', fmt='%.6f')

        # Test
        test(model, target_test_loader)
Example #11
Source File: augment.py From pt.darts with MIT License
def validate(valid_loader, model, criterion, epoch, cur_step):
    top1 = utils.AverageMeter()
    top5 = utils.AverageMeter()
    losses = utils.AverageMeter()

    model.eval()
    with torch.no_grad():
        for step, (X, y) in enumerate(valid_loader):
            X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True)
            N = X.size(0)

            logits, _ = model(X)
            loss = criterion(logits, y)

            prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5))
            losses.update(loss.item(), N)
            top1.update(prec1.item(), N)
            top5.update(prec5.item(), N)

            if step % config.print_freq == 0 or step == len(valid_loader) - 1:
                logger.info(
                    "Valid: [{:3d}/{}] Step {:03d}/{:03d} Loss {losses.avg:.3f} "
                    "Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format(
                        epoch + 1, config.epochs, step, len(valid_loader) - 1,
                        losses=losses, top1=top1, top5=top5))

    writer.add_scalar('val/loss', losses.avg, cur_step)
    writer.add_scalar('val/top1', top1.avg, cur_step)
    writer.add_scalar('val/top5', top5.avg, cur_step)

    logger.info("Valid: [{:3d}/{}] Final Prec@1 {:.4%}".format(epoch + 1, config.epochs, top1.avg))

    return top1.avg
Example #12
Source File: main.py From Gabor_CNN_PyTorch with MIT License
def test(epoch):
    model.eval()
    test_loss = AverageMeter()
    acc = AverageMeter()
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss.update(F.cross_entropy(output, target, reduction='mean').item(), target.size(0))
            prec1, = accuracy(output, target)  # test precision in one batch
            acc.update(prec1.item(), target.size(0))
    print('\nTest set: Average loss: {:.4f}, Accuracy: {:.2f}%\n'.format(test_loss.avg, acc.avg))
    writer.add_scalar('Loss/Test', test_loss.avg, epoch)
    writer.add_scalar('Accuracy/Test', acc.avg, epoch)
    return acc.avg
Example #13
Source File: train.py From MnemonicReader with BSD 3-Clause "New" or "Revised" License
def validate_official(args, data_loader, model, global_stats,
                      offsets, texts, answers):
    """Run one full official validation. Uses exact spans and same
    exact match/F1 score computation as in the SQuAD script.

    Extra arguments:
        offsets: The character start/end indices for the tokens in each context.
        texts: Map of qid --> raw text of examples context (matches offsets).
        answers: Map of qid --> list of accepted answers.
    """
    eval_time = utils.Timer()
    f1 = utils.AverageMeter()
    exact_match = utils.AverageMeter()

    # Run through examples
    examples = 0
    for ex in data_loader:
        ex_id, batch_size = ex[-1], ex[0].size(0)
        pred_s, pred_e, _ = model.predict(ex)

        for i in range(batch_size):
            s_offset = offsets[ex_id[i]][pred_s[i][0]][0]
            e_offset = offsets[ex_id[i]][pred_e[i][0]][1]
            prediction = texts[ex_id[i]][s_offset:e_offset]

            # Compute metrics
            ground_truths = answers[ex_id[i]]
            exact_match.update(utils.metric_max_over_ground_truths(
                utils.exact_match_score, prediction, ground_truths))
            f1.update(utils.metric_max_over_ground_truths(
                utils.f1_score, prediction, ground_truths))

        examples += batch_size

    logger.info('dev valid official: Epoch = %d | EM = %.2f | ' %
                (global_stats['epoch'], exact_match.avg * 100) +
                'F1 = %.2f | examples = %d | valid time = %.2f (s)' %
                (f1.avg * 100, examples, eval_time.time()))

    return {'exact_match': exact_match.avg * 100, 'f1': f1.avg * 100}
Example #14
Source File: train_ppo_model.py From latent-treelstm with MIT License
def test(test_data, model, device, logger):
    loading_time_meter = AverageMeter()
    batch_time_meter = AverageMeter()
    ce_loss_meter = AverageMeter()
    accuracy_meter = AverageMeter()
    entropy_meter = AverageMeter()
    n_entropy_meter = AverageMeter()

    model.eval()
    start = time.time()
    with torch.no_grad():
        for labels, tokens, mask in test_data:
            labels = labels.to(device=device, non_blocking=True)
            tokens = tokens.to(device=device, non_blocking=True)
            mask = mask.to(device=device, non_blocking=True)
            loading_time_meter.update(time.time() - start)

            pred_labels, ce_loss, rewards, actions, actions_log_prob, entropy, normalized_entropy = \
                model(tokens, mask, labels)
            entropy = entropy.mean()
            normalized_entropy = normalized_entropy.mean()

            accuracy = (labels == pred_labels).to(dtype=torch.float32).mean()
            n = mask.shape[0]
            accuracy_meter.update(accuracy.item(), n)
            ce_loss_meter.update(ce_loss.item(), n)
            entropy_meter.update(entropy.item(), n)
            n_entropy_meter.update(normalized_entropy.item(), n)
            batch_time_meter.update(time.time() - start)
            start = time.time()

    logger.info(f"Test: ce_loss: {ce_loss_meter.avg:.4f} accuracy: {accuracy_meter.avg:.4f} "
                f"entropy: {entropy_meter.avg:.4f} n_entropy: {n_entropy_meter.avg:.4f} "
                f"loading_time: {loading_time_meter.avg:.4f} batch_time: {batch_time_meter.avg:.4f}")
    logger.info("done")
    return accuracy_meter.avg
Example #15
Source File: train_ppo_model.py From latent-treelstm with MIT License
def validate(valid_data, model, epoch, device, logger, summary_writer):
    loading_time_meter = AverageMeter()
    batch_time_meter = AverageMeter()
    ce_loss_meter = AverageMeter()
    accuracy_meter = AverageMeter()
    entropy_meter = AverageMeter()
    n_entropy_meter = AverageMeter()

    model.eval()
    start = time.time()
    with torch.no_grad():
        for labels, tokens, mask in valid_data:
            labels = labels.to(device=device, non_blocking=True)
            tokens = tokens.to(device=device, non_blocking=True)
            mask = mask.to(device=device, non_blocking=True)
            loading_time_meter.update(time.time() - start)

            pred_labels, ce_loss, rewards, actions, actions_log_prob, entropy, normalized_entropy = \
                model(tokens, mask, labels)
            entropy = entropy.mean()
            normalized_entropy = normalized_entropy.mean()

            accuracy = (labels == pred_labels).to(dtype=torch.float32).mean()
            n = mask.shape[0]
            accuracy_meter.update(accuracy.item(), n)
            ce_loss_meter.update(ce_loss.item(), n)
            entropy_meter.update(entropy.item(), n)
            n_entropy_meter.update(normalized_entropy.item(), n)
            batch_time_meter.update(time.time() - start)
            start = time.time()

    logger.info(f"Valid: epoch: {epoch} ce_loss: {ce_loss_meter.avg:.4f} accuracy: {accuracy_meter.avg:.4f} "
                f"entropy: {entropy_meter.avg:.4f} n_entropy: {n_entropy_meter.avg:.4f} "
                f"loading_time: {loading_time_meter.avg:.4f} batch_time: {batch_time_meter.avg:.4f}")
    summary_writer["valid"].add_scalar(tag="ce", scalar_value=ce_loss_meter.avg, global_step=global_step)
    summary_writer["valid"].add_scalar(tag="accuracy", scalar_value=accuracy_meter.avg, global_step=global_step)
    summary_writer["valid"].add_scalar(tag="n_entropy", scalar_value=n_entropy_meter.avg, global_step=global_step)
    model.train()
    return accuracy_meter.avg
Example #16
Source File: train_tree_lstm_model.py From latent-treelstm with MIT License
def test(test_data, model, device, logger):
    loading_time_meter = AverageMeter()
    batch_time_meter = AverageMeter()
    ce_loss_meter = AverageMeter()
    accuracy_meter = AverageMeter()

    model.eval()
    start = time.time()
    with torch.no_grad():
        for labels, tokens, trees, mask in test_data:
            labels = labels.to(device=device, non_blocking=True)
            tokens = tokens.to(device=device, non_blocking=True)
            trees = [e.to(device=device, non_blocking=True) for e in trees]
            mask = mask.to(device=device, non_blocking=True)
            loading_time_meter.update(time.time() - start)

            ce_loss, pred_labels = model(tokens, trees, mask, labels)

            accuracy = (labels == pred_labels).to(dtype=torch.float32).mean()
            n = mask.shape[0]
            accuracy_meter.update(accuracy.item(), n)
            ce_loss_meter.update(ce_loss.item(), n)
            model.reset_memory_managers()
            batch_time_meter.update(time.time() - start)
            start = time.time()

    logger.info(f"Test: ce_loss: {ce_loss_meter.avg:.4f} accuracy: {accuracy_meter.avg:.4f} "
                f"loading_time: {loading_time_meter.avg:.4f} batch_time: {batch_time_meter.avg:.4f}")
    logger.info("done")
    return accuracy_meter.avg
Example #17
Source File: train_reinforce_model.py From latent-treelstm with MIT License
def test(test_data, model, device, logger):
    loading_time_meter = AverageMeter()
    batch_time_meter = AverageMeter()
    ce_loss_meter = AverageMeter()
    accuracy_meter = AverageMeter()
    entropy_meter = AverageMeter()
    n_entropy_meter = AverageMeter()

    model.eval()
    start = time.time()
    with torch.no_grad():
        for labels, tokens, mask in test_data:
            labels = labels.to(device=device, non_blocking=True)
            tokens = tokens.to(device=device, non_blocking=True)
            mask = mask.to(device=device, non_blocking=True)
            loading_time_meter.update(time.time() - start)

            pred_labels, ce_loss, rewards, actions, actions_log_prob, entropy, normalized_entropy = \
                model(tokens, mask, labels)
            entropy = entropy.mean()
            normalized_entropy = normalized_entropy.mean()

            accuracy = (labels == pred_labels).to(dtype=torch.float32).mean()
            n = mask.shape[0]
            accuracy_meter.update(accuracy.item(), n)
            ce_loss_meter.update(ce_loss.item(), n)
            entropy_meter.update(entropy.item(), n)
            n_entropy_meter.update(normalized_entropy.item(), n)
            batch_time_meter.update(time.time() - start)
            start = time.time()

    logger.info(f"Test: ce_loss: {ce_loss_meter.avg:.4f} accuracy: {accuracy_meter.avg:.4f} "
                f"entropy: {entropy_meter.avg:.4f} n_entropy: {n_entropy_meter.avg:.4f} "
                f"loading_time: {loading_time_meter.avg:.4f} batch_time: {batch_time_meter.avg:.4f}")
    logger.info("done")
    return accuracy_meter.avg
Example #18
Source File: train_rpn_kws.py From RPN_KWS with MIT License
def train(args, model, device, train_loader, optimizer, epoch):
    """Train one epoch."""
    tr_rpn_loss_bbox = AverageMeter()
    tr_rpn_loss_cls = AverageMeter()
    tr_loss = AverageMeter()
    tr_rpn_acc = AverageMeter()
    model.train()
    total_step = len(train_loader)
    balance_weight = args.lambda_factor
    for batch_idx, (utt_id, act_lens, data, target) in enumerate(train_loader):
        act_lens, data, target = act_lens.to(device), data.to(device), target.to(device)
        target = target.reshape(target.size(0), 1, target.size(1)).float()

        # Forward pass
        batch_size = data.shape[0]
        outputs = model(epoch, data, act_lens, target, 100)
        rois, rpn_cls_score, rpn_label, rpn_loss_cls, rpn_loss_bbox = outputs
        rpn_acc = acc_frame(rpn_cls_score, rpn_label)

        # Backward and optimize
        loss = rpn_loss_cls + balance_weight * rpn_loss_bbox
        optimizer.zero_grad()
        loss.backward()
        # name, param = list(model.named_parameters())[1]
        # print('Epoch:[{}/{}], param name:{},\n param:'.format(epoch+1, args.max_epochs, name, param))
        optimizer.step()

        tr_rpn_acc.update(rpn_acc, 1)
        tr_loss.update(loss, 1)
        tr_rpn_loss_cls.update(rpn_loss_cls, 1)
        tr_rpn_loss_bbox.update(rpn_loss_bbox, 1)

        if batch_idx % args.log_interval == 0:
            print('Epoch: [{}/{}], Step [{}/{}], Train Loss: {:.4f}, Train RPN Acc: {:.4f}%'
                  .format(epoch + 1, args.max_epochs, batch_idx + 1, total_step, tr_loss.cur, tr_rpn_acc.cur))
            print('Epoch: [{}/{}], Step [{}/{}], Train RPN cls Loss: {:.4f}, Train RPN bbox Loss: {:.4f} '
                  .format(epoch + 1, args.max_epochs, batch_idx + 1, total_step, tr_rpn_loss_cls.cur, tr_rpn_loss_bbox.cur))

    print('Epoch: [{}/{}], Average Train Loss: {:.4f}, Average Train RPN cls Loss: {:.4f}, Average Train RPN bbox Loss: {:.4f}, Average Train RPN Acc: {:.4f}%'
          .format(epoch + 1, args.max_epochs, tr_loss.avg, tr_rpn_loss_cls.avg, tr_rpn_loss_bbox.avg, tr_rpn_acc.avg))
    return float("{:.4f}".format(tr_loss.avg))
Example #19
Source File: train.py From pytorch-semantic-segmentation with MIT License
def train(train_loader, net, criterion, optimizer, epoch, train_args):
    train_loss = AverageMeter()
    curr_iter = (epoch - 1) * len(train_loader)
    for i, data in enumerate(train_loader):
        inputs, labels = data
        assert inputs.size()[2:] == labels.size()[1:]
        N = inputs.size(0)
        inputs = Variable(inputs).cuda()
        labels = Variable(labels).cuda()

        optimizer.zero_grad()
        outputs = net(inputs)
        assert outputs.size()[2:] == labels.size()[1:]
        assert outputs.size()[1] == voc.num_classes

        loss = criterion(outputs, labels) / N
        loss.backward()
        optimizer.step()

        train_loss.update(loss.data[0], N)

        curr_iter += 1
        writer.add_scalar('train_loss', train_loss.avg, curr_iter)

        if (i + 1) % train_args['print_freq'] == 0:
            print('[epoch %d], [iter %d / %d], [train loss %.5f]' % (
                epoch, i + 1, len(train_loader), train_loss.avg))
Example #20
Source File: train_ppo_model.py From latent-treelstm with MIT License
def test(test_data, model, device, logger):
    loading_time_meter = AverageMeter()
    batch_time_meter = AverageMeter()
    ce_loss_meter = AverageMeter()
    accuracy_meter = AverageMeter()
    entropy_meter = AverageMeter()
    n_entropy_meter = AverageMeter()

    model.eval()
    start = time.time()
    with torch.no_grad():
        for batch in test_data:
            tokens, length = batch.text
            labels = batch.label
            mask = length_to_mask(length)
            loading_time_meter.update(time.time() - start)

            pred_labels, ce_loss, rewards, actions, actions_log_prob, entropy, normalized_entropy = \
                model(tokens, mask, labels)
            entropy = entropy.mean()
            normalized_entropy = normalized_entropy.mean()

            accuracy = (labels == pred_labels).to(dtype=torch.float32).mean()
            n = mask.shape[0]
            accuracy_meter.update(accuracy.item(), n)
            ce_loss_meter.update(ce_loss.item(), n)
            entropy_meter.update(entropy.item(), n)
            n_entropy_meter.update(normalized_entropy.item(), n)
            batch_time_meter.update(time.time() - start)
            start = time.time()

    logger.info(f"Test: ce_loss: {ce_loss_meter.avg:.4f} accuracy: {accuracy_meter.avg:.4f} "
                f"entropy: {entropy_meter.avg:.4f} n_entropy: {n_entropy_meter.avg:.4f} "
                f"loading_time: {loading_time_meter.avg:.4f} batch_time: {batch_time_meter.avg:.4f}")
    logger.info("done")
    return accuracy_meter.avg
Example #21
Source File: main.py From iccv2019-inc with MIT License
def test0(test_loader, model, criterion, task_info, t, epoch):
    task, class_map = task_info
    task = torch.tensor(task).to(device)
    class_map = torch.tensor(class_map).to(device)

    model.eval()
    loss_ = utils.AverageMeter()
    acc_ = utils.AverageMeter()
    pbar = tqdm(test_loader, desc='te {:d}'.format(epoch), ascii=True, ncols=80)
    for data, target, _ in pbar:
        data, target = data.to(device), target.to(device)
        with torch.no_grad():
            output, _ = model(data)
            loss = criterion(output[:, task], class_map[target]).mean()
            acc = get_performance(output[:, task], class_map[target])
        loss_.update(loss.item(), data.size(0))
        acc_.update(acc.item(), data.size(0))
        pbar.set_postfix(acc='{:5.2f}'.format(acc_.avg * 100.), loss='{:.4f}'.format(loss_.avg))

    res = {'loss': {'val': loss_.avg}, 'acc': {'mean': acc_.avg * 100.}}
    for s in range(1):
        res['acc'].update({'{}'.format(s): acc_.avg * 100.})
    return res
Example #22
Source File: main.py From iccv2019-inc with MIT License
def test(test_loader, model, criterion, task_info, t, epoch):
    tasks, class_maps, _, _, seen, seen_map = task_info
    tasks = [torch.tensor(task).to(device) for task in tasks]
    class_maps = [torch.tensor(class_map).to(device) for class_map in class_maps]
    seen_map = torch.tensor(seen_map).to(device)

    model.eval()
    loss_ = utils.AverageMeter()
    acc_ = utils.AverageMeter()
    acc_t_ = [utils.AverageMeter() for _ in range(t + 1)]
    pbar = tqdm(test_loader, desc='te {:d}'.format(epoch), ascii=True, ncols=80)
    for data, target, _ in pbar:
        data, target = data.to(device), target.to(device)
        with torch.no_grad():
            output, feature = model(data)
            loss = criterion(output[:, seen], seen_map[target]).mean()
            acc = get_performance(output[:, seen], seen_map[target])
        loss_.update(loss.item(), data.size(0))
        acc_.update(acc.item(), data.size(0))
        pbar.set_postfix(acc='{:5.2f}'.format(acc_.avg * 100.), loss='{:.4f}'.format(loss_.avg))

        for s, task in enumerate(tasks[:t + 1]):
            loc = torch.zeros_like(target, dtype=torch.uint8)
            for k in task:
                loc |= (target == k)
            num_data_s = loc.to(torch.long).sum()
            if num_data_s > 0:
                acc = get_performance(output[loc][:, seen], seen_map[target[loc]])
                acc_t_[s].update(acc.item(), loc.to(torch.long).sum().item())

    if epoch + 1 == args.epochs:
        for s in range(t + 1):
            print('{:d}:{:5.2f}, '.format(s, acc_t_[s].avg * 100.), end='')
        print()

    res = {'loss': {'val': loss_.avg}, 'acc': {'mean': acc_.avg * 100.}}
    for s in range(t + 1):
        res['acc'].update({'{}'.format(s): acc_t_[s].avg * 100.})
    return res
Example #23
Source File: train.py From pytorch-semantic-segmentation with MIT License
def train(train_loader, net, criterion, optimizer, epoch, train_args):
    train_loss = AverageMeter()
    curr_iter = (epoch - 1) * len(train_loader)
    for i, data in enumerate(train_loader):
        inputs, labels = data
        assert inputs.size()[2:] == labels.size()[1:]
        N = inputs.size(0)
        inputs = Variable(inputs).cuda()
        labels = Variable(labels).cuda()

        optimizer.zero_grad()
        outputs = net(inputs)
        assert outputs.size()[2:] == labels.size()[1:]
        assert outputs.size()[1] == cityscapes.num_classes

        loss = criterion(outputs, labels) / N
        loss.backward()
        optimizer.step()

        train_loss.update(loss.data[0], N)

        curr_iter += 1
        writer.add_scalar('train_loss', train_loss.avg, curr_iter)

        if (i + 1) % train_args['print_freq'] == 0:
            print('[epoch %d], [iter %d / %d], [train loss %.5f]' % (
                epoch, i + 1, len(train_loader), train_loss.avg))
Example #24
Source File: train.py From pytorch-semantic-segmentation with MIT License
def train(train_loader, net, criterion, optimizer, epoch, train_args):
    train_loss = AverageMeter()
    curr_iter = (epoch - 1) * len(train_loader)
    for i, data in enumerate(train_loader):
        inputs, labels = data
        assert inputs.size()[2:] == labels.size()[1:]
        N = inputs.size(0)
        inputs = Variable(inputs).cuda()
        labels = Variable(labels).cuda()

        optimizer.zero_grad()
        outputs = net(inputs)
        assert outputs.size()[2:] == labels.size()[1:]
        assert outputs.size()[1] == cityscapes.num_classes

        loss = criterion(outputs, labels) / N
        loss.backward()
        optimizer.step()

        train_loss.update(loss.data[0], N)

        curr_iter += 1
        writer.add_scalar('train_loss', train_loss.avg, curr_iter)

        if (i + 1) % train_args['print_freq'] == 0:
            print('[epoch %d], [iter %d / %d], [train loss %.5f]' % (
                epoch, i + 1, len(train_loader), train_loss.avg))
Example #25
Source File: train.py From pytorch-semantic-segmentation with MIT License
def train(train_loader, net, criterion, optimizer, epoch, train_args):
    train_loss = AverageMeter()
    curr_iter = (epoch - 1) * len(train_loader)
    for i, data in enumerate(train_loader):
        inputs, labels = data
        assert inputs.size()[2:] == labels.size()[1:]
        N = inputs.size(0)
        inputs = Variable(inputs).cuda()
        labels = Variable(labels).cuda()

        optimizer.zero_grad()
        outputs = net(inputs)
        assert outputs.size()[2:] == labels.size()[1:]
        assert outputs.size()[1] == voc.num_classes

        loss = criterion(outputs, labels) / N
        loss.backward()
        optimizer.step()

        train_loss.update(loss.data[0], N)

        curr_iter += 1
        writer.add_scalar('train_loss', train_loss.avg, curr_iter)

        if (i + 1) % train_args['print_freq'] == 0:
            print('[epoch %d], [iter %d / %d], [train loss %.5f]' % (
                epoch, i + 1, len(train_loader), train_loss.avg))
Example #26
Source File: train.py From sodeep with BSD 3-Clause Clear License
def validate(val_loader, model, criterion, print_freq=1):
    model.eval()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    end = time.time()
    for i, (s, r) in enumerate(val_loader):
        seq_in, rank_in = s.float().to(device, non_blocking=True), r.float().to(device, non_blocking=True)
        data_time.update(time.time() - end)

        with torch.set_grad_enabled(False):
            rank_hat = model(seq_in)
            loss = criterion(rank_hat, rank_in)

        losses.update(loss.item(), seq_in.size(0))

        batch_time.update(time.time() - end)
        end = time.time()

        if i % print_freq == 0:
            print('Val: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      i + 1, len(val_loader), batch_time=batch_time,
                      data_time=data_time, loss=losses), end="\r")

    print('Val: [{0}/{1}]\t'
          'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
          'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
          'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
              i + 1, len(val_loader), batch_time=batch_time,
              data_time=data_time, loss=losses), end="\n")

    return losses.avg, batch_time.avg, data_time.avg
Example #27
Source File: train.py From sodeep with BSD 3-Clause Clear License
def train(train_loader, model, criterion, optimizer, epoch, print_freq=1):
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    end = time.time()
    for i, (s, r) in enumerate(train_loader):
        seq_in, rank_in = s.float().to(device, non_blocking=True), r.float().to(device, non_blocking=True)
        data_time.update(time.time() - end)

        optimizer.zero_grad()
        rank_hat = model(seq_in)
        loss = criterion(rank_hat, rank_in)
        loss.backward()
        optimizer.step()

        losses.update(loss.item(), seq_in.size(0))

        batch_time.update(time.time() - end)
        end = time.time()

        if i % print_freq == 0:
            print('Train: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch, i + 1, len(train_loader), batch_time=batch_time,
                      data_time=data_time, loss=losses), end="\r")

    print('Train: [{0}][{1}/{2}]\t'
          'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
          'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
          'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
              epoch, i + 1, len(train_loader), batch_time=batch_time,
              data_time=data_time, loss=losses), end="\n")

    return losses.avg, batch_time.avg, data_time.avg
Example #28
Source File: eval.py From Pytorch-STN with MIT License
def evaluate(net, dataloader, loss_fn, params, metrics):
    net.eval()
    summaries = []
    loss_avg = utils.AverageMeter()
    with tqdm(total=len(dataloader)) as t:
        for i, (data, label) in enumerate(dataloader):
            if params.cuda:
                data, label = data.cuda(), label.cuda()
            data, label = Variable(data), Variable(label)
            # print(data.size())
            # print(label.size())

            # run the input through the net
            out = net(data)
            # print(out.size())
            loss = loss_fn(out, label)
            loss_avg.update(loss.data[0].item())

            out_batch = out.data.cpu().numpy()
            label_batch = label.data.cpu().numpy()

            summary_batch = {metric: metrics[metric](out_batch, label_batch) for metric in metrics}
            summary_batch['loss'] = loss.data[0].cpu().item()
            summaries.append(summary_batch)

    mean_metrics = {metric: np.mean([m[metric] for m in summaries]) for metric in summaries[0]}
    metrics_string = " ; ".join("{}: {:05.3f}".format(k, v) for k, v in mean_metrics.items())
    logging.info("Val Metrics: " + metrics_string)
    return mean_metrics
Example #29
Source File: train.py From Where-are-they-looking-PyTorch with MIT License
def __init__(self, model, criterion, optimizer, opt, writer):
    self.model = model
    self.criterion = criterion
    self.optimizer = optimizer

    self.batch_time = AverageMeter()
    self.data_time = AverageMeter()
    self.losses = AverageMeter()

    self.writer = writer
Example #30
Source File: train.py From Where-are-they-looking-PyTorch with MIT License
def __init__(self, model, criterion, opt, writer):
    self.model = model
    self.criterion = criterion

    self.batch_time = AverageMeter()
    self.data_time = AverageMeter()
    self.dist = AverageMeter()
    self.mindist = AverageMeter()

    self.writer = writer