Python utils.accuracy() Examples
The following are 30 code examples of utils.accuracy(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module utils, or try the search function.
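utils.accuracy is project-local code rather than a library API, so its exact definition varies from repository to repository. Most of the examples below follow the DARTS/NAO-style convention: the function takes logits, targets, and a tuple of k values, and returns the top-k precision in percent for each k. A minimal sketch of that convention (the body below is an assumption for illustration, not taken verbatim from any of the projects listed):

import torch

def accuracy(output, target, topk=(1,)):
    """Top-k precision, in percent, for each k in `topk`."""
    maxk = max(topk)
    batch_size = target.size(0)
    # indices of the maxk highest logits per sample, transposed to (maxk, batch)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()
    # mark positions where a top-k prediction matches the label
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res

With topk=(1, 5) this returns a [prec1, prec5] pair of zero-dimensional tensors, which is why the examples below call .item() or read .data on the results. A few projects deviate from this convention (e.g. Example #6 passes a single int for topk and gets back a single value).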
Example #1
Source File: train_cifar.py From NAO_pytorch with GNU General Public License v3.0 | 6 votes |
def valid(valid_queue, model, criterion):
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    with torch.no_grad():
        model.eval()
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda()
            logits, _ = model(input)
            loss = criterion(logits, target)
            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            objs.update(loss.data, n)
            top1.update(prec1.data, n)
            top5.update(prec5.data, n)
            if (step + 1) % 100 == 0:
                logging.info('valid %03d %e %f %f', step + 1, objs.avg, top1.avg, top5.avg)
    return top1.avg, objs.avg
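Nearly all of the NAO_pytorch and DetNAS examples pair utils.accuracy() with an AvgrageMeter (the misspelling is the source's own) that keeps a running average weighted by batch size. A sketch of such a meter, assuming the usual sum/count form:

class AvgrageMeter(object):
    """Tracks a running average of a metric, weighted by sample count n."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.avg = 0.0
        self.sum = 0.0
        self.cnt = 0

    def update(self, val, n=1):
        self.sum += val * n
        self.cnt += n
        self.avg = self.sum / self.cnt

Calling top1.update(prec1, n) after each batch therefore leaves top1.avg as the accuracy over all samples seen so far, regardless of uneven batch sizes.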
Example #2
Source File: train_imagenet.py From NAO_pytorch with GNU General Public License v3.0 | 6 votes |
def valid(valid_queue, model, criterion):
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    with torch.no_grad():
        model.eval()
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda()
            logits, _ = model(input)
            loss = criterion(logits, target)
            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            objs.update(loss.data, n)
            top1.update(prec1.data, n)
            top5.update(prec5.data, n)
            if (step + 1) % 100 == 0:
                logging.info('valid %03d %e %f %f', step + 1, objs.avg, top1.avg, top5.avg)
    return top1.avg, top5.avg, objs.avg
Example #3
Source File: train_cifar.py From NAO_pytorch with GNU General Public License v3.0 | 6 votes |
def valid(valid_queue, model, criterion):
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    with torch.no_grad():
        model.eval()
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda()
            logits, _ = model(input)
            loss = criterion(logits, target)
            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            objs.update(loss.data, n)
            top1.update(prec1.data, n)
            top5.update(prec5.data, n)
            if (step + 1) % 100 == 0:
                logging.info('valid %03d %e %f %f', step + 1, objs.avg, top1.avg, top5.avg)
    return top1.avg, objs.avg
Example #4
Source File: train_search.py From NAO_pytorch with GNU General Public License v3.0 | 6 votes |
def child_valid(valid_queue, model, arch_pool, criterion):
    valid_acc_list = []
    with torch.no_grad():
        model.eval()
        for i, arch in enumerate(arch_pool):
            # for step, (input, target) in enumerate(valid_queue):
            inputs, targets = next(iter(valid_queue))
            inputs = inputs.cuda()
            targets = targets.cuda()
            logits, _ = model(inputs, arch, bn_train=True)
            loss = criterion(logits, targets)
            prec1, prec5 = utils.accuracy(logits, targets, topk=(1, 5))
            valid_acc_list.append(prec1.data / 100)
            if (i + 1) % 100 == 0:
                logging.info('Valid arch %s\n loss %.2f top1 %f top5 %f',
                             ' '.join(map(str, arch[0] + arch[1])), loss, prec1, prec5)
    return valid_acc_list
Example #5
Source File: train_search.py From NAO_pytorch with GNU General Public License v3.0 | 6 votes |
def child_valid(valid_queue, model, arch_pool, criterion):
    valid_acc_list = []
    with torch.no_grad():
        model.eval()
        for i, arch in enumerate(arch_pool):
            # for step, (inputs, targets) in enumerate(valid_queue):
            inputs, targets = next(iter(valid_queue))
            inputs = inputs.cuda()
            targets = targets.cuda()
            logits, _ = model(inputs, arch, bn_train=True)
            loss = criterion(logits, targets)
            prec1, prec5 = utils.accuracy(logits, targets, topk=(1, 5))
            valid_acc_list.append(prec1.data / 100)
            if (i + 1) % 100 == 0:
                logging.info('Valid arch %s\n loss %.2f top1 %f top5 %f',
                             ' '.join(map(str, arch[0] + arch[1])), loss, prec1, prec5)
    return valid_acc_list
Example #6
Source File: epoch.py From smooth-topk with MIT License | 6 votes |
def epoch_test_multiple_crops(model, loader, xp, cuda):
    # NOTE: in this project, accuracy() takes a single int `topk` and returns one
    # value, unlike the (1, 5)-tuple convention used by most other examples here.
    metrics = xp.get_metric(tag=loader.tag, name='parent')
    xp.Temperature.update()
    for batch_idx, (data, target) in tqdm(enumerate(loader), desc='Test Epoch',
                                          leave=False, total=len(loader)):
        avg = 0
        for img in data:
            img, target = data_to_var(img, target, cuda)
            output = model(img)
            if xp.temperature == -1:
                # cross-entropy
                avg += nn.functional.softmax(output).data
            else:
                # smooth-svm
                avg += output.data
                # avg += torch.exp(output.data / xp.temperature)
        prec1 = accuracy(avg, target.data, topk=1)
        preck = accuracy(avg, target.data, topk=xp.config['topk'])
        metrics.update(acck=float(preck), acc1=float(prec1), n=target.size(0))
Example #7
Source File: test_cifar.py From NAO_pytorch with GNU General Public License v3.0 | 6 votes |
def valid(valid_queue, model, criterion):
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    with torch.no_grad():
        model.eval()
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda()
            logits, _ = model(input)
            loss = criterion(logits, target)
            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            objs.update(loss.data, n)
            top1.update(prec1.data, n)
            top5.update(prec5.data, n)
            if (step + 1) % 100 == 0:
                logging.info('valid %03d %e %f %f', step + 1, objs.avg, top1.avg, top5.avg)
    return top1.avg, objs.avg
Example #8
Source File: train_search_procedure.py From eval-nas with MIT License | 6 votes |
def nao_model_validation(valid_queue, model, arch_pool, criterion):
    valid_acc_list = []
    with torch.no_grad():
        model.eval()
        for i, arch in enumerate(arch_pool):
            # for step, (inputs, targets) in enumerate(valid_queue):
            inputs, targets = next(iter(valid_queue))
            inputs = inputs.cuda()
            targets = targets.cuda()
            logits, _ = model(inputs, arch, bn_train=True)
            loss = criterion(logits, targets)
            prec1, prec5 = nao_utils.accuracy(logits, targets, topk=(1, 5))
            valid_acc_list.append(prec1.data / 100)
            if (i + 1) % 100 == 0:
                logging.info('Valid arch %s\n loss %.2f top1 %f top5 %f',
                             ' '.join(map(str, arch[0] + arch[1])), loss, prec1, prec5)
    return valid_acc_list
Example #9
Source File: train_imagenet.py From eval-nas with MIT License | 6 votes |
def infer(valid_queue, model, criterion):
    # NOTE: pre-0.4 PyTorch idioms: Variable/volatile are deprecated, loss.data[0]
    # is now loss.item(), and `async=True` must be `non_blocking=True` on
    # Python 3.7+, where `async` is a reserved keyword.
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()
    for step, (input, target) in enumerate(valid_queue):
        input = Variable(input, volatile=True).cuda()
        target = Variable(target, volatile=True).cuda(async=True)
        logits, _ = model(input)
        loss = criterion(logits, target)
        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        objs.update(loss.data[0], n)
        top1.update(prec1.data[0], n)
        top5.update(prec5.data[0], n)
        if step % args.report_freq == 0:
            logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
    return top1.avg, top5.avg, objs.avg
Example #10
Source File: test.py From eval-nas with MIT License | 6 votes |
def infer(test_queue, model, criterion):
    # NOTE: pre-0.4 PyTorch idioms; see the note in Example #9.
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()
    for step, (input, target) in enumerate(test_queue):
        input = Variable(input, volatile=True).cuda()
        target = Variable(target, volatile=True).cuda(async=True)
        logits, _ = model(input)
        loss = criterion(logits, target)
        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        objs.update(loss.data[0], n)
        top1.update(prec1.data[0], n)
        top5.update(prec5.data[0], n)
        if step % args.report_freq == 0:
            logging.info('test %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
    return top1.avg, objs.avg
Example #11
Source File: test_imagenet.py From eval-nas with MIT License | 6 votes |
def infer(valid_queue, model, criterion):
    # NOTE: pre-0.4 PyTorch idioms; see the note in Example #9.
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()
    for step, (input, target) in enumerate(valid_queue):
        input = Variable(input, volatile=True).cuda()
        target = Variable(target, volatile=True).cuda(async=True)
        logits, _ = model(input)
        loss = criterion(logits, target)
        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        objs.update(loss.data[0], n)
        top1.update(prec1.data[0], n)
        top5.update(prec5.data[0], n)
        if step % args.report_freq == 0:
            logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
    return top1.avg, top5.avg, objs.avg
Example #12
Source File: main_DTN.py From DTN with MIT License | 6 votes |
def base_val(base_train_loader, model_E, criterion, epoch):
    losses = AverageMeter()
    top1 = AverageMeter()
    model_E.eval()
    with torch.no_grad():
        for batch_idx, (input, target) in enumerate(base_train_loader):
            input = input.cuda()
            target = target.cuda(non_blocking=True)
            # compute output
            _, output = model_E(input)
            loss = criterion(output, target)
            # measure accuracy and record loss
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))
            model_E.weight_norm()
            if (batch_idx + 1) % 250 == 0:
                print('base_test:', batch_idx + 1, 'loss:', losses.avg, 'acc:', top1.avg)
    return (losses.avg, top1.avg)
Example #13
Source File: main.py From Gabor_CNN_PyTorch with MIT License | 6 votes |
def train(epoch):
    model.train()
    global iteration
    st = time.time()
    for batch_idx, (data, target) in enumerate(train_loader):
        iteration += 1
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        # trailing comma unpacks the single top-1 entry from accuracy()'s result
        prec1, = accuracy(output, target)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.print_freq == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}, Accuracy: {:.2f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item(), prec1.item()))
            writer.add_scalar('Loss/Train', loss.item(), iteration)
            writer.add_scalar('Accuracy/Train', prec1, iteration)
    epoch_time = time.time() - st
    print('Epoch time:{:0.2f}s'.format(epoch_time))
    scheduler.step()
Example #14
Source File: train_search_procedure.py From eval-nas with MIT License | 6 votes |
def _summarize_shared_train(curr_step, total_loss, raw_total_loss, acc=0, acc_5=0,
                            lr=0.0, epoch_steps=1, writer=None):
    """Logs a set of training steps."""
    cur_loss = utils.to_item(total_loss) / epoch_steps
    cur_raw_loss = utils.to_item(raw_total_loss) / epoch_steps
    logging.info(f'| step {curr_step:3d} '
                 f'| lr {lr:4.2f} '
                 f'| raw loss {cur_raw_loss:.2f} '
                 f'| loss {cur_loss:.2f} '
                 f'| acc {acc:8.2f}'
                 f'| acc-5 {acc_5: 8.2f}')
    # Tensorboard
    if writer is not None:
        writer.scalar_summary('shared/loss', cur_loss, epoch_steps)
        writer.scalar_summary('shared/accuracy', acc, epoch_steps)
Example #15
Source File: main_DTN.py From DTN with MIT License | 5 votes |
def base_train(base_train_loader, model_E, criterion, optimizer, epoch):
    losses = AverageMeter()
    top1 = AverageMeter()
    model_E.train()
    # for param in model_E.parameters():
    #     param.requires_grad = True
    for batch_idx, (input, target) in enumerate(base_train_loader):
        # print(target)
        input = input.cuda()
        target = target.cuda(non_blocking=True)
        # compute output
        _, output = model_E(input)
        loss = criterion(output, target)
        # measure accuracy and record loss
        prec1, prec5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        model_E.weight_norm()
        if (batch_idx + 1) % 250 == 0:
            print('base_train:', batch_idx + 1, 'loss:', losses.avg, 'acc:', top1.avg)
    return (losses.avg, top1.avg)
Example #16
Source File: test_cifar.py From NAO_pytorch with GNU General Public License v3.0 | 5 votes |
def train(train_queue, model, optimizer, global_step, criterion):
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.train()
    for step, (input, target) in enumerate(train_queue):
        input = input.cuda().requires_grad_()
        target = target.cuda()
        optimizer.zero_grad()
        logits, aux_logits = model(input, global_step)
        global_step += 1
        loss = criterion(logits, target)
        if aux_logits is not None:
            aux_loss = criterion(aux_logits, target)
            loss += 0.4 * aux_loss
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_bound)
        optimizer.step()
        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        objs.update(loss.data, n)
        top1.update(prec1.data, n)
        top5.update(prec5.data, n)
        if (step + 1) % 100 == 0:
            logging.info('train %03d loss %e top1 %f top5 %f', step + 1, objs.avg, top1.avg, top5.avg)
    return top1.avg, objs.avg, global_step
Example #17
Source File: epoch.py From dfw with MIT License | 5 votes |
def test(model, loader, args, xp):
    model.eval()
    if loader.tag == 'val':
        xp_group = xp.val
    else:
        xp_group = xp.test
    for metric in xp_group.metrics():
        metric.reset()
    for x, y in tqdm(loader, disable=not args.tqdm,
                     desc='{} Epoch'.format(loader.tag.title()),
                     leave=False, total=len(loader)):
        (x, y) = (x.cuda(), y.cuda()) if args.cuda else (x, y)
        scores = model(x)
        xp_group.acc.update(accuracy(scores, y), weighting=x.size(0))
    xp_group.timer.update()
    print('Epoch: [{0}] ({tag})\t'
          '({timer:.2f}s) \t'
          'Obj ----\t'
          'Loss ----\t'
          'Acc {acc:.2f}% \t'
          .format(int(xp.epoch.value),
                  tag=loader.tag.title(),
                  timer=xp_group.timer.value,
                  acc=xp_group.acc.value))
    if loader.tag == 'val':
        xp.max_val.update(xp.val.acc.value).log(time=xp.epoch.value)
    for metric in xp_group.metrics():
        metric.log(time=xp.epoch.value)
Example #18
Source File: train_imagenet.py From eval-nas with MIT License | 5 votes |
def train(train_queue, model, criterion, optimizer):
    # NOTE: pre-0.4 PyTorch idioms; see the note in Example #9.
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.train()
    for step, (input, target) in enumerate(train_queue):
        target = target.cuda(async=True)
        input = input.cuda()
        input = Variable(input)
        target = Variable(target)
        optimizer.zero_grad()
        logits, logits_aux = model(input)
        loss = criterion(logits, target)
        if args.auxiliary:
            loss_aux = criterion(logits_aux, target)
            loss += args.auxiliary_weight * loss_aux
        loss.backward()
        nn.utils.clip_grad_norm(model.parameters(), args.grad_clip)
        optimizer.step()
        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        objs.update(loss.data[0], n)
        top1.update(prec1.data[0], n)
        top5.update(prec5.data[0], n)
        if step % args.report_freq == 0:
            logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
    return top1.avg, objs.avg
Example #19
Source File: train.py From eval-nas with MIT License | 5 votes |
def train(train_queue, model, criterion, optimizer):
    # NOTE: pre-0.4 PyTorch idioms; see the note in Example #9.
    objs = utils.AverageMeter()
    top1 = utils.AverageMeter()
    top5 = utils.AverageMeter()
    model.train()
    for step, (input, target) in enumerate(train_queue):
        input = Variable(input).cuda()
        target = Variable(target).cuda(async=True)
        optimizer.zero_grad()
        logits, logits_aux = model(input)
        loss = criterion(logits, target)
        if args.auxiliary:
            loss_aux = criterion(logits_aux, target)
            loss += args.auxiliary_weight * loss_aux
        loss.backward()
        nn.utils.clip_grad_norm(model.parameters(), args.grad_clip)
        optimizer.step()
        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        objs.update(loss.data[0], n)
        top1.update(prec1.data[0], n)
        top5.update(prec5.data[0], n)
        if step % args.report_freq == 0:
            logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
    return top1.avg, objs.avg
Example #20
Source File: test_cifar.py From NAO_pytorch with GNU General Public License v3.0 | 5 votes |
def train(train_queue, model, optimizer, global_step, criterion):
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.train()
    for step, (input, target) in enumerate(train_queue):
        input = input.cuda().requires_grad_()
        target = target.cuda()
        optimizer.zero_grad()
        logits, aux_logits = model(input, global_step)
        global_step += 1
        loss = criterion(logits, target)
        if aux_logits is not None:
            aux_loss = criterion(aux_logits, target)
            loss += 0.4 * aux_loss
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_bound)
        optimizer.step()
        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        objs.update(loss.data, n)
        top1.update(prec1.data, n)
        top5.update(prec5.data, n)
        if (step + 1) % 100 == 0:
            logging.info('train %03d loss %e top1 %f top5 %f', step + 1, objs.avg, top1.avg, top5.avg)
    return top1.avg, objs.avg, global_step
Example #21
Source File: nasbench_weight_sharing_policy.py From eval-nas with MIT License | 5 votes |
def evaluate(self, epoch, data_source, fitnesses_dict=None, train_queue=None, model_spec_id_pool=None):
    """
    Full evaluation of all possible models.

    :param epoch:
    :param data_source:
    :param fitnesses_dict: Store the model_spec_id -> accuracy
    :return:
    """
    # Make sure this id pool is not None.
    model_spec_id_pool = model_spec_id_pool or self.evaluate_model_spec_id_pool()
    rank_gens, eval_result = procedure_ops.evaluate_procedure.evaluate_normal(
        self, self.parallel_model, fitnesses_dict, model_spec_id_pool,
        data_source, change_model_spec, self._loss
    )
    self.ranking_per_epoch[epoch] = rank_gens
    self.eval_result[epoch] = eval_result
    self.logger.info('VALIDATION RANKING OF PARTICLES')
    for pos, elem in enumerate(rank_gens):
        self.logger.info(f'particle gen id: {elem[1].geno_id}, acc: {elem[1].valid_acc}, '
                         f'obj {elem[1].valid_obj}, hash: {elem[0]}, pos {pos}')
    # save the eval arch pool.
    archs = [elem[1].geno_id for elem in rank_gens]
    perfs = [elem[1].valid_acc for elem in rank_gens]
    self.save_arch_pool_performance(archs, perfs, prefix='eval')
    self.save_duplicate_arch_pool(prefix='eval', epoch=epoch)
    self.search_space.eval_model_spec_id_rank(archs, perfs)
    if self.writer:
        # process data into list.
        accs_after, objs_after = zip(*eval_result.values())
        tensorboard_summarize_list(accs_after, writer=self.writer, key='neweval_after/acc',
                                   step=epoch, ascending=False)
        tensorboard_summarize_list(objs_after, writer=self.writer, key='neweval_after/obj', step=epoch)
    return fitnesses_dict
Example #22
Source File: nasbench_weight_sharing_policy.py From eval-nas with MIT License | 5 votes |
def child_valid(self, model, valid_queue, arch_pool, criterion):
    valid_acc_list = []
    objs = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    logging.info("num valid arch {}".format(len(arch_pool)))
    with torch.no_grad():
        model.eval()
        for i, arch in enumerate(arch_pool):
            # for step, (inputs, targets) in enumerate(valid_queue):
            inputs, targets = valid_queue.next_batch()
            inputs = inputs.cuda()
            targets = targets.cuda()
            n = inputs.size(0)
            arch_l = arch
            model = change_model_spec(model, self.search_space.topologies[arch])
            logits, _ = model(inputs)
            loss = criterion(logits, targets)
            prec1, prec5 = utils.accuracy(logits, targets, topk=(1, 5))
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)
            valid_acc_list.append(prec1.data / 100)
            if (i + 1) % 100 == 0:
                logging.info('Valid arch %s\n loss %.2f top1 %f top5 %f',
                             self.search_space.process_archname_by_id(arch_l), loss, prec1, prec5)
    self.save_arch_pool_performance(arch_pool, valid_acc_list, prefix='valid')
    return valid_acc_list, objs.avg, top1.avg
Example #23
Source File: train_search.py From NAO_pytorch with GNU General Public License v3.0 | 5 votes |
def child_train(train_queue, model, optimizer, global_step, arch_pool, arch_pool_prob, criterion):
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.train()
    for step, (input, target) in enumerate(train_queue):
        input = input.cuda().requires_grad_()
        target = target.cuda()
        optimizer.zero_grad()
        # sample an arch to train
        arch = utils.sample_arch(arch_pool, arch_pool_prob)
        logits, aux_logits = model(input, arch, global_step)
        global_step += 1
        loss = criterion(logits, target)
        if aux_logits is not None:
            aux_loss = criterion(aux_logits, target)
            loss += 0.4 * aux_loss
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), args.child_grad_bound)
        optimizer.step()
        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        objs.update(loss.data, n)
        top1.update(prec1.data, n)
        top5.update(prec5.data, n)
        if (step + 1) % 100 == 0:
            logging.info('Train %03d loss %e top1 %f top5 %f', step + 1, objs.avg, top1.avg, top5.avg)
            logging.info('Arch: %s', ' '.join(map(str, arch[0] + arch[1])))
    return top1.avg, objs.avg, global_step
Example #24
Source File: train_imagenet.py From NAO_pytorch with GNU General Public License v3.0 | 5 votes |
def train(train_queue, model, optimizer, global_step, criterion):
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.train()
    for step, (input, target) in enumerate(train_queue):
        input = input.cuda().requires_grad_()
        target = target.cuda()
        optimizer.zero_grad()
        logits, aux_logits = model(input, global_step)
        global_step += 1
        loss = criterion(logits, target)
        if aux_logits is not None:
            aux_loss = criterion(aux_logits, target)
            loss += 0.4 * aux_loss
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        optimizer.step()
        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        objs.update(loss.data, n)
        top1.update(prec1.data, n)
        top5.update(prec5.data, n)
        if (step + 1) % 100 == 0:
            logging.info('train %03d loss %e top1 %f top5 %f', step + 1, objs.avg, top1.avg, top5.avg)
    return top1.avg, objs.avg, global_step
Example #25
Source File: train_search.py From eval-nas with MIT License | 5 votes |
def child_train(train_queue, model, optimizer, global_step, arch_pool, arch_pool_prob, criterion):
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.train()
    for step, (input, target) in enumerate(train_queue):
        input = input.cuda().requires_grad_()
        target = target.cuda()
        optimizer.zero_grad()
        # sample an arch to train
        arch = utils.sample_arch(arch_pool, arch_pool_prob)
        logits, aux_logits = model(input, arch, global_step)
        global_step += 1
        loss = criterion(logits, target)
        if aux_logits is not None:
            aux_loss = criterion(aux_logits, target)
            loss += 0.4 * aux_loss
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), args.child_grad_bound)
        optimizer.step()
        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        objs.update(loss.data, n)
        top1.update(prec1.data, n)
        top5.update(prec5.data, n)
        if (step + 1) % 100 == 0:
            logging.info('Train %03d loss %e top1 %f top5 %f', step + 1, objs.avg, top1.avg, top5.avg)
            logging.info('Arch: %s', ' '.join(map(str, arch[0] + arch[1])))
    return top1.avg, objs.avg, global_step
Example #26
Source File: train_cifar.py From NAO_pytorch with GNU General Public License v3.0 | 5 votes |
def train(train_queue, model, optimizer, global_step, criterion):
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.train()
    for step, (input, target) in enumerate(train_queue):
        input = input.cuda().requires_grad_()
        target = target.cuda()
        optimizer.zero_grad()
        logits, aux_logits = model(input, global_step)
        global_step += 1
        loss = criterion(logits, target)
        if aux_logits is not None:
            aux_loss = criterion(aux_logits, target)
            loss += 0.4 * aux_loss
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_bound)
        optimizer.step()
        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        objs.update(loss.data, n)
        top1.update(prec1.data, n)
        top5.update(prec5.data, n)
        if (step + 1) % 100 == 0:
            logging.info('train %03d loss %e top1 %f top5 %f', step + 1, objs.avg, top1.avg, top5.avg)
    return top1.avg, objs.avg, global_step
Example #27
Source File: train.py From DetNAS with MIT License | 5 votes |
def validate(model, device, args, *, all_iters=None):
    objs = AvgrageMeter()
    top1 = AvgrageMeter()
    top5 = AvgrageMeter()
    loss_function = args.loss_function
    val_dataprovider = args.val_dataprovider
    model.eval()
    max_val_iters = 250
    t1 = time.time()
    with torch.no_grad():
        for _ in range(1, max_val_iters + 1):
            data, target = val_dataprovider.next()
            target = target.type(torch.LongTensor)
            data, target = data.to(device), target.to(device)
            output = model(data)
            loss = loss_function(output, target)
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            n = data.size(0)
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)
    logInfo = 'TEST Iter {}: loss = {:.6f},\t'.format(all_iters, objs.avg) + \
              'Top-1 err = {:.6f},\t'.format(1 - top1.avg / 100) + \
              'Top-5 err = {:.6f},\t'.format(1 - top5.avg / 100) + \
              'val_time = {:.6f}'.format(time.time() - t1)
    logging.info(logInfo)
Example #28
Source File: main.py From Gabor_CNN_PyTorch with MIT License | 5 votes |
def test(epoch):
    model.eval()
    test_loss = AverageMeter()
    acc = AverageMeter()
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss.update(F.cross_entropy(output, target, reduction='mean').item(), target.size(0))
            # test precision in one batch; the trailing comma unpacks the
            # single top-1 entry from the sequence accuracy() returns
            prec1, = accuracy(output, target)
            acc.update(prec1.item(), target.size(0))
    print('\nTest set: Average loss: {:.4f}, Accuracy: {:.2f}%\n'.format(test_loss.avg, acc.avg))
    writer.add_scalar('Loss/Test', test_loss.avg, epoch)
    writer.add_scalar('Accuracy/Test', acc.avg, epoch)
    return acc.avg
Example #29
Source File: augment.py From pt.darts with MIT License | 5 votes |
def validate(valid_loader, model, criterion, epoch, cur_step):
    top1 = utils.AverageMeter()
    top5 = utils.AverageMeter()
    losses = utils.AverageMeter()
    model.eval()
    with torch.no_grad():
        for step, (X, y) in enumerate(valid_loader):
            X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True)
            N = X.size(0)
            logits, _ = model(X)
            loss = criterion(logits, y)
            prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5))
            losses.update(loss.item(), N)
            top1.update(prec1.item(), N)
            top5.update(prec5.item(), N)
            if step % config.print_freq == 0 or step == len(valid_loader) - 1:
                logger.info(
                    "Valid: [{:3d}/{}] Step {:03d}/{:03d} Loss {losses.avg:.3f} "
                    "Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format(
                        epoch + 1, config.epochs, step, len(valid_loader) - 1,
                        losses=losses, top1=top1, top5=top5))
    writer.add_scalar('val/loss', losses.avg, cur_step)
    writer.add_scalar('val/top1', top1.avg, cur_step)
    writer.add_scalar('val/top5', top5.avg, cur_step)
    logger.info("Valid: [{:3d}/{}] Final Prec@1 {:.4%}".format(epoch + 1, config.epochs, top1.avg))
    return top1.avg
Example #30
Source File: search.py From pt.darts with MIT License | 5 votes |
def validate(valid_loader, model, epoch, cur_step):
    top1 = utils.AverageMeter()
    top5 = utils.AverageMeter()
    losses = utils.AverageMeter()
    model.eval()
    with torch.no_grad():
        for step, (X, y) in enumerate(valid_loader):
            X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True)
            N = X.size(0)
            logits = model(X)
            loss = model.criterion(logits, y)
            prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5))
            losses.update(loss.item(), N)
            top1.update(prec1.item(), N)
            top5.update(prec5.item(), N)
            if step % config.print_freq == 0 or step == len(valid_loader) - 1:
                logger.info(
                    "Valid: [{:2d}/{}] Step {:03d}/{:03d} Loss {losses.avg:.3f} "
                    "Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format(
                        epoch + 1, config.epochs, step, len(valid_loader) - 1,
                        losses=losses, top1=top1, top5=top5))
    writer.add_scalar('val/loss', losses.avg, cur_step)
    writer.add_scalar('val/top1', top1.avg, cur_step)
    writer.add_scalar('val/top5', top5.avg, cur_step)
    logger.info("Valid: [{:2d}/{}] Final Prec@1 {:.4%}".format(epoch + 1, config.epochs, top1.avg))
    return top1.avg