Python utils.metrics.AverageMeter() Examples

The following are 11 code examples of utils.metrics.AverageMeter(), taken from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the module utils.metrics, or try the search function.
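None of the examples below shows AverageMeter itself. It is a small bookkeeping helper that tracks the most recent value, a running sum, and a count. The sketch below is a typical implementation; the exact utils.metrics version in each project may differ in minor details.

class AverageMeter(object):
    """Tracks the most recent value and the running average of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0   # most recent value passed to update()
        self.sum = 0.0   # weighted sum of all values
        self.count = 0   # total weight (typically the number of samples)
        self.avg = 0.0   # running average: sum / count

    def update(self, val, n=1):
        # n is usually the batch size, so batches of different sizes
        # contribute proportionally to the running average
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

Because update(val, n) weights each value by n, the examples pass x.size(0) (the batch size) as the second argument when averaging per-sample metrics such as top-1 accuracy.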
Example #1
Source File: condensenet.py    From Pytorch-Project-Template with MIT License
def validate(self):
        """
        One epoch of validation
        :return: average top-1 accuracy for this epoch
        """
        tqdm_batch = tqdm(self.data_loader.valid_loader, total=self.data_loader.valid_iterations,
                          desc="Validation at -{}-".format(self.current_epoch))

        # set the model in evaluation mode
        self.model.eval()

        epoch_loss = AverageMeter()
        top1_acc = AverageMeter()
        top5_acc = AverageMeter()

        for x, y in tqdm_batch:
            if self.cuda:
                # `async` was renamed to `non_blocking` in PyTorch 0.4
                # (`async` is a reserved word in Python 3.7+)
                x = x.cuda(non_blocking=self.config.async_loading)
                y = y.cuda(non_blocking=self.config.async_loading)

            x, y = Variable(x), Variable(y)
            # model
            pred = self.model(x)
            # loss
            cur_loss = self.loss(pred, y)
            if np.isnan(float(cur_loss.item())):
                raise ValueError('Loss is nan during validation...')

            top1, top5 = cls_accuracy(pred.data, y.data, topk=(1, 5))
            epoch_loss.update(cur_loss.item())
            top1_acc.update(top1.item(), x.size(0))
            top5_acc.update(top5.item(), x.size(0))

        self.logger.info("Validation results at epoch-" + str(self.current_epoch) + " | " + "loss: " + str(
            epoch_loss.avg) + "- Top1 Acc: " + str(top1_acc.val) + "- Top5 Acc: " + str(top5_acc.val))

        tqdm_batch.close()

        return top1_acc.avg 
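The cls_accuracy helper used above (and again in Example #5) is not shown on this page. It computes top-k classification accuracy; a common implementation, offered here as a sketch rather than the project's exact code, looks like this:

import torch

def cls_accuracy(output, target, topk=(1,)):
    """Compute the precision@k for the specified values of k."""
    maxk = max(topk)
    batch_size = target.size(0)

    # indices of the top-k predictions per sample: (batch, maxk) -> (maxk, batch)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res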
Example #2
Source File: trainer.py    From pytorch_segmentation with MIT License
def _reset_metrics(self):
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.total_loss = AverageMeter()
        self.total_inter, self.total_union = 0, 0
        self.total_correct, self.total_label = 0, 0 
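Resetting matters because a meter accumulates across every update() call. A quick illustration of the semantics the rest of these examples rely on:

meter = AverageMeter()
meter.update(0.5, n=32)   # batch of 32 samples with mean loss 0.5
meter.update(1.0, n=16)   # batch of 16 samples with mean loss 1.0
print(meter.val)          # 1.0   -> most recent value
print(meter.avg)          # 0.667 -> (0.5 * 32 + 1.0 * 16) / 48, weighted by batch size
meter.reset()             # zero everything for the next epoch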
Example #3
Source File: train.py    From deep_gcns_torch with MIT License
def main():
    opt = OptInit().get_args()
    logging.info('===> Creating dataloader ...')
    train_dataset = GeoData.S3DIS(opt.data_dir, opt.area, True, pre_transform=T.NormalizeScale())
    train_loader = DenseDataLoader(train_dataset, batch_size=opt.batch_size, shuffle=True, num_workers=4)
    opt.n_classes = train_loader.dataset.num_classes

    logging.info('===> Loading the network ...')
    model = DenseDeepGCN(opt).to(opt.device)
    if opt.multi_gpus:
        # wrap the existing model instead of constructing a second one
        model = DataParallel(model).to(opt.device)
    logging.info('===> Loading pre-trained model ...')
    model, opt.best_value, opt.epoch = load_pretrained_models(model, opt.pretrained_model, opt.phase)
    logging.info(model)

    logging.info('===> Init the optimizer ...')
    criterion = torch.nn.CrossEntropyLoss().to(opt.device)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_adjust_freq, opt.lr_decay_rate)
    optimizer, scheduler, opt.lr = load_pretrained_optimizer(opt.pretrained_model, optimizer, scheduler, opt.lr)

    logging.info('===> Init Metric ...')
    opt.losses = AverageMeter()
    # opt.test_metric = miou
    # opt.test_values = AverageMeter()
    opt.test_value = 0.

    logging.info('===> start training ...')
    for _ in range(opt.epoch, opt.total_epochs):
        opt.epoch += 1
        train(model, train_loader, optimizer, scheduler, criterion, opt)
        # test_value = test(model, test_loader, test_metric, opt)
        scheduler.step()
    logging.info('Saving the final model. Finished!')
Example #4
Source File: train.py    From deep_gcns_torch with MIT License
def main():
    opt = OptInit().get_args()
    logging.info('===> Creating dataloader ...')
    train_dataset = GeoData.S3DIS(opt.data_dir, test_area=5, train=True, pre_transform=T.NormalizeScale())
    if opt.multi_gpus:
        train_loader = DataListLoader(train_dataset, batch_size=opt.batch_size, shuffle=True, num_workers=4)
    else:
        train_loader = DataLoader(train_dataset, batch_size=opt.batch_size, shuffle=True, num_workers=4)
    opt.n_classes = train_loader.dataset.num_classes

    logging.info('===> Loading the network ...')
    model = SparseDeepGCN(opt).to(opt.device)
    if opt.multi_gpus:
        # wrap the existing model instead of constructing a second one
        model = DataParallel(model).to(opt.device)
    logging.info('===> Loading pre-trained model ...')
    model, opt.best_value, opt.epoch = load_pretrained_models(model, opt.pretrained_model, opt.phase)
    logging.info(model)

    logging.info('===> Init the optimizer ...')
    criterion = torch.nn.CrossEntropyLoss().to(opt.device)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_adjust_freq, opt.lr_decay_rate)
    optimizer, scheduler, opt.lr = load_pretrained_optimizer(opt.pretrained_model, optimizer, scheduler, opt.lr)

    logging.info('===> Init Metric ...')
    opt.losses = AverageMeter()
    # opt.test_metric = miou
    # opt.test_values = AverageMeter()
    opt.test_value = 0.

    logging.info('===> start training ...')
    for _ in range(opt.epoch, opt.total_epochs):  # resume from the loaded epoch, as in Example #3
        opt.epoch += 1
        train(model, train_loader, optimizer, scheduler, criterion, opt)
        # test_value = test(model, test_loader, test_metric, opt)
        scheduler.step()
    logging.info('Saving the final model. Finished!')
Example #5
Source File: condensenet.py    From Pytorch-Project-Template with MIT License
def train_one_epoch(self):
        """
        One epoch training function
        """
        # Initialize tqdm
        tqdm_batch = tqdm(self.data_loader.train_loader, total=self.data_loader.train_iterations,
                          desc="Epoch-{}-".format(self.current_epoch))
        # Set the model to be in training mode
        self.model.train()
        # Initialize your average meters
        epoch_loss = AverageMeter()
        top1_acc = AverageMeter()
        top5_acc = AverageMeter()

        current_batch = 0
        for x, y in tqdm_batch:
            if self.cuda:
                # `async` was renamed to `non_blocking` in PyTorch 0.4
                x = x.cuda(non_blocking=self.config.async_loading)
                y = y.cuda(non_blocking=self.config.async_loading)

            # current iteration over total iterations
            progress = float(self.current_epoch * self.data_loader.train_iterations + current_batch) / (
                    self.config.max_epoch * self.data_loader.train_iterations)
            # progress = float(self.current_iteration) / (self.config.max_epoch * self.data_loader.train_iterations)
            x, y = Variable(x), Variable(y)
            lr = adjust_learning_rate(self.optimizer, self.current_epoch, self.config, batch=current_batch,
                                      nBatch=self.data_loader.train_iterations)
            # model
            pred = self.model(x, progress)
            # loss
            cur_loss = self.loss(pred, y)
            if np.isnan(float(cur_loss.item())):
                raise ValueError('Loss is nan during training...')
            # optimizer
            self.optimizer.zero_grad()
            cur_loss.backward()
            self.optimizer.step()

            top1, top5 = cls_accuracy(pred.data, y.data, topk=(1, 5))

            epoch_loss.update(cur_loss.item())
            top1_acc.update(top1.item(), x.size(0))
            top5_acc.update(top5.item(), x.size(0))

            self.current_iteration += 1
            current_batch += 1

            self.summary_writer.add_scalar("epoch/loss", epoch_loss.val, self.current_iteration)
            self.summary_writer.add_scalar("epoch/accuracy", top1_acc.val, self.current_iteration)
        tqdm_batch.close()

        self.logger.info("Training at epoch-" + str(self.current_epoch) + " | " + "loss: " + str(
            epoch_loss.val) + "- Top1 Acc: " + str(top1_acc.val) + "- Top5 Acc: " + str(top5_acc.val)) 
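The adjust_learning_rate call above is project-specific and not shown here. CondenseNet-style training typically anneals the learning rate with a cosine schedule over all iterations; a hypothetical sketch matching the signature used above (config.learning_rate is an assumed field name) is:

import math

def adjust_learning_rate(optimizer, epoch, config, batch=0, nBatch=1):
    # cosine annealing from the base LR down to 0 across all training iterations
    T_total = config.max_epoch * nBatch
    T_cur = epoch * nBatch + batch
    lr = 0.5 * config.learning_rate * (1 + math.cos(math.pi * T_cur / T_total))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr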
Example #6
Source File: erfnet.py    From Pytorch-Project-Template with MIT License
def train_one_epoch(self):
        """
        One epoch training function
        """
        # Initialize tqdm
        tqdm_batch = tqdm(self.data_loader.train_loader, total=self.data_loader.train_iterations,
                          desc="Epoch-{}-".format(self.current_epoch))

        # Set the model to be in training mode (for batchnorm)
        self.model.train()
        # Initialize your average meters
        epoch_loss = AverageMeter()
        metrics = IOUMetric(self.config.num_classes)

        for x, y in tqdm_batch:
            if self.cuda:
                # `async` was renamed to `non_blocking` in PyTorch 0.4
                x = x.pin_memory().cuda(non_blocking=self.config.async_loading)
                y = y.cuda(non_blocking=self.config.async_loading)
            x, y = Variable(x), Variable(y)
            # model
            pred = self.model(x)
            # loss
            cur_loss = self.loss(pred, y)
            if np.isnan(float(cur_loss.item())):
                raise ValueError('Loss is nan during training...')

            # optimizer
            self.optimizer.zero_grad()
            cur_loss.backward()
            self.optimizer.step()

            epoch_loss.update(cur_loss.item())
            _, pred_max = torch.max(pred, 1)
            metrics.add_batch(pred_max.data.cpu().numpy(), y.data.cpu().numpy())

            self.current_iteration += 1
            # exit(0)

        epoch_acc, _, epoch_iou_class, epoch_mean_iou, _ = metrics.evaluate()
        self.summary_writer.add_scalar("epoch-training/loss", epoch_loss.val, self.current_iteration)
        self.summary_writer.add_scalar("epoch_training/mean_iou", epoch_mean_iou, self.current_iteration)
        tqdm_batch.close()

        print("Training Results at epoch-" + str(self.current_epoch) + " | " + "loss: " + str(
            epoch_loss.val) + " - acc-: " + str(
            epoch_acc) + "- mean_iou: " + str(epoch_mean_iou) + "\n iou per class: \n" + str(
            epoch_iou_class)) 
Example #7
Source File: erfnet.py    From Pytorch-Project-Template with MIT License
def validate(self):
        """
        One epoch of validation
        :return: mean IoU and average loss for this epoch
        """
        tqdm_batch = tqdm(self.data_loader.valid_loader, total=self.data_loader.valid_iterations,
                          desc="Validation at -{}-".format(self.current_epoch))

        # set the model in evaluation mode
        self.model.eval()

        epoch_loss = AverageMeter()
        metrics = IOUMetric(self.config.num_classes)

        for x, y in tqdm_batch:
            if self.cuda:
                # `async` was renamed to `non_blocking` in PyTorch 0.4
                x = x.pin_memory().cuda(non_blocking=self.config.async_loading)
                y = y.cuda(non_blocking=self.config.async_loading)
            x, y = Variable(x), Variable(y)
            # model
            pred = self.model(x)
            # loss
            cur_loss = self.loss(pred, y)

            if np.isnan(float(cur_loss.item())):
                raise ValueError('Loss is nan during Validation.')

            _, pred_max = torch.max(pred, 1)
            metrics.add_batch(pred_max.data.cpu().numpy(), y.data.cpu().numpy())

            epoch_loss.update(cur_loss.item())

        epoch_acc, _, epoch_iou_class, epoch_mean_iou, _ = metrics.evaluate()
        self.summary_writer.add_scalar("epoch_validation/loss", epoch_loss.val, self.current_iteration)
        self.summary_writer.add_scalar("epoch_validation/mean_iou", epoch_mean_iou, self.current_iteration)

        print("Validation Results at epoch-" + str(self.current_epoch) + " | " + "loss: " + str(
            epoch_loss.val) + " - acc-: " + str(
            epoch_acc) + "- mean_iou: " + str(epoch_mean_iou) + "\n iou per class: \n" + str(
            epoch_iou_class))

        tqdm_batch.close()

        return epoch_mean_iou, epoch_loss.avg
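The IOUMetric class used in Examples #6 and #7 is also external to this page. A common confusion-matrix implementation consistent with the five-value unpacking above (accuracy, per-class accuracy, per-class IoU, mean IoU, frequency-weighted accuracy) would look roughly like this:

import numpy as np

class IOUMetric(object):
    """Accumulates a confusion matrix and derives segmentation metrics from it."""

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.hist = np.zeros((num_classes, num_classes))

    def _fast_hist(self, label_pred, label_true):
        # joint histogram of (true, predicted) class pairs for valid pixels
        mask = (label_true >= 0) & (label_true < self.num_classes)
        return np.bincount(
            self.num_classes * label_true[mask].astype(int) + label_pred[mask],
            minlength=self.num_classes ** 2).reshape(self.num_classes, self.num_classes)

    def add_batch(self, predictions, gts):
        for lp, lt in zip(predictions, gts):
            self.hist += self._fast_hist(lp.flatten(), lt.flatten())

    def evaluate(self):
        acc = np.diag(self.hist).sum() / self.hist.sum()
        acc_cls = np.nanmean(np.diag(self.hist) / self.hist.sum(axis=1))
        iou = np.diag(self.hist) / (
            self.hist.sum(axis=1) + self.hist.sum(axis=0) - np.diag(self.hist))
        mean_iou = np.nanmean(iou)
        freq = self.hist.sum(axis=1) / self.hist.sum()
        fwavacc = (freq[freq > 0] * iou[freq > 0]).sum()
        return acc, acc_cls, iou, mean_iou, fwavacc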
Example #8
Source File: main.py    From deep_gcns_torch with MIT License
def train(model, train_loader, val_loader, test_loader, opt):
    logging.info('===> Init the optimizer ...')
    criterion = nn.NLLLoss().to(opt.device)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)  # weight_decay=1e-4
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_adjust_freq, opt.lr_decay_rate)
    optimizer, scheduler, opt.lr = load_pretrained_optimizer(opt.pretrained_model, optimizer, scheduler, opt.lr)
    logging.info('===> Init Metric ...')
    opt.losses = AverageMeter()

    best_val_part_miou = 0.
    best_test_part_miou = 0.
    test_part_miou_val_best = 0.

    logging.info('===> start training ...')
    for _ in range(opt.epoch, opt.total_epochs):
        opt.epoch += 1
        # reset tracker
        opt.losses.reset()

        train_epoch(model, train_loader, optimizer, criterion, opt)
        val_part_iou, val_shape_mIoU = test(model, val_loader, opt)
        test_part_iou, test_shape_mIoU = test(model, test_loader, opt)

        scheduler.step()

        # ------------------  save ckpt
        if val_part_iou > best_val_part_miou:
            best_val_part_miou = val_part_iou
            test_part_miou_val_best = test_part_iou
            logging.info("Got a new best model on Validation with Part iou {:.4f}".format(best_val_part_miou))
            save_ckpt(model, optimizer, scheduler, opt, 'val_best')
        if test_part_iou > best_test_part_miou:
            best_test_part_miou = test_part_iou
            logging.info("Got a new best model on Test with Part iou {:.4f}".format(best_test_part_miou))
            save_ckpt(model, optimizer, scheduler, opt, 'test_best')

        # ------------------ show information
        logging.info(
            "===> Epoch {} Category {}-{}, Train Loss {:.4f}, mIoU on val {:.4f}, mIoU on test {:.4f}, "
            "Best val mIoU {:.4f} Its test mIoU {:.4f}. Best test mIoU {:.4f}".format(
                opt.epoch, opt.category_no, opt.category, opt.losses.avg, val_part_iou, test_part_iou,
                best_val_part_miou, test_part_miou_val_best, best_test_part_miou))

        info = {
            'loss': opt.losses.avg,
            'val_part_miou': val_part_iou,
            'test_part_miou': test_part_iou,
            'lr': scheduler.get_last_lr()[0]  # get_lr() is reserved for internal use since PyTorch 1.4
        }
        for tag, value in info.items():
            opt.logger.scalar_summary(tag, value, opt.step)

    save_ckpt(model, optimizer, scheduler, opt, 'last')
    logging.info(
        'Saving the final model. Finished! Category {}-{}. Best val part mIoU is {:.4f}. Its test mIoU is {:.4f}. '
        'Best test part mIoU is {:.4f}. Last test mIoU {:.4f} \n\n\n'.format(
            opt.category_no, opt.category, best_val_part_miou, test_part_miou_val_best,
            best_test_part_miou, test_part_iou))
Example #9
Source File: main.py    From deep_gcns_torch with MIT License
def train():
    info_format = 'Epoch: [{}]\t loss: {: .6f} train mF1: {: .6f} \t val mF1: {: .6f}\t test mF1: {:.6f} \t ' \
                  'best val mF1: {: .6f}\t best test mF1: {:.6f}'
    opt.printer.info('===> Init the optimizer ...')
    criterion = torch.nn.BCEWithLogitsLoss().to(opt.device)
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    scheduler = ReduceLROnPlateau(optimizer, "min", patience=opt.lr_patience, verbose=True, factor=0.5, cooldown=30,
                                  min_lr=opt.lr/100)
    opt.scheduler = 'ReduceLROnPlateau'

    optimizer, scheduler, opt.lr = load_pretrained_optimizer(opt.pretrained_model, optimizer, scheduler, opt.lr)

    opt.printer.info('===> Init Metric ...')
    opt.losses = AverageMeter()

    best_val_value = 0.
    best_test_value = 0.

    opt.printer.info('===> Start training ...')
    for _ in range(opt.epoch, opt.total_epochs):
        opt.epoch += 1
        loss, train_value = train_step(model, train_loader, optimizer, criterion, opt)
        val_value = test(model, valid_loader, opt)
        test_value = test(model, test_loader, opt)

        if val_value > best_val_value:
            best_val_value = val_value
            save_ckpt(model, optimizer, scheduler, opt.epoch, opt.save_path, opt.post, name_post='val_best')
        if test_value > best_test_value:
            best_test_value = test_value
            save_ckpt(model, optimizer, scheduler, opt.epoch, opt.save_path, opt.post, name_post='test_best')

        opt.printer.info(info_format.format(opt.epoch, loss, train_value, val_value, test_value, best_val_value,
                                            best_test_value))

        if opt.scheduler == 'ReduceLROnPlateau':
            scheduler.step(opt.losses.avg)
        else:
            scheduler.step()

    opt.printer.info('Saving the final model. Finished!')
Example #10
Source File: train.py    From DeepVideoCS with BSD 2-Clause "Simplified" License
def train(train_loader, model, optimizer, epoch, mseloss, encoder_learn, gradient_clip):
    batch_time = metrics.AverageMeter()
    data_time = metrics.AverageMeter()
    losses = metrics.AverageMeter()
    psnr = metrics.AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (video_blocks, pad_block_size, block_shape) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        # `async` was renamed to `non_blocking` in PyTorch 0.4
        target = video_blocks.cuda(non_blocking=True)
        input_var = Variable(video_blocks.cuda())
        target_var = Variable(target)

        # compute output
        model.module.pad_frame_size = pad_block_size.numpy()
        model.module.patch_shape = block_shape.numpy()

        if encoder_learn:
            model.module.measurements.binarization()

        output, y = model(input_var)
        loss = mseloss.compute_loss(output, target_var)
        # record loss (`loss.data[0]` predates PyTorch 0.4; use .item() instead)
        losses.update(loss.item(), video_blocks.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()

        # clip_grad_norm was renamed to clip_grad_norm_ in PyTorch 0.4
        if encoder_learn:
            # restore real-valued weights
            model.module.measurements.restore()
            nn.utils.clip_grad_norm_(model.module.parameters(), gradient_clip)
        else:
            nn.utils.clip_grad_norm_(
                model.module.reconstruction.parameters(), gradient_clip)

        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            logging.info('Epoch: [{0}][{1}/{2}]\t'
                         'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                         'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                         'Loss {loss.val:.4f} ({loss.avg:.4f})'.format(
                             epoch, i, len(train_loader), batch_time=batch_time,
                             data_time=data_time, loss=losses))
    return losses.avg 
Example #11
Source File: train.py    From DeepVideoCS with BSD 2-Clause "Simplified" License
def validate(val_loader, model, encoder_learn):
    batch_time = metrics.AverageMeter()
    psnr = metrics.AverageMeter()

    # switch to evaluate mode
    model.cuda()
    model.eval()

    # binarize weights
    if encoder_learn:
        model.module.measurements.binarization()

    end = time.time()
    for i, (video_frames, pad_frame_size, patch_shape) in enumerate(val_loader):
        # `async` and `volatile` were removed in PyTorch 0.4; use non_blocking
        # here and torch.no_grad() around the forward pass below
        video_input = video_frames.cuda(non_blocking=True)
        print(val_loader.dataset.videos[i])

        # compute output
        model.module.pad_frame_size = pad_frame_size.numpy()
        model.module.patch_shape = patch_shape.numpy()
        with torch.no_grad():
            reconstructed_video, y = model(video_input)

        # original video
        reconstructed_video = reconstructed_video.cpu().data.numpy()
        original_video = video_input.cpu().data.numpy()

        # measure accuracy and record loss
        psnr_video = metrics.psnr_accuracy(reconstructed_video, original_video)
        psnr.update(psnr_video, video_frames.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        logging.info('Test: [{0}/{1}]\t'
                     'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                     'PSNR {psnr.val:.3f} ({psnr.avg:.3f})'.format(
                         i + 1, len(val_loader), batch_time=batch_time,
                         psnr=psnr))

    # restore real-valued weights
    if encoder_learn:
        model.module.measurements.restore()

    print(' * PSNR {psnr.avg:.3f}'.format(psnr=psnr))

    return psnr.avg
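metrics.psnr_accuracy is not shown on this page either. PSNR is derived from the mean squared error between the reconstruction and the original signal; a plausible sketch (the repository's helper may scale or normalize differently) is:

import numpy as np

def psnr_accuracy(reconstructed, original, max_val=1.0):
    """Peak signal-to-noise ratio in dB, assuming inputs scaled to [0, max_val]."""
    mse = np.mean((reconstructed.astype(np.float64) - original.astype(np.float64)) ** 2)
    if mse == 0:
        return float('inf')  # identical signals
    return 10.0 * np.log10((max_val ** 2) / mse)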