Python evaluator.Evaluator() Examples
The following are 10 code examples of evaluator.Evaluator(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module evaluator, or try the search function.
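Note that evaluator is a project-local module in each repository below, not a single shared package, so the Evaluator constructor takes different arguments in each example. The recurring shape is the same, though: build an Evaluator around the evaluation data, hand it a trained model, and read back metrics. Here is a self-contained toy sketch of that shape (every name in it is a placeholder, not any project's real API):

class Evaluator:
    """Toy stand-in for the project-specific Evaluator classes below."""
    def __init__(self, dataset):
        self.dataset = dataset

    def evaluate(self, model):
        # Score the model on every sample; the real evaluators below
        # compute mAP, Recall@K, and similar metrics instead.
        scores = [model(x) == y for x, y in self.dataset]
        return sum(scores) / len(scores)

dataset = [(1, 1), (2, 4), (3, 9)]
accuracy = Evaluator(dataset).evaluate(lambda x: x * x)
print('accuracy = {:.4f}'.format(accuracy))  # accuracy = 1.0000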
Example #1
Source File: eval.py From easy-faster-rcnn.pytorch with MIT License
def _eval(path_to_checkpoint: str, dataset_name: str, backbone_name: str,
          path_to_data_dir: str, path_to_results_dir: str):
    # Build the evaluation dataset and wrap it in an Evaluator
    dataset = DatasetBase.from_name(dataset_name)(path_to_data_dir, DatasetBase.Mode.EVAL,
                                                  Config.IMAGE_MIN_SIDE, Config.IMAGE_MAX_SIDE)
    evaluator = Evaluator(dataset, path_to_data_dir, path_to_results_dir)
    Log.i('Found {:d} samples'.format(len(dataset)))

    # Restore the detector from the checkpoint
    backbone = BackboneBase.from_name(backbone_name)(pretrained=False)
    model = Model(backbone, dataset.num_classes(), pooler_mode=Config.POOLER_MODE,
                  anchor_ratios=Config.ANCHOR_RATIOS, anchor_sizes=Config.ANCHOR_SIZES,
                  rpn_pre_nms_top_n=Config.RPN_PRE_NMS_TOP_N,
                  rpn_post_nms_top_n=Config.RPN_POST_NMS_TOP_N).cuda()
    model.load(path_to_checkpoint)

    Log.i('Start evaluating with 1 GPU (1 batch per GPU)')
    mean_ap, detail = evaluator.evaluate(model)
    Log.i('Done')

    Log.i('mean AP = {:.4f}'.format(mean_ap))
    Log.i('\n' + detail)
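Called with concrete arguments, the entry point above would be invoked along these lines (the paths and names are hypothetical, chosen only to show the parameter roles):

_eval(path_to_checkpoint='/path/to/checkpoint.pth',  # hypothetical checkpoint path
      dataset_name='voc2007',        # must be a name DatasetBase.from_name() recognizes
      backbone_name='resnet101',     # must be a name BackboneBase.from_name() recognizes
      path_to_data_dir='/path/to/data',
      path_to_results_dir='/path/to/results')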
Example #2
Source File: eval.py From SlowFast-Network-pytorch with MIT License
def _eval(path_to_checkpoint, backbone_name, path_to_results_dir):
    dataset = AVA_video(EvalConfig.VAL_DATA)
    evaluator = Evaluator(dataset, path_to_results_dir)
    Log.i('Found {:d} samples'.format(len(dataset)))

    backbone = BackboneBase.from_name(backbone_name)()
    model = Model(backbone, dataset.num_classes(), pooler_mode=Config.POOLER_MODE,
                  anchor_ratios=Config.ANCHOR_RATIOS, anchor_sizes=Config.ANCHOR_SIZES,
                  rpn_pre_nms_top_n=TrainConfig.RPN_PRE_NMS_TOP_N,
                  rpn_post_nms_top_n=TrainConfig.RPN_POST_NMS_TOP_N).cuda()
    model.load(path_to_checkpoint)
    print("load from:", path_to_checkpoint)

    Log.i('Start evaluating with 1 GPU (1 batch per GPU)')
    mean_ap, detail = evaluator.evaluate(model)
    Log.i('Done')

    Log.i('mean AP = {:.4f}'.format(mean_ap))
    Log.i('\n' + detail)
Example #3
Source File: exec.py From RegRCNN with Apache License 2.0
def test(cf, logger, max_fold=None):
    """Performs testing for a given fold (or held-out set). Saves stats in evaluator."""
    logger.time("test_fold")
    logger.info('starting testing model of fold {} in exp {}'.format(cf.fold, cf.exp_dir))
    net = model.net(cf, logger).cuda()
    batch_gen = data_loader.get_test_generator(cf, logger)

    # Predict the test set; skip returning results if they are evaluated separately
    test_predictor = Predictor(cf, net, logger, mode='test')
    test_results_list = test_predictor.predict_test_set(
        batch_gen,
        return_results=not hasattr(cf, "eval_test_separately") or not cf.eval_test_separately)

    if test_results_list is not None:
        test_evaluator = Evaluator(cf, logger, mode='test')
        test_evaluator.evaluate_predictions(test_results_list)
        test_evaluator.score_test_df(max_fold=max_fold)

    logger.info('Testing of fold {} took {}.\n'.format(
        cf.fold, logger.get_time("test_fold", reset=True, format="hms")))
Example #4
Source File: eval.py From easy-fpn.pytorch with MIT License
def _eval(path_to_checkpoint: str, dataset_name: str, backbone_name: str,
          path_to_data_dir: str, path_to_results_dir: str):
    dataset = DatasetBase.from_name(dataset_name)(path_to_data_dir, DatasetBase.Mode.EVAL,
                                                  Config.IMAGE_MIN_SIDE, Config.IMAGE_MAX_SIDE)
    evaluator = Evaluator(dataset, path_to_data_dir, path_to_results_dir)
    Log.i('Found {:d} samples'.format(len(dataset)))

    backbone = BackboneBase.from_name(backbone_name)(pretrained=False)
    model = Model(backbone, dataset.num_classes(), pooling_mode=Config.POOLING_MODE,
                  anchor_ratios=Config.ANCHOR_RATIOS, anchor_scales=Config.ANCHOR_SCALES,
                  rpn_pre_nms_top_n=Config.RPN_PRE_NMS_TOP_N,
                  rpn_post_nms_top_n=Config.RPN_POST_NMS_TOP_N).cuda()
    model.load(path_to_checkpoint)

    mean_ap, detail = evaluator.evaluate(model)
    Log.i('mean AP = {:.4f}'.format(mean_ap))
    Log.i('\n' + detail)
Example #5
Source File: bezos.py From bezos with MIT License
def main():
    print(header)

    # Merge the user config over the defaults
    stream = open(args.config, 'r')
    default = open('./configs/default.yaml', 'r')
    parameters = load(stream)
    default_parameters = load(default)

    if args.command == 'train':
        parameters = merge(default_parameters, parameters)
        print("Training parameters\n-------")
        print_dic(parameters)
        runner = Runner(**parameters)
        runner.run()
    else:
        # Evaluation: additionally pass the CLI evaluation flags
        parameters = merge(merge(default_parameters, parameters), {
            'deterministic_evaluation': args.det,
            'load_dir': args.load_dir
        })
        evaluator = Evaluator(**parameters)
        evaluator.evaluate()
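A caveat if you run this example against a current PyYAML: since PyYAML 5.1, calling yaml.load() without an explicit Loader emits a warning, and safe_load() is the recommended choice for config files. Assuming load here is yaml.load, a safer equivalent of the two load(...) calls would be:

import yaml

# safe_load restricts the document to plain data types (dicts, lists,
# scalars), which is all a config file should need
with open('./configs/default.yaml', 'r') as default:
    default_parameters = yaml.safe_load(default)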
Example #6
Source File: trainer.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def _init_eval(self):
    logging.info("Init eval")
    x_pre, x, y = [self.g0_inputs[k] for k in ['x_pre', 'x', 'y']]
    self.model.set_device('/gpu:0')
    self.evaluate = Evaluator(self.sess, self.model, self.batch_size,
                              x_pre, x, y, self.data, self.writer,
                              self.hparams)
Example #7
Source File: exec.py From medicaldetectiontoolkit with Apache License 2.0
def test(logger):
    """Performs testing for a given fold (or hold-out set). Saves stats in evaluator."""
    logger.info('starting testing model of fold {} in exp {}'.format(cf.fold, cf.exp_dir))
    net = model.net(cf, logger).cuda()
    test_predictor = Predictor(cf, net, logger, mode='test')
    test_evaluator = Evaluator(cf, logger, mode='test')
    batch_gen = data_loader.get_test_generator(cf, logger)
    test_results_list = test_predictor.predict_test_set(batch_gen, return_results=True)
    test_evaluator.evaluate_predictions(test_results_list)
    test_evaluator.score_test_df()
Example #8
Source File: test.py From cgd with Apache License 2.0
def test(opt):
    # Load dataset
    dataset = Dataset(opt.data_dir, opt.train_txt, opt.test_txt, opt.bbox_txt)
    dataset.print_stats()

    # Load image transform
    test_transform = transforms.Compose([
        transforms.Resize((opt.image_width, opt.image_height)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    # Load data loader
    test_loader = mx.gluon.data.DataLoader(
        dataset=ImageData(dataset.test, test_transform),
        batch_size=opt.batch_size,
        num_workers=opt.num_workers
    )

    # Load model
    model = Model(opt)

    # Load evaluator
    evaluator = Evaluator(model, test_loader, opt.ctx)

    # Evaluate
    recalls = evaluator.evaluate(ranks=opt.recallk)
    for recallk, recall in zip(opt.recallk, recalls):
        print("R@{:4d}: {:.4f}".format(recallk, recall))
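Example #8 reports Recall@K, the standard retrieval metric in this setting: the fraction of query images whose K nearest neighbors in embedding space contain at least one image of the same class. A minimal NumPy sketch of that computation (independent of cgd's own implementation, which the snippet hides behind evaluator.evaluate):

import numpy as np

def recall_at_k(embeddings, labels, ks=(1, 2, 4, 8)):
    # Pairwise squared Euclidean distances between all embeddings.
    d = ((embeddings[:, None, :] - embeddings[None, :, :]) ** 2).sum(-1)
    np.fill_diagonal(d, np.inf)   # a query must not retrieve itself
    nn = np.argsort(d, axis=1)    # neighbor indices, closest first
    recalls = []
    for k in ks:
        hits = (labels[nn[:, :k]] == labels[:, None]).any(axis=1)
        recalls.append(hits.mean())
    return recalls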
Example #9
Source File: trainer.py From cleverhans with MIT License
def _init_eval(self):
    logging.info("Init eval")
    x_pre, x, y = [self.g0_inputs[k] for k in ['x_pre', 'x', 'y']]
    self.model.set_device('/gpu:0')
    self.evaluate = Evaluator(self.sess, self.model, self.batch_size,
                              x_pre, x, y, self.data, self.writer,
                              self.hparams)
Example #10
Source File: main.py From zsgnet-pytorch with MIT License
def learner_init(uid, cfg):
    device_count = torch.cuda.device_count()
    device = torch.device('cuda')

    # Anchor ratios/scales may arrive as strings in the config; eval() parses them
    if type(cfg['ratios']) != list:
        ratios = eval(cfg['ratios'], {})
    else:
        ratios = cfg['ratios']
    if type(cfg['scales']) != list:
        scales = cfg['scale_factor'] * np.array(eval(cfg['scales'], {}))
    else:
        scales = cfg['scale_factor'] * np.array(cfg['scales'])

    # Network, loss, and evaluator all share the same anchor configuration
    num_anchors = len(ratios) * len(scales)
    qnet = get_default_net(num_anchors=num_anchors, cfg=cfg)
    qnet = qnet.to(device)
    qnet = torch.nn.DataParallel(qnet)

    qlos = get_default_loss(ratios, scales, cfg)
    qlos = qlos.to(device)
    qeval = Evaluator(ratios, scales, cfg)

    # db = get_data(bs=cfg['bs'] * device_count, nw=cfg['nw'], bsv=cfg['bsv'] * device_count,
    #               nwv=cfg['nwv'], devices=cfg['devices'], do_tfms=cfg['do_tfms'],
    #               cfg=cfg, data_cfg=data_cfg)
    # db = get_data(cfg, ds_name=cfg['ds_to_use'])
    db = get_data(cfg)

    opt_fn = partial(torch.optim.Adam, betas=(0.9, 0.99))
    # Note: currently using default optimizer

    learn = Learner(uid=uid, data=db, mdl=qnet, loss_fn=qlos, opt_fn=opt_fn,
                    eval_fn=qeval, device=device, cfg=cfg)
    return learn
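A side note on this last example: it parses ratios and scales with the built-in eval() when the config supplies them as strings. Passing {} as the globals dict narrows, but does not eliminate, what such a string can execute. When the values are always plain literals like '[0.5, 1, 2]', ast.literal_eval is a safer drop-in (a suggestion, not how zsgnet-pytorch itself does it):

import ast

# Parses Python literals only; raises ValueError on anything executable.
ratios = ast.literal_eval('[0.5, 1.0, 2.0]')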