Python chainer.reporter.report_scope() Examples
The following are 27 code examples of chainer.reporter.report_scope().
Each example names its original project and source file in the header above it.
You may also want to check out all available functions/classes of the module chainer.reporter.
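For orientation before the examples: report_scope() temporarily swaps in a given dictionary as the current observation, so every value passed to chainer.reporter.report() inside the with block is collected there under observer-prefixed keys. The following minimal sketch shows the pattern the examples share; the observer name 'model' and the reported value are illustrative only, not taken from any project below.

import chainer
from chainer import reporter

# A bare Link works as an illustrative observer; any Link does.
model = chainer.Link()
repo = chainer.Reporter()
repo.add_observer('model', model)

with repo:
    observation = {}
    with reporter.report_scope(observation):
        # Values reported here land in `observation` rather than in
        # the reporter's own observation dictionary.
        reporter.report({'loss': 0.5}, model)

print(observation)  # {'model/loss': 0.5}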
Example #1
Source File: test_classifier.py From chainer-chemistry with MIT License
def test_report_key(self, metrics_fun, compute_metrics):
    repo = chainer.Reporter()
    link = Classifier(predictor=DummyPredictor(), metrics_fun=metrics_fun)
    link.compute_metrics = compute_metrics
    repo.add_observer('target', link)
    with repo:
        observation = {}
        with reporter.report_scope(observation):
            link(self.x, self.t)
        # print('observation ', observation)
        actual_keys = set(observation.keys())
        if compute_metrics:
            if metrics_fun is None:
                assert set(['target/loss']) == actual_keys
            elif isinstance(metrics_fun, dict):
                assert set(['target/loss', 'target/user_key']) == actual_keys
            elif callable(metrics_fun):
                assert set(['target/loss', 'target/accuracy']) == actual_keys
            else:
                raise TypeError()
        else:
            assert set(['target/loss']) == actual_keys
Example #2
Source File: test_regressor.py From chainer-chemistry with MIT License
def test_report_key(self, metrics_fun, compute_metrics):
    repo = chainer.Reporter()
    link = Regressor(predictor=DummyPredictor(), metrics_fun=metrics_fun)
    link.compute_metrics = compute_metrics
    repo.add_observer('target', link)
    with repo:
        observation = {}
        with reporter.report_scope(observation):
            link(self.x, self.t)
        # print('observation ', observation)
        actual_keys = set(observation.keys())
        if compute_metrics:
            if metrics_fun is None:
                assert set(['target/loss']) == actual_keys
            elif isinstance(metrics_fun, dict):
                assert set(['target/loss', 'target/user_key']) == actual_keys
            elif callable(metrics_fun):
                assert set(['target/loss', 'target/metrics']) == actual_keys
            else:
                raise TypeError()
        else:
            assert set(['target/loss']) == actual_keys
Example #3
Source File: lm.py From espnet with Apache License 2.0
def evaluate(self):
    val_iter = self.get_iterator("main")
    target = self.get_target("main")
    loss = 0
    count = 0
    for batch in copy.copy(val_iter):
        x, t = convert.concat_examples(batch, device=self.device, padding=(0, -1))
        xp = chainer.backends.cuda.get_array_module(x)
        state = None
        for i in six.moves.range(len(x[0])):
            state, loss_batch = target(state, x[:, i], t[:, i])
            non_zeros = xp.count_nonzero(x[:, i])
            loss += loss_batch.data * non_zeros
            count += int(non_zeros)
    # report validation loss
    observation = {}
    with reporter.report_scope(observation):
        reporter.report({"loss": float(loss / count)}, target)
    return observation
Example #4
Source File: extensions.py From Semantic-Segmentation-using-Adversarial-Networks with MIT License
def evaluate(self):
    iterator = self.get_iterator('main')
    all_targets = self.get_all_targets()
    for model in all_targets.values():
        if hasattr(model, 'train'):
            model.train = False
    if self.eval_hook:
        self.eval_hook(self)
    it = copy.copy(iterator)
    summary = reporter_module.DictSummary()
    for batch in it:
        observation = {}
        with reporter_module.report_scope(observation):
            self.updater.forward(batch)
            self.updater.calc_loss()
        summary.add(observation)
    for model in all_targets.values():
        if hasattr(model, 'train'):
            model.train = True
    return summary.compute_mean()
Example #5
Source File: asr.py From espnet with Apache License 2.0
def evaluate(self): """Main evaluate routine for CustomEvaluator.""" iterator = self._iterators["main"] if self.eval_hook: self.eval_hook(self) if hasattr(iterator, "reset"): iterator.reset() it = iterator else: it = copy.copy(iterator) summary = reporter_module.DictSummary() self.model.eval() with torch.no_grad(): for batch in it: x = _recursive_to(batch, self.device) observation = {} with reporter_module.report_scope(observation): # read scp files # x: original json with loaded features # will be converted to chainer variable later if self.ngpu == 0: self.model(*x) else: # apex does not support torch.nn.DataParallel data_parallel(self.model, x, range(self.ngpu)) summary.add(observation) self.model.train() return summary.compute_mean()
Example #6
Source File: MyEvaluator.py From HFT-CNN with MIT License
def evaluate(self):
    iterator = self._iterators['main']
    eval_func = self.eval_func or self._targets['main']

    if self.eval_hook:
        self.eval_hook(self)

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    summary = reporter_module.DictSummary()

    for batch in it:
        observation = {}
        with reporter_module.report_scope(observation):
            row_idx, col_idx, val_idx = [], [], []
            x = cuda.to_gpu(np.array([i[0] for i in batch]))
            labels = [l[1] for l in batch]
            for i in range(len(labels)):
                l_list = list(set(labels[i]))
                for y in l_list:
                    row_idx.append(i)
                    col_idx.append(y)
                    val_idx.append(1)
            m = len(labels)
            n = self.class_dim
            t = sp.csr_matrix((val_idx, (row_idx, col_idx)),
                              shape=(m, n), dtype=np.int8).todense()
            t = cuda.to_gpu(t)
            with function.no_backprop_mode():
                loss = F.sigmoid_cross_entropy(eval_func(x), t)
            summary.add({MyEvaluator.default_name + '/main/loss': loss})
        summary.add(observation)

    return summary.compute_mean()
Example #7
Source File: evaluator.py From 3dpose_gan with MIT License
def evaluate(self):
    iterator = self._iterators['main']
    gen = self._targets['gen']

    if self.eval_hook:
        self.eval_hook(self)

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    summary = reporter_module.DictSummary()

    for batch in it:
        observation = {}
        with reporter_module.report_scope(observation):
            xy_proj, xyz, scale = self.converter(batch, self.device)
            xy_proj, xyz = xy_proj[:, 0], xyz[:, 0]
            with function.no_backprop_mode(), \
                    chainer.using_config('train', False):
                xy_real = chainer.Variable(xy_proj)
                z_pred = gen(xy_real)
                z_mse = F.mean_squared_error(z_pred, xyz[:, 2::3])
                chainer.report({'z_mse': z_mse}, gen)

                lx = gen.xp.power(xyz[:, 0::3] - xy_proj[:, 0::2], 2)
                ly = gen.xp.power(xyz[:, 1::3] - xy_proj[:, 1::2], 2)
                lz = gen.xp.power(xyz[:, 2::3] - z_pred.data, 2)
                euclidean_distance = gen.xp.sqrt(lx + ly + lz).mean(axis=1)
                euclidean_distance *= scale[:, 0]
                euclidean_distance = gen.xp.mean(euclidean_distance)
                chainer.report(
                    {'euclidean_distance': euclidean_distance}, gen)
        summary.add(observation)

    return summary.compute_mean()
Example #8
Source File: sequential_evaluator.py From hyperface with MIT License
def evaluate(self):
    iterator = self._iterators['main']
    target = self._targets['main']
    eval_func = self.eval_func or target

    if self.eval_hook:
        self.eval_hook(self)

    batch = next(iterator)
    observation = {}
    with reporter_module.report_scope(observation):
        in_arrays = self.converter(batch, self.device)
        if isinstance(in_arrays, tuple):
            in_vars = tuple(variable.Variable(x, volatile='on')
                            for x in in_arrays)
            eval_func(*in_vars)
        elif isinstance(in_arrays, dict):
            in_vars = {key: variable.Variable(x, volatile='on')
                       for key, x in six.iteritems(in_arrays)}
            eval_func(**in_vars)
        else:
            in_var = variable.Variable(in_arrays, volatile='on')
            eval_func(in_var)

    return observation
Example #9
Source File: instance_segmentation_coco_evaluator.py From chainer-fcis with MIT License
def evaluate(self):
    iterator = self._iterators['main']
    target = self._targets['main']

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    imgs, pred_values, gt_values = apply_to_iterator(
        target.predict, it)
    # delete unused iterator explicitly
    sizes = [img.shape for img in imgs]
    del imgs

    pred_bboxes, pred_masks, pred_labels, pred_scores = pred_values
    gt_bboxes, gt_masks, gt_labels = gt_values

    result = eval_instance_segmentation_coco(
        sizes, pred_bboxes, pred_masks, pred_labels, pred_scores,
        gt_bboxes, gt_masks, gt_labels)

    report = {
        'mAP[0.50:0.95]': result['ap/iou=0.50:0.95/area=all/maxDets=100'],
        'mAP[0.50:]': result['ap/iou=0.50/area=all/maxDets=100'],
        'mAP[0.50:0.95] (small)': result['ap/iou=0.50:0.95/area=small/maxDets=100'],  # NOQA
        'mAP[0.50:0.95] (mid)': result['ap/iou=0.50:0.95/area=medium/maxDets=100'],  # NOQA
        'mAP[0.50:0.95] (large)': result['ap/iou=0.50:0.95/area=large/maxDets=100'],  # NOQA
    }

    observation = dict()
    with reporter.report_scope(observation):
        reporter.report(report, target)
    return observation
Example #10
Source File: batch_evaluator.py From chainer-chemistry with MIT License
def evaluate(self):
    iterator = self._iterators['main']
    eval_func = self.eval_func or self._targets['main']

    if self.eval_hook:
        self.eval_hook(self)

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    y_total = []
    t_total = []
    for batch in it:
        in_arrays = self.converter(batch, self.device)
        with chainer.no_backprop_mode(), chainer.using_config('train', False):
            y = eval_func(*in_arrays[:-1])
        t = in_arrays[-1]
        y_data = _get_1d_numpy_array(y)
        t_data = _get_1d_numpy_array(t)
        y_total.append(y_data)
        t_total.append(t_data)

    y_total = numpy.concatenate(y_total).ravel()
    t_total = numpy.concatenate(t_total).ravel()
    # metrics_value = self.metrics_fun(y_total, t_total)
    metrics = {key: metric_fun(y_total, t_total)
               for key, metric_fun in self.metrics_fun.items()}

    observation = {}
    with reporter.report_scope(observation):
        reporter.report(metrics, self._targets['main'])
    return observation
Example #11
Source File: chainer_utility.py From Comicolorization with MIT License
def evaluate(self):
    from chainer import reporter
    import copy

    iterator = self._iterators['main']
    target = self._targets['main']
    eval_func = self.eval_func or target

    if self.eval_hook:
        self.eval_hook(self)
    it = copy.copy(iterator)

    summary = reporter.DictSummary()
    for batch in it:
        observation = {}
        with reporter.report_scope(observation):
            in_arrays = self.converter(batch, self.device)
            if isinstance(in_arrays, tuple):
                eval_func(*in_arrays)
            elif isinstance(in_arrays, dict):
                eval_func(**in_arrays)
            else:
                eval_func(in_arrays)
        summary.add(observation)

    return summary.compute_mean()
Example #12
Source File: lm.py From espnet with Apache License 2.0
def evaluate(self): """Evaluate the model.""" val_iter = self.get_iterator("main") loss = 0 nll = 0 count = 0 self.model.eval() with torch.no_grad(): for batch in copy.copy(val_iter): x, t = concat_examples(batch, device=self.device[0], padding=(0, -100)) if self.device[0] == -1: l, n, c = self.model(x, t) else: # apex does not support torch.nn.DataParallel l, n, c = data_parallel(self.model, (x, t), self.device) loss += float(l.sum()) nll += float(n.sum()) count += int(c.sum()) self.model.train() # report validation loss observation = {} with reporter.report_scope(observation): reporter.report({"loss": loss}, self.model.reporter) reporter.report({"nll": nll}, self.model.reporter) reporter.report({"count": count}, self.model.reporter) return observation
Example #13
Source File: instance_segmentation_voc_evaluator.py From chainercv with MIT License
def evaluate(self):
    target = self._targets['main']
    if self.comm is not None and self.comm.rank != 0:
        apply_to_iterator(target.predict, None, comm=self.comm)
        return {}
    iterator = self._iterators['main']

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    in_values, out_values, rest_values = apply_to_iterator(
        target.predict, it, comm=self.comm)
    # delete unused iterators explicitly
    del in_values

    pred_masks, pred_labels, pred_scores = out_values
    gt_masks, gt_labels = rest_values

    result = eval_instance_segmentation_voc(
        pred_masks, pred_labels, pred_scores,
        gt_masks, gt_labels,
        iou_thresh=self.iou_thresh,
        use_07_metric=self.use_07_metric)

    report = {'map': result['map']}

    if self.label_names is not None:
        for l, label_name in enumerate(self.label_names):
            try:
                report['ap/{:s}'.format(label_name)] = result['ap'][l]
            except IndexError:
                report['ap/{:s}'.format(label_name)] = np.nan

    observation = {}
    with reporter.report_scope(observation):
        reporter.report(report, target)
    return observation
Example #14
Source File: semantic_segmentation_evaluator.py From chainercv with MIT License
def evaluate(self):
    target = self._targets['main']
    if self.comm is not None and self.comm.rank != 0:
        apply_to_iterator(target.predict, None, comm=self.comm)
        return {}
    iterator = self._iterators['main']

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    in_values, out_values, rest_values = apply_to_iterator(
        target.predict, it, comm=self.comm)
    # delete unused iterators explicitly
    del in_values

    pred_labels, = out_values
    gt_labels, = rest_values

    result = eval_semantic_segmentation(pred_labels, gt_labels)

    report = {'miou': result['miou'],
              'pixel_accuracy': result['pixel_accuracy'],
              'mean_class_accuracy': result['mean_class_accuracy']}

    if self.label_names is not None:
        for l, label_name in enumerate(self.label_names):
            try:
                report['iou/{:s}'.format(label_name)] = result['iou'][l]
                report['class_accuracy/{:s}'.format(label_name)] = \
                    result['class_accuracy'][l]
            except IndexError:
                report['iou/{:s}'.format(label_name)] = np.nan
                report['class_accuracy/{:s}'.format(label_name)] = np.nan

    observation = {}
    with reporter.report_scope(observation):
        reporter.report(report, target)
    return observation
Example #15
Source File: copy_transformer_eval_function.py From models with MIT License
def evaluate(self):
    summary = reporter.DictSummary()
    eval_func = self.eval_func or self._targets['main']

    observation = {}
    with reporter.report_scope(observation):
        # we always use the same array for testing, since this is only an example ;)
        data = eval_func.net.xp.array(
            [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]], dtype='int32')
        eval_func(data=data, label=data)
    summary.add(observation)

    return summary.compute_mean()
Example #16
Source File: multi_label_classification_evaluator.py From models with MIT License
def evaluate(self):
    iterator = self._iterators['main']
    target = self._targets['main']

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    in_values, out_values, rest_values = apply_to_iterator(
        self.predict_func, it)
    # delete unused iterators explicitly
    del in_values

    pred_labels, pred_scores = out_values
    gt_labels, = rest_values

    result = eval_multi_label_classification(
        pred_labels, pred_scores, gt_labels)

    report = {'map': result['map']}

    if self.label_names is not None:
        for l, label_name in enumerate(self.label_names):
            try:
                report['ap/{:s}'.format(label_name)] = result['ap'][l]
            except IndexError:
                report['ap/{:s}'.format(label_name)] = np.nan

    observation = {}
    with reporter.report_scope(observation):
        reporter.report(report, target)
    return observation
Example #17
Source File: projected_3d_bbox_evaluator.py From models with MIT License
def evaluate(self):
    iterator = self._iterators['main']
    target = self._targets['main']

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    in_values, out_values, rest_values = apply_to_iterator(
        target.predict, it)
    # delete unused iterators explicitly
    del in_values

    points, labels, scores = out_values
    gt_points, gt_labels = rest_values

    result = eval_projected_3d_bbox_single(
        points, scores, gt_points,
        self.vertex, self.intrinsics, diam=self.diam)

    report = result

    observation = {}
    with reporter.report_scope(observation):
        reporter.report(report, target)
    return observation
Example #18
Source File: train_utils.py From see with GNU General Public License v3.0
def evaluate(self):
    iterator = self._iterators['main']
    target = self._targets['main']
    eval_func = self.eval_func or target

    if self.eval_hook:
        self.eval_hook(self)

    it = copy.copy(iterator)
    summary = reporter_module.DictSummary()

    for _ in range(min(len(iterator.dataset) // iterator.batch_size, self.num_iterations)):
        batch = next(it, None)
        if batch is None:
            break

        observation = {}
        with reporter_module.report_scope(observation), \
                chainer.using_config('train', False), \
                chainer.using_config('enable_backprop', False):
            in_arrays = self.converter(batch, self.device)
            if isinstance(in_arrays, tuple):
                eval_func(*in_arrays)
            elif isinstance(in_arrays, dict):
                eval_func(**in_arrays)
            else:
                eval_func(in_arrays)

        summary.add(observation)

    return summary.compute_mean()
Example #19
Source File: seq2seq_mp1.py From chainer with MIT License
def evaluate(self):
    bt = time.time()
    with chainer.no_backprop_mode():
        references = []
        hypotheses = []
        observation = {}
        with reporter.report_scope(observation):
            for i in range(0, len(self.test_data), self.batch):
                src, trg = zip(*self.test_data[i:i + self.batch])
                references.extend([[t.tolist()] for t in trg])
                src = [chainer.dataset.to_device(self.device, x) for x in src]

                if self.comm.rank == 0:
                    self.model.translate(src, self.max_length)
                elif self.comm.rank == 1:
                    ys = [y.tolist() for y in self.model.translate(
                        src, self.max_length)]
                    hypotheses.extend(ys)

            if self.comm.rank == 1:
                bleu = bleu_score.corpus_bleu(
                    references, hypotheses,
                    smoothing_function=bleu_score.SmoothingFunction().method1)
                reporter.report({'bleu': bleu}, self.model)
    et = time.time()

    if self.comm.rank == 1:
        print('BleuEvaluator(single)::evaluate(): '
              'took {:.3f} [s]'.format(et - bt))
        sys.stdout.flush()
    return observation
Example #20
Source File: seq2seq.py From chainer with MIT License
def evaluate(self):
    bt = time.time()
    with chainer.no_backprop_mode():
        references = []
        hypotheses = []
        observation = {}
        with reporter.report_scope(observation):
            for i in range(0, len(self.test_data), self.batch):
                src, trg = zip(*self.test_data[i:i + self.batch])
                references.extend([[t.tolist()] for t in trg])
                src = [chainer.dataset.to_device(self.device, x) for x in src]
                ys = [y.tolist()
                      for y in self.model.translate(src, self.max_length)]
                hypotheses.extend(ys)

            bleu = bleu_score.corpus_bleu(
                references, hypotheses,
                smoothing_function=bleu_score.SmoothingFunction().method1)
            reporter.report({'bleu': bleu}, self.model)
    et = time.time()

    if self.comm is not None:
        # This evaluator is called via chainermn.MultiNodeEvaluator
        for i in range(0, self.comm.size):
            print('BleuEvaluator::evaluate(): '
                  'took {:.3f} [s]'.format(et - bt))
            sys.stdout.flush()
            self.comm.mpi_comm.Barrier()
    else:
        # This evaluator is called from a conventional
        # Chainer extension
        print('BleuEvaluator(single)::evaluate(): '
              'took {:.3f} [s]'.format(et - bt))
        sys.stdout.flush()
    return observation
Example #21
Source File: evaluator.py From contextual_augmentation with MIT License
def evaluate(self):
    iterator = self._iterators['main']
    eval_func = self.eval_func or self._targets['main']

    if self.eval_hook:
        self.eval_hook(self)

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    # summary = reporter_module.DictSummary()
    summary = collections.defaultdict(list)

    for batch in it:
        observation = {}
        with reporter_module.report_scope(observation):
            in_arrays = self.converter(batch, self.device)
            with function.no_backprop_mode():
                if isinstance(in_arrays, tuple):
                    eval_func(*in_arrays)
                elif isinstance(in_arrays, dict):
                    eval_func(**in_arrays)
                else:
                    eval_func(in_arrays)
        n_data = len(batch)
        summary['n'].append(n_data)
        # summary.add(observation)
        for k, v in observation.items():
            summary[k].append(v)

    mean = dict()
    ns = summary['n']
    del summary['n']
    for k, vs in summary.items():
        mean[k] = sum(v * n for v, n in zip(vs, ns)) / sum(ns)
    return mean
    # return summary.compute_mean()
Example #22
Source File: instance_segmentation_coco_evaluator.py From chainercv with MIT License
def evaluate(self):
    target = self._targets['main']
    if self.comm is not None and self.comm.rank != 0:
        apply_to_iterator(target.predict, None, comm=self.comm)
        return {}
    iterator = self._iterators['main']

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    in_values, out_values, rest_values = apply_to_iterator(
        target.predict, it, comm=self.comm)
    # delete unused iterators explicitly
    del in_values

    pred_masks, pred_labels, pred_scores = out_values

    if len(rest_values) == 2:
        gt_masks, gt_labels = rest_values
        gt_areas = None
        gt_crowdeds = None
    elif len(rest_values) == 4:
        gt_masks, gt_labels, gt_areas, gt_crowdeds = \
            rest_values
    else:
        raise ValueError('the dataset should return '
                         'sets of (img, mask, label) or sets of '
                         '(img, mask, label, area, crowded).')

    result = eval_instance_segmentation_coco(
        pred_masks, pred_labels, pred_scores,
        gt_masks, gt_labels, gt_areas, gt_crowdeds)

    report = {}
    for key in result.keys():
        if key.startswith('map') or key.startswith('mar'):
            report[key] = result[key]

    if self.label_names is not None:
        for key in result.keys():
            if key.startswith('ap') or key.startswith('ar'):
                for l, label_name in enumerate(self.label_names):
                    report_key = '{}/{:s}'.format(key, label_name)
                    try:
                        report[report_key] = result[key][l]
                    except IndexError:
                        report[report_key] = np.nan

    observation = {}
    with reporter.report_scope(observation):
        reporter.report(report, target)
    return observation
Example #23
Source File: detection_coco_evaluator.py From chainercv with MIT License
def evaluate(self):
    target = self._targets['main']
    if self.comm is not None and self.comm.rank != 0:
        apply_to_iterator(target.predict, None, comm=self.comm)
        return {}
    iterator = self._iterators['main']

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    in_values, out_values, rest_values = apply_to_iterator(
        target.predict, it, comm=self.comm)
    # delete unused iterators explicitly
    del in_values

    pred_bboxes, pred_labels, pred_scores = out_values

    if len(rest_values) == 2:
        gt_bboxes, gt_labels = rest_values
        gt_areas = None
        gt_crowdeds = None
    elif len(rest_values) == 4:
        gt_bboxes, gt_labels, gt_areas, gt_crowdeds = \
            rest_values
    else:
        raise ValueError('the dataset should return '
                         'sets of (img, bbox, label) or sets of '
                         '(img, bbox, label, area, crowded).')

    result = eval_detection_coco(
        pred_bboxes, pred_labels, pred_scores,
        gt_bboxes, gt_labels, gt_areas, gt_crowdeds)

    report = {}
    for key in result.keys():
        if key.startswith('map') or key.startswith('mar'):
            report[key] = result[key]

    if self.label_names is not None:
        for key in result.keys():
            if key.startswith('ap') or key.startswith('ar'):
                for l, label_name in enumerate(self.label_names):
                    report_key = '{}/{:s}'.format(key, label_name)
                    try:
                        report[report_key] = result[key][l]
                    except IndexError:
                        report[report_key] = np.nan

    observation = {}
    with reporter.report_scope(observation):
        reporter.report(report, target)
    return observation
Example #24
Source File: custom_mean_evaluator.py From kiss with GNU General Public License v3.0
def evaluate(self): """Evaluates the model and returns a result dictionary. This method runs the evaluation loop over the validation dataset. It accumulates the reported values to :class:`~chainer.DictSummary` and returns a dictionary whose values are means computed by the summary. Note that this function assumes that the main iterator raises ``StopIteration`` or code in the evaluation loop raises an exception. So, if this assumption is not held, the function could be caught in an infinite loop. Users can override this method to customize the evaluation routine. .. note:: This method encloses :attr:`eval_func` calls with :func:`function.no_backprop_mode` context, so all calculations using :class:`~chainer.FunctionNode`\\s inside :attr:`eval_func` do not make computational graphs. It is for reducing the memory consumption. Returns: dict: Result dictionary. This dictionary is further reported via :func:`~chainer.report` without specifying any observer. """ iterator = self._iterators['main'] eval_func = self.eval_func or self._targets['main'] if self.eval_hook: self.eval_hook(self) if hasattr(iterator, 'reset'): iterator.reset() it = iterator else: it = copy.copy(iterator) if self.max_num_iterations is not None: it = self.fixed_num_iterations_iterator(it) summary = reporter_module.DictSummary() for batch in it: observation = {} with reporter_module.report_scope(observation): in_arrays = self.converter(batch, self.device) with function.no_backprop_mode(): if isinstance(in_arrays, tuple): eval_func(*in_arrays) elif isinstance(in_arrays, dict): eval_func(**in_arrays) else: eval_func(in_arrays) summary.add(observation) return self.calculate_mean_of_summary(summary)
Example #25
Source File: instance_segmentation_voc_evaluator.py From chainer-mask-rcnn with MIT License
def evaluate(self):
    iterator = self._iterators['main']
    target = self._targets['main']

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)
    if self._show_progress:
        it = tqdm.tqdm(it, total=len(it.dataset))

    in_values, out_values, rest_values = apply_to_iterator(
        target.predict, it)
    del in_values

    pred_bboxes, pred_masks, pred_labels, pred_scores = out_values

    if len(rest_values) == 4:
        gt_bboxes, gt_labels, gt_masks, gt_difficults = rest_values
    elif len(rest_values) == 3:
        gt_bboxes, gt_labels, gt_masks = rest_values
        gt_difficults = None

    # evaluate
    result = utils.eval_instseg_voc(
        pred_masks, pred_labels, pred_scores,
        gt_masks, gt_labels, gt_difficults,
        use_07_metric=self.use_07_metric)

    report = {'map': result['map']}

    if self.label_names is not None:
        for l, label_name in enumerate(self.label_names):
            try:
                report['ap/{:s}'.format(label_name)] = result['ap'][l]
            except IndexError:
                report['ap/{:s}'.format(label_name)] = np.nan

    observation = dict()
    with reporter.report_scope(observation):
        reporter.report(report, target)
    return observation
Example #26
Source File: instance_segmentation_coco_evaluator.py From chainer-mask-rcnn with MIT License
def evaluate(self):
    iterator = self._iterators['main']
    target = self._targets['main']

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)
    if self._show_progress:
        it = tqdm.tqdm(it, total=len(it.dataset))

    in_values, out_values, rest_values = apply_to_iterator(
        target.predict, it)
    del in_values

    pred_bboxes, pred_masks, pred_labels, pred_scores = out_values

    if len(rest_values) == 5:
        gt_bboxes, gt_labels, gt_masks, gt_crowdeds, gt_areas = rest_values
    elif len(rest_values) == 3:
        gt_bboxes, gt_labels, gt_masks = rest_values
        gt_crowdeds = None
        gt_areas = None

    # evaluate
    result = utils.eval_instseg_coco(
        pred_masks, pred_labels, pred_scores,
        gt_masks, gt_labels, gt_crowdeds, gt_areas)

    report = {
        'map': result['map/iou=0.50:0.95/area=all/maxDets=100'],
        'map@0.5': result['map/iou=0.50/area=all/maxDets=100'],
        'map@0.75': result['map/iou=0.75/area=all/maxDets=100'],
    }

    if self.label_names is not None:
        for l, label_name in enumerate(self.label_names):
            try:
                report['ap/{:s}'.format(label_name)] = \
                    result['ap/iou=0.50:0.95/area=all/maxDets=100'][l]
            except IndexError:
                report['ap/{:s}'.format(label_name)] = np.nan

    observation = dict()
    with reporter.report_scope(observation):
        reporter.report(report, target)
    return observation
Example #27
Source File: detection_voc_evaluator.py From chainercv with MIT License
def evaluate(self):
    target = self._targets['main']
    if self.comm is not None and self.comm.rank != 0:
        apply_to_iterator(target.predict, None, comm=self.comm)
        return {}
    iterator = self._iterators['main']

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    in_values, out_values, rest_values = apply_to_iterator(
        target.predict, it, comm=self.comm)
    # delete unused iterators explicitly
    del in_values

    pred_bboxes, pred_labels, pred_scores = out_values

    if len(rest_values) == 3:
        gt_bboxes, gt_labels, gt_difficults = rest_values
    elif len(rest_values) == 2:
        gt_bboxes, gt_labels = rest_values
        gt_difficults = None

    result = eval_detection_voc(
        pred_bboxes, pred_labels, pred_scores,
        gt_bboxes, gt_labels, gt_difficults,
        use_07_metric=self.use_07_metric)

    report = {'map': result['map']}

    if self.label_names is not None:
        for l, label_name in enumerate(self.label_names):
            try:
                report['ap/{:s}'.format(label_name)] = result['ap'][l]
            except IndexError:
                report['ap/{:s}'.format(label_name)] = np.nan

    observation = {}
    with reporter.report_scope(observation):
        reporter.report(report, target)
    return observation