Python chainer.iterators.SerialIterator() Examples
The following are 30 code examples of chainer.iterators.SerialIterator().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module chainer.iterators, or try the search function.
Example #1
Source File: test_r2_score_evaluator.py From chainer-chemistry with MIT License | 6 votes |
def _test_r2_score_evaluator_ignore_nan_with_nonnan_value(inputs):
    """Check R2ScoreEvaluator with ignore_nan=True on NaN-free data.

    With no NaNs present, the result must equal plain ``r2_score``.
    """
    predictor = DummyPredictor()
    x0, x1, _ = inputs
    dataset = NumpyTupleDataset(x0, x1)
    iterator = SerialIterator(dataset, 2, repeat=False, shuffle=False)
    evaluator = R2ScoreEvaluator(
        iterator, predictor, name='train', ignore_nan=True)
    repo = chainer.Reporter()
    repo.add_observer('target', predictor)
    with repo:
        observation = evaluator.evaluate()

    expected = r2_score(x0, x1, ignore_nan=True)
    # Bug fix: a bare pytest.approx(a, b) call performs no check (the
    # second positional argument is the `rel` tolerance); the comparison
    # must be asserted explicitly.
    assert observation['target/r2_score'] == pytest.approx(expected)

    # --- test __call__ ---
    result = evaluator()
    assert result['train/main/r2_score'] == pytest.approx(expected)
Example #2
Source File: test_semantic_segmentation_evaluator.py From chainercv with MIT License | 6 votes |
def setUp(self):
    """Build a tiny segmentation fixture whose metrics are known exactly."""
    self.label_names = ('a', 'b', 'c')
    imgs = np.random.uniform(size=(1, 3, 2, 3))
    # There are labels for 'a' and 'b', but none for 'c'.
    pred_labels = np.array([[[1, 1, 1], [0, 0, 1]]])
    gt_labels = np.array([[[1, 0, 0], [0, -1, 1]]])

    # Hand-computed expectations for the fixed label maps above.
    self.iou_a = 1 / 3
    self.iou_b = 2 / 4
    self.pixel_accuracy = 3 / 5
    self.class_accuracy_a = 1 / 3
    self.class_accuracy_b = 2 / 2
    self.miou = np.mean((self.iou_a, self.iou_b))
    self.mean_class_accuracy = np.mean(
        (self.class_accuracy_a, self.class_accuracy_b))

    self.dataset = TupleDataset(imgs, gt_labels)
    self.link = _SemanticSegmentationStubLink(pred_labels)
    self.iterator = SerialIterator(
        self.dataset, 5, repeat=False, shuffle=False)
    self.evaluator = SemanticSegmentationEvaluator(
        self.iterator, self.link, self.label_names)
Example #3
Source File: test_serial_iterator.py From chainer with MIT License | 6 votes |
def test_iterator_repeat(self):
    """Repeat over an even-length dataset and track epoch bookkeeping."""
    dataset = [1, 2, 3, 4, 5, 6]
    it = iterators.SerialIterator(dataset, 2, shuffle=False)
    for epoch in range(3):
        self.assertEqual(it.epoch, epoch)
        self.assertAlmostEqual(it.epoch_detail, epoch + 0 / 6)
        # previous_epoch_detail is None until the first batch is drawn.
        if epoch == 0:
            self.assertIsNone(it.previous_epoch_detail)
        else:
            self.assertAlmostEqual(it.previous_epoch_detail, epoch - 2 / 6)
        self.assertEqual(it.next(), [1, 2])
        self.assertFalse(it.is_new_epoch)
        self.assertAlmostEqual(it.epoch_detail, epoch + 2 / 6)
        self.assertAlmostEqual(it.previous_epoch_detail, epoch + 0 / 6)
        self.assertEqual(it.next(), [3, 4])
        self.assertFalse(it.is_new_epoch)
        self.assertAlmostEqual(it.epoch_detail, epoch + 4 / 6)
        self.assertAlmostEqual(it.previous_epoch_detail, epoch + 2 / 6)
        self.assertEqual(it.next(), [5, 6])
        self.assertTrue(it.is_new_epoch)
        self.assertAlmostEqual(it.epoch_detail, epoch + 6 / 6)
        self.assertAlmostEqual(it.previous_epoch_detail, epoch + 4 / 6)
Example #4
Source File: test_detection_coco_evaluator.py From chainercv with MIT License | 6 votes |
def setUp(self):
    """Prepare random detections whose predictions match ground truth."""
    bboxes = [generate_random_bbox(5, (256, 324), 24, 120)
              for _ in range(10)]
    # Bug fix: the area of a (y_min, x_min, y_max, x_max) bbox is
    # (y_max - y_min) * (x_max - x_min).  The original expression
    # `(bb[2] - bb[0]) * bb[3] - bb[0]` dropped the parentheses and
    # subtracted the wrong coordinate.
    areas = [[np.array([(bb[2] - bb[0]) * (bb[3] - bb[1])]) for bb in bbox]
             for bbox in bboxes]
    labels = 2 * np.ones((10, 5), dtype=np.int32)
    crowdeds = np.zeros((10, 5))
    self.dataset = TupleDataset(
        np.random.uniform(size=(10, 3, 32, 48)),
        bboxes, labels, areas, crowdeds)
    self.link = _DetectionStubLink(bboxes, labels)
    self.iterator = SerialIterator(
        self.dataset, 5, repeat=False, shuffle=False)
    self.evaluator = DetectionCOCOEvaluator(
        self.iterator, self.link, label_names=('cls0', 'cls1', 'cls2'))
    # Predictions equal ground truth, so AP should be perfect.
    self.expected_ap = 1
Example #5
Source File: test_apply_to_iterator.py From chainercv with MIT License | 6 votes |
def test_apply_to_iterator_with_infinite_iterator(self):
    """apply_to_iterator keeps yielding when the iterator repeats forever."""
    def func(*in_values):
        n_sample = len(in_values[0])
        return [np.random.uniform(size=(48, 64)) for _ in range(n_sample)]

    dataset = []
    for _ in range(5):
        H, W = np.random.randint(8, 16, size=2)
        dataset.append(np.random.randint(0, 256, size=(3, H, W)))

    # repeat defaults to True, so the iterator never raises StopIteration.
    iterator = SerialIterator(dataset, 2)

    in_values, out_values, rest_values = apply_to_iterator(func, iterator)

    # Pull more samples than len(dataset) to exercise the infinite case.
    for _ in range(10):
        next(in_values[0])
    for _ in range(10):
        next(out_values[0])
Example #6
Source File: test_multi_node_evaluator.py From chainer with MIT License | 6 votes |
def check_custom(comm, length, bs):
    """Run CustomMultiNodeEvaluator and verify the rank-0 aggregate."""
    assert bs > 0
    assert length > 0
    # Each rank takes a strided slice of the data.
    a = list(range(comm.rank, length, comm.size))
    b = list(range(comm.rank, length, comm.size))
    c = list(range(comm.rank, length, comm.size))
    model = ExampleModel()
    dataset = TupleDataset(a, b, c)
    iterator = SerialIterator(dataset, bs, shuffle=False, repeat=False)
    evaluator = CustomMultiNodeEvaluator(comm, iterator, model)
    result = evaluator(None)

    # The custom evaluator contributes 2 per batch; sum across all ranks.
    iterator.reset()
    expected = comm.allreduce_obj(sum(2 for batch in iterator))

    # Only rank 0 receives the aggregated result.
    if comm.rank == 0:
        assert expected == result
    else:
        assert result is None
Example #7
Source File: test_r2_score_evaluator.py From chainer-chemistry with MIT License | 6 votes |
def _test_r2_score_evaluator(inputs):
    """Check R2ScoreEvaluator against the reference ``r2_score``."""
    predictor = DummyPredictor()
    x0, x1, _ = inputs
    dataset = NumpyTupleDataset(x0, x1)
    iterator = SerialIterator(dataset, 2, repeat=False, shuffle=False)
    evaluator = R2ScoreEvaluator(iterator, predictor, name='train')
    repo = chainer.Reporter()
    repo.add_observer('target', predictor)
    with repo:
        observation = evaluator.evaluate()

    expected = r2_score(x0, x1)
    # Bug fix: a bare pytest.approx(a, b) call checks nothing; it must be
    # compared against the observed value in an assert.
    assert observation['target/r2_score'] == pytest.approx(expected)

    # --- test __call__ ---
    result = evaluator()
    assert result['train/main/r2_score'] == pytest.approx(expected)
Example #8
Source File: test_r2_score_evaluator.py From chainer-chemistry with MIT License | 6 votes |
def _test_r2_score_evaluator_ignore_nan(inputs):
    """Check R2ScoreEvaluator with ignore_nan=True on data containing NaN."""
    predictor = DummyPredictor()
    x0, _, x2 = inputs
    dataset = NumpyTupleDataset(x0, x2)
    iterator = SerialIterator(dataset, 2, repeat=False, shuffle=False)
    evaluator = R2ScoreEvaluator(
        iterator, predictor, name='train', ignore_nan=True)
    repo = chainer.Reporter()
    repo.add_observer('target', predictor)
    with repo:
        observation = evaluator.evaluate()

    expected = r2_score(x0, x2, ignore_nan=True)
    # Bug fix: a bare pytest.approx(a, b) call checks nothing; it must be
    # compared against the observed value in an assert.
    assert observation['target/r2_score'] == pytest.approx(expected)

    # --- test __call__ ---
    result = evaluator()
    assert result['train/main/r2_score'] == pytest.approx(expected)
Example #9
Source File: test_prc_auc_evaluator.py From chainer-chemistry with MIT License | 6 votes |
def _test_prc_auc_evaluator_default_args(data0):
    """Check PRCAUCEvaluator with default positive/ignore label handling."""
    predictor = DummyPredictor()
    dataset = NumpyTupleDataset(*data0)
    iterator = SerialIterator(dataset, 2, repeat=False, shuffle=False)
    evaluator = PRCAUCEvaluator(
        iterator, predictor, name='train', pos_labels=1, ignore_labels=None)
    repo = chainer.Reporter()
    repo.add_observer('target', predictor)
    with repo:
        observation = evaluator.evaluate()

    expected_prc_auc = 0.7916
    # Bug fix: a bare pytest.approx(a, b) call asserts nothing; compare
    # explicitly.  0.7916 is a truncated literal, so use an absolute
    # tolerance rather than pytest's tight default relative tolerance.
    assert observation['target/prc_auc'] == pytest.approx(
        expected_prc_auc, abs=1e-3)

    # --- test __call__ ---
    result = evaluator()
    assert result['train/main/prc_auc'] == pytest.approx(
        expected_prc_auc, abs=1e-3)
Example #10
Source File: test_prc_auc_evaluator.py From chainer-chemistry with MIT License | 6 votes |
def _test_prc_auc_evaluator_with_labels(data1):
    """test `pos_labels` and `ignore_labels` behavior"""
    predictor = DummyPredictor()
    dataset = NumpyTupleDataset(*data1)
    iterator = SerialIterator(dataset, 2, repeat=False, shuffle=False)
    evaluator = PRCAUCEvaluator(
        iterator, predictor, name='val',
        pos_labels=[1, 2], ignore_labels=-1,
    )

    # --- test evaluate ---
    repo = chainer.Reporter()
    repo.add_observer('target', predictor)
    with repo:
        observation = evaluator.evaluate()

    expected_prc_auc = 0.7916
    # Bug fix: a bare pytest.approx(a, b) call asserts nothing; compare
    # explicitly.  0.7916 is a truncated literal, so use an absolute
    # tolerance rather than pytest's tight default relative tolerance.
    assert observation['target/prc_auc'] == pytest.approx(
        expected_prc_auc, abs=1e-3)

    # --- test __call__ ---
    result = evaluator()
    assert result['val/main/prc_auc'] == pytest.approx(
        expected_prc_auc, abs=1e-3)
Example #11
Source File: test_roc_auc_evaluator.py From chainer-chemistry with MIT License | 6 votes |
def _test_roc_auc_evaluator_default_args(data0):
    """Check ROCAUCEvaluator with default positive/ignore label handling."""
    predictor = DummyPredictor()
    dataset = NumpyTupleDataset(*data0)
    iterator = SerialIterator(dataset, 2, repeat=False, shuffle=False)
    evaluator = ROCAUCEvaluator(
        iterator, predictor, name='train', pos_labels=1, ignore_labels=None)

    # --- test evaluate ---
    repo = chainer.Reporter()
    repo.add_observer('target', predictor)
    with repo:
        observation = evaluator.evaluate()
    expected_roc_auc = 0.75
    assert observation['target/roc_auc'] == expected_roc_auc

    # --- test __call__ ---
    result = evaluator()
    assert result['train/main/roc_auc'] == expected_roc_auc
Example #12
Source File: test_prc_auc_evaluator.py From chainer-chemistry with MIT License | 6 votes |
def _test_prc_auc_evaluator_raise_error(data, raise_value_error=True):
    """Evaluate and return the reported PRC-AUC.

    Whether degenerate input raises is controlled by *raise_value_error*.
    """
    predictor = DummyPredictor()
    dataset = NumpyTupleDataset(*data)
    iterator = SerialIterator(dataset, 2, repeat=False, shuffle=False)
    evaluator = PRCAUCEvaluator(
        iterator, predictor, name='train',
        pos_labels=1, ignore_labels=None,
        raise_value_error=raise_value_error,
    )
    repo = chainer.Reporter()
    repo.add_observer('target', predictor)
    with repo:
        observation = evaluator.evaluate()
    return observation['target/prc_auc']
Example #13
Source File: test_serial_iterator.py From chainer with MIT License | 6 votes |
def test_iterator_not_repeat(self):
    """With repeat=False the iterator stops after exactly one epoch."""
    dataset = [1, 2, 3, 4, 5, 6]
    it = iterators.SerialIterator(dataset, 2, repeat=False, shuffle=False)

    self.assertAlmostEqual(it.epoch_detail, 0 / 6)
    self.assertIsNone(it.previous_epoch_detail)
    self.assertEqual(it.next(), [1, 2])
    self.assertAlmostEqual(it.epoch_detail, 2 / 6)
    self.assertAlmostEqual(it.previous_epoch_detail, 0 / 6)
    self.assertEqual(it.next(), [3, 4])
    self.assertAlmostEqual(it.epoch_detail, 4 / 6)
    self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)
    self.assertEqual(it.next(), [5, 6])
    self.assertTrue(it.is_new_epoch)
    self.assertEqual(it.epoch, 1)
    self.assertAlmostEqual(it.epoch_detail, 6 / 6)
    self.assertAlmostEqual(it.previous_epoch_detail, 4 / 6)

    # Once exhausted, every further next() raises.
    for _ in range(2):
        self.assertRaises(StopIteration, it.next)
Example #14
Source File: test_serial_iterator.py From chainer with MIT License | 6 votes |
def test_iterator_not_repeat_not_even(self):
    """Odd-length dataset: the last batch is short; all items are covered."""
    dataset = [1, 2, 3, 4, 5]
    it = iterators.SerialIterator(
        dataset, 2, repeat=False, shuffle=self.shuffle,
        order_sampler=self.order_sampler)

    self.assertAlmostEqual(it.epoch_detail, 0 / 5)
    self.assertIsNone(it.previous_epoch_detail)
    batch1 = it.next()
    self.assertAlmostEqual(it.epoch_detail, 2 / 5)
    self.assertAlmostEqual(it.previous_epoch_detail, 0 / 5)
    batch2 = it.next()
    self.assertAlmostEqual(it.epoch_detail, 4 / 5)
    self.assertAlmostEqual(it.previous_epoch_detail, 2 / 5)
    batch3 = it.next()
    self.assertAlmostEqual(it.epoch_detail, 5 / 5)
    self.assertAlmostEqual(it.previous_epoch_detail, 4 / 5)
    self.assertRaises(StopIteration, it.next)

    # The final batch holds the single leftover item, and the three
    # batches together are a permutation of the dataset.
    self.assertEqual(len(batch3), 1)
    self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
Example #15
Source File: test_serial_iterator.py From chainer with MIT License | 6 votes |
def test_iterator_not_repeat_not_even(self):
    """Odd-length dataset without shuffle: deterministic short last batch."""
    dataset = [1, 2, 3, 4, 5]
    it = iterators.SerialIterator(dataset, 2, repeat=False, shuffle=False)

    self.assertAlmostEqual(it.epoch_detail, 0 / 5)
    self.assertIsNone(it.previous_epoch_detail)
    self.assertEqual(it.next(), [1, 2])
    self.assertAlmostEqual(it.epoch_detail, 2 / 5)
    self.assertAlmostEqual(it.previous_epoch_detail, 0 / 5)
    self.assertEqual(it.next(), [3, 4])
    self.assertAlmostEqual(it.epoch_detail, 4 / 5)
    self.assertAlmostEqual(it.previous_epoch_detail, 2 / 5)
    # Last batch carries only the leftover element.
    self.assertEqual(it.next(), [5])
    self.assertTrue(it.is_new_epoch)
    self.assertEqual(it.epoch, 1)
    self.assertAlmostEqual(it.epoch_detail, 5 / 5)
    self.assertAlmostEqual(it.previous_epoch_detail, 4 / 5)
    self.assertRaises(StopIteration, it.next)
Example #16
Source File: plot_chainer_MLP.py From soft-dtw with BSD 2-Clause "Simplified" License | 6 votes |
def train(network, loss, X_tr, Y_tr, X_te, Y_te, n_epochs=30, gamma=1):
    """Fit *network* on (X_tr, Y_tr) with Adam for *n_epochs* epochs."""
    model = Objective(network, loss=loss, gamma=gamma)
    optimizer = optimizers.Adam()
    optimizer.setup(model)

    train = tuple_dataset.TupleDataset(X_tr, Y_tr)
    test = tuple_dataset.TupleDataset(X_te, Y_te)

    train_iter = iterators.SerialIterator(train, batch_size=1, shuffle=True)
    test_iter = iterators.SerialIterator(
        test, batch_size=1, repeat=False, shuffle=False)

    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (n_epochs, 'epoch'))
    trainer.run()
Example #17
Source File: NNet.py From alpha-zero-general with MIT License | 5 votes |
def _train_trainer(self, examples):
    """Training with chainer trainer module"""
    train_iter = SerialIterator(examples, args.batch_size)
    optimizer = optimizers.Adam(alpha=args.lr)
    optimizer.setup(self.nnet)

    def loss_func(boards, target_pis, target_vs):
        # Combined policy + value loss, reported per iteration.
        out_pi, out_v = self.nnet(boards)
        l_pi = self.loss_pi(target_pis, out_pi)
        l_v = self.loss_v(target_vs, out_v)
        total_loss = l_pi + l_v
        chainer.reporter.report({
            'loss': total_loss,
            'loss_pi': l_pi,
            'loss_v': l_v,
        }, observer=self.nnet)
        return total_loss

    updater = training.StandardUpdater(
        train_iter, optimizer, device=args.device,
        loss_func=loss_func, converter=converter)

    # Set up the trainer.
    trainer = training.Trainer(updater, (args.epochs, 'epoch'), out=args.out)
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'main/loss_pi', 'main/loss_v',
         'elapsed_time']))
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.run()
Example #18
Source File: test_semantic_segmentation_evaluator.py From chainercv with MIT License | 5 votes |
def test_consistency(self):
    """Multi-node evaluation must agree with single-process evaluation."""
    reporter = chainer.Reporter()

    # Only rank 0 feeds data; every other rank passes None.
    if self.comm.rank == 0:
        multi_iterator = SerialIterator(
            self.dataset, self.batchsize, repeat=False, shuffle=False)
    else:
        multi_iterator = None
    multi_link = _SemanticSegmentationStubLink(
        self.labels, self.initial_count)
    multi_evaluator = SemanticSegmentationEvaluator(
        multi_iterator, multi_link,
        label_names=('cls0', 'cls1', 'cls2'),
        comm=self.comm)
    reporter.add_observer('target', multi_link)
    with reporter:
        multi_mean = multi_evaluator.evaluate()

    # Non-root ranks report nothing.
    if self.comm.rank != 0:
        self.assertEqual(multi_mean, {})
        return

    single_iterator = SerialIterator(
        self.dataset, self.batchsize, repeat=False, shuffle=False)
    single_link = _SemanticSegmentationStubLink(self.labels)
    single_evaluator = SemanticSegmentationEvaluator(
        single_iterator, single_link,
        label_names=('cls0', 'cls1', 'cls2'))
    reporter.add_observer('target', single_link)
    with reporter:
        single_mean = single_evaluator.evaluate()

    self.assertEqual(set(multi_mean.keys()), set(single_mean.keys()))
    for key in multi_mean.keys():
        np.testing.assert_equal(single_mean[key], multi_mean[key])
Example #19
Source File: test_instance_segmentation_coco_evaluator.py From chainercv with MIT License | 5 votes |
def setUp(self):
    """Stub predictions equal ground truth, so AP is expected to be 1."""
    masks = np.random.uniform(size=(10, 5, 32, 48)) > 0.5
    labels = np.ones((10, 5), dtype=np.int32)
    self.dataset = TupleDataset(
        np.random.uniform(size=(10, 3, 32, 48)), masks, labels)
    self.link = _InstanceSegmentationStubLink(masks, labels)
    self.iterator = SerialIterator(
        self.dataset, 1, repeat=False, shuffle=False)
    self.evaluator = InstanceSegmentationCOCOEvaluator(
        self.iterator, self.link, label_names=('cls0', 'cls1', 'cls2'))
    self.expected_ap = 1
Example #20
Source File: test_progress_hook.py From chainercv with MIT License | 5 votes |
def test_progress_hook(self):
    """ProgressHook with a known total must not break iteration."""
    iterator = SerialIterator(self.dataset, 2, repeat=False)
    in_values, out_values, rest_values = apply_to_iterator(
        self.func, iterator,
        hook=ProgressHook(n_total=len(self.dataset)))
    # Consume every sample; the hook only reports progress.
    for _ in in_values[0]:
        pass
Example #21
Source File: test_progress_hook.py From chainercv with MIT License | 5 votes |
def test_progress_hook_with_infinite_iterator(self):
    """ProgressHook without n_total works on a repeating iterator."""
    # repeat defaults to True, so iteration never terminates on its own.
    iterator = SerialIterator(self.dataset, 2)
    in_values, out_values, rest_values = apply_to_iterator(
        self.func, iterator, hook=ProgressHook())
    for _ in range(10):
        next(in_values[0])
Example #22
Source File: train.py From chainer-wasserstein-gan with MIT License | 5 votes |
def train(args):
    """Train a Wasserstein GAN on CIFAR-10 with RMSprop optimizers."""
    nz = args.nz
    batch_size = args.batch_size
    epochs = args.epochs
    gpu = args.gpu

    # CIFAR-10 images in range [-1, 1] (tanh generator outputs)
    train, _ = datasets.get_cifar10(withlabel=False, ndim=3, scale=2)
    train -= 1.0

    train_iter = iterators.SerialIterator(train, batch_size)
    z_iter = RandomNoiseIterator(
        GaussianNoiseGenerator(0, 1, args.nz), batch_size)

    optimizer_generator = optimizers.RMSprop(lr=0.00005)
    optimizer_critic = optimizers.RMSprop(lr=0.00005)
    optimizer_generator.setup(Generator())
    optimizer_critic.setup(Critic())

    updater = WassersteinGANUpdater(
        iterator=train_iter,
        noise_iterator=z_iter,
        optimizer_generator=optimizer_generator,
        optimizer_critic=optimizer_critic,
        device=gpu)

    trainer = training.Trainer(updater, stop_trigger=(epochs, 'epoch'))
    trainer.extend(extensions.ProgressBar())
    trainer.extend(extensions.LogReport(trigger=(1, 'iteration')))
    trainer.extend(GeneratorSample(), trigger=(1, 'epoch'))
    trainer.extend(extensions.PrintReport(
        ['epoch', 'iteration', 'critic/loss', 'critic/loss/real',
         'critic/loss/fake', 'generator/loss']))
    trainer.run()
Example #23
Source File: predict_own_dataset.py From chainer-chemistry with MIT License | 5 votes |
def main():
    """Load a pickled regressor and evaluate it on a CSV dataset."""
    # Parse the arguments.
    args = parse_arguments()

    if args.label:
        labels = args.label
    else:
        raise ValueError('No target label was specified.')

    # Dataset preparation.
    def postprocess_label(label_list):
        return numpy.asarray(label_list, dtype=numpy.float32)

    print('Preprocessing dataset...')
    preprocessor = preprocess_method_dict[args.method]()
    parser = CSVFileParser(preprocessor,
                           postprocess_label=postprocess_label,
                           labels=labels, smiles_col='SMILES')
    dataset = parser.parse(args.datafile)['dataset']
    test = dataset

    print('Predicting...')
    # Set up the regressor.
    device = chainer.get_device(args.device)
    model_path = os.path.join(args.in_dir, args.model_filename)
    regressor = Regressor.load_pickle(model_path, device=device)

    # Perform the prediction.
    print('Evaluating...')
    converter = converter_method_dict[args.method]
    test_iterator = SerialIterator(test, 16, repeat=False, shuffle=False)
    eval_result = Evaluator(test_iterator, regressor,
                            converter=converter, device=device)()
    print('Evaluation result: ', eval_result)
    save_json(os.path.join(args.in_dir, 'eval_result.json'), eval_result)
Example #24
Source File: utils.py From Lighthead-RCNN-in-Pytorch0.4.1 with MIT License | 5 votes |
def eva_coco(dataset, func, limit=1000, preset='evaluate'):
    """Run COCO detection evaluation of *func* on up to *limit* samples."""
    total = limit if limit else len(dataset)
    # Temporarily restrict the dataset to the first `total` ids.
    orig_ids = dataset.ids.copy()
    dataset.ids = dataset.ids[:total]

    iterator = iterators.SerialIterator(
        dataset, 1, repeat=False, shuffle=False)
    in_values, out_values, rest_values = apply_to_iterator(
        func, iterator, hook=ProgressHook(len(dataset)))
    pred_bboxes, pred_labels, pred_scores = out_values
    gt_bboxes, gt_labels, gt_areas, gt_crowdeds = rest_values
    result = eval_detection_coco(
        pred_bboxes, pred_labels, pred_scores,
        gt_bboxes, gt_labels, gt_areas, gt_crowdeds)

    keys = [
        'map/iou=0.50:0.95/area=all/max_dets=100',
        'map/iou=0.50/area=all/max_dets=100',
        'map/iou=0.75/area=all/max_dets=100',
        'map/iou=0.50:0.95/area=small/max_dets=100',
        'map/iou=0.50:0.95/area=medium/max_dets=100',
        'map/iou=0.50:0.95/area=large/max_dets=100',
        'mar/iou=0.50:0.95/area=all/max_dets=1',
        'mar/iou=0.50:0.95/area=all/max_dets=10',
        'mar/iou=0.50:0.95/area=all/max_dets=100',
        'mar/iou=0.50:0.95/area=small/max_dets=100',
        'mar/iou=0.50:0.95/area=medium/max_dets=100',
        'mar/iou=0.50:0.95/area=large/max_dets=100',
    ]
    print('')
    results = []
    for key in keys:
        print('{:s}: {:f}'.format(key, result[key]))
        results.append(result[key])

    # Restore the ids mutated above.
    dataset.ids = orig_ids
    return results
Example #25
Source File: test_train_utils.py From chainer-chemistry with MIT License | 5 votes |
def test_run_train_cpu_iterator(model, train_data, valid_data):
    """run_train accepts pre-built iterators instead of raw datasets."""
    train_iter = SerialIterator(train_data, batch_size=4)
    valid_iter = SerialIterator(
        valid_data, batch_size=4, shuffle=False, repeat=False)
    run_train(model, train_iter, valid=valid_iter,
              epoch=1, batch_size=8,
              extensions_list=[lambda t: None])
Example #26
Source File: test_instance_segmentation_voc_evaluator.py From chainercv with MIT License | 5 votes |
def setUp(self):
    """Stub predictions equal ground truth, so AP is expected to be 1."""
    masks = np.random.uniform(size=(10, 5, 32, 48)) > 0.5
    labels = np.ones((10, 5), dtype=np.int32)
    self.dataset = TupleDataset(
        np.random.uniform(size=(10, 3, 32, 48)), masks, labels)
    self.link = _InstanceSegmentationStubLink(masks, labels)
    self.iterator = SerialIterator(
        self.dataset, 1, repeat=False, shuffle=False)
    self.evaluator = InstanceSegmentationVOCEvaluator(
        self.iterator, self.link, label_names=('cls0', 'cls1', 'cls2'))
    self.expected_ap = 1
Example #27
Source File: test_roc_auc_evaluator.py From chainer-chemistry with MIT License | 5 votes |
def _test_roc_auc_evaluator_with_labels(data1):
    """test `pos_labels` and `ignore_labels` behavior"""
    predictor = DummyPredictor()
    dataset = NumpyTupleDataset(*data1)
    iterator = SerialIterator(dataset, 2, repeat=False, shuffle=False)
    evaluator = ROCAUCEvaluator(
        iterator, predictor, name='val',
        pos_labels=[1, 2], ignore_labels=-1,
    )

    # --- test evaluate ---
    repo = chainer.Reporter()
    repo.add_observer('target', predictor)
    with repo:
        observation = evaluator.evaluate()
    expected_roc_auc = 0.75
    assert observation['target/roc_auc'] == expected_roc_auc

    # --- test __call__ ---
    result = evaluator()
    assert result['val/main/roc_auc'] == expected_roc_auc
Example #28
Source File: test_multi_node_evaluator.py From chainer with MIT License | 5 votes |
def check_generic(comm, length, bs):
    """GenericMultiNodeEvaluator gathers per-rank results onto rank 0."""
    assert bs > 0
    assert length > 0
    # Each rank takes a strided slice of the data.
    a = list(range(comm.rank, length, comm.size))
    b = list(range(comm.rank, length, comm.size))
    c = list(range(comm.rank, length, comm.size))
    model = ExampleModel()
    dataset = TupleDataset(a, b, c)
    iterator = SerialIterator(dataset, bs, shuffle=False, repeat=False)
    evaluator = GenericMultiNodeEvaluator(comm, iterator, model)
    results = evaluator(None)

    # Make expected answer
    iterator.reset()
    s = [[aa + bb + cc  # Same calculation as model
          for aa, bb, cc in batch]
         for batch in iterator]
    s = comm.gather_obj(s)

    if comm.rank == 0:
        # flatten list of lists gathered
        expected = []
        for e in zip(*s):
            expected.extend(e)
        for e, r in zip(expected, results):
            chainer.testing.assert_allclose(e, r)
    else:
        assert results is None
Example #29
Source File: test_serial_iterator.py From chainer with MIT License | 5 votes |
def test_iterator_repeat_not_even(self):
    """Odd-length dataset with repeat=True wraps batches across epochs."""
    dataset = [1, 2, 3, 4, 5]
    it = iterators.SerialIterator(dataset, 2, shuffle=False)

    self.assertEqual(it.epoch, 0)
    self.assertAlmostEqual(it.epoch_detail, 0 / 5)
    self.assertIsNone(it.previous_epoch_detail)
    self.assertEqual(it.next(), [1, 2])
    self.assertFalse(it.is_new_epoch)
    self.assertAlmostEqual(it.epoch_detail, 2 / 5)
    self.assertAlmostEqual(it.previous_epoch_detail, 0 / 5)
    self.assertEqual(it.next(), [3, 4])
    self.assertFalse(it.is_new_epoch)
    self.assertAlmostEqual(it.epoch_detail, 4 / 5)
    self.assertAlmostEqual(it.previous_epoch_detail, 2 / 5)
    # The batch straddling the epoch boundary wraps around to item 1.
    self.assertEqual(it.next(), [5, 1])
    self.assertTrue(it.is_new_epoch)
    self.assertEqual(it.epoch, 1)
    self.assertAlmostEqual(it.epoch_detail, 6 / 5)
    self.assertAlmostEqual(it.previous_epoch_detail, 4 / 5)
    self.assertEqual(it.next(), [2, 3])
    self.assertFalse(it.is_new_epoch)
    self.assertAlmostEqual(it.epoch_detail, 8 / 5)
    self.assertAlmostEqual(it.previous_epoch_detail, 6 / 5)
    self.assertEqual(it.next(), [4, 5])
    self.assertTrue(it.is_new_epoch)
    self.assertEqual(it.epoch, 2)
    self.assertAlmostEqual(it.epoch_detail, 10 / 5)
    self.assertAlmostEqual(it.previous_epoch_detail, 8 / 5)
Example #30
Source File: test_serial_iterator.py From chainer with MIT License | 5 votes |
def test_iterator_repeat(self):
    """Shuffled repeat: each epoch is some permutation of the dataset."""
    dataset = [1, 2, 3, 4, 5, 6]
    it = iterators.SerialIterator(
        dataset, 2, shuffle=self.shuffle,
        order_sampler=self.order_sampler)
    for epoch in range(3):
        self.assertEqual(it.epoch, epoch)
        self.assertAlmostEqual(it.epoch_detail, epoch + 0 / 6)
        # previous_epoch_detail is None until the first batch is drawn.
        if epoch == 0:
            self.assertIsNone(it.previous_epoch_detail)
        else:
            self.assertAlmostEqual(it.previous_epoch_detail, epoch - 2 / 6)
        batch1 = it.next()
        self.assertEqual(len(batch1), 2)
        self.assertFalse(it.is_new_epoch)
        self.assertAlmostEqual(it.epoch_detail, epoch + 2 / 6)
        self.assertAlmostEqual(it.previous_epoch_detail, epoch + 0 / 6)
        batch2 = it.next()
        self.assertEqual(len(batch2), 2)
        self.assertFalse(it.is_new_epoch)
        self.assertAlmostEqual(it.epoch_detail, epoch + 4 / 6)
        self.assertAlmostEqual(it.previous_epoch_detail, epoch + 2 / 6)
        batch3 = it.next()
        self.assertEqual(len(batch3), 2)
        self.assertTrue(it.is_new_epoch)
        # Order may vary, but one epoch covers the whole dataset.
        self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
        self.assertAlmostEqual(it.epoch_detail, epoch + 6 / 6)
        self.assertAlmostEqual(it.previous_epoch_detail, epoch + 4 / 6)