Python chainer.training.extensions.PrintReport() Examples
The following are 23 code examples of chainer.training.extensions.PrintReport(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module chainer.training.extensions, or try the search function.
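PrintReport works in tandem with LogReport: values sent through chainer.reporter are aggregated by LogReport into log entries, and PrintReport prints the selected keys of each new entry as a table on stdout. A minimal, self-contained sketch of this wiring (the model, dataset, and entry names are illustrative, not taken from any example below):

import chainer
import chainer.links as L
from chainer import training
from chainer.training import extensions

# A small classifier on MNIST; any model that reports 'loss' and
# 'accuracy' through chainer.reporter works the same way.
model = L.Classifier(L.Linear(784, 10))
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)

train, _ = chainer.datasets.get_mnist()
train_iter = chainer.iterators.SerialIterator(train, 128)
updater = training.StandardUpdater(train_iter, optimizer, device=-1)
trainer = training.Trainer(updater, (1, 'epoch'), out='result')

# LogReport collects the reported values; PrintReport selects which
# columns of each log entry to print.
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(
    ['epoch', 'iteration', 'main/loss', 'main/accuracy', 'elapsed_time']))
trainer.run()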
Example #1
Source File: test_auto_print_report.py From chainer-chemistry with MIT License | 6 votes |
def _setup(self, stream=None, delete_flush=False):
    self.logreport = mock.MagicMock(spec=extensions.LogReport(
        ['epoch'], trigger=(1, 'iteration'), log_name=None))
    if stream is None:
        self.stream = mock.MagicMock()
        if delete_flush:
            del self.stream.flush
    else:
        self.stream = stream
    self.report = extensions.PrintReport(
        ['epoch'], log_report=self.logreport, out=self.stream)
    self.trainer = testing.get_trainer_with_mock_updater(
        stop_trigger=(1, 'iteration'))
    self.trainer.extend(self.logreport)
    self.trainer.extend(self.report)
    self.logreport.log = [{'epoch': 0}]
Example #2
Source File: test_print_report.py From chainer with MIT License | 6 votes |
def _setup(self, stream=None, delete_flush=False):
    self.logreport = mock.MagicMock(spec=extensions.LogReport(
        ['epoch'], trigger=(1, 'iteration'), log_name=None))
    if stream is None:
        self.stream = mock.MagicMock()
        if delete_flush:
            del self.stream.flush
    else:
        self.stream = stream
    self.report = extensions.PrintReport(
        ['epoch'], log_report=self.logreport, out=self.stream)
    self.trainer = testing.get_trainer_with_mock_updater(
        stop_trigger=(1, 'iteration'))
    self.trainer.extend(self.logreport)
    self.trainer.extend(self.report)
    self.logreport.log = [{'epoch': 0}]
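Examples #1 and #2 above exercise PrintReport's optional arguments: log_report accepts the LogReport instance (or registered extension name) to read entries from, and out redirects the printed table away from sys.stdout. A small sketch of the same wiring outside a test harness, assuming a StringIO buffer as the illustrative target:

import io

from chainer.training import extensions

# Route PrintReport's table into a string buffer instead of stdout.
buf = io.StringIO()
log_report = extensions.LogReport(trigger=(1, 'iteration'))
report = extensions.PrintReport(['epoch', 'main/loss'],
                                log_report=log_report, out=buf)

# After extending a trainer with both extensions and running it,
# buf.getvalue() holds the text that would have been printed.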
Example #3
Source File: NNet.py From alpha-zero-general with MIT License | 5 votes |
def _train_trainer(self, examples):
    """Training with chainer trainer module"""
    train_iter = SerialIterator(examples, args.batch_size)
    optimizer = optimizers.Adam(alpha=args.lr)
    optimizer.setup(self.nnet)

    def loss_func(boards, target_pis, target_vs):
        out_pi, out_v = self.nnet(boards)
        l_pi = self.loss_pi(target_pis, out_pi)
        l_v = self.loss_v(target_vs, out_v)
        total_loss = l_pi + l_v
        chainer.reporter.report({
            'loss': total_loss,
            'loss_pi': l_pi,
            'loss_v': l_v,
        }, observer=self.nnet)
        return total_loss

    updater = training.StandardUpdater(
        train_iter, optimizer, device=args.device,
        loss_func=loss_func, converter=converter)
    # Set up the trainer.
    trainer = training.Trainer(updater, (args.epochs, 'epoch'), out=args.out)
    # trainer.extend(extensions.snapshot(), trigger=(args.epochs, 'epoch'))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport([
        'epoch', 'main/loss', 'main/loss_pi', 'main/loss_v', 'elapsed_time']))
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.run()
Example #4
Source File: test_graph_cnn.py From chainer-graph-cnn with MIT License | 5 votes |
def check_train(self, gpu):
    outdir = tempfile.mkdtemp()
    print("outdir: {}".format(outdir))

    n_classes = 2
    batch_size = 32
    devices = {'main': gpu}

    A = np.array([
        [0, 1, 1, 0],
        [1, 0, 0, 1],
        [1, 0, 0, 0],
        [0, 1, 0, 0],
    ]).astype(np.float32)
    model = graph_cnn.GraphCNN(A, n_out=n_classes)

    optimizer = optimizers.Adam(alpha=1e-4)
    optimizer.setup(model)
    train_dataset = EasyDataset(train=True, n_classes=n_classes)
    train_iter = chainer.iterators.MultiprocessIterator(
        train_dataset, batch_size)
    updater = ParallelUpdater(train_iter, optimizer, devices=devices)
    trainer = chainer.training.Trainer(updater, (10, 'epoch'), out=outdir)
    trainer.extend(extensions.LogReport(trigger=(1, 'epoch')))
    trainer.extend(extensions.PrintReport(
        ['epoch', 'iteration', 'main/loss', 'main/accuracy']))
    trainer.extend(extensions.ProgressBar())
    trainer.run()
Example #5
Source File: train.py From chainer-wasserstein-gan with MIT License | 5 votes |
def train(args):
    nz = args.nz
    batch_size = args.batch_size
    epochs = args.epochs
    gpu = args.gpu

    # CIFAR-10 images in range [-1, 1] (tanh generator outputs)
    train, _ = datasets.get_cifar10(withlabel=False, ndim=3, scale=2)
    train -= 1.0

    train_iter = iterators.SerialIterator(train, batch_size)
    z_iter = RandomNoiseIterator(GaussianNoiseGenerator(0, 1, args.nz),
                                 batch_size)

    optimizer_generator = optimizers.RMSprop(lr=0.00005)
    optimizer_critic = optimizers.RMSprop(lr=0.00005)
    optimizer_generator.setup(Generator())
    optimizer_critic.setup(Critic())

    updater = WassersteinGANUpdater(
        iterator=train_iter,
        noise_iterator=z_iter,
        optimizer_generator=optimizer_generator,
        optimizer_critic=optimizer_critic,
        device=gpu)

    trainer = training.Trainer(updater, stop_trigger=(epochs, 'epoch'))
    trainer.extend(extensions.ProgressBar())
    trainer.extend(extensions.LogReport(trigger=(1, 'iteration')))
    trainer.extend(GeneratorSample(), trigger=(1, 'epoch'))
    trainer.extend(extensions.PrintReport(
        ['epoch', 'iteration', 'critic/loss', 'critic/loss/real',
         'critic/loss/fake', 'generator/loss']))
    trainer.run()
Example #6
Source File: train.py From qb with MIT License | 5 votes |
def main(model):
    train = read_data(fold=BUZZER_TRAIN_FOLD)
    valid = read_data(fold=BUZZER_DEV_FOLD)
    print('# train data: {}'.format(len(train)))
    print('# valid data: {}'.format(len(valid)))
    train_iter = chainer.iterators.SerialIterator(train, 64)
    valid_iter = chainer.iterators.SerialIterator(
        valid, 64, repeat=False, shuffle=False)

    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(1e-4))

    updater = training.updaters.StandardUpdater(
        train_iter, optimizer, converter=convert_seq, device=0)
    trainer = training.Trainer(updater, (20, 'epoch'), out=model.model_dir)

    trainer.extend(extensions.Evaluator(
        valid_iter, model, converter=convert_seq, device=0))

    record_trigger = training.triggers.MaxValueTrigger(
        'validation/main/accuracy', (1, 'epoch'))
    trainer.extend(extensions.snapshot_object(model, 'buzzer.npz'),
                   trigger=record_trigger)

    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.ProgressBar())
    trainer.extend(extensions.PrintReport([
        'epoch', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy', 'elapsed_time'
    ]))

    if not os.path.isdir(model.model_dir):
        os.mkdir(model.model_dir)

    trainer.run()
Example #7
Source File: train_memnn.py From pfio with MIT License | 4 votes |
def train(train_data_path, test_data_path, args):
    vocab = collections.defaultdict(lambda: len(vocab))
    vocab['<unk>'] = 0

    train_data = babi.read_data(vocab, train_data_path)
    test_data = babi.read_data(vocab, test_data_path)
    print('Training data: %s: %d' % (train_data_path, len(train_data)))
    print('Test data: %s: %d' % (test_data_path, len(test_data)))

    train_data = memnn.convert_data(train_data, args.max_memory)
    test_data = memnn.convert_data(test_data, args.max_memory)

    encoder = memnn.make_encoder(args.sentence_repr)
    network = memnn.MemNN(
        args.unit, len(vocab), encoder, args.max_memory, args.hop)
    model = chainer.links.Classifier(network, label_key='answer')
    opt = chainer.optimizers.Adam()

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    opt.setup(model)

    train_iter = chainer.iterators.SerialIterator(
        train_data, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(
        test_data, args.batchsize, repeat=False, shuffle=False)
    updater = chainer.training.StandardUpdater(
        train_iter, opt, device=args.gpu)
    trainer = chainer.training.Trainer(updater, (args.epoch, 'epoch'))

    @chainer.training.make_extension()
    def fix_ignore_label(trainer):
        network.fix_ignore_label()

    trainer.extend(fix_ignore_label)
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy']))
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.run()

    if args.model:
        memnn.save_model(args.model, model, vocab)
Example #8
Source File: train.py From Video-frame-prediction-by-multi-scale-GAN with MIT License | 4 votes |
def main(resume, gpu, load_path, data_path):
    dataset = Dataset(data_path)
    GenNetwork = MultiScaleGenerator(c.SCALE_FMS_G, c.SCALE_KERNEL_SIZES_G)
    DisNetwork = MultiScaleDiscriminator(c.SCALE_CONV_FMS_D,
                                         c.SCALE_KERNEL_SIZES_D,
                                         c.SCALE_FC_LAYER_SIZES_D)

    optimizers = {}
    optimizers["GeneratorNetwork"] = chainer.optimizers.SGD(c.LRATE_G)
    optimizers["DiscriminatorNetwork"] = chainer.optimizers.SGD(c.LRATE_D)

    iterator = chainer.iterators.SerialIterator(dataset, 1)
    params = {'LAM_ADV': 0.05, 'LAM_LP': 1, 'LAM_GDL': .1}
    updater = Updater(iterators=iterator, optimizers=optimizers,
                      GeneratorNetwork=GenNetwork,
                      DiscriminatorNetwork=DisNetwork,
                      params=params,
                      device=gpu)

    if gpu >= 0:
        updater.GenNetwork.to_gpu()
        updater.DisNetwork.to_gpu()

    trainer = chainer.training.Trainer(updater, (500000, 'iteration'),
                                       out='result')
    trainer.extend(extensions.snapshot(filename='snapshot'),
                   trigger=(1, 'iteration'))
    trainer.extend(extensions.snapshot_object(trainer.updater.GenNetwork,
                                              "GEN"))
    trainer.extend(saveGen)

    log_keys = ['epoch', 'iteration', 'GeneratorNetwork/L2Loss',
                'GeneratorNetwork/GDL', 'DiscriminatorNetwork/DisLoss',
                'GeneratorNetwork/CompositeGenLoss']
    print_keys = ['GeneratorNetwork/CompositeGenLoss',
                  'DiscriminatorNetwork/DisLoss']
    trainer.extend(extensions.LogReport(keys=log_keys,
                                        trigger=(10, 'iteration')))
    trainer.extend(extensions.PrintReport(print_keys),
                   trigger=(10, 'iteration'))
    trainer.extend(extensions.PlotReport(
        ['DiscriminatorNetwork/DisLoss'], 'iteration',
        (10, 'iteration'), file_name="DisLoss.png"))
    trainer.extend(extensions.PlotReport(
        ['GeneratorNetwork/CompositeGenLoss'], 'iteration',
        (10, 'iteration'), file_name="GenLoss.png"))
    trainer.extend(extensions.PlotReport(
        ['GeneratorNetwork/AdvLoss'], 'iteration',
        (10, 'iteration'), file_name="AdvGenLoss.png"))
    trainer.extend(extensions.PlotReport(
        ['GeneratorNetwork/AdvLoss', 'DiscriminatorNetwork/DisLoss'],
        'iteration', (10, 'iteration'), file_name="AdversarialLosses.png"))
    trainer.extend(extensions.PlotReport(
        ['GeneratorNetwork/L2Loss'], 'iteration',
        (10, 'iteration'), file_name="L2Loss.png"))
    trainer.extend(extensions.PlotReport(
        ['GeneratorNetwork/GDL'], 'iteration',
        (10, 'iteration'), file_name="GDL.png"))
    trainer.extend(extensions.ProgressBar(update_interval=10))

    if resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(load_path, trainer)
        print(trainer.updater.__dict__)

    trainer.run()
Example #9
Source File: trainer.py From Comicolorization with MIT License | 4 votes |
def create_trainer(
        config,
        project_path,
        updater,
        model,
        eval_func,
        iterator_test,
        iterator_train_varidation,
        loss_names,
        converter=chainer.dataset.convert.concat_examples,
):
    # type: (TrainConfig, str, any, typing.Dict, any, any, any, any, any) -> any
    def _make_evaluator(iterator):
        return utility.chainer_utility.NoVariableEvaluator(
            iterator,
            target=model,
            converter=converter,
            eval_func=eval_func,
            device=config.gpu,
        )

    trainer = chainer.training.Trainer(updater, out=project_path)

    log_trigger = (config.log_iteration, 'iteration')
    save_trigger = (config.save_iteration, 'iteration')

    eval_test_name = 'eval/test'
    eval_train_name = 'eval/train'

    snapshot = extensions.snapshot_object(model['main'],
                                          '{.updater.iteration}.model')
    trainer.extend(snapshot, trigger=save_trigger)

    trainer.extend(extensions.dump_graph('main/' + loss_names[0],
                                         out_name='main.dot'))

    trainer.extend(_make_evaluator(iterator_test),
                   name=eval_test_name, trigger=log_trigger)
    trainer.extend(_make_evaluator(iterator_train_varidation),
                   name=eval_train_name, trigger=log_trigger)

    report_target = []
    for evaluator_name in ['', eval_test_name + '/', eval_train_name + '/']:
        for model_name in ['main/']:
            for loss_name in loss_names:
                report_target.append(evaluator_name + model_name + loss_name)

    trainer.extend(extensions.LogReport(trigger=log_trigger,
                                        log_name="log.txt"))
    trainer.extend(extensions.PrintReport(report_target))

    return trainer
Example #10
Source File: train.py From ConvLSTM with MIT License | 4 votes |
def train():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', '-g', type=int, default=-1)
    parser.add_argument('--model', '-m', type=str, default=None)
    parser.add_argument('--opt', type=str, default=None)
    parser.add_argument('--epoch', '-e', type=int, default=3)
    parser.add_argument('--lr', '-l', type=float, default=0.001)
    parser.add_argument('--inf', type=int, default=10)
    parser.add_argument('--outf', type=int, default=10)
    parser.add_argument('--batch', '-b', type=int, default=8)
    args = parser.parse_args()

    train = dataset.MovingMnistDataset(0, 7000, args.inf, args.outf)
    train_iter = iterators.SerialIterator(train, batch_size=args.batch,
                                          shuffle=True)
    test = dataset.MovingMnistDataset(7000, 10000, args.inf, args.outf)
    test_iter = iterators.SerialIterator(test, batch_size=args.batch,
                                         repeat=False, shuffle=False)

    model = network.MovingMnistNetwork(sz=[128, 64, 64], n=2)

    if args.model is not None:
        print("loading model from " + args.model)
        serializers.load_npz(args.model, model)

    if args.gpu >= 0:
        cuda.get_device_from_id(0).use()
        model.to_gpu()

    opt = optimizers.Adam(alpha=args.lr)
    opt.setup(model)

    if args.opt is not None:
        print("loading opt from " + args.opt)
        serializers.load_npz(args.opt, opt)

    updater = training.StandardUpdater(train_iter, opt, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out='results')

    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
    trainer.extend(extensions.LogReport(trigger=(10, 'iteration')))
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss']))
    trainer.extend(extensions.ProgressBar(update_interval=1))

    trainer.run()

    modelname = "./results/model"
    print("saving model to " + modelname)
    serializers.save_npz(modelname, model)

    optname = "./results/opt"
    print("saving opt to " + optname)
    serializers.save_npz(optname, opt)
Example #11
Source File: train.py From portrait_matting with GNU General Public License v3.0 | 4 votes |
def register_extensions(trainer, model, test_iter, args):
    if args.mode.startswith('seg'):
        # Max accuracy
        best_trigger = training.triggers.BestValueTrigger(
            'validation/main/accuracy', lambda a, b: a < b, (1, 'epoch'))
    elif args.mode.startswith('mat'):
        # Min loss
        best_trigger = training.triggers.BestValueTrigger(
            'validation/main/loss', lambda a, b: a > b, (1, 'epoch'))
    else:
        logger.error('Invalid training mode')

    # Segmentation extensions
    trainer.extend(
        custom_extensions.PortraitVisEvaluator(
            test_iter, model, device=args.gpus[0],
            converter=select_converter(args.mode),
            filename='vis_epoch={epoch}_idx={index}.jpg',
            mode=args.mode
        ), trigger=(1, 'epoch'))

    # Basic extensions
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.LogReport(trigger=(200, 'iteration')))
    trainer.extend(extensions.ProgressBar(update_interval=20))
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'lr', 'elapsed_time']))
    trainer.extend(extensions.observe_lr(), trigger=(200, 'iteration'))

    # Snapshots
    trainer.extend(extensions.snapshot(
        filename='snapshot_epoch_{.updater.epoch}'
    ), trigger=(5, 'epoch'))
    trainer.extend(extensions.snapshot_object(
        model, filename='model_best'
    ), trigger=best_trigger)

    # ChainerUI extensions
    trainer.extend(chainerui.extensions.CommandsExtension())
    chainerui.utils.save_args(args, args.out)

    # Plotting extensions
    if extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(
                ['main/loss', 'validation/main/loss'],
                'epoch', file_name='loss.png'))
        trainer.extend(
            extensions.PlotReport(
                ['main/accuracy', 'validation/main/accuracy'],
                'epoch', file_name='accuracy.png'))
Example #12
Source File: demo_nnpu_chainer.py From pywsl with MIT License | 4 votes |
def main():
    gpu, out = -1, "result"
    stepsize = 0.001
    batchsize, epoch = 10000, 10
    beta, gamma = 0., 1.

    data_id, prior = 0, .5
    n_p, n_n, n_u, n_t, n_vp, n_vn, n_vu = 100, 0, 10000, 100, 20, 20, 100
    data_name, x_p, x_n, x_u, y_u, x_t, y_t, x_vp, x_vn, x_vu, y_vu \
        = load_dataset(data_id, n_p, n_n, n_u, prior, n_t,
                       n_vp=n_vp, n_vn=n_vn, n_vu=n_vu)

    x_p, x_n, x_u, x_t, x_vp, x_vn, x_vu = x_p.astype(np.float32), \
        x_n.astype(np.float32), x_u.astype(np.float32), \
        x_t.astype(np.float32), x_vp.astype(np.float32), \
        x_vn.astype(np.float32), x_vu.astype(np.float32)
    XYtrain = TupleDataset(
        np.r_[x_p, x_u],
        np.r_[np.ones(100), np.zeros(10000)].astype(np.int32))
    XYtest = TupleDataset(
        np.r_[x_vp, x_vu],
        np.r_[np.ones(20), np.zeros(100)].astype(np.int32))
    train_iter = chainer.iterators.SerialIterator(XYtrain, batchsize)
    test_iter = chainer.iterators.SerialIterator(
        XYtest, batchsize, repeat=False, shuffle=False)

    loss_type = lambda x: F.sigmoid(-x)
    nnpu_risk = PU_Risk(prior, loss=loss_type, nnPU=True,
                        gamma=gamma, beta=beta)
    pu_acc = PU_Accuracy(prior)

    model = L.Classifier(MLP(), lossfun=nnpu_risk, accfun=pu_acc)
    if gpu >= 0:
        chainer.backends.cuda.get_device_from_id(gpu).use()
        model.to_gpu(gpu)

    optimizer = chainer.optimizers.Adam(alpha=stepsize)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(0.005))

    updater = chainer.training.StandardUpdater(
        train_iter, optimizer, device=gpu)
    trainer = chainer.training.Trainer(updater, (epoch, 'epoch'), out=out)
    trainer.extend(extensions.LogReport(trigger=(1, 'epoch')))
    trainer.extend(extensions.Evaluator(test_iter, model, device=gpu))
    trainer.extend(extensions.ProgressBar())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
    key = 'validation/main/accuracy'
    model_name = 'model'
    trainer.extend(
        extensions.snapshot_object(model, model_name),
        trigger=chainer.training.triggers.MaxValueTrigger(key))
    if extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(['main/loss', 'validation/main/loss'],
                                  'epoch', file_name='loss_curve.png'))
        trainer.extend(
            extensions.PlotReport(
                ['main/accuracy', 'validation/main/accuracy'],
                'epoch', file_name='accuracy_curve.png'))
    trainer.run()

    yh = pred(model, x_t, batchsize, gpu)
    mr = prior * np.mean(yh[y_t == +1] <= 0) \
        + (1 - prior) * np.mean(yh[y_t == -1] >= 0)
    print("mr: {}".format(mr))
Example #13
Source File: train.py From models with MIT License | 4 votes |
def train_one_epoch(model, train_data, lr, gpu, batchsize, out):
    train_model = PixelwiseSoftmaxClassifier(model)
    if gpu >= 0:
        # Make a specified GPU current
        chainer.cuda.get_device_from_id(gpu).use()
        train_model.to_gpu()  # Copy the model to the GPU
    log_trigger = (0.1, 'epoch')
    validation_trigger = (1, 'epoch')
    end_trigger = (1, 'epoch')

    train_data = TransformDataset(
        train_data, ('img', 'label_map'), SimpleDoesItTransform(model.mean))
    val = VOCSemanticSegmentationWithBboxDataset(
        split='val').slice[:, ['img', 'label_map']]

    # Iterator
    train_iter = iterators.MultiprocessIterator(train_data, batchsize)
    val_iter = iterators.MultiprocessIterator(
        val, 1, shuffle=False, repeat=False, shared_mem=100000000)

    # Optimizer
    optimizer = optimizers.MomentumSGD(lr=lr, momentum=0.9)
    optimizer.setup(train_model)
    optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(rate=0.0001))

    # Updater
    updater = training.updaters.StandardUpdater(
        train_iter, optimizer, device=gpu)

    # Trainer
    trainer = training.Trainer(updater, end_trigger, out=out)

    trainer.extend(extensions.LogReport(trigger=log_trigger))
    trainer.extend(extensions.observe_lr(), trigger=log_trigger)
    trainer.extend(extensions.dump_graph('main/loss'))

    if extensions.PlotReport.available():
        trainer.extend(extensions.PlotReport(
            ['main/loss'], x_key='iteration', file_name='loss.png'))
        trainer.extend(extensions.PlotReport(
            ['validation/main/miou'], x_key='iteration',
            file_name='miou.png'))

    trainer.extend(extensions.snapshot_object(
        model, filename='snapshot.npy'), trigger=end_trigger)
    trainer.extend(extensions.PrintReport(
        ['epoch', 'iteration', 'elapsed_time', 'lr',
         'main/loss', 'validation/main/miou',
         'validation/main/mean_class_accuracy',
         'validation/main/pixel_accuracy']),
        trigger=log_trigger)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.extend(
        SemanticSegmentationEvaluator(
            val_iter, model,
            voc_semantic_segmentation_label_names),
        trigger=validation_trigger)

    trainer.run()
Example #14
Source File: train_utils.py From see with GNU General Public License v3.0 | 4 votes |
def get_trainer(net, updater, log_dir, print_fields, curriculum=None,
                extra_extensions=(), epochs=10, snapshot_interval=20000,
                print_interval=100, postprocess=None, do_logging=True,
                model_files=()):
    if curriculum is None:
        trainer = chainer.training.Trainer(
            updater,
            (epochs, 'epoch'),
            out=log_dir,
        )
    else:
        trainer = chainer.training.Trainer(
            updater,
            EarlyStopIntervalTrigger(epochs, 'epoch', curriculum),
            out=log_dir,
        )

    # dump computational graph
    trainer.extend(extensions.dump_graph('main/loss'))

    # also observe learning rate
    observe_lr_extension = chainer.training.extensions.observe_lr()
    observe_lr_extension.trigger = (print_interval, 'iteration')
    trainer.extend(observe_lr_extension)

    # Take snapshots
    trainer.extend(
        extensions.snapshot(filename="trainer_snapshot"),
        trigger=lambda trainer: trainer.updater.is_new_epoch or (
            trainer.updater.iteration > 0 and
            trainer.updater.iteration % snapshot_interval == 0)
    )

    if do_logging:
        # write all statistics to a file
        trainer.extend(Logger(model_files, log_dir, keys=print_fields,
                              trigger=(print_interval, 'iteration'),
                              postprocess=postprocess))

        # print some interesting statistics
        trainer.extend(extensions.PrintReport(
            print_fields,
            log_report='Logger',
        ))

    # Progressbar!!
    trainer.extend(extensions.ProgressBar(update_interval=1))

    for extra_extension, trigger in extra_extensions:
        trainer.extend(extra_extension, trigger=trigger)

    return trainer
Example #15
Source File: train_fcn32s.py From fcn with MIT License | 4 votes |
def get_trainer(optimizer, iter_train, iter_valid, iter_valid_raw,
                class_names, args):
    model = optimizer.target

    updater = chainer.training.StandardUpdater(
        iter_train, optimizer, device=args.gpu)

    trainer = chainer.training.Trainer(
        updater, (args.max_iteration, 'iteration'), out=args.out)

    trainer.extend(fcn.extensions.ParamsReport(args.__dict__))

    trainer.extend(extensions.ProgressBar(update_interval=5))

    trainer.extend(extensions.LogReport(
        trigger=(args.interval_print, 'iteration')))
    trainer.extend(extensions.PrintReport(
        ['epoch', 'iteration', 'elapsed_time',
         'main/loss', 'validation/main/miou']))

    def pred_func(x):
        model(x)
        return model.score

    trainer.extend(
        fcn.extensions.SemanticSegmentationVisReport(
            pred_func, iter_valid_raw,
            transform=fcn.datasets.transform_lsvrc2012_vgg16,
            class_names=class_names, device=args.gpu, shape=(4, 2)),
        trigger=(args.interval_eval, 'iteration'))

    trainer.extend(
        chainercv.extensions.SemanticSegmentationEvaluator(
            iter_valid, model, label_names=class_names),
        trigger=(args.interval_eval, 'iteration'))

    trainer.extend(extensions.snapshot_object(
        target=model, filename='model_best.npz'),
        trigger=chainer.training.triggers.MaxValueTrigger(
            key='validation/main/miou',
            trigger=(args.interval_eval, 'iteration')))

    assert extensions.PlotReport.available()
    trainer.extend(extensions.PlotReport(
        y_keys=['main/loss'], x_key='iteration',
        file_name='loss.png', trigger=(args.interval_print, 'iteration')))
    trainer.extend(extensions.PlotReport(
        y_keys=['validation/main/miou'], x_key='iteration',
        file_name='miou.png', trigger=(args.interval_print, 'iteration')))

    return trainer
Example #16
Source File: train_memnn.py From chainer with MIT License | 4 votes |
def train(train_data_path, test_data_path, args):
    device = chainer.get_device(args.device)
    device.use()

    vocab = collections.defaultdict(lambda: len(vocab))
    vocab['<unk>'] = 0

    train_data = babi.read_data(vocab, train_data_path)
    test_data = babi.read_data(vocab, test_data_path)
    print('Training data: %s: %d' % (train_data_path, len(train_data)))
    print('Test data: %s: %d' % (test_data_path, len(test_data)))

    train_data = memnn.convert_data(train_data, args.max_memory)
    test_data = memnn.convert_data(test_data, args.max_memory)

    encoder = memnn.make_encoder(args.sentence_repr)
    network = memnn.MemNN(
        args.unit, len(vocab), encoder, args.max_memory, args.hop)
    model = chainer.links.Classifier(network, label_key='answer')
    opt = chainer.optimizers.Adam()

    model.to_device(device)
    opt.setup(model)

    train_iter = chainer.iterators.SerialIterator(
        train_data, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(
        test_data, args.batchsize, repeat=False, shuffle=False)
    updater = chainer.training.StandardUpdater(train_iter, opt, device=device)
    trainer = chainer.training.Trainer(updater, (args.epoch, 'epoch'))

    @chainer.training.make_extension()
    def fix_ignore_label(trainer):
        network.fix_ignore_label()

    trainer.extend(fix_ignore_label)
    trainer.extend(extensions.Evaluator(test_iter, model, device=device))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy']))
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.run()

    if args.model:
        memnn.save_model(args.model, model, vocab)
Example #17
Source File: train_ch_in1k.py From imgclsmob with MIT License | 4 votes |
def prepare_trainer(net, optimizer_name, lr, momentum, num_epochs,
                    train_iter, val_iter, logging_dir_path, num_gpus=0):
    if optimizer_name == "sgd":
        optimizer = chainer.optimizers.MomentumSGD(lr=lr, momentum=momentum)
    elif optimizer_name == "nag":
        optimizer = chainer.optimizers.NesterovAG(lr=lr, momentum=momentum)
    else:
        raise Exception('Unsupported optimizer: {}'.format(optimizer_name))
    optimizer.setup(net)

    # devices = tuple(range(num_gpus)) if num_gpus > 0 else (-1, )
    devices = (0,) if num_gpus > 0 else (-1,)

    updater = training.updaters.StandardUpdater(
        iterator=train_iter,
        optimizer=optimizer,
        device=devices[0])
    trainer = training.Trainer(
        updater=updater,
        stop_trigger=(num_epochs, 'epoch'),
        out=logging_dir_path)

    val_interval = 100000, 'iteration'
    log_interval = 1000, 'iteration'

    trainer.extend(
        extension=extensions.Evaluator(
            val_iter, net, device=devices[0]),
        trigger=val_interval)
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=val_interval)
    trainer.extend(
        extensions.snapshot_object(net, 'model_iter_{.updater.iteration}'),
        trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.observe_lr(), trigger=log_interval)
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'iteration', 'main/loss', 'validation/main/loss',
            'main/accuracy', 'validation/main/accuracy', 'lr']),
        trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    return trainer
Example #18
Source File: train_ch_cifar.py From imgclsmob with MIT License | 4 votes |
def prepare_trainer(net, optimizer_name, lr, momentum, num_epochs,
                    train_iter, val_iter, logging_dir_path, num_gpus=0):
    if optimizer_name == "sgd":
        optimizer = chainer.optimizers.MomentumSGD(lr=lr, momentum=momentum)
    elif optimizer_name == "nag":
        optimizer = chainer.optimizers.NesterovAG(lr=lr, momentum=momentum)
    else:
        raise Exception('Unsupported optimizer: {}'.format(optimizer_name))
    optimizer.setup(net)

    # devices = tuple(range(num_gpus)) if num_gpus > 0 else (-1, )
    devices = (0,) if num_gpus > 0 else (-1,)

    updater = training.updaters.StandardUpdater(
        iterator=train_iter,
        optimizer=optimizer,
        device=devices[0])
    trainer = training.Trainer(
        updater=updater,
        stop_trigger=(num_epochs, 'epoch'),
        out=logging_dir_path)

    val_interval = 100000, 'iteration'
    log_interval = 1000, 'iteration'

    trainer.extend(
        extension=extensions.Evaluator(
            val_iter, net, device=devices[0]),
        trigger=val_interval)
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=val_interval)
    trainer.extend(
        extensions.snapshot_object(net, 'model_iter_{.updater.iteration}'),
        trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.observe_lr(), trigger=log_interval)
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'iteration', 'main/loss', 'validation/main/loss',
            'main/accuracy', 'validation/main/accuracy', 'lr']),
        trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    return trainer
Example #19
Source File: train_ch.py From imgclsmob with MIT License | 4 votes |
def prepare_trainer(net, optimizer_name, lr, momentum, num_epochs,
                    train_data, val_data, logging_dir_path, use_gpus):
    if optimizer_name == "sgd":
        optimizer = chainer.optimizers.MomentumSGD(lr=lr, momentum=momentum)
    elif optimizer_name == "nag":
        optimizer = chainer.optimizers.NesterovAG(lr=lr, momentum=momentum)
    else:
        raise Exception("Unsupported optimizer: {}".format(optimizer_name))
    optimizer.setup(net)

    # devices = tuple(range(num_gpus)) if num_gpus > 0 else (-1, )
    devices = (0,) if use_gpus else (-1,)

    updater = training.updaters.StandardUpdater(
        iterator=train_data["iterator"],
        optimizer=optimizer,
        device=devices[0])
    trainer = training.Trainer(
        updater=updater,
        stop_trigger=(num_epochs, "epoch"),
        out=logging_dir_path)

    val_interval = 100000, "iteration"
    log_interval = 1000, "iteration"

    trainer.extend(
        extension=extensions.Evaluator(
            iterator=val_data["iterator"],
            target=net,
            device=devices[0]),
        trigger=val_interval)
    trainer.extend(extensions.dump_graph("main/loss"))
    trainer.extend(extensions.snapshot(), trigger=val_interval)
    trainer.extend(
        extensions.snapshot_object(net, "model_iter_{.updater.iteration}"),
        trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.observe_lr(), trigger=log_interval)
    trainer.extend(
        extensions.PrintReport([
            "epoch", "iteration", "main/loss", "validation/main/loss",
            "main/accuracy", "validation/main/accuracy", "lr"]),
        trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    return trainer
Example #20
Source File: gen_mnist_mlp.py From chainer-compiler with MIT License | 4 votes |
def run_training(args, model):
    trainer = create_trainer(args, model)

    # Dump a computational graph from 'loss' variable at the first iteration
    # The "main" refers to the target link of the "main" optimizer.
    trainer.extend(extensions.dump_graph('main/loss'))

    # Take a snapshot for each specified epoch
    frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)
    trainer.extend(extensions.snapshot(), trigger=(frequency, 'epoch'))

    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport())

    # Save two plot images to the result dir
    if args.plot and extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(['main/loss', 'validation/main/loss'],
                                  'epoch', file_name='loss.png'))
        trainer.extend(
            extensions.PlotReport(
                ['main/accuracy', 'validation/main/accuracy'],
                'epoch', file_name='accuracy.png'))

    # Print selected entries of the log to stdout
    # Here "main" refers to the target link of the "main" optimizer again, and
    # "validation" refers to the default name of the Evaluator extension.
    # Entries other than 'epoch' are reported by the Classifier link, called by
    # either the updater or the evaluator.
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))

    # Print a progress bar to stdout
    trainer.extend(extensions.ProgressBar())

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()
Example #21
Source File: gen_mnist_mlp.py From chainer-compiler with MIT License | 4 votes |
def main_impl(args):
    # Set up a neural network to train
    # Classifier reports softmax cross entropy loss and accuracy at every
    # iteration, which will be used by the PrintReport extension below.
    model = MLP(args.unit, 10)
    # classifier = L.Classifier(model)
    classifier = MyClassifier(model, compute_accuracy=args.run_training)
    model = classifier
    if args.gpu >= 0:
        # Make a specified GPU current
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()  # Copy the model to the GPU

    if args.run_training:
        run_training(args, model)
        return

    out_dir = 'out/backprop_test_mnist_mlp'

    x = np.random.random((args.batchsize, 784)).astype(np.float32)
    y = (np.random.random(args.batchsize) * 10).astype(np.int32)
    onehot = np.eye(10, dtype=x.dtype)[y]
    x = chainer.Variable(x, name='input')
    onehot = chainer.Variable(onehot, name='onehot')

    chainer.disable_experimental_feature_warning = True
    shutil.rmtree(out_dir, ignore_errors=True)
    onnx_chainer.export_testcase(model, (x, onehot), out_dir,
                                 output_grad=True, output_names='loss')

    # Revive this code if we need to test parameter update.
    #
    # trainer = create_trainer(args, model)
    # for step in range(2):
    #     trainer.updater.update()
    #     npz_filename = '%s/params_%d.npz' % (out_dir, step)
    #     params_dir = '%s/params_%d' % (out_dir, step)
    #     chainer.serializers.save_npz(npz_filename, model)
    #     makedirs(params_dir)
    #     npz_to_onnx.npz_to_onnx(npz_filename,
    #                             os.path.join(params_dir, 'param'))
Example #22
Source File: gen_resnet50.py From chainer-compiler with MIT License | 4 votes |
def run_training(args, model):
    trainer = create_trainer(args, model)

    # Dump a computational graph from 'loss' variable at the first iteration
    # The "main" refers to the target link of the "main" optimizer.
    trainer.extend(extensions.dump_graph('main/loss'))

    # Take a snapshot for each specified epoch
    frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)
    trainer.extend(extensions.snapshot(), trigger=(frequency, 'epoch'))

    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport())

    # Save two plot images to the result dir
    if args.plot and extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(['main/loss', 'validation/main/loss'],
                                  'epoch', file_name='loss.png'))
        trainer.extend(
            extensions.PlotReport(
                ['main/accuracy', 'validation/main/accuracy'],
                'epoch', file_name='accuracy.png'))

    # Print selected entries of the log to stdout
    # Here "main" refers to the target link of the "main" optimizer again, and
    # "validation" refers to the default name of the Evaluator extension.
    # Entries other than 'epoch' are reported by the Classifier link, called by
    # either the updater or the evaluator.
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))

    # Print a progress bar to stdout
    trainer.extend(extensions.ProgressBar())

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()
Example #23
Source File: mnist.py From cloudml-samples with Apache License 2.0 | 4 votes |
def main():
    # Training settings
    args = get_args()

    # Set up a neural network to train
    model = L.Classifier(Net())
    if args.gpu >= 0:
        # Make a specified GPU current
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()  # Copy the model to the GPU

    # Setup an optimizer
    optimizer = chainer.optimizers.MomentumSGD(lr=args.lr,
                                               momentum=args.momentum)
    optimizer.setup(model)

    # Load the MNIST dataset
    train, test = chainer.datasets.get_mnist(ndim=3)
    train_iter = chainer.iterators.SerialIterator(train, args.batch_size)
    test_iter = chainer.iterators.SerialIterator(
        test, args.test_batch_size, repeat=False, shuffle=False)

    # Set up a trainer
    updater = training.updaters.StandardUpdater(
        train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (args.epochs, 'epoch'))

    # Evaluate the model with the test dataset for each epoch
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))

    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport())

    # Print selected entries of the log to stdout
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))

    # Send selected entries of the log to CMLE HP tuning system
    trainer.extend(
        HpReport(hp_metric_val='validation/main/loss',
                 hp_metric_tag='my_loss'))

    if args.resume:
        # Resume from a snapshot
        tmp_model_file = os.path.join('/tmp', MODEL_FILE_NAME)
        if not os.path.exists(tmp_model_file):
            subprocess.check_call([
                'gsutil', 'cp',
                os.path.join(args.model_dir, MODEL_FILE_NAME),
                tmp_model_file])
        if os.path.exists(tmp_model_file):
            chainer.serializers.load_npz(tmp_model_file, trainer)

    trainer.run()

    if args.model_dir:
        tmp_model_file = os.path.join('/tmp', MODEL_FILE_NAME)
        serializers.save_npz(tmp_model_file, model)
        subprocess.check_call([
            'gsutil', 'cp', tmp_model_file,
            os.path.join(args.model_dir, MODEL_FILE_NAME)])