Python chainer.training.extensions.Evaluator() Examples

The following are 20 code examples of chainer.training.extensions.Evaluator(). Each example links to its original project and source file. You may also want to check out all other available functions and classes of the chainer.training.extensions module.
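For orientation, here is the basic pattern most of the examples below follow: a minimal, self-contained sketch (the two-layer MLP, batch size, and epoch count are illustrative placeholders, not taken from any single example) in which an Evaluator is registered on a Trainer and runs the model over a held-out iterator once per epoch.

import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions


class MLP(chainer.Chain):
    """Placeholder two-layer perceptron used only for this sketch."""

    def __init__(self):
        super(MLP, self).__init__()
        with self.init_scope():
            self.l1 = L.Linear(None, 100)
            self.l2 = L.Linear(None, 10)

    def __call__(self, x):
        return self.l2(F.relu(self.l1(x)))


model = L.Classifier(MLP())
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)

train, test = chainer.datasets.get_mnist()
train_iter = chainer.iterators.SerialIterator(train, 100)
# Evaluation iterators should not repeat; otherwise a single evaluation
# pass would never terminate (see Example #5 below).
test_iter = chainer.iterators.SerialIterator(
    test, 100, repeat=False, shuffle=False)

updater = training.updaters.StandardUpdater(train_iter, optimizer, device=-1)
trainer = training.Trainer(updater, (5, 'epoch'), out='result')

# The Evaluator runs once per epoch by default and reports its metrics
# under the 'validation/' prefix (e.g. 'validation/main/loss').
trainer.extend(extensions.Evaluator(test_iter, model, device=-1))
trainer.run()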
Example #1
Source File: gen_mnist_mlp.py    From chainer-compiler with MIT License
def create_trainer(args, model):
    # Setup an optimizer
    # optimizer = chainer.optimizers.Adam()
    optimizer = chainer.optimizers.SGD()
    optimizer.setup(model)

    # Load the MNIST dataset
    train, test = chainer.datasets.get_mnist()

    train_iter = MyIterator(train, args.batchsize, shuffle=False)
    test_iter = MyIterator(test, args.batchsize, repeat=False, shuffle=False)

    # Set up a trainer
    updater = training.updaters.StandardUpdater(
        train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    # Evaluate the model with the test dataset for each epoch
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
    return trainer 
Example #2
Source File: predict_own_dataset.py    From chainer-chemistry with MIT License
def main():
    # Parse the arguments.
    args = parse_arguments()

    if args.label:
        labels = args.label
    else:
        raise ValueError('No target label was specified.')

    # Dataset preparation.
    def postprocess_label(label_list):
        return numpy.asarray(label_list, dtype=numpy.float32)

    print('Preprocessing dataset...')
    preprocessor = preprocess_method_dict[args.method]()
    parser = CSVFileParser(preprocessor, postprocess_label=postprocess_label,
                           labels=labels, smiles_col='SMILES')
    dataset = parser.parse(args.datafile)['dataset']

    test = dataset

    print('Predicting...')
    # Set up the regressor.
    device = chainer.get_device(args.device)
    model_path = os.path.join(args.in_dir, args.model_filename)
    regressor = Regressor.load_pickle(model_path, device=device)

    # Perform the prediction.
    print('Evaluating...')
    converter = converter_method_dict[args.method]
    test_iterator = SerialIterator(test, 16, repeat=False, shuffle=False)
    eval_result = Evaluator(test_iterator, regressor, converter=converter,
                            device=device)()
    print('Evaluation result: ', eval_result)

    save_json(os.path.join(args.in_dir, 'eval_result.json'), eval_result) 
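Unlike most of the examples on this page, this one does not attach the Evaluator to a Trainer: calling the Evaluator instance directly (the Evaluator(...)() expression above) performs a single pass over the iterator and returns a dictionary of averaged metrics, typically keyed under the target's default name, e.g. 'main/loss', which is then written to eval_result.json.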
Example #3
Source File: gen_resnet50.py    From chainer-compiler with MIT License
def create_trainer(args, model):
    # Setup an optimizer
    # optimizer = chainer.optimizers.Adam()
    optimizer = chainer.optimizers.SGD()
    optimizer.setup(model)

    # Load the datasets and mean file
    mean = np.load(args.mean)

    train = PreprocessedDataset(args.train, args.root, mean, insize)
    val = PreprocessedDataset(args.val, args.root, mean, insize, False)

    # These iterators load the images with subprocesses running in parallel to
    # the training/validation.
    train_iter = MyIterator(
        train, args.batchsize, n_processes=args.loaderjob)
    val_iter = MyIterator(
        val, args.val_batchsize, repeat=False, n_processes=args.loaderjob)

    # Set up a trainer
    updater = training.updaters.StandardUpdater(
        train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    # Evaluate the model with the test dataset for each epoch
    trainer.extend(extensions.Evaluator(val_iter, model, device=args.gpu))
    return trainer 
Example #4
Source File: test_evaluator.py    From chainer with MIT License
def setUp(self):
        self.data = [
            numpy.random.uniform(-1, 1, (3, 4)).astype('f') for _ in range(2)]

        self.iterator = iterators.SerialIterator(
            self.data, 1, repeat=False, shuffle=False)
        self.target = DummyModel(self)
        self.evaluator = extensions.Evaluator(
            self.iterator, {}, eval_func=self.target, progress_bar=True) 
Example #5
Source File: test_evaluator.py    From chainer with MIT License
def test_user_warning(self):
        dataset = numpy.ones((4, 6))
        iterator = self.iterator_class(dataset, 2, repeat=self.repeat)
        if self.repeat:
            with testing.assert_warns(UserWarning):
                extensions.Evaluator(iterator, {}) 
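The warning tested here guards a practical pitfall: an Evaluator consumes its iterator until the iterator reports exhaustion, so an iterator constructed with repeat=True would make a single evaluation pass run forever. Validation iterators should therefore be created with repeat=False (and usually shuffle=False), as the other examples on this page do.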
Example #6
Source File: train_imagenet.py    From chainer with MIT License
def get_example(self, i):
        # It reads the i-th image/label pair and returns a preprocessed image.
        # It applies the following preprocessing steps:
        #     - Cropping (random or center rectangular)
        #     - Random flip
        #     - Scaling to [0, 1] value
        crop_size = self.crop_size

        image, label = self.base[i]
        _, h, w = image.shape

        if self.random:
            # Randomly crop a region and flip the image
            top = random.randint(0, h - crop_size - 1)
            left = random.randint(0, w - crop_size - 1)
            if random.randint(0, 1):
                image = image[:, :, ::-1]
        else:
            # Crop the center
            top = (h - crop_size) // 2
            left = (w - crop_size) // 2
        bottom = top + crop_size
        right = left + crop_size

        image = image[:, top:bottom, left:right]
        image -= self.mean[:, top:bottom, left:right]
        image *= (1.0 / 255.0)  # Scale to [0, 1]
        return image, label


# chainermn.create_multi_node_evaluator can also be used with user-customized
# evaluator classes that inherit from chainer.training.extensions.Evaluator.
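The pattern referred to in the comment above looks roughly like the following. This is a minimal sketch, not part of the original source file; it assumes chainermn is installed with a working MPI backend and that val_iter, model, device, and trainer are already set up as in the surrounding example.

import chainermn

comm = chainermn.create_communicator()

# Wrap a standard (or user-customized) Evaluator; the wrapper distributes
# the evaluation work across nodes and aggregates the reported results.
evaluator = extensions.Evaluator(val_iter, model, device=device)
trainer.extend(chainermn.create_multi_node_evaluator(comm, evaluator))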
Example #7
Source File: train.py    From qb with MIT License
def main(model):
    train = read_data(fold=BUZZER_TRAIN_FOLD)
    valid = read_data(fold=BUZZER_DEV_FOLD)
    print('# train data: {}'.format(len(train)))
    print('# valid data: {}'.format(len(valid)))

    train_iter = chainer.iterators.SerialIterator(train, 64)
    valid_iter = chainer.iterators.SerialIterator(valid, 64, repeat=False, shuffle=False)

    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(1e-4))

    updater = training.updaters.StandardUpdater(train_iter, optimizer, converter=convert_seq, device=0)
    trainer = training.Trainer(updater, (20, 'epoch'), out=model.model_dir)

    trainer.extend(extensions.Evaluator(valid_iter, model, converter=convert_seq, device=0))

    record_trigger = training.triggers.MaxValueTrigger('validation/main/accuracy', (1, 'epoch'))
    trainer.extend(extensions.snapshot_object(model, 'buzzer.npz'), trigger=record_trigger)

    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.ProgressBar())
    trainer.extend(extensions.PrintReport([
        'epoch', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy', 'elapsed_time'
    ]))

    if not os.path.isdir(model.model_dir):
        os.mkdir(model.model_dir)

    trainer.run() 
Example #8
Source File: test_evaluator.py    From chainer with MIT License
def setUp(self):
        self.data = [
            numpy.random.uniform(-1, 1, (3, 4)).astype('f') for _ in range(2)]
        self.batches = [
            numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f')
            for _ in range(2)]

        self.iterator = DummyIterator(self.data)
        self.converter = DummyConverter(self.batches)
        self.target = DummyModel(self)
        self.evaluator = extensions.Evaluator(
            self.iterator, self.target, converter=self.converter)
        self.expect_mean = numpy.mean([numpy.sum(x) for x in self.batches]) 
Example #9
Source File: test_evaluator.py    From chainer with MIT License
def prepare(self, data, batches, device):
        iterator = DummyIterator(data)
        converter = DummyConverter(batches)
        target = DummyModelTwoArgs(self)
        evaluator = extensions.Evaluator(
            iterator, target, converter=converter, device=device)
        return iterator, converter, target, evaluator 
Example #10
Source File: test_evaluator.py    From chainer with MIT License
def setUp(self):
        self.data = range(2)
        self.batches = [
            {'x': numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f'),
             'y': numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f')}
            for _ in range(2)]

        self.iterator = DummyIterator(self.data)
        self.converter = DummyConverter(self.batches)
        self.target = DummyModelTwoArgs(self)
        self.evaluator = extensions.Evaluator(
            self.iterator, self.target, converter=self.converter) 
Example #11
Source File: train_memnn.py    From chainer with MIT License
def train(train_data_path, test_data_path, args):
    device = chainer.get_device(args.device)
    device.use()

    vocab = collections.defaultdict(lambda: len(vocab))
    vocab['<unk>'] = 0

    train_data = babi.read_data(vocab, train_data_path)
    test_data = babi.read_data(vocab, test_data_path)
    print('Training data: %s: %d' % (train_data_path, len(train_data)))
    print('Test data: %s: %d' % (test_data_path, len(test_data)))

    train_data = memnn.convert_data(train_data, args.max_memory)
    test_data = memnn.convert_data(test_data, args.max_memory)

    encoder = memnn.make_encoder(args.sentence_repr)
    network = memnn.MemNN(
        args.unit, len(vocab), encoder, args.max_memory, args.hop)
    model = chainer.links.Classifier(network, label_key='answer')
    opt = chainer.optimizers.Adam()

    model.to_device(device)

    opt.setup(model)

    train_iter = chainer.iterators.SerialIterator(
        train_data, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(
        test_data, args.batchsize, repeat=False, shuffle=False)
    updater = chainer.training.StandardUpdater(train_iter, opt, device=device)
    trainer = chainer.training.Trainer(updater, (args.epoch, 'epoch'))

    @chainer.training.make_extension()
    def fix_ignore_label(trainer):
        network.fix_ignore_label()

    trainer.extend(fix_ignore_label)
    trainer.extend(extensions.Evaluator(test_iter, model, device=device))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy']))
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.run()

    if args.model:
        memnn.save_model(args.model, model, vocab) 
Example #12
Source File: train.py    From ConvLSTM with MIT License
def train():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', '-g', type=int, default=-1)
    parser.add_argument('--model', '-m', type=str, default=None)
    parser.add_argument('--opt', type=str, default=None)
    parser.add_argument('--epoch', '-e', type=int, default=3)
    parser.add_argument('--lr', '-l', type=float, default=0.001)
    parser.add_argument('--inf', type=int, default=10)
    parser.add_argument('--outf', type=int, default=10)
    parser.add_argument('--batch', '-b', type=int, default=8)
    args = parser.parse_args()

    train = dataset.MovingMnistDataset(0, 7000, args.inf, args.outf)
    train_iter = iterators.SerialIterator(train, batch_size=args.batch, shuffle=True)
    test = dataset.MovingMnistDataset(7000, 10000, args.inf, args.outf)
    test_iter = iterators.SerialIterator(test, batch_size=args.batch, repeat=False, shuffle=False)

    model = network.MovingMnistNetwork(sz=[128,64,64], n=2)

    if args.model is not None:
        print("loading model from " + args.model)
        serializers.load_npz(args.model, model)

    if args.gpu >= 0:
        cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    opt = optimizers.Adam(alpha=args.lr)
    opt.setup(model)

    if args.opt is not None:
        print("loading opt from " + args.opt)
        serializers.load_npz(args.opt, opt)

    updater = training.StandardUpdater(train_iter, opt, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out='results')

    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
    trainer.extend(extensions.LogReport(trigger=(10, 'iteration')))
    trainer.extend(extensions.PrintReport(['epoch', 'main/loss', 'validation/main/loss']))
    trainer.extend(extensions.ProgressBar(update_interval=1))

    trainer.run()

    modelname = "./results/model"
    print( "saving model to " + modelname )
    serializers.save_npz(modelname, model)

    optname = "./results/opt"
    print( "saving opt to " + optname )
    serializers.save_npz(optname, opt) 
Example #13
Source File: demo_nnpu_chainer.py    From pywsl with MIT License
def main():
    gpu, out = -1, "result"
    stepsize = 0.001
    batchsize, epoch = 10000, 10
    beta, gamma = 0., 1.

    data_id, prior = 0, .5
    n_p, n_n, n_u, n_t, n_vp, n_vn, n_vu = 100, 0, 10000, 100, 20, 20, 100
    data_name, x_p, x_n, x_u, y_u, x_t, y_t, x_vp, x_vn, x_vu, y_vu \
        = load_dataset(data_id, n_p, n_n, n_u, prior, n_t, n_vp=n_vp, n_vn=n_vn, n_vu=n_vu)

    x_p, x_n, x_u, x_t, x_vp, x_vn, x_vu = x_p.astype(np.float32), x_n.astype(np.float32), \
        x_u.astype(np.float32), x_t.astype(np.float32), x_vp.astype(np.float32), \
        x_vn.astype(np.float32), x_vu.astype(np.float32)
    XYtrain = TupleDataset(np.r_[x_p, x_u], np.r_[np.ones(100), np.zeros(10000)].astype(np.int32))
    XYtest = TupleDataset(np.r_[x_vp, x_vu], np.r_[np.ones(20), np.zeros(100)].astype(np.int32))
    train_iter = chainer.iterators.SerialIterator(XYtrain, batchsize)
    test_iter = chainer.iterators.SerialIterator(XYtest, batchsize, repeat=False, shuffle=False)

    loss_type = lambda x: F.sigmoid(-x)
    nnpu_risk = PU_Risk(prior, loss=loss_type, nnPU=True, gamma=gamma, beta=beta)
    pu_acc = PU_Accuracy(prior)

    model = L.Classifier(MLP(), lossfun=nnpu_risk, accfun=pu_acc)
    if gpu >= 0:
        chainer.backends.cuda.get_device_from_id(gpu).use()
        model.to_gpu(gpu)

    optimizer = chainer.optimizers.Adam(alpha=stepsize)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(0.005))

    updater = chainer.training.StandardUpdater(train_iter, optimizer, device=gpu)
    trainer = chainer.training.Trainer(updater, (epoch, 'epoch'), out=out)
    trainer.extend(extensions.LogReport(trigger=(1, 'epoch')))
    trainer.extend(extensions.Evaluator(test_iter, model, device=gpu))
    trainer.extend(extensions.ProgressBar())
    trainer.extend(extensions.PrintReport(
                ['epoch', 'main/loss', 'validation/main/loss',
                 'main/accuracy', 'validation/main/accuracy', 
                 'elapsed_time']))
    key = 'validation/main/accuracy'
    model_name = 'model'
    trainer.extend(extensions.snapshot_object(model, model_name),
                   trigger=chainer.training.triggers.MaxValueTrigger(key))
    if extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(['main/loss', 'validation/main/loss'],
                                  'epoch', file_name='loss_curve.png'))
        trainer.extend(
            extensions.PlotReport(['main/accuracy', 'validation/main/accuracy'],
                                  'epoch', file_name='accuracy_curve.png'))

    trainer.run()

    yh = pred(model, x_t, batchsize, gpu)
    mr = prior*np.mean(yh[y_t == +1] <= 0) + (1-prior)*np.mean(yh[y_t == -1] >= 0)
    print("mr: {}".format(mr)) 
Example #14
Source File: train_memnn.py    From pfio with MIT License
def train(train_data_path, test_data_path, args):
    vocab = collections.defaultdict(lambda: len(vocab))
    vocab['<unk>'] = 0

    train_data = babi.read_data(vocab, train_data_path)
    test_data = babi.read_data(vocab, test_data_path)
    print('Training data: %s: %d' % (train_data_path, len(train_data)))
    print('Test data: %s: %d' % (test_data_path, len(test_data)))

    train_data = memnn.convert_data(train_data, args.max_memory)
    test_data = memnn.convert_data(test_data, args.max_memory)

    encoder = memnn.make_encoder(args.sentence_repr)
    network = memnn.MemNN(
        args.unit, len(vocab), encoder, args.max_memory, args.hop)
    model = chainer.links.Classifier(network, label_key='answer')
    opt = chainer.optimizers.Adam()

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    opt.setup(model)

    train_iter = chainer.iterators.SerialIterator(
        train_data, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(
        test_data, args.batchsize, repeat=False, shuffle=False)
    updater = chainer.training.StandardUpdater(
        train_iter, opt, device=args.gpu)
    trainer = chainer.training.Trainer(updater, (args.epoch, 'epoch'))

    @chainer.training.make_extension()
    def fix_ignore_label(trainer):
        network.fix_ignore_label()

    trainer.extend(fix_ignore_label)
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy']))
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.run()

    if args.model:
        memnn.save_model(args.model, model, vocab) 
Example #15
Source File: train_ch_in1k.py    From imgclsmob with MIT License
def prepare_trainer(net,
                    optimizer_name,
                    lr,
                    momentum,
                    num_epochs,
                    train_iter,
                    val_iter,
                    logging_dir_path,
                    num_gpus=0):
    if optimizer_name == "sgd":
        optimizer = chainer.optimizers.MomentumSGD(lr=lr, momentum=momentum)
    elif optimizer_name == "nag":
        optimizer = chainer.optimizers.NesterovAG(lr=lr, momentum=momentum)
    else:
        raise Exception('Unsupported optimizer: {}'.format(optimizer_name))
    optimizer.setup(net)

    # devices = tuple(range(num_gpus)) if num_gpus > 0 else (-1, )
    devices = (0,) if num_gpus > 0 else (-1,)

    updater = training.updaters.StandardUpdater(
        iterator=train_iter,
        optimizer=optimizer,
        device=devices[0])
    trainer = training.Trainer(
        updater=updater,
        stop_trigger=(num_epochs, 'epoch'),
        out=logging_dir_path)

    val_interval = 100000, 'iteration'
    log_interval = 1000, 'iteration'

    trainer.extend(
        extension=extensions.Evaluator(
            val_iter,
            net,
            device=devices[0]),
        trigger=val_interval)
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=val_interval)
    trainer.extend(
        extensions.snapshot_object(
            net,
            'model_iter_{.updater.iteration}'),
        trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.observe_lr(), trigger=log_interval)
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'iteration', 'main/loss', 'validation/main/loss', 'main/accuracy', 'validation/main/accuracy',
            'lr']),
        trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    return trainer 
Example #16
Source File: train_ch_cifar.py    From imgclsmob with MIT License
def prepare_trainer(net,
                    optimizer_name,
                    lr,
                    momentum,
                    num_epochs,
                    train_iter,
                    val_iter,
                    logging_dir_path,
                    num_gpus=0):
    if optimizer_name == "sgd":
        optimizer = chainer.optimizers.MomentumSGD(lr=lr, momentum=momentum)
    elif optimizer_name == "nag":
        optimizer = chainer.optimizers.NesterovAG(lr=lr, momentum=momentum)
    else:
        raise Exception('Unsupported optimizer: {}'.format(optimizer_name))
    optimizer.setup(net)

    # devices = tuple(range(num_gpus)) if num_gpus > 0 else (-1, )
    devices = (0,) if num_gpus > 0 else (-1,)

    updater = training.updaters.StandardUpdater(
        iterator=train_iter,
        optimizer=optimizer,
        device=devices[0])
    trainer = training.Trainer(
        updater=updater,
        stop_trigger=(num_epochs, 'epoch'),
        out=logging_dir_path)

    val_interval = 100000, 'iteration'
    log_interval = 1000, 'iteration'

    trainer.extend(
        extension=extensions.Evaluator(
            val_iter,
            net,
            device=devices[0]),
        trigger=val_interval)
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=val_interval)
    trainer.extend(
        extensions.snapshot_object(
            net,
            'model_iter_{.updater.iteration}'),
        trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.observe_lr(), trigger=log_interval)
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'iteration', 'main/loss', 'validation/main/loss', 'main/accuracy', 'validation/main/accuracy',
            'lr']),
        trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    return trainer 
Example #17
Source File: train_ch.py    From imgclsmob with MIT License
def prepare_trainer(net,
                    optimizer_name,
                    lr,
                    momentum,
                    num_epochs,
                    train_data,
                    val_data,
                    logging_dir_path,
                    use_gpus):
    if optimizer_name == "sgd":
        optimizer = chainer.optimizers.MomentumSGD(lr=lr, momentum=momentum)
    elif optimizer_name == "nag":
        optimizer = chainer.optimizers.NesterovAG(lr=lr, momentum=momentum)
    else:
        raise Exception("Unsupported optimizer: {}".format(optimizer_name))
    optimizer.setup(net)

    # devices = tuple(range(num_gpus)) if num_gpus > 0 else (-1, )
    devices = (0,) if use_gpus else (-1,)

    updater = training.updaters.StandardUpdater(
        iterator=train_data["iterator"],
        optimizer=optimizer,
        device=devices[0])
    trainer = training.Trainer(
        updater=updater,
        stop_trigger=(num_epochs, "epoch"),
        out=logging_dir_path)

    val_interval = 100000, "iteration"
    log_interval = 1000, "iteration"

    trainer.extend(
        extension=extensions.Evaluator(
            iterator=val_data["iterator"],
            target=net,
            device=devices[0]),
        trigger=val_interval)
    trainer.extend(extensions.dump_graph("main/loss"))
    trainer.extend(extensions.snapshot(), trigger=val_interval)
    trainer.extend(
        extensions.snapshot_object(
            net,
            "model_iter_{.updater.iteration}"),
        trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.observe_lr(), trigger=log_interval)
    trainer.extend(
        extensions.PrintReport([
            "epoch", "iteration", "main/loss", "validation/main/loss", "main/accuracy", "validation/main/accuracy",
            "lr"]),
        trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    return trainer 
Example #18
Source File: gen_mnist_mlp.py    From chainer-compiler with MIT License
def run_training(args, model):
    trainer = create_trainer(args, model)

    # Dump a computational graph from 'loss' variable at the first iteration
    # The "main" refers to the target link of the "main" optimizer.
    trainer.extend(extensions.dump_graph('main/loss'))

    # Take a snapshot for each specified epoch
    frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)
    trainer.extend(extensions.snapshot(), trigger=(frequency, 'epoch'))

    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport())

    # Save two plot images to the result dir
    if args.plot and extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(['main/loss', 'validation/main/loss'],
                                  'epoch', file_name='loss.png'))
        trainer.extend(
            extensions.PlotReport(
                ['main/accuracy', 'validation/main/accuracy'],
                'epoch', file_name='accuracy.png'))

    # Print selected entries of the log to stdout
    # Here "main" refers to the target link of the "main" optimizer again, and
    # "validation" refers to the default name of the Evaluator extension.
    # Entries other than 'epoch' are reported by the Classifier link, called by
    # either the updater or the evaluator.
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))

    # Print a progress bar to stdout
    trainer.extend(extensions.ProgressBar())

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run() 
Example #19
Source File: gen_resnet50.py    From chainer-compiler with MIT License
def run_training(args, model):
    trainer = create_trainer(args, model)

    # Dump a computational graph from 'loss' variable at the first iteration
    # The "main" refers to the target link of the "main" optimizer.
    trainer.extend(extensions.dump_graph('main/loss'))

    # Take a snapshot for each specified epoch
    frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)
    trainer.extend(extensions.snapshot(), trigger=(frequency, 'epoch'))

    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport())

    # Save two plot images to the result dir
    if args.plot and extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(['main/loss', 'validation/main/loss'],
                                  'epoch', file_name='loss.png'))
        trainer.extend(
            extensions.PlotReport(
                ['main/accuracy', 'validation/main/accuracy'],
                'epoch', file_name='accuracy.png'))

    # Print selected entries of the log to stdout
    # Here "main" refers to the target link of the "main" optimizer again, and
    # "validation" refers to the default name of the Evaluator extension.
    # Entries other than 'epoch' are reported by the Classifier link, called by
    # either the updater or the evaluator.
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))

    # Print a progress bar to stdout
    trainer.extend(extensions.ProgressBar())

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run() 
Example #20
Source File: mnist.py    From cloudml-samples with Apache License 2.0
def main():
  # Training settings
  args = get_args()

  # Set up a neural network to train
  model = L.Classifier(Net())

  if args.gpu >= 0:
    # Make a specified GPU current
    chainer.backends.cuda.get_device_from_id(args.gpu).use()
    model.to_gpu()  # Copy the model to the GPU

  # Setup an optimizer
  optimizer = chainer.optimizers.MomentumSGD(lr=args.lr, momentum=args.momentum)
  optimizer.setup(model)

  # Load the MNIST dataset
  train, test = chainer.datasets.get_mnist(ndim=3)
  train_iter = chainer.iterators.SerialIterator(train, args.batch_size)
  test_iter = chainer.iterators.SerialIterator(test, args.test_batch_size,
                                               repeat=False, shuffle=False)

  # Set up a trainer
  updater = training.updaters.StandardUpdater(
      train_iter, optimizer, device=args.gpu)
  trainer = training.Trainer(updater, (args.epochs, 'epoch'))

  # Evaluate the model with the test dataset for each epoch
  trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))

  # Write a log of evaluation statistics for each epoch
  trainer.extend(extensions.LogReport())

  # Print selected entries of the log to stdout
  trainer.extend(extensions.PrintReport(
      ['epoch', 'main/loss', 'validation/main/loss',
       'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))

  # Send selected entries of the log to CMLE HP tuning system
  trainer.extend(
    HpReport(hp_metric_val='validation/main/loss', hp_metric_tag='my_loss'))

  if args.resume:
    # Resume from a snapshot
    tmp_model_file = os.path.join('/tmp', MODEL_FILE_NAME)
    if not os.path.exists(tmp_model_file):
      subprocess.check_call([
        'gsutil', 'cp', os.path.join(args.model_dir, MODEL_FILE_NAME),
        tmp_model_file])
    if os.path.exists(tmp_model_file):
      chainer.serializers.load_npz(tmp_model_file, trainer)
  
  trainer.run()

  if args.model_dir:
    tmp_model_file = os.path.join('/tmp', MODEL_FILE_NAME)
    serializers.save_npz(tmp_model_file, model)
    subprocess.check_call([
        'gsutil', 'cp', tmp_model_file,
        os.path.join(args.model_dir, MODEL_FILE_NAME)])