Python chainer.training.extension.PRIORITY_WRITER Examples
The following are 3 code examples of chainer.training.extension.PRIORITY_WRITER(). Each example is taken from an open-source project; the source file and license are listed above it. You may also want to check out all available functions/classes of the module chainer.training.extension, or try the search function.
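
Before the project examples, here is a minimal self-contained sketch of what the priority means: an extension registered with PRIORITY_WRITER runs before reader extensions such as LogReport within the same trigger step, so anything it writes into trainer.observation is visible to those readers. The toy model, the random dataset, the write_lr name, and the 'lr' observation key below are hypothetical, chosen only for illustration.

import numpy as np
import chainer
from chainer.training import extension, extensions

# Hypothetical toy setup: a linear classifier on random data.
model = chainer.links.Classifier(chainer.links.Linear(4, 2))
optimizer = chainer.optimizers.SGD()
optimizer.setup(model)

x = np.random.rand(20, 4).astype(np.float32)
t = np.random.randint(0, 2, size=20).astype(np.int32)
train_iter = chainer.iterators.SerialIterator(
    chainer.datasets.TupleDataset(x, t), batch_size=5)

updater = chainer.training.StandardUpdater(train_iter, optimizer)
trainer = chainer.training.Trainer(updater, (2, 'epoch'))

@extension.make_extension(trigger=(1, 'iteration'),
                          priority=extension.PRIORITY_WRITER)
def write_lr(trainer_):
    # Written at PRIORITY_WRITER, so LogReport (a PRIORITY_READER
    # extension) sees the value later in the same step.
    trainer_.observation['lr'] = optimizer.lr

trainer.extend(write_lr)
trainer.extend(extensions.LogReport(trigger=(1, 'epoch')))
trainer.extend(extensions.PrintReport(['epoch', 'lr']))
trainer.run()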
Example #1
Source File: value_observation.py, from the chainer project (MIT License)
def observe_value(observation_key, target_func):
    """Returns a trainer extension to continuously record a value.

    Args:
        observation_key (str): Key of observation to record.
        target_func (function): Function that returns the value to record.
            It must take one argument: :class:`~chainer.training.Trainer`
            object.

    Returns:
        The extension function.

    This extension is triggered each epoch by default.
    To change this, use the ``trigger`` argument with the
    :meth:`Trainer.extend() <chainer.training.Trainer.extend>` method.

    """
    @extension.make_extension(
        trigger=(1, 'epoch'), priority=extension.PRIORITY_WRITER)
    def _observe_value(trainer):
        trainer.observation[observation_key] = target_func(trainer)
    return _observe_value
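
A typical way to attach this extension (hypothetical usage; assumes a Trainer named trainer has already been built, with its optimizer registered under 'main'):

# Hypothetical usage: record the current learning rate under the key 'lr'
# every epoch (the extension's default trigger).
trainer.extend(observe_value(
    'lr', lambda trainer: trainer.updater.get_optimizer('main').lr))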
Example #2
Source File: monitor.py, from the LaSO project (BSD 3-Clause "New" or "Revised" License)
def Monitor(base_name="main"):
    """Returns a trainer extension to monitor a model.

    This extension calls the `monitor` method of a model each epoch.

    Note:
        Not used. Here for reference.
    """
    @extension.make_extension(
        trigger=(1, 'epoch'), priority=extension.PRIORITY_WRITER)
    def _monitor_model(trainer):
        trainer.updater.get_all_optimizers()[base_name].target.predictor.monitor()
    return _monitor_model
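
This extension assumes that the target of the optimizer registered under base_name is a wrapper link whose predictor attribute implements a monitor() method; running it at PRIORITY_WRITER means anything monitor() reports is recorded before reader extensions run. A hypothetical registration, assuming such a model is attached to the trainer:

# Hypothetical usage: call model.predictor.monitor() once per epoch.
trainer.extend(Monitor(base_name="main"))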
Example #3
Source File: test_observation_aggregator.py, from the chainer project (MIT License)
def run_test_observation_aggregator(comm, xp,
                                    use_chainer_variable,
                                    communicate_interval,
                                    use_gpu):
    model = DummyChain()

    if use_gpu:
        # Use CuPy's Device class to force call cudaSetDevice()
        chainer.cuda.get_device_from_id(comm.intra_rank).use()

    device = get_device(comm.intra_rank if use_gpu else None,
                        xp == chainerx)

    if xp == chainerx:
        train = xp.array(np.random.rand(10, 1).astype(np.float32))
    else:
        train = xp.random.rand(10, 1).astype(np.float32)

    model.to_device(device)

    train_iter = chainer.iterators.SerialIterator(train,
                                                  batch_size=1,
                                                  repeat=True,
                                                  shuffle=True)

    optimizer = chainermn.create_multi_node_optimizer(
        chainer.optimizers.Adam(), comm)
    optimizer.setup(model)

    updater = chainer.training.StandardUpdater(train_iter, optimizer,
                                               device=device)

    trainer = chainer.training.Trainer(updater, (1, 'epoch'))

    @extension.make_extension(
        trigger=(1, 'iteration'), priority=extension.PRIORITY_WRITER)
    def rank_reporter(trainer_):
        tmp = xp.asarray(comm.rank, dtype=np.float32)
        if use_chainer_variable:
            tmp = chainer.Variable(tmp)
        trainer_.observation['rank'] = tmp

    @extension.make_extension(
        trigger=(communicate_interval, 'iteration'),
        priority=extension.PRIORITY_READER)
    def aggregated_rank_checker(trainer_):
        actual = trainer_.observation['rank-aggregated']
        if use_chainer_variable:
            actual = actual.data
        expected = (comm.size - 1) / 2
        chainer.testing.assert_allclose(actual, expected)

    trainer.extend(rank_reporter)
    trainer.extend(ObservationAggregator(
        comm, 'rank', 'rank-aggregated',
        comm_trigger=(communicate_interval, 'iteration')))
    trainer.extend(aggregated_rank_checker)

    trainer.run()
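
The two inline extensions in this test illustrate the priority contract: rank_reporter runs at PRIORITY_WRITER and writes each worker's rank into trainer.observation every iteration, ObservationAggregator then averages that value across the communicator, and aggregated_rank_checker runs at PRIORITY_READER, so it reads only after the writer and the aggregator have run within the same iteration. The expected value is the mean of the ranks 0, 1, ..., size - 1, which is (size - 1) / 2.

# Sanity check of the expected value for a hypothetical 4-process run:
# ranks 0, 1, 2, 3 average to (4 - 1) / 2 == 1.5.
size = 4
assert sum(range(size)) / size == (size - 1) / 2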