Python chainer.dataset.convert.concat_examples() Examples

The following are 30 code examples of chainer.dataset.convert.concat_examples(). You can go to the original project or source file by following the reference above each example. You may also want to check out all available functions/classes of the module chainer.dataset.convert, or try the search function.
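In short, concat_examples() takes a list of examples (arrays, or tuples/dicts of arrays), stacks each component along a new batch axis, optionally pads variable-length arrays, and sends the result to a device. A minimal sketch of typical usage (the array contents and padding values are purely illustrative):

import numpy as np
from chainer.dataset import convert

# A minibatch of (input, label) tuples with variable-length inputs.
batch = [
    (np.array([1, 2, 3], dtype=np.int32), np.int32(0)),
    (np.array([4, 5], dtype=np.int32), np.int32(1)),
]

# Inputs are stacked and right-padded with -1; labels are simply stacked.
x, t = convert.concat_examples(batch, padding=(-1, None))
# x -> [[ 1  2  3]
#       [ 4  5 -1]]
# t -> [0 1]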
Example #1
Source File: r2_score_evaluator.py    From chainer-chemistry with MIT License
def __init__(self, iterator, target, converter=convert.concat_examples,
                 device=None, eval_hook=None, eval_func=None, name=None,
                 pos_label=1, ignore_labels=None, raise_value_error=True,
                 logger=None, sample_weight=None,
                 multioutput='uniform_average', ignore_nan=False):
        metrics_fun = {'r2_score': self.r2_score}
        super(R2ScoreEvaluator, self).__init__(
            iterator, target, converter=converter, device=device,
            eval_hook=eval_hook, eval_func=eval_func, metrics_fun=metrics_fun,
            name=name, logger=logger)

        self.pos_label = pos_label
        self.ignore_labels = ignore_labels
        self.raise_value_error = raise_value_error
        self.sample_weight = sample_weight
        self.multioutput = multioutput
        self.ignore_nan = ignore_nan 
Example #2
Source File: lm.py    From espnet with Apache License 2.0
def concat_examples(batch, device=None, padding=None):
    """Concat examples in minibatch.

    :param list batch: The list of examples to concatenate
    :param int device: The device to send to
    :param Tuple[int,int] padding: The padding to use
    :return: (inputs, targets)
    :rtype: (torch.Tensor, torch.Tensor)
    """
    x, t = convert.concat_examples(batch, padding=padding)
    x = torch.from_numpy(x)
    t = torch.from_numpy(t)
    if device is not None and device >= 0:
        x = x.cuda(device)
        t = t.cuda(device)
    return x, t 
Example #3
Source File: lm.py    From espnet with Apache License 2.0
def evaluate(self):
        val_iter = self.get_iterator("main")
        target = self.get_target("main")
        loss = 0
        count = 0
        for batch in copy.copy(val_iter):
            x, t = convert.concat_examples(batch, device=self.device, padding=(0, -1))
            xp = chainer.backends.cuda.get_array_module(x)
            state = None
            for i in six.moves.range(len(x[0])):
                state, loss_batch = target(state, x[:, i], t[:, i])
                non_zeros = xp.count_nonzero(x[:, i])
                loss += loss_batch.data * non_zeros
                count += int(non_zeros)
        # report validation loss
        observation = {}
        with reporter.report_scope(observation):
            reporter.report({"loss": float(loss / count)}, target)
        return observation 
Example #4
Source File: portrait_vis_evaluator.py    From portrait_matting with GNU General Public License v3.0
def __init__(
            self, iterator, target, device=None,
            converter=convert.concat_examples, label_names=None,
            filename='segmmentation_iter={iteration}_idx={index}.jpg',
            mode='seg', n_processes=None):

        if isinstance(iterator, iterator_module.Iterator):
            iterator = {'main': iterator}
        self.iterators = iterator

        if isinstance(target, link.Link):
            target = {'main': target}
        self.targets = target

        self.device = device
        self.converter = converter
        self.label_names = label_names
        self.filename = filename
        self.mode = mode
        self.n_processes = n_processes or multiprocessing.cpu_count() 
Example #5
Source File: batch_evaluator.py    From chainer-chemistry with MIT License
def __init__(self, iterator, target, converter=convert.concat_examples,
                 device=None, eval_hook=None, eval_func=None, metrics_fun=None,
                 name=None, logger=None):
        super(BatchEvaluator, self).__init__(
            iterator, target, converter=converter, device=device,
            eval_hook=eval_hook, eval_func=eval_func)
        self.name = name
        self.logger = logger or getLogger()

        if callable(metrics_fun):
            # TODO(mottodora): use better name or infer
            self.metrics_fun = {"evaluation": metrics_fun}
        elif isinstance(metrics_fun, dict):
            self.metrics_fun = metrics_fun
        else:
            raise TypeError('Unexpected type: metrics_fun must be Callable '
                            'or dict.')
Example #6
Source File: multi_node_evaluator.py    From chainer with MIT License
def __init__(self, comm, iterator, target, device=None,
                 converter=convert.concat_examples, root=0,
                 **kwargs):
        progress_hook, = argument.parse_kwargs(kwargs, ('progress_hook', None))

        self.comm = comm
        self.iterator = iterator
        self._targets = {"main": target}
        self.converter = converter

        if device is not None:
            device = backend.get_device(device)
        self.device = device

        self._progress_hook = progress_hook

        assert 0 <= root and root < self.comm.size
        self.root = root 
Example #7
Source File: train_model_chainermn.py    From graph-nvp with MIT License
def __init__(self, iterator, opt, device, loss_func,
                 converter=convert.concat_examples):
        super(MolNvpUpdater, self).__init__(
            iterator=iterator,
            optimizer=opt,
            converter=converter,
            loss_func=loss_func,
            device=device,
            loss_scale=None,
        )
        if isinstance(iterator, iterator_module.Iterator):
            iterator = {'main': iterator}
        self.iterator = iterator
        self.opt = opt
        self.device = device
        self.loss_func = loss_func
        self.model = opt.target
        self.converter = converter 
Example #8
Source File: train_model.py    From graph-nvp with MIT License
def __init__(self, iterator, opt, device, loss_func,
                 converter=convert.concat_examples):
        super(MolNvpUpdater, self).__init__(
            iterator=iterator,
            optimizer=opt,
            converter=converter,
            loss_func=loss_func,
            device=device,
            loss_scale=None,
        )
        if isinstance(iterator, iterator_module.Iterator):
            iterator = {'main': iterator}
        self.iterator = iterator
        self.opt = opt
        self.device = device
        self.loss_func = loss_func
        self.model = opt.target
        self.converter = converter 
Example #9
Source File: MyUpdater.py    From HFT-CNN with MIT License
def __init__(self, iterator, optimizer, class_dim, converter=convert.concat_examples,
                device=None, loss_func=None):
        if isinstance(iterator, iterator_module.Iterator):
            iterator = {'main': iterator}
        self._iterators = iterator

        if not isinstance(optimizer, dict):
            optimizer = {'main': optimizer}
        self._optimizers = optimizer

        if device is not None and device >= 0:
            for optimizer in six.itervalues(self._optimizers):
                optimizer.target.to_gpu(device)

        self.converter = converter
        self.loss_func = loss_func
        self.device = device
        self.iteration = 0
        self.class_dim = class_dim 
Example #10
Source File: updater.py    From Deep_VoiceChanger with MIT License
def __init__(self, iterator_a, iterator_b, opt_g_a, opt_g_b, opt_d_a, opt_d_b, device):
        self._iterators = {'main': iterator_a, 'second': iterator_b}
        self.generator_ab = opt_g_a.target
        self.generator_ba = opt_g_b.target
        self.discriminator_a = opt_d_a.target
        self.discriminator_b = opt_d_b.target
        self._optimizers = {
            'generator_ab': opt_g_a,
            'generator_ba': opt_g_b,
            'discriminator_a': opt_d_a,
            'discriminator_b': opt_d_b,
            }
           
        self.itr_a = iterator_a
        self.itr_b = iterator_b
        self.opt_g_a = opt_g_a
        self.opt_g_b = opt_g_b
        self.opt_d_a = opt_d_a
        self.opt_d_b = opt_d_b

        self.converter = convert.concat_examples
        self.device = device
        self.iteration = 0
        self.xp = self.generator_ab.xp
        self.bch = iterator_a.batch_size 
Example #11
Source File: googlenet.py    From deep_metric_learning with MIT License
def extract(self, images, layers=['pool5'], size=(224, 224)):
        """Extracts all the feature maps of given images.

        The difference of directly executing ``__call__`` is that
        it directly accepts images as an input and automatically
        transforms them to a proper variable. That is,
        it is also interpreted as a shortcut method that implicitly calls
        ``prepare`` and ``__call__`` functions.

        Args:
            images (iterable of PIL.Image or numpy.ndarray): Input images.
            layers (list of str): The list of layer names you want to extract.
            size (pair of ints): The resolution of resized images used as
                an input of CNN. All the given images are not resized
                if this argument is ``None``, but the resolutions of
                all the images should be the same.

        Returns:
            Dictionary of ~chainer.Variable: A dictionary in which
            the key is the layer name and the value is
            the corresponding feature map variable.

        """

        x = concat_examples([prepare(img, size=size) for img in images])
        x = Variable(self.xp.asarray(x))
        return self(x, layers=layers) 
Example #12
Source File: googlenet.py    From deep_metric_learning with MIT License
def predict(self, images, oversample=True):
        """Computes all the probabilities of given images.

        Args:
            images (iterable of PIL.Image or numpy.ndarray): Input images.
            oversample (bool): If ``True``, it averages results across
                center, corners, and mirrors. Otherwise, it uses only the
                center.

        Returns:
            ~chainer.Variable: Output that contains the class probabilities
            of given images.

        """

        x = concat_examples([prepare(img, size=(256, 256)) for img in images])
        if oversample:
            x = imgproc.oversample(x, crop_dims=(224, 224))
        else:
            x = x[:, :, 16:240, 16:240]
        # Note: the volatile flag was removed in Chainer v2; use chainer.no_backprop_mode() to reduce memory consumption
        x = Variable(self.xp.asarray(x))
        y = self(x, layers=['prob'])['prob']
        if oversample:
            n = y.data.shape[0] // 10
            y_shape = y.data.shape[1:]
            y = reshape(y, (n, 10) + y_shape)
            y = sum(y, axis=1) / 10
        return y 
Example #13
Source File: roc_auc_evaluator.py    From chainer-chemistry with MIT License
def __init__(self, iterator, target, converter=convert.concat_examples,
                 device=None, eval_hook=None, eval_func=None, name=None,
                 pos_labels=1, ignore_labels=None, raise_value_error=True,
                 logger=None):
        metrics_fun = {'roc_auc': self.roc_auc_score}
        super(ROCAUCEvaluator, self).__init__(
            iterator, target, converter=converter, device=device,
            eval_hook=eval_hook, eval_func=eval_func, metrics_fun=metrics_fun,
            name=name, logger=logger)

        self.pos_labels = _to_list(pos_labels)
        self.ignore_labels = _to_list(ignore_labels)
        self.raise_value_error = raise_value_error 
Example #14
Source File: MyEvaluator.py    From HFT-CNN with MIT License
def __init__(self, iterator, target, class_dim, converter=convert.concat_examples,  
                 device=None, eval_hook=None, eval_func=None):
        if isinstance(iterator, iterator_module.Iterator):
            iterator = {'main': iterator}
        self._iterators = iterator

        if isinstance(target, link.Link):
            target = {'main': target}
        self._targets = target

        self.converter = converter
        self.device = device
        self.eval_hook = eval_hook
        self.eval_func = eval_func
        self.class_dim = class_dim 
Example #15
Source File: regressor.py    From chainer-chemistry with MIT License
def predict(
            self, data, batchsize=16, converter=concat_examples,
            retain_inputs=False, preprocess_fn=None, postprocess_fn=None):
        """Predict label of each category by taking .

        Args:
            data: input data
            batchsize (int): batch size
            converter (Callable): convert from `data` to `inputs`
            preprocess_fn (Callable): Its input is numpy.ndarray or
                cupy.ndarray, it can return either Variable, cupy.ndarray or
                numpy.ndarray
            postprocess_fn (Callable): Its input argument is Variable,
                but this method may return either Variable, cupy.ndarray or
                numpy.ndarray.
            retain_inputs (bool): If True, this instance keeps the inputs in
                `self.inputs`.

        Returns (tuple or numpy.ndarray): Typically, a 1-dimensional int
            array with shape (batchsize, ) which represents each example's
            category prediction.

        """
        with chainer.no_backprop_mode(), chainer.using_config('train', False):
            predict_labels = self._forward(
                data, fn=self.predictor, batchsize=batchsize,
                converter=converter, retain_inputs=retain_inputs,
                preprocess_fn=preprocess_fn, postprocess_fn=postprocess_fn)
        return predict_labels 
Example #16
Source File: classifier.py    From chainer-chemistry with MIT License
def predict(
            self, data, batchsize=16, converter=concat_examples,
            retain_inputs=False, preprocess_fn=None, postprocess_fn=_argmax):
        """Predict label of each category by taking .

        Args:
            data: input data
            batchsize (int): batch size
            converter (Callable): convert from `data` to `inputs`
            preprocess_fn (Callable): Its input is numpy.ndarray or
                cupy.ndarray, it can return either Variable, cupy.ndarray or
                numpy.ndarray
            postprocess_fn (Callable): Its input argument is Variable,
                but this method may return either Variable, cupy.ndarray or
                numpy.ndarray.
            retain_inputs (bool): If True, this instance keeps the inputs in
                `self.inputs`.

        Returns (tuple or numpy.ndarray): Typically, a 1-dimensional int
            array with shape (batchsize, ) which represents each example's
            category prediction.

        """
        with chainer.no_backprop_mode(), chainer.using_config('train', False):
            predict_labels = self._forward(
                data, fn=self.predictor, batchsize=batchsize,
                converter=converter, retain_inputs=retain_inputs,
                preprocess_fn=preprocess_fn, postprocess_fn=postprocess_fn)
        return predict_labels

    # --- For backward compatibility --- 
Example #17
Source File: classifier.py    From chainer-chemistry with MIT License
def predict_proba(
            self, data, batchsize=16, converter=concat_examples,
            retain_inputs=False, preprocess_fn=None,
            postprocess_fn=chainer.functions.softmax):
        """Calculate probability of each category.

        Args:
            data: "train_x array" or "chainer dataset"
            batchsize (int): batch size
            converter (Callable): convert from `data` to `inputs`
            preprocess_fn (Callable): Its input is numpy.ndarray or
                cupy.ndarray, it can return either Variable, cupy.ndarray or
                numpy.ndarray
            postprocess_fn (Callable): Its input argument is Variable,
                but this method may return either Variable, cupy.ndarray or
                numpy.ndarray.
            retain_inputs (bool): If True, this instance keeps the inputs in
                `self.inputs`.

        Returns (tuple or numpy.ndarray): Typically, a 2-dimensional float
            array with shape (batchsize, number of categories) which represents
            each example's probability of belonging to each category.

        """
        with chainer.no_backprop_mode(), chainer.using_config('train', False):
            proba = self._forward(
                data, fn=self.predictor, batchsize=batchsize,
                converter=converter, retain_inputs=retain_inputs,
                preprocess_fn=preprocess_fn, postprocess_fn=postprocess_fn)
        return proba 
Example #18
Source File: train.py    From portrait_matting with GNU General Public License v3.0
def select_converter(mode='seg'):
    if mode in ['seg', 'seg+', 'seg_tri']:
        return convert.concat_examples  # Default
    elif mode == 'mat':
        return custom_converters.matting_converter
    else:
        logger.error('Unknown mode') 
Example #19
Source File: matting_converter.py    From portrait_matting with GNU General Public License v3.0
def matting_converter(batch, device=None, padding=None):
    first_elem = batch[0]
    assert isinstance(first_elem, tuple)
    assert len(first_elem) == 3

    # Collect pure GPU variables (img)
    gpu_batch = [v[0] for v in batch]
    gpu_batch = convert.concat_examples(gpu_batch, device, padding)

    # Collect pure CPU variables (alpha, weight)
    cpu_batch = [v[1:] for v in batch]
    cpu_batch = convert.concat_examples(cpu_batch, -1, padding)

    # Concatenate
    return (gpu_batch, *cpu_batch) 
Example #20
Source File: trainer.py    From Deep_VoiceChanger with MIT License
def preview_convert(iterator_a, iterator_b, g_a, g_b, device, gla, dst):
    @chainer.training.make_extension()
    def make_preview(trainer):
        with chainer.using_config('train', False):
            with chainer.no_backprop_mode():
                x_a = iterator_a.next()
                x_a = convert.concat_examples(x_a, device)
                x_a = chainer.Variable(x_a)

                x_b = iterator_b.next()
                x_b = convert.concat_examples(x_b, device)
                x_b = chainer.Variable(x_b)

                x_ab = g_a(x_a)
                x_ba = g_b(x_b)

                x_bab = g_a(x_ba)
                x_aba = g_b(x_ab)

                preview_dir = '{}/preview'.format(dst)
                if not os.path.exists(preview_dir):
                    os.makedirs(preview_dir)
                image_dir = '{}/image'.format(dst)
                if not os.path.exists(image_dir):
                    os.makedirs(image_dir)

                names = ['a', 'ab', 'aba', 'b', 'ba', 'bab']
                images = [x_a, x_ab, x_aba, x_b, x_ba, x_bab]
                for n, i in zip(names, images):
                    i = cp.asnumpy(i.data)[:,:,padding:-padding,:].reshape(1, -1, 128)
                    image.save(image_dir+'/{}{}.jpg'.format(trainer.updater.epoch,n), i)
                    w = np.concatenate([gla.inverse(_i) for _i in dataset.reverse(i)])
                    dataset.save(preview_dir+'/{}{}.wav'.format(trainer.updater.epoch,n), 16000, w)

    return make_preview 
Example #21
Source File: lm.py    From espnet with Apache License 2.0
def update_core(self):
        # When we pass one iterator and optimizer to StandardUpdater.__init__,
        # they are automatically named 'main'.
        train_iter = self.get_iterator("main")
        optimizer = self.get_optimizer("main")

        count = 0
        sum_loss = 0
        optimizer.target.cleargrads()  # Clear the parameter gradients
        for _ in range(self.accum_grad):
            # Progress the dataset iterator for sentences at each iteration.
            batch = train_iter.__next__()
            x, t = convert.concat_examples(batch, device=self.device, padding=(0, -1))
            # Concatenate the token IDs to matrices and send them to the device
            # self.converter does this job
            # (it is chainer.dataset.concat_examples by default)
            xp = chainer.backends.cuda.get_array_module(x)
            loss = 0
            state = None
            batch_size, sequence_length = x.shape
            for i in six.moves.range(sequence_length):
                # Compute the loss at this time step and accumulate it
                state, loss_batch = optimizer.target(
                    state, chainer.Variable(x[:, i]), chainer.Variable(t[:, i])
                )
                non_zeros = xp.count_nonzero(x[:, i])
                loss += loss_batch * non_zeros
                count += int(non_zeros)
            # backward
            loss /= batch_size * self.accum_grad  # normalized by batch size
            sum_loss += float(loss.data)
            loss.backward()  # Backprop
            loss.unchain_backward()  # Truncate the graph

        reporter.report({"loss": sum_loss}, optimizer.target)
        reporter.report({"count": count}, optimizer.target)
        # update
        optimizer.update()  # Update the parameters
        self.scheduler.step(self.iteration) 
Example #22
Source File: lm.py    From espnet with Apache License 2.0
def evaluate(self):
        """Evaluate the model."""
        val_iter = self.get_iterator("main")
        loss = 0
        nll = 0
        count = 0
        self.model.eval()
        with torch.no_grad():
            for batch in copy.copy(val_iter):
                x, t = concat_examples(batch, device=self.device[0], padding=(0, -100))
                if self.device[0] == -1:
                    l, n, c = self.model(x, t)
                else:
                    # apex does not support torch.nn.DataParallel
                    l, n, c = data_parallel(self.model, (x, t), self.device)
                loss += float(l.sum())
                nll += float(n.sum())
                count += int(c.sum())
        self.model.train()
        # report validation loss
        observation = {}
        with reporter.report_scope(observation):
            reporter.report({"loss": loss}, self.model.reporter)
            reporter.report({"nll": nll}, self.model.reporter)
            reporter.report({"count": count}, self.model.reporter)
        return observation 
Example #23
Source File: lm.py    From espnet with Apache License 2.0
def update_core(self):
        """Update the model."""
        # When we pass one iterator and optimizer to StandardUpdater.__init__,
        # they are automatically named 'main'.
        train_iter = self.get_iterator("main")
        optimizer = self.get_optimizer("main")
        # Progress the dataset iterator for sentences at each iteration.
        self.model.zero_grad()  # Clear the parameter gradients
        accum = {"loss": 0.0, "nll": 0.0, "count": 0}
        for _ in range(self.accum_grad):
            batch = train_iter.__next__()
            # Concatenate the token IDs to matrices and send them to the device
            # self.converter does this job
            # (it is chainer.dataset.concat_examples by default)
            x, t = concat_examples(batch, device=self.device[0], padding=(0, -100))
            if self.device[0] == -1:
                loss, nll, count = self.model(x, t)
            else:
                # apex does not support torch.nn.DataParallel
                loss, nll, count = data_parallel(self.model, (x, t), self.device)

            # backward
            loss = loss.mean() / self.accum_grad
            if self.use_apex:
                from apex import amp

                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()  # Backprop
            # accumulate stats
            accum["loss"] += float(loss)
            accum["nll"] += float(nll.sum())
            accum["count"] += int(count.sum())

        for k, v in accum.items():
            reporter.report({k: v}, optimizer.target)
        if self.gradclip is not None:
            nn.utils.clip_grad_norm_(self.model.parameters(), self.gradclip)
        optimizer.step()  # Update the parameters
        self.scheduler.step(n_iter=self.iteration) 
Example #24
Source File: train_utils.py    From see with GNU General Public License v3.0
def concat_and_pad_examples(batch, device=None, padding=-10000):
    return concat_examples(batch, device=device, padding=padding) 
Example #25
Source File: train_utils.py    From see with GNU General Public License v3.0
def get_concat_and_pad_examples(padding=-10000):
    def concat_and_pad_examples(batch, device=None):
        return concat_examples(batch, device=device, padding=padding)

    return concat_and_pad_examples 
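A closure like this is typically handed to a Chainer updater or evaluator through its converter argument. Below is a minimal, self-contained sketch of that wiring; the toy dataset, model, and optimizer are purely illustrative and assume the get_concat_and_pad_examples helper above is in scope:

import numpy as np
import chainer.links as L
from chainer import iterators, optimizers, training

# Toy fixed-length dataset, so the padding value is never actually needed;
# the point is only how the closure is wired in as the converter.
dataset = [(np.random.rand(4).astype(np.float32), np.int32(i % 2)) for i in range(16)]
train_iter = iterators.SerialIterator(dataset, batch_size=4)

model = L.Classifier(L.Linear(4, 2))
optimizer = optimizers.SGD()
optimizer.setup(model)

updater = training.updaters.StandardUpdater(
    train_iter, optimizer,
    converter=get_concat_and_pad_examples(padding=-10000))
trainer = training.Trainer(updater, (1, 'epoch'))
trainer.run()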
Example #26
Source File: train_utils.py    From see with GNU General Public License v3.0
def __init__(self, iterator, target, converter=convert.concat_examples,
                 device=None, eval_hook=None, eval_func=None, num_iterations=200):
        super(FastEvaluatorBase, self).__init__(
            iterator,
            target,
            converter=converter,
            device=device,
            eval_hook=eval_hook,
            eval_func=eval_func
        )
        self.num_iterations = num_iterations 
Example #27
Source File: seq2seq.py    From convolutional_seq2seq with BSD 3-Clause "New" or "Revised" License
def source_pad_concat_convert(x_seqs, device, eos_id=0):
    x_block = convert.concat_examples(x_seqs, device, padding=-1)
    xp = cuda.get_array_module(x_block)

    # add eos
    x_block = xp.pad(x_block, ((0, 0), (0, 1)),
                     'constant', constant_values=-1)
    for i_batch, seq in enumerate(x_seqs):
        x_block[i_batch, len(seq)] = eos_id
    return x_block 
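As a quick usage sketch (toy token sequences on CPU; this assumes the function above and its imports are in scope), the converter right-pads with -1 and then writes the EOS id into the first padded slot of each row:

import numpy as np

x_seqs = [np.array([3, 1, 4], dtype=np.int32),
          np.array([1, 5], dtype=np.int32)]
x_block = source_pad_concat_convert(x_seqs, device=-1, eos_id=0)
# x_block -> [[ 3  1  4  0]
#             [ 1  5  0 -1]]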
Example #28
Source File: resnet_layer.py    From nips17-adversarial-attack with MIT License
def predict(self, images, oversample=True):
        """Computes all the probabilities of given images.

        Args:
            images (iterable of PIL.Image or numpy.ndarray): Input images.
            oversample (bool): If ``True``, it averages results across
                center, corners, and mirrors. Otherwise, it uses only the
                center.

        Returns:
            ~chainer.Variable: Output that contains the class probabilities
            of given images.

        """

        x = concat_examples([prepare(img, size=(256, 256)) for img in images])
        if oversample:
            x = imgproc.oversample(x, crop_dims=(224, 224))
        else:
            x = x[:, :, 16:240, 16:240]
        # Use no_backprop_mode to reduce memory consumption
        with function.no_backprop_mode():
            x = Variable(self.xp.asarray(x))
            y = self(x, layers=['prob'])['prob']
            if oversample:
                n = y.data.shape[0] // 10
                y_shape = y.data.shape[1:]
                y = reshape(y, (n, 10) + y_shape)
                y = sum(y, axis=1) / 10
        return y 
Example #29
Source File: evaluator.py    From chainer with MIT License
def __init__(self, iterator, target, converter=convert.concat_examples,
                 device=None, eval_hook=None, eval_func=None, **kwargs):
        progress_bar, = argument.parse_kwargs(kwargs, ('progress_bar', False))

        if device is not None:
            device = backend.get_device(device)

        if isinstance(iterator, iterator_module.Iterator):
            iterator = {'main': iterator}
        self._iterators = iterator

        if isinstance(target, link.Link):
            target = {'main': target}
        self._targets = target

        self.converter = converter
        self.device = device
        self.eval_hook = eval_hook
        self.eval_func = eval_func

        self._progress_bar = progress_bar

        for key, iter in six.iteritems(iterator):
            if (isinstance(iter, (iterators.SerialIterator,
                                  iterators.MultiprocessIterator,
                                  iterators.MultithreadIterator)) and
                    getattr(iter, 'repeat', False)):
                msg = ('The `repeat` property of the iterator {} '
                       'is set to `True`. Typically, the evaluator sweeps '
                       'over iterators until they stop, '
                       'but because the property is `True`, this iterator '
                       'might not stop and evaluation could go into '
                       'an infinite loop. '
                       'We recommend checking the configuration '
                       'of the iterators.'.format(key))
                warnings.warn(msg)
Example #30
Source File: parallel_updater.py    From chainer with MIT License
def __init__(self, iterator, optimizer, converter=convert.concat_examples,
                 models=None, devices=None, loss_func=None, loss_scale=None,
                 auto_new_epoch=True):
        super(ParallelUpdater, self).__init__(
            iterator=iterator,
            optimizer=optimizer,
            converter=converter,
            loss_func=loss_func,
            loss_scale=loss_scale,
            auto_new_epoch=auto_new_epoch,
        )

        if models is None:
            if devices is None:
                raise ValueError('either models or devices must be specified')
            names = list(six.iterkeys(devices))

            try:
                names.remove('main')
            except ValueError:
                raise KeyError('\'devices\' must contain a \'main\' key.')

            models = {'main': optimizer.target}
            for name in names:
                model = copy.deepcopy(optimizer.target)
                model.to_device(devices[name])
                models[name] = model
            optimizer.target.to_device(devices['main'])

        self._devices = devices
        self._models = models