Python chainer.function.no_backprop_mode() Examples
The following are 11 code examples of chainer.function.no_backprop_mode().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module chainer.function, or try the search function.
Example #1
Source File: multi_node_evaluator.py From chainer with MIT License | 6 votes |
def _evaluate_local_single(self, iterator):
    """Run the local evaluation loop, yielding one local result per batch.

    Each batch from *iterator* is converted for ``self.device`` and fed to
    :meth:`calc_local`; the optional progress hook is invoked after every
    batch.
    """
    for batch in iterator:
        in_arrays = convert._call_converter(
            self.converter, batch, self.device)
        # Evaluation never backpropagates, so skip graph construction
        # to reduce memory consumption.
        with function.no_backprop_mode():
            if isinstance(in_arrays, dict):
                results = self.calc_local(**in_arrays)
            elif isinstance(in_arrays, tuple):
                results = self.calc_local(*in_arrays)
            else:
                results = self.calc_local(in_arrays)
        if self._progress_hook:
            self._progress_hook(batch)
        yield results
Example #2
Source File: resnet_layer.py From nips17-adversarial-attack with MIT License | 5 votes |
def predict(self, images, oversample=True):
    """Compute class probabilities for the given images.

    Args:
        images (iterable of PIL.Image or numpy.ndarray): Input images.
        oversample (bool): If ``True``, predictions are averaged over the
            ten standard crops (center, four corners, and their mirrors).
            Otherwise only the center crop is used.

    Returns:
        ~chainer.Variable: Class probabilities of the given images.
    """
    batch = concat_examples([prepare(img, size=(256, 256)) for img in images])
    if oversample:
        batch = imgproc.oversample(batch, crop_dims=(224, 224))
    else:
        # Plain 224x224 center crop out of the 256x256 canvas.
        batch = batch[:, :, 16:240, 16:240]
    # Use no_backprop_mode to reduce memory consumption
    with function.no_backprop_mode():
        y = self(Variable(self.xp.asarray(batch)), layers=['prob'])['prob']
        if oversample:
            # Fold the 10 crops back into a group axis and average them.
            groups = y.data.shape[0] // 10
            y = reshape(y, (groups, 10) + y.data.shape[1:])
            y = sum(y, axis=1) / 10
    return y
Example #3
Source File: evaluator.py From contextual_augmentation with MIT License | 5 votes |
def evaluate(self):
    """Run the evaluation loop and return batch-size-weighted mean values.

    Unlike the stock chainer evaluator, raw per-batch observations are
    collected so that the final mean of each reported key can be weighted
    by the number of examples in each batch.
    """
    iterator = self._iterators['main']
    eval_func = self.eval_func or self._targets['main']

    if self.eval_hook:
        self.eval_hook(self)

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    # key -> list of per-batch values; 'n' holds the batch sizes.
    summary = collections.defaultdict(list)

    for batch in it:
        observation = {}
        with reporter_module.report_scope(observation):
            in_arrays = self.converter(batch, self.device)
            with function.no_backprop_mode():
                if isinstance(in_arrays, dict):
                    eval_func(**in_arrays)
                elif isinstance(in_arrays, tuple):
                    eval_func(*in_arrays)
                else:
                    eval_func(in_arrays)
        summary['n'].append(len(batch))
        for key, value in observation.items():
            summary[key].append(value)

    counts = summary.pop('n')
    total = sum(counts)
    return {key: sum(v * c for v, c in zip(values, counts)) / total
            for key, values in summary.items()}
Example #4
Source File: utils.py From imgclsmob with MIT License | 5 votes |
def __call__(self, imgs):
    """Preprocess *imgs*, run the model in test mode, and return CPU output."""
    array = self.model.xp.asarray([self.do_transform(img) for img in imgs])
    # Test-time configuration: no train-mode behavior, no autograd graph.
    with using_config("train", False), no_backprop_mode():
        predictions = self.model(Variable(array))
        raw = (predictions.array if hasattr(predictions, "array")
               else cupy.asnumpy(predictions))
        return to_cpu(raw)
Example #5
Source File: multi_node_evaluator.py From chainer with MIT License | 5 votes |
def _evaluate_local(self, iterator):
    # Distributed local-evaluation generator: every rank computes local
    # results, gathers them to the root rank (which yields them), and the
    # root broadcasts a termination flag once every rank has exhausted its
    # iterator.
    # Check whether local eval is all done every 8 rounds
    gather_interval = 8
    all_done = None
    while not all_done:
        all_done = None
        results = None
        for _ in range(gather_interval):
            try:
                batch = iterator.next()
                in_arrays = convert._call_converter(
                    self.converter, batch, self.device)
                # Evaluation does not need gradients; skip graph building.
                with function.no_backprop_mode():
                    if isinstance(in_arrays, tuple):
                        results = self.calc_local(*in_arrays)
                    elif isinstance(in_arrays, dict):
                        results = self.calc_local(**in_arrays)
                    else:
                        results = self.calc_local(in_arrays)
                if self.comm.rank == self.root and self._progress_hook:
                    self._progress_hook(batch)
            except StopIteration:
                # Local iterator exhausted: contribute None so the root can
                # detect when all ranks are finished.
                batch = None
                results = None
            # Collective call: every rank must reach this each round.
            results = self.comm.gather_obj(results, root=self.root)
            if self.comm.rank == self.root:
                valid_results = [r for r in results if r is not None]
                for result in valid_results:
                    yield result
                # No rank produced a result this round -> everyone is done.
                all_done = len(valid_results) == 0
        # Root's verdict is broadcast so all ranks exit together.
        all_done = self.comm.bcast_obj(all_done, root=self.root)
    return
Example #6
Source File: vgg.py From chainer with MIT License | 5 votes |
def predict(self, images, oversample=True):
    """Compute class probabilities for the given images.

    Args:
        images (iterable of PIL.Image or numpy.ndarray): Input images.
            When you specify a color image as a :class:`numpy.ndarray`,
            make sure that color order is RGB.
        oversample (bool): If ``True``, results are averaged across the
            center crop, the four corner crops, and their mirrors.
            Otherwise only the center crop is used.

    Returns:
        ~chainer.Variable: Output that contains the class probabilities
        of given images.
    """
    inputs = concat_examples(
        [prepare(img, size=(256, 256)) for img in images])
    if oversample:
        inputs = imgproc.oversample(inputs, crop_dims=(224, 224))
    else:
        # 224x224 center crop of the 256x256 canvas.
        inputs = inputs[:, :, 16:240, 16:240]
    # Use no_backprop_mode to reduce memory consumption
    with function.no_backprop_mode(), chainer.using_config('train', False):
        prob = self(Variable(self.xp.asarray(inputs)),
                    layers=['prob'])['prob']
        if oversample:
            # Average the 10 crop predictions per original image.
            groups = len(prob) // 10
            prob = reshape(prob, (groups, 10) + prob.shape[1:])
            prob = sum(prob, axis=1) / 10
    return prob
Example #7
Source File: resnet.py From chainer with MIT License | 5 votes |
def predict(self, images, oversample=True):
    """Compute class probabilities for the given images.

    Args:
        images (iterable of PIL.Image or numpy.ndarray): Input images.
            When you specify a color image as a :class:`numpy.ndarray`,
            make sure that color order is RGB.
        oversample (bool): If ``True``, results are averaged across the
            center crop, the four corner crops, and their mirrors.
            Otherwise only the center crop is used.

    Returns:
        ~chainer.Variable: Output that contains the class probabilities
        of given images.
    """
    prepared = [prepare(img, size=(256, 256)) for img in images]
    x = concat_examples(prepared)
    # Either expand to the 10 standard crops or take the center one.
    x = (imgproc.oversample(x, crop_dims=(224, 224)) if oversample
         else x[:, :, 16:240, 16:240])
    # Use no_backprop_mode to reduce memory consumption
    with function.no_backprop_mode(), chainer.using_config('train', False):
        y = self(Variable(self.xp.asarray(x)), layers=['prob'])['prob']
        if oversample:
            # Average predictions over the 10 crops of each image.
            per_image = len(y) // 10
            y = reshape(y, (per_image, 10) + y.shape[1:])
            y = sum(y, axis=1) / 10
    return y
Example #8
Source File: googlenet.py From chainer with MIT License | 5 votes |
def predict(self, images, oversample=True):
    """Compute class probabilities for the given images.

    Args:
        images (iterable of PIL.Image or numpy.ndarray): Input images.
            When you specify a color image as a :class:`numpy.ndarray`,
            make sure that color order is RGB.
        oversample (bool): If ``True``, results are averaged across the
            center crop, the four corner crops, and their mirrors.
            Otherwise only the center crop is used.

    Returns:
        ~chainer.Variable: Output that contains the class probabilities
        of given images.
    """
    batch = concat_examples(
        [prepare(img, size=(256, 256)) for img in images])
    if oversample:
        batch = imgproc.oversample(batch, crop_dims=(224, 224))
    else:
        # 224x224 center crop of the 256x256 canvas.
        batch = batch[:, :, 16:240, 16:240]
    # Use no_backprop_mode to reduce memory consumption
    with function.no_backprop_mode(), chainer.using_config('train', False):
        probs = self(Variable(self.xp.asarray(batch)),
                     layers=['prob'])['prob']
        if oversample:
            # Average the 10 crop predictions per original image.
            n_images = len(probs) // 10
            probs = reshape(probs, (n_images, 10) + probs.shape[1:])
            probs = average(probs, axis=1)
    return probs
Example #9
Source File: evaluator.py From 3dpose_gan with MIT License | 5 votes |
def evaluate(self):
    # Evaluate the 3D-pose generator: report the MSE of predicted depth
    # values and the mean per-joint Euclidean distance (rescaled to the
    # original coordinate scale).
    iterator = self._iterators['main']
    gen = self._targets['gen']
    if self.eval_hook:
        self.eval_hook(self)
    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        # Iterator without reset(): evaluate on a shallow copy so the
        # original iterator state is untouched.
        it = copy.copy(iterator)
    summary = reporter_module.DictSummary()
    for batch in it:
        observation = {}
        with reporter_module.report_scope(observation):
            xy_proj, xyz, scale = self.converter(batch, self.device)
            # Drop the second axis (assumes a singleton frame/camera
            # dimension at index 1 — TODO confirm against the converter).
            xy_proj, xyz = xy_proj[:, 0], xyz[:, 0]
            with function.no_backprop_mode(), \
                    chainer.using_config('train', False):
                xy_real = chainer.Variable(xy_proj)
                z_pred = gen(xy_real)
                # xyz[:, 2::3] picks every joint's z coordinate from the
                # interleaved (x, y, z) layout.
                z_mse = F.mean_squared_error(z_pred, xyz[:, 2::3])
                chainer.report({'z_mse': z_mse}, gen)
                # Squared errors per axis: ground-truth (x, y, z) vs the
                # observed 2D projection plus the predicted depth.
                lx = gen.xp.power(xyz[:, 0::3] - xy_proj[:, 0::2], 2)
                ly = gen.xp.power(xyz[:, 1::3] - xy_proj[:, 1::2], 2)
                lz = gen.xp.power(xyz[:, 2::3] - z_pred.data, 2)
                # Mean joint distance per sample, rescaled by the sample's
                # scale factor, then averaged over the batch.
                euclidean_distance = gen.xp.sqrt(lx + ly + lz).mean(axis=1)
                euclidean_distance *= scale[:, 0]
                euclidean_distance = gen.xp.mean(euclidean_distance)
                chainer.report(
                    {'euclidean_distance': euclidean_distance}, gen)
        summary.add(observation)
    return summary.compute_mean()
Example #10
Source File: MyEvaluator.py From HFT-CNN with MIT License | 5 votes |
def evaluate(self):
    """Evaluate multi-label sigmoid cross-entropy loss over the dataset."""
    iterator = self._iterators['main']
    eval_func = self.eval_func or self._targets['main']

    if self.eval_hook:
        self.eval_hook(self)

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    summary = reporter_module.DictSummary()

    for batch in it:
        observation = {}
        with reporter_module.report_scope(observation):
            x = cuda.to_gpu(np.array([example[0] for example in batch]))
            labels = [example[1] for example in batch]
            # Build a sparse multi-hot target matrix, one row per example,
            # with duplicate labels collapsed.
            row_idx, col_idx, val_idx = [], [], []
            for row, example_labels in enumerate(labels):
                for col in set(example_labels):
                    row_idx.append(row)
                    col_idx.append(col)
                    val_idx.append(1)
            t = sp.csr_matrix(
                (val_idx, (row_idx, col_idx)),
                shape=(len(labels), self.class_dim),
                dtype=np.int8).todense()
            t = cuda.to_gpu(t)
            with function.no_backprop_mode():
                loss = F.sigmoid_cross_entropy(eval_func(x), t)
                summary.add({MyEvaluator.default_name + '/main/loss': loss})
        summary.add(observation)

    return summary.compute_mean()
Example #11
Source File: custom_mean_evaluator.py From kiss with GNU General Public License v3.0 | 4 votes |
def evaluate(self):
    """Evaluate the model and return a result dictionary.

    Runs the evaluation loop over the validation dataset, accumulating
    every reported value into a :class:`~chainer.DictSummary` and
    returning a dictionary of means. The loop relies on the main iterator
    eventually raising ``StopIteration`` (or on code inside the loop
    raising); otherwise it would never terminate. When
    ``max_num_iterations`` is set, the iterator is additionally capped at
    that many iterations. Users can override this method to customize the
    evaluation routine.

    .. note::
        Each :attr:`eval_func` call is enclosed in a
        :func:`function.no_backprop_mode` context, so
        :class:`~chainer.FunctionNode`\\s used inside it do not build
        computational graphs; this reduces memory consumption.

    Returns:
        dict: Result dictionary. This dictionary is further reported via
        :func:`~chainer.report` without specifying any observer.
    """
    iterator = self._iterators['main']
    eval_func = self.eval_func or self._targets['main']

    if self.eval_hook:
        self.eval_hook(self)

    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)

    # Optionally bound the number of evaluation iterations.
    if self.max_num_iterations is not None:
        it = self.fixed_num_iterations_iterator(it)

    summary = reporter_module.DictSummary()
    for batch in it:
        observation = {}
        with reporter_module.report_scope(observation):
            in_arrays = self.converter(batch, self.device)
            with function.no_backprop_mode():
                if isinstance(in_arrays, dict):
                    eval_func(**in_arrays)
                elif isinstance(in_arrays, tuple):
                    eval_func(*in_arrays)
                else:
                    eval_func(in_arrays)
        summary.add(observation)

    return self.calculate_mean_of_summary(summary)