Python chainer.is_debug() Examples
The following are 30 code examples of chainer.is_debug(), extracted from open source projects. The Source File line above each example names the original project and its license. You may also want to check out all available functions/classes of the module chainer, or try the search function.
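For context, chainer.is_debug() reports whether Chainer's debug mode is currently active. Debug mode enables the extra input validation you will see guarded throughout the examples below. A minimal sketch of toggling it, using the public chainer.set_debug and chainer.using_config APIs:

    import chainer

    # Debug mode is off by default.
    assert not chainer.is_debug()

    # Enable it globally; the extra validation (index checks, NaN checks)
    # guarded by chainer.is_debug() in the examples below now runs.
    chainer.set_debug(True)
    assert chainer.is_debug()

    # Debug mode can also be scoped with the config context manager.
    with chainer.using_config('debug', False):
        assert not chainer.is_debug()
    assert chainer.is_debug()

    chainer.set_debug(False)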
Example #1
Source File: bias.py From chainer-stylegan with MIT License
def forward(self, *xs):
    """Applies broadcasted elementwise summation.

    Args:
        xs (list of Variables): Input variables whose length should
            be one if the link has a learnable bias parameter,
            otherwise should be two.
    """
    axis = self.axis

    # Case of only one argument where b is a learnt parameter.
    if hasattr(self, 'b'):
        if chainer.is_debug():
            assert len(xs) == 1
        x, = xs
        b = self.b
        return bias.bias(x, b, axis)
    # Case of two arguments where b is given as an argument.
    else:
        if chainer.is_debug():
            assert len(xs) == 2
        x, y = xs
        return bias.bias(x, y, axis)
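The one-argument versus two-argument dispatch above matches chainer.links.Bias, where passing shape at construction creates the learnable parameter b. A hedged usage sketch, assuming the standard chainer.links.Bias API rather than anything specific to the chainer-stylegan fork:

    import numpy
    import chainer.links as L

    # One-argument form: the link owns a learnable bias of shape (3,).
    blink = L.Bias(axis=1, shape=(3,))
    x = numpy.zeros((2, 3, 4), dtype=numpy.float32)
    y = blink(x)  # b is broadcast over axes 0 and 2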
Example #2
Source File: snapshot.py From pfio with MIT License
def _scan_directory(fs, directory):
    local_files = []
    try:
        local_files = fs.list(directory)
    except Exception as e:
        if chainer.is_debug():
            print("Cannot list directory {}: {}".format(directory, e))

    local_files = filter(lambda s: not s.startswith('tmp'), local_files)
    files = filter(None, [(_parse_filename(f), f) for f in local_files])
    files = list(files)
    files.sort()

    if len(files) > 0:
        _i, filename = max(files)
        return filename
    else:
        return None
Example #3
Source File: batch_normalization.py From chainer with MIT License
def forward(self, axis, gamma, x, x_layout, xp, expander,
            beta, eps, decay, running_mean, running_var):
    if running_mean is not None:
        mean = running_mean
        var = running_var
    else:
        # Create dummies.
        mean = xp.zeros_like(gamma, dtype=x.dtype)
        var = xp.zeros_like(gamma, dtype=x.dtype)

    # mean and inv_std are used as buffers to save
    # intermediate results computed during forward pass. These buffers
    # are used to speed-up backward pass.
    cudnn_x_layout = cuda._get_cudnn_tensor_layout_x(x_layout)
    reserve_space, y, mean, inv_std = (
        cudnn.batch_normalization_forward_training_ex(
            x, gamma, beta, mean, var, None, None,
            eps, decay, self.is_for_conv2d,
            self.cudnn_mode, chainer.is_debug(),
            d_layout=cudnn_x_layout))
    y_layout = x_layout
    return (
        y, y_layout, running_mean, running_var, mean, var, inv_std,
        reserve_space)
Example #4
Source File: embed_id.py From chainer with MIT License
def forward(self, inputs):
    self.retain_inputs((0,))
    x, W = inputs
    self._w_shape = W.shape
    xp = backend.get_array_module(*inputs)
    if chainer.is_debug():
        valid_x = xp.logical_and(0 <= x, x < len(W))
        if self.ignore_label is not None:
            valid_x = xp.logical_or(valid_x, x == self.ignore_label)
        if not valid_x.all():
            raise ValueError('Each non-ignored `x` value needs to satisfy '
                             '`0 <= x < len(W)`')
    if self.ignore_label is not None:
        mask = (x == self.ignore_label)
        return xp.where(mask[..., None], 0, W[xp.where(mask, 0, x)]),
    return W[x],
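The index range check above runs only in debug mode; without it, invalid indices fail later or silently. A quick sketch of triggering it through the public wrapper chainer.functions.embed_id:

    import numpy
    import chainer
    import chainer.functions as F

    W = numpy.random.rand(3, 2).astype(numpy.float32)  # 3 embedding rows
    x = numpy.array([0, 5], dtype=numpy.int32)         # 5 is out of range

    chainer.set_debug(True)
    try:
        F.embed_id(x, W)
    except ValueError as e:
        print(e)  # raised only because debug mode is on
    chainer.set_debug(False)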
Example #5
Source File: scatter_add.py From chainer with MIT License
def __init__(self, slices):
    if isinstance(slices, list):
        if all([isinstance(s, int) for s in slices]):
            slices = slices,
        slices = tuple(slices)
    elif not isinstance(slices, tuple):
        slices = slices,

    if chainer.is_debug():
        n_ellipses = 0
        for s in slices:
            if s is Ellipsis:
                n_ellipses += 1
        if n_ellipses > 1:
            raise ValueError('Only one Ellipsis is allowed')

    self.slices = slices
Example #6
Source File: select_item.py From chainer with MIT License
def forward(self, inputs):
    self.retain_inputs((1,))
    x, t = inputs
    self._in_shape = x.shape
    self._in_dtype = x.dtype
    if chainer.is_debug():
        if not ((0 <= t).all() and (t < x.shape[1]).all()):
            msg = 'Each label `t` needs to satisfy `0 <= t < x.shape[1]`'
            raise ValueError(msg)
    xp = backend.get_array_module(x)
    if xp is numpy:
        # This code is equivalent to `t.choose(x.T)`, but `numpy.choose`
        # does not work when `x.shape[1] > 32`.
        return x[six.moves.range(t.size), t],
    else:
        y = cuda.elementwise(
            'S t, raw T x', 'T y',
            'int ind[] = {i, t}; y = x[ind];',
            'getitem_fwd'
        )(t, x)
        return y,
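Again, the bounds check is debug-only. A sketch via the public wrapper chainer.functions.select_item:

    import numpy
    import chainer
    import chainer.functions as F

    x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
    t = numpy.array([0, 3], dtype=numpy.int32)  # 3 >= x.shape[1]

    chainer.set_debug(True)
    try:
        F.select_item(x, t)
    except ValueError as e:
        print(e)
    chainer.set_debug(False)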
Example #7
Source File: get_item.py From chainer with MIT License
def __init__(self, slices):
    if isinstance(slices, list):
        if all([isinstance(s, int) for s in slices]):
            slices = slices,
        slices = tuple(slices)
    elif not isinstance(slices, tuple):
        slices = slices,

    if chainer.is_debug():
        n_ellipses = 0
        for s in slices:
            if s is Ellipsis:
                n_ellipses += 1
        if n_ellipses > 1:
            raise ValueError('Only one Ellipsis is allowed')

    self.slices = slices
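This constructor is shared almost verbatim with ScatterAdd in Example #5; the Ellipsis count is validated only in debug mode. A sketch of triggering the check through chainer.functions.get_item:

    import numpy
    import chainer
    import chainer.functions as F

    x = numpy.zeros((2, 3, 4), dtype=numpy.float32)

    chainer.set_debug(True)
    try:
        F.get_item(x, (Ellipsis, Ellipsis, 0))  # two Ellipses
    except ValueError as e:
        print(e)  # 'Only one Ellipsis is allowed'
    chainer.set_debug(False)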
Example #8
Source File: scale.py From chainer-stylegan with MIT License
def forward(self, *xs):
    """Applies broadcasted elementwise product.

    Args:
        xs (list of Variables): Input variables whose length should
            be one if the link has a learnable weight parameter,
            otherwise should be two.
    """
    axis = self.axis

    # Case of only one argument where W is a learnt parameter.
    if hasattr(self, 'W'):
        if chainer.is_debug():
            assert len(xs) == 1
        x, = xs
        W = self.W
        z = scale.scale(x, W, axis)
    # Case of two arguments where W is given as an argument.
    else:
        if chainer.is_debug():
            assert len(xs) == 2
        x, y = xs
        z = scale.scale(x, y, axis)

    # Forward propagate bias term if given.
    if hasattr(self, 'bias'):
        return self.bias(z)
    else:
        return z
Example #9
Source File: test_softmax_cross_entropy.py From chainer-segnet with MIT License
def setUp(self):
    self.x = numpy.random.uniform(-1, 1, (2, 2)).astype(numpy.float32)
    # `0` is required to avoid NaN
    self.t = numpy.array([self.t_value, 0], dtype=numpy.int32)
    self.original_debug = chainer.is_debug()
    chainer.set_debug(True)
Example #10
Source File: softmax_cross_entropy.py From chainer-segnet with MIT License
def forward_gpu(self, inputs):
    cupy = cuda.cupy
    x, t = inputs
    if chainer.is_debug():
        self._check_input_values(x, t)

    log_y = log_softmax._log_softmax(x, self.use_cudnn)
    if self.cache_score:
        self.y = cupy.exp(log_y)
    if self.class_weight is not None:
        shape = [1 if d != 1 else -1 for d in six.moves.range(x.ndim)]
        log_y *= cupy.broadcast_to(
            self.class_weight.reshape(shape), x.shape)
    if self.normalize:
        coeff = cupy.maximum(1, (t != self.ignore_label).sum())
    else:
        coeff = max(1, len(t))
    self._coeff = cupy.divide(1.0, coeff, dtype=x.dtype)

    log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
    ret = cuda.reduce(
        'S t, raw T log_y, int32 n_channel, raw T coeff', 'T out',
        't == -1 ? T(0) : log_y[_j * n_channel + t]',
        'a + b', 'out = a * -coeff[0]', '0', 'crossent_fwd'
    )(t, log_y.reduced_view(), log_y.shape[-1], self._coeff)
    return ret,
Example #11
Source File: softmax_cross_entropy.py From chainer-segnet with MIT License
def forward_cpu(self, inputs):
    x, t = inputs
    if chainer.is_debug():
        self._check_input_values(x, t)

    log_y = log_softmax._log_softmax(x, self.use_cudnn)
    if self.cache_score:
        self.y = numpy.exp(log_y)
    if self.class_weight is not None:
        if self.class_weight.shape != x.shape:
            shape = [1 if d != 1 else -1
                     for d in six.moves.range(x.ndim)]
            self.class_weight = numpy.broadcast_to(
                self.class_weight.reshape(shape), x.shape)
        log_y *= self.class_weight
    log_yd = numpy.rollaxis(log_y, 1)
    log_yd = log_yd.reshape(len(log_yd), -1)
    log_p = log_yd[numpy.maximum(t.ravel(), 0), numpy.arange(t.size)]
    # deal with the case where the SoftmaxCrossEntropy is
    # unpickled from the old version
    if self.normalize:
        count = (t != self.ignore_label).sum()
    else:
        count = len(x)
    self._coeff = 1.0 / max(count, 1)
    y = (log_p * (t.ravel() != self.ignore_label)).sum(keepdims=True) \
        * (-self._coeff)
    return y.reshape(()),
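Both the CPU and GPU paths defer input validation to _check_input_values, which runs only in debug mode. A sketch of what that buys you, via chainer.functions.softmax_cross_entropy (assuming the behavior of mainline Chainer rather than the chainer-segnet fork specifically):

    import numpy
    import chainer
    import chainer.functions as F

    x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
    t = numpy.array([3, 0], dtype=numpy.int32)  # label 3 is out of range

    chainer.set_debug(True)
    try:
        F.softmax_cross_entropy(x, t)
    except ValueError as e:
        print(e)  # out-of-range labels are caught up front in debug mode
    chainer.set_debug(False)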
Example #12
Source File: deterministic_embed_id.py From espnet with Apache License 2.0
def forward(self, inputs):
    self.retain_inputs((0,))
    x, W = inputs
    self._w_shape = W.shape

    if not type_check.same_types(*inputs):
        raise ValueError(
            "numpy and cupy must not be used together\n"
            "type(W): {0}, type(x): {1}".format(type(W), type(x))
        )

    xp = cuda.get_array_module(*inputs)
    if chainer.is_debug():
        valid_x = xp.logical_and(0 <= x, x < len(W))
        if self.ignore_label is not None:
            valid_x = xp.logical_or(valid_x, x == self.ignore_label)
        if not valid_x.all():
            raise ValueError(
                "Each non-ignored `x` value needs to satisfy "
                "`0 <= x < len(W)`"
            )

    if self.ignore_label is not None:
        mask = x == self.ignore_label
        return (xp.where(mask[..., None], 0, W[xp.where(mask, 0, x)]),)
    return (W[x],)
Example #13
Source File: classification_summary.py From chainer with MIT License
def forward(self, inputs):
    xp = backend.get_array_module(*inputs)
    y, t = inputs
    # numpy.bincount requires int32 on Windows
    t = t.astype(xp.int32, copy=False)

    if self.label_num is None:
        label_num = xp.amax(t) + 1
    else:
        label_num = self.label_num
        if chainer.is_debug():
            assert (t < label_num).all()

    mask = (t == self.ignore_label).ravel()
    pred = xp.where(mask, label_num, y.argmax(axis=1).ravel())
    true = xp.where(mask, label_num, t.ravel())
    support = xp.bincount(true, minlength=label_num + 1)[:label_num]
    relevant = xp.bincount(pred, minlength=label_num + 1)[:label_num]
    tp_mask = xp.where(pred == true, true, label_num)
    tp = xp.bincount(tp_mask, minlength=label_num + 1)[:label_num]

    precision = tp / relevant
    recall = tp / support
    fbeta = _fbeta_score(precision, recall, self.beta)

    return precision, recall, fbeta, support
Example #14
Source File: ctc.py From chainer with MIT License
def forward(self, inputs):
    xp = backend.get_array_module(inputs[0])
    self.input_length, label_length, t, xs = inputs
    if self.zero_padding is None:
        if xs.dtype == numpy.float16:
            self.zero_padding = -10000.0
        else:
            self.zero_padding = -10000000000.0

    if chainer.is_debug():
        assert len(xs) >= xp.max(self.input_length)
        assert t.shape[1] >= xp.max(label_length)

    self.path_length = 2 * label_length + 1

    self.yseq = _softmax(xs, xp)
    log_yseq = self.log_matrix(self.yseq, xp)
    self.path = _label_to_path(t, self.blank_symbol, xp)
    self.prob_trans = self.calc_trans(
        log_yseq, self.input_length, t,
        label_length, self.path, self.path_length, xp)

    loss = -_logsumexp(self.prob_trans[0], xp, axis=1)
    if self.reduce == 'mean':
        loss = utils.force_array(xp.mean(loss))
    return loss,
Example #15
Source File: inv.py From chainer with MIT License
def forward_gpu(self, x):
    self.retain_outputs((0,))
    invx, info = _inv_gpu(x[0])
    if chainer.is_debug():
        if cuda.cupy.any(info != 0):
            raise ValueError('Input has singular matrices.')
    return invx,
Example #16
Source File: inv.py From chainer with MIT License
def forward_gpu(self, x):
    self.retain_outputs((0,))
    shape = x[0].shape
    invx, info = _inv_gpu(x[0].reshape(1, *shape))
    if chainer.is_debug():
        if cuda.cupy.any(info != 0):
            raise ValueError('Input has singular matrices.')
    invx = invx.reshape(shape)
    return invx,
Example #17
Source File: _snapshot.py From chainer with MIT License
def initialize(self, trainer):
    target = trainer if self._target is None else self._target
    outdir = trainer.out
    if self.autoload:
        # If ``autoload`` is on, this code scans the ``outdir``
        # for potential snapshot files by matching the file names
        # from ``filename`` format, picks up the latest one in
        # terms of mtime, and tries to load it into the target or
        # trainer.
        filename = _find_latest_snapshot(self.filename, outdir)
        if filename is None:
            if chainer.is_debug():
                print('No snapshot file that matches {} was found'
                      .format(self.filename))
        else:
            snapshot_file = os.path.join(outdir, filename)
            # As described above (at ``autoload`` option),
            # snapshot files to be autoloaded must be saved by
            # ``save_npz``. In order to support a general format,
            # we need to first reconstruct the design of savefun
            # and loadfun.
            npz.load_npz(snapshot_file, target)
            if chainer.is_debug():
                print('Snapshot loaded from', snapshot_file)

    if (hasattr(self.writer, '_add_cleanup_hook')
            and self.n_retains > 0
            and isinstance(self.filename, str)):
        # This block sets up automatic cleanup of stale snapshots
        # when the ``n_retains`` argument is a positive number.
        # When the given snapshot writer is Chainer's built-in
        # writer, a cleanup method that is triggered right after
        # creation of a new snapshot file is injected here.
        def _cleanup():
            files = _find_stale_snapshots(self.filename, outdir,
                                          self.n_retains)
            for file in files:
                os.remove(os.path.join(outdir, file))

        self.writer._add_cleanup_hook(_cleanup)
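For reference, the autoload and cleanup paths above are driven by arguments of the public snapshot extension. A hedged configuration fragment, assuming an existing trainer object and the autoload/n_retains keywords of recent Chainer releases:

    from chainer.training import extensions

    # Resume from the newest matching snapshot in trainer.out, and keep
    # only the three most recent snapshot files on disk.
    trainer.extend(
        extensions.snapshot(filename='snapshot_iter_{.updater.iteration}',
                            autoload=True, n_retains=3),
        trigger=(1000, 'iteration'))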
Example #18
Source File: scale.py From chainer with MIT License
def forward(self, *xs):
    """Applies broadcasted elementwise product.

    Args:
        xs (list of Variables): Input variables whose length should
            be one if the link has a learnable weight parameter,
            otherwise should be two.
    """
    axis = self.axis

    # Case of only one argument where W is a learnt parameter.
    if hasattr(self, 'W'):
        if chainer.is_debug():
            assert len(xs) == 1
        x, = xs
        W = self.W
        z = scale.scale(x, W, axis)
    # Case of two arguments where W is given as an argument.
    else:
        if chainer.is_debug():
            assert len(xs) == 2
        x, y = xs
        z = scale.scale(x, y, axis)

    # Forward propagate bias term if given.
    if hasattr(self, 'bias'):
        return self.bias(z)
    else:
        return z
Example #19
Source File: test_softmax_cross_entropy.py From chainer with MIT License
def setUp(self):
    self.x = numpy.random.uniform(-1, 1, (2, 2)).astype(numpy.float32)
    # `0` is required to avoid NaN
    self.t = numpy.array([self.t_value, 0], dtype=numpy.int32)
    self.original_debug = chainer.is_debug()
    chainer.set_debug(True)
Example #20
Source File: test_scatter_add.py From chainer with MIT License
def setUp(self):
    self.default_debug = chainer.is_debug()
    chainer.set_debug(True)

    self.a_data = numpy.random.uniform(-1, 1, (4, 3, 2))
    self.b_data = numpy.random.uniform(-1, 1, (2, 2))
Example #21
Source File: test_select_item.py From chainer with MIT License
def setUp(self):
    self.x = numpy.random.uniform(-1, 1, (1, 2)).astype(numpy.float32)
    self.t = numpy.array([self.t_value], dtype=numpy.int32)
    self.original_debug = chainer.is_debug()
    chainer.set_debug(True)
Example #22
Source File: test_split_axis.py From chainer with MIT License
def setUp(self):
    self.default_debug = chainer.is_debug()
    chainer.set_debug(True)
Example #23
Source File: test_permutate.py From chainer with MIT License
def setUp(self):
    self.x = numpy.arange(10).reshape((2, 5)).astype('f')
    self.ind = numpy.array(self.indices, 'i')
    self.debug = chainer.is_debug()
    chainer.set_debug(True)
Example #24
Source File: test_embed_id.py From chainer with MIT License
def setUp(self):
    self.link = links.EmbedID(2, 2, ignore_label=self.ignore_label)
    self.t = numpy.array([self.t_value], dtype=numpy.int32)
    self.original_debug = chainer.is_debug()
    chainer.set_debug(True)
Example #25
Source File: test_function.py From chainer with MIT License
def setUp(self):
    self.original_debug = chainer.is_debug()
    chainer.set_debug(True)
    self.one = numpy.array(1, numpy.float32)
    self.f = chainer.Function()
Example #26
Source File: test_function.py From chainer with MIT License
def setUp(self):
    self.original_debug = chainer.is_debug()
    chainer.set_debug(True)
    self.one = numpy.array([1], numpy.float32)
    self.f = chainer.Function()
Example #27
Source File: test_function_node.py From chainer with MIT License
def setUp(self):
    self.original_debug = chainer.is_debug()
    chainer.set_debug(True)
    self.one = numpy.array([1], numpy.float32)
    self.f = chainer.FunctionNode()
Example #28
Source File: split_axis.py From chainer with MIT License
def _get_indices_or_sections(indices_or_sections):
    """Checks and converts the ``indices_or_sections`` argument.

    Converted value is one of: 1-D numpy.ndarray, list, int, and
    NumPy int scalar.

    Returns:
        A binary tuple in which the 1st element is indices (sequence) and
        the 2nd element is sections (scalar).
        Only one of the two is not ``None`` and the other is ``None``.
    """
    ios = indices_or_sections
    is_seq = False
    if isinstance(ios, numpy.ndarray):
        # numpy.ndarray
        if ios.dtype.kind != 'i' and ios.size > 0:
            # Note: numpy.array([]) (dtype is float64) should be accepted.
            raise TypeError('indices_or_sections must be integers')
        if ios.ndim >= 2:
            raise TypeError('indices_or_sections must be 1-D sequence')
        is_seq = ios.ndim != 0
    elif isinstance(ios, collections_abc.Sequence):
        # Any sequence except numpy.ndarray
        ios = list(ios)
        is_seq = True
    elif isinstance(indices_or_sections, six.integer_types):
        # int
        pass
    else:
        raise TypeError(
            'indices_or_sections must be integer or 1-D array.\n'
            'Actual: {}'.format(type(indices_or_sections)))

    if is_seq and chainer.is_debug():
        for p, n in six.moves.zip(ios, ios[1:]):
            if p > n:
                raise ValueError('indices_or_sections must be sorted')

    if is_seq:
        return ios, None
    else:
        return None, ios
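The sortedness check near the end runs only in debug mode. A sketch through the public chainer.functions.split_axis wrapper:

    import numpy
    import chainer
    import chainer.functions as F

    x = numpy.arange(12).reshape(3, 4).astype(numpy.float32)

    chainer.set_debug(True)
    try:
        F.split_axis(x, [3, 1], axis=1)  # indices not sorted
    except ValueError as e:
        print(e)  # 'indices_or_sections must be sorted'
    chainer.set_debug(False)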
Example #29
Source File: bias.py From chainer with MIT License
def bias(x, y, axis=1):
    """Elementwise summation with broadcasting.

    Computes an elementwise summation of two input variables, with the shape
    of the latter variable broadcasted to match the shape of the former.
    ``axis`` is the first axis of the first variable along which the second
    variable is applied.

    The term "broadcasting" here comes from Caffe's bias layer, so the
    "broadcasting" with the following arguments::

           x : 100 x 3 x 40 x 5 x 6
           y : 3 x 40
        axis : 1

    is equivalent to the following numpy broadcasting::

        x : 100 x 3 x 40 x 5 x 6
        y : (1 x) 3 x 40 x 1 x 1

    Note that the axis of ``x`` to which we apply ``y`` is specified by the
    argument ``axis``, whose meaning is different from numpy's ``axis``.

    Args:
        x (:class:`~chainer.Variable` or :ref:`ndarray`):
            Input variable to be summed.
        y (:class:`~chainer.Variable` or :ref:`ndarray`):
            Input variable to sum, broadcasted.
        axis (int): The first axis of ``x`` along which ``y`` is applied.

    Returns:
        ~chainer.Variable: Output variable.
    """
    x_shape = x.shape
    y_shape = y.shape
    if chainer.is_debug():
        assert x_shape[axis:axis + len(y_shape)] == y_shape
    y1_shape = tuple([1] * axis + list(y_shape) +
                     [1] * (len(x_shape) - axis - len(y_shape)))
    y1 = reshape.reshape(y, y1_shape)
    y2 = broadcast.broadcast_to(y1, x_shape)
    return x + y2
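A short usage sketch of the shape contract described in this docstring, via the public chainer.functions.bias wrapper:

    import numpy
    import chainer.functions as F

    x = numpy.zeros((100, 3, 40, 5, 6), dtype=numpy.float32)
    y = numpy.ones((3, 40), dtype=numpy.float32)

    z = F.bias(x, y, axis=1)  # y is reshaped to (1, 3, 40, 1, 1) and added
    print(z.shape)            # (100, 3, 40, 5, 6)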
Example #30
Source File: scale.py From chainer with MIT License
def scale(x, y, axis=1):
    """Elementwise product with broadcasting.

    Computes an elementwise product of two input variables, with the shape
    of the latter variable broadcasted to match the shape of the former.
    ``axis`` is the first axis of the first variable along which the second
    variable is applied.

    The term "broadcasting" here comes from Caffe's scale layer, so the
    "broadcasting" with the following arguments::

           x : 100 x 3 x 40 x 5 x 6
           y : 3 x 40
        axis : 1

    is equivalent to the following numpy broadcasting::

        x : 100 x 3 x 40 x 5 x 6
        y : (1 x) 3 x 40 x 1 x 1

    Note that the axis of ``x`` to which we apply ``y`` is specified by the
    argument ``axis``, whose meaning is different from numpy's ``axis``.

    Args:
        x (:class:`~chainer.Variable` or :ref:`ndarray`):
            Input variable to be scaled.
        y (:class:`~chainer.Variable` or :ref:`ndarray`):
            Input variable to scale, broadcasted.
        axis (int): The first axis of ``x`` along which ``y`` is applied.

    Returns:
        ~chainer.Variable: Output variable.
    """
    x_shape = x.shape
    y_shape = y.shape
    if chainer.is_debug():
        assert x_shape[axis:axis + len(y_shape)] == y_shape
    y1_shape = tuple([1] * axis + list(y_shape) +
                     [1] * (len(x_shape) - axis - len(y_shape)))
    y1 = reshape.reshape(y, y1_shape)
    y2 = broadcast.broadcast_to(y1, x_shape)
    return x * y2
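The same contract holds for the product version; a sketch via chainer.functions.scale:

    import numpy
    import chainer.functions as F

    x = numpy.ones((100, 3, 40, 5, 6), dtype=numpy.float32)
    y = 2 * numpy.ones((3, 40), dtype=numpy.float32)

    z = F.scale(x, y, axis=1)  # y is reshaped to (1, 3, 40, 1, 1), then multiplied
    print(z.shape)             # (100, 3, 40, 5, 6)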