Python chainer.cuda.available() Examples
The following are 4 code examples of chainer.cuda.available().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module
chainer.cuda
, or try the search function
.
Example #1
Source File: nonbias_weight_decay.py From async-rl with MIT License | 5 votes |
def __call__(self, opt):
    """Apply weight decay to every non-bias parameter of ``opt.target``.

    Parameters named ``'b'`` or whose name ends with ``'/b'`` (bias
    vectors) are skipped; every other parameter gets
    ``grad += rate * param``, computed in place with NumPy arithmetic
    on CPU or with a fused elementwise kernel on GPU.
    """
    kernel = None
    if cuda.available:
        # Fused GPU kernel: g += decay * p in a single pass.
        kernel = cuda.elementwise(
            'T p, T decay', 'T g', 'g += decay * p', 'weight_decay')
    decay = self.rate
    for name, param in opt.target.namedparams():
        # Skip biases: a top-level 'b' or any '.../b' leaf.
        if name == 'b' or name.endswith('/b'):
            continue
        data, grad = param.data, param.grad
        with cuda.get_device(data) as device:
            if int(device) == -1:  # -1 means the array lives on CPU
                grad += decay * data
            else:
                kernel(data, decay, grad)
Example #2
Source File: weight_clip.py From GUINNESS with GNU General Public License v2.0 | 5 votes |
def __call__(self, opt):
    """Clip every parameter of ``opt.target`` into ``[self.low, self.high]``.

    CPU arrays are clipped in place with ``numpy.clip``; GPU arrays go
    through a fused elementwise kernel that writes the clipped value
    back into the parameter.
    """
    if cuda.available:
        kernel = cuda.elementwise(
            'T low, T high', 'T p',
            'p = (p < low) ? low : (p > high) ? high : p',
            'weight_clip')
    for param in opt.target.params():
        p = param.data
        with cuda.get_device(p) as dev:
            if int(dev) == -1:
                # BUG FIX: numpy.clip returns a new array; without
                # out=p the result was discarded and the parameter was
                # never actually clipped on CPU.
                numpy.clip(p, self.low, self.high, out=p)
            else:
                kernel(self.low, self.high, p)
Example #3
Source File: weight_clip.py From binary_net with Apache License 2.0 | 5 votes |
def __call__(self, opt):
    """Constrain each parameter of ``opt.target`` to ``[self.low, self.high]``.

    Uses in-place ``numpy.clip`` for arrays on CPU and a fused
    elementwise kernel for arrays on GPU, so the parameter data is
    modified in place in both branches.
    """
    if cuda.available:
        kernel = cuda.elementwise(
            'T low, T high', 'T p',
            'p = (p < low) ? low : (p > high) ? high : p',
            'weight_clip')
    for param in opt.target.params():
        p = param.data
        with cuda.get_device(p) as dev:
            if int(dev) == -1:
                # BUG FIX: the original call dropped numpy.clip's
                # return value, leaving the parameter unclipped on CPU;
                # out=p makes the clip in place.
                numpy.clip(p, self.low, self.high, out=p)
            else:
                kernel(self.low, self.high, p)
Example #4
Source File: categorical_dqn.py From chainerrl with MIT License | 4 votes |
def _apply_categorical_projection(y, y_probs, z):
    """Project a categorical distribution onto a fixed atom support.

    Implements Algorithm 1 of the categorical DQN paper
    (https://arxiv.org/abs/1707.06887): probability mass attached to
    atom values ``y`` is redistributed onto the fixed, evenly spaced
    atom values ``z``.

    Args:
        y (ndarray): Atom values before projection,
            shape (batch_size, n_atoms).
        y_probs (ndarray): Probabilities of the atoms in ``y``,
            shape (batch_size, n_atoms).
        z (ndarray): Target atom values, shape (n_atoms,), assumed to
            be sorted in ascending order and evenly spaced.

    Returns:
        ndarray: Probabilities of the atoms in ``z``,
            shape (batch_size, n_atoms).
    """
    batch_size, n_atoms = y.shape
    assert z.shape == (n_atoms,)
    assert y_probs.shape == (batch_size, n_atoms)
    delta_z = z[1] - z[0]
    v_min = z[0]
    v_max = z[-1]
    xp = cuda.get_array_module(z)
    y = xp.clip(y, v_min, v_max)

    # Fractional position of each y-atom on the z grid:
    # bj in [0, n_atoms - 1], shape (batch_size, n_atoms).
    bj = (y - v_min) / delta_z
    assert bj.shape == (batch_size, n_atoms)
    # delta_z is inexact in floating point, so bj can drift slightly
    # outside the valid index range; clamp it back.
    bj = xp.clip(bj, 0, n_atoms - 1)

    # Lower/upper neighbouring grid indices for each bj,
    # each of shape (batch_size, n_atoms).
    lo = xp.floor(bj)
    up = xp.ceil(bj)
    assert lo.shape == (batch_size, n_atoms)
    assert up.shape == (batch_size, n_atoms)

    if cuda.available and xp is cuda.cupy:
        scatter_add = cuda.cupyx.scatter_add
    else:
        scatter_add = np.add.at
    z_probs = xp.zeros((batch_size, n_atoms), dtype=xp.float32)
    # Row offsets so each batch row scatters into its own slice of the
    # flattened output.
    offset = xp.arange(
        0, batch_size * n_atoms, n_atoms, dtype=xp.int32)[..., None]

    # Accumulate mass on the lower atom. The paper uses u - bj, but
    # 1 - (bj - l) is equivalent and stays correct when bj is an
    # integer (l == u == bj), where u - bj would drop the mass.
    scatter_add(
        z_probs.ravel(),
        (lo.astype(xp.int32) + offset).ravel(),
        (y_probs * (1 - (bj - lo))).ravel())
    # Accumulate mass on the upper atom.
    scatter_add(
        z_probs.ravel(),
        (up.astype(xp.int32) + offset).ravel(),
        (y_probs * (bj - lo)).ravel())
    return z_probs