Python chainer.backends.cuda.to_cpu() Examples
The following are 29 code examples of chainer.backends.cuda.to_cpu(), taken from the chainer and chainer-compiler projects (MIT License).
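
Before the collected examples, here is a minimal orientation sketch (not taken from any of the projects below) showing the round trip most of these tests rely on: cuda.to_cpu() always hands back a numpy.ndarray, passing host arrays through unchanged and copying device (CuPy) arrays back to the host. The helper name to_host is made up for illustration, and the GPU branch assumes CUDA and CuPy are installed.

import numpy
from chainer.backends import cuda

def to_host(x):
    # Illustrative wrapper: cuda.to_cpu returns a numpy.ndarray.
    # numpy inputs are returned as-is; cupy inputs are copied device-to-host.
    return cuda.to_cpu(x)

x_cpu = numpy.arange(6, dtype=numpy.float32).reshape(2, 3)
assert to_host(x_cpu) is x_cpu  # already on the host, so no copy is made

if cuda.available:  # run the device round trip only when a GPU is present
    x_gpu = cuda.to_gpu(x_cpu)                                # numpy -> cupy
    numpy.testing.assert_array_equal(to_host(x_gpu), x_cpu)   # cupy -> numpy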
Example #1
Source File: test_dropout.py From chainer with MIT License

def _check(self, backend_config):
    mask = self.mask if self.specify_mask else None
    x, mask = backend_config.get_array((self.x, mask))
    with chainer.using_config('train', self.train), backend_config:
        out, out_mask = functions.dropout(
            x, 0.5, mask=mask, return_mask=True)

    if self.train:
        assert isinstance(out_mask, type(out.array))
        if mask is None:
            assert out_mask.shape == out.array.shape
        else:
            assert out_mask is mask
    else:
        assert out_mask is None

    with chainer.using_config('train', self.train):
        out2 = functions.dropout(self.x, 0.5, mask=cuda.to_cpu(out_mask))
    testing.assert_allclose(out.array, out2.array)
Example #2
Source File: test_link.py From chainer with MIT License

def _check_deepcopy(self, link):
    self.assertIsInstance(link._params, set)
    self.assertIsInstance(link._persistent, set)
    self.assertTrue(hasattr(link, 'x'))
    self.assertTrue(hasattr(link, 'y'))
    self.assertTrue(hasattr(link, 'u'))
    self.assertTrue(hasattr(link, 'p'))
    self.assertIsNot(link.x, self.link.x)
    self.assertIsNot(link.x.data, self.link.x.data)
    numpy.testing.assert_array_equal(cuda.to_cpu(link.x.data),
                                     cuda.to_cpu(self.link.x.data))
    self.assertIsNot(link.y, self.link.y)
    self.assertIsNot(link.y.data, self.link.y.data)
    numpy.testing.assert_array_equal(cuda.to_cpu(link.y.data),
                                     cuda.to_cpu(self.link.y.data))
    self.assertIsNone(link.u.data)
    self.assertIsNot(link.p, self.link.p)
    self.assertEqual(link.name, self.link.name)
Example #3
Source File: test_link.py From chainer with MIT License

def test_to_cpu_on_cpu(self):
    x1 = self.l1.x.data
    gx1 = self.l1.x.grad
    x2 = self.l2.x.data
    gx2 = self.l2.x.grad
    x3 = self.l3.x.data
    with testing.assert_warns(DeprecationWarning):
        self.c2.to_cpu()
    self.assertIs(self.l1.x.data, x1)
    self.assertIs(self.l1.x.grad, gx1)
    self.assertIs(self.l2.x.data, x2)
    self.assertIs(self.l2.x.grad, gx2)
    self.assertIs(self.l3.x.data, x3)
    with pytest.raises(RuntimeError):
        self.l3.x.grad
Example #4
Source File: EspNet_BLSTM.py From chainer-compiler with MIT License

def forward(self, xs, ilens):
    '''BLSTM forward (the modified version)

    :param xs:
    :param ilens:
    :return:
    '''
    logging.info(self.__class__.__name__ + ' input lengths: ' + str(ilens))
    # need to move ilens to cpu
    ilens = cuda.to_cpu(ilens)
    hy, cy, ys = self.nblstm(None, None, xs)
    ys = self.l_last(F.vstack(ys))  # (sum _utt frame_utt) x dim
    xs = F.split_axis(ys, np.cumsum(ilens[:-1]), axis=0)
    del hy, cy

    # final tanh operation
    xs = F.split_axis(F.tanh(F.vstack(xs)), np.cumsum(ilens[:-1]), axis=0)

    # EDIT(hamaji): Unnecessary, as `force_tuple` is True by default.
    # # 1 utterance case, it becomes an array, so need to make a utt tuple
    # if not isinstance(xs, tuple):
    #     xs = [xs]

    return xs, ilens  # x: utt list of frame x dim
Example #5
Source File: EspNet_BLSTM.py From chainer-compiler with MIT License

def original(self, xs, ilens):
    '''BLSTM forward (the original implementation)

    :param xs:
    :param ilens:
    :return:
    '''
    logging.info(self.__class__.__name__ + ' input lengths: ' + str(ilens))
    # need to move ilens to cpu
    ilens = cuda.to_cpu(ilens)
    hy, cy, ys = self.nblstm(None, None, xs)
    ys = self.l_last(F.vstack(ys))  # (sum _utt frame_utt) x dim
    xs = F.split_axis(ys, np.cumsum(ilens[:-1]), axis=0)
    del hy, cy

    # final tanh operation
    xs = F.split_axis(F.tanh(F.vstack(xs)), np.cumsum(ilens[:-1]), axis=0)

    # 1 utterance case, it becomes an array, so need to make a utt tuple
    if not isinstance(xs, tuple):
        xs = [xs]

    return xs, ilens  # x: utt list of frame x dim
Example #6
Source File: test_link.py From chainer with MIT License

def test_to_cpu_on_cpu(self):
    x = self.link.x.data
    gx = self.link.x.grad
    y = self.link.y.data
    gy = self.link.y.grad
    p = self.link.p
    with testing.assert_warns(DeprecationWarning):
        self.link.to_cpu()
    self.assertIs(self.link.x.data, x)
    self.assertIs(self.link.x.grad, gx)
    self.assertIs(self.link.y.data, y)
    self.assertIs(self.link.y.grad, gy)
    self.assertIsNone(self.link.u.data)
    u = self.link.u
    with pytest.raises(RuntimeError):
        u.grad
    self.assertIs(self.link.p, p)
Example #7
Source File: test_link.py From chainer with MIT License

def test_to_cpu(self):
    self.set_count_parameters()
    with testing.assert_warns(DeprecationWarning):
        self.c2.to_gpu()
    with testing.assert_warns(DeprecationWarning):
        self.c2.to_cpu()
    self.assertIs(self.c2.xp, numpy)
    self.assertIs(self.c1.xp, numpy)
    self.assertIs(self.l1.xp, numpy)
    self.assertIs(self.l2.xp, numpy)
    self.assertIs(self.l3.xp, numpy)
    self.assertIsInstance(self.l1.x.data, numpy.ndarray)
    self.assertIsInstance(self.l1.x.grad, numpy.ndarray)
    self.assertIsInstance(self.l2.x.data, numpy.ndarray)
    self.assertIsInstance(self.l2.x.grad, numpy.ndarray)
    self.assertIsNone(self.l3.x.data)
    self.assertIsNone(self.l3.x.grad)
    self.l3.x.initialize(3)
    self.assertIsInstance(self.l3.x.data, numpy.ndarray)
    self.assertIsInstance(self.l3.x.grad, numpy.ndarray)
Example #8
Source File: test_link.py From chainer with MIT License

def test_to_cpu_on_cpu(self):
    x1 = self.l1.x.data
    gx1 = self.l1.x.grad
    x2 = self.l2.x.data
    gx2 = self.l2.x.grad
    x3 = self.l3.x.data
    gx3 = self.l3.x.grad
    with testing.assert_warns(DeprecationWarning):
        self.c2.to_cpu()
    self.assertIs(self.l1.x.data, x1)
    self.assertIs(self.l1.x.grad, gx1)
    self.assertIs(self.l2.x.data, x2)
    self.assertIs(self.l2.x.grad, gx2)
    self.assertIs(self.l3.x.data, x3)
    self.assertIs(self.l3.x.grad, gx3)
Example #9
Source File: test_link.py From chainer with MIT License

def test_to_cpu(self):
    with testing.assert_warns(DeprecationWarning):
        self.c2.to_gpu()
    with testing.assert_warns(DeprecationWarning):
        self.c2.to_cpu()
    self.assertIs(self.c2.xp, numpy)
    self.assertIs(self.c1.xp, numpy)
    self.assertIs(self.l1.xp, numpy)
    self.assertIs(self.l2.xp, numpy)
    self.assertIs(self.l3.xp, numpy)
    self.assertIsInstance(self.l1.x.data, numpy.ndarray)
    self.assertIsInstance(self.l1.x.grad, numpy.ndarray)
    self.assertIsInstance(self.l2.x.data, numpy.ndarray)
    self.assertIsInstance(self.l2.x.grad, numpy.ndarray)
    self.assertIsInstance(self.l3.x.data, numpy.ndarray)
    self.assertIsInstance(self.l3.x.grad, numpy.ndarray)
Example #10
Source File: test_tuple_dataset.py From chainer with MIT License

def check_tuple_dataset(self, x0, x1):
    td = datasets.TupleDataset(x0, x1)
    self.assertEqual(len(td), len(x0))

    for i in range(len(x0)):
        example = td[i]
        self.assertEqual(len(example), 2)
        numpy.testing.assert_array_equal(
            cuda.to_cpu(example[0]), cuda.to_cpu(x0[i]))
        numpy.testing.assert_array_equal(
            cuda.to_cpu(example[1]), cuda.to_cpu(x1[i]))

    example_range = td[0:len(x0)]
    for i in range(len(x0)):
        example = example_range[i]
        self.assertEqual(len(example), 2)
        numpy.testing.assert_array_equal(
            cuda.to_cpu(example[0]), cuda.to_cpu(x0[i]))
        numpy.testing.assert_array_equal(
            cuda.to_cpu(example[1]), cuda.to_cpu(x1[i]))
Example #11
Source File: test_dict_dataset.py From chainer with MIT License

def check_dict_dataset(self, x, y):
    dd = datasets.DictDataset(x=x, y=y)
    self.assertEqual(len(dd), len(x))

    for i in range(len(x)):
        example = dd[i]
        self.assertIn('x', example)
        self.assertIn('y', example)
        numpy.testing.assert_array_equal(
            cuda.to_cpu(example['x']), cuda.to_cpu(x[i]))
        numpy.testing.assert_array_equal(
            cuda.to_cpu(example['y']), cuda.to_cpu(y[i]))

    example_range = dd[0:len(x)]
    for i in range(len(x)):
        example = example_range[i]
        self.assertIn('x', example)
        self.assertIn('y', example)
        numpy.testing.assert_array_equal(
            cuda.to_cpu(example['x']), cuda.to_cpu(x[i]))
        numpy.testing.assert_array_equal(
            cuda.to_cpu(example['y']), cuda.to_cpu(y[i]))
Example #12
Source File: test_vision.py From chainer with MIT License

def check_extract(self):
    x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
    x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)
    result = self.link.extract([x1, x2], layers=['pool3', 'fc7'])
    assert len(result) == 2

    y1 = cuda.to_cpu(result['pool3'].data)
    assert y1.shape == (2, 256, 28, 28)
    assert y1.dtype == self.dtype

    y2 = cuda.to_cpu(result['fc7'].data)
    assert y2.shape == (2, 4096)
    assert y2.dtype == self.dtype

    x3 = numpy.random.uniform(0, 255, (80, 60)).astype(numpy.uint8)
    result = self.link.extract([x3], layers=['pool1'], size=None)
    assert len(result) == 1

    y3 = cuda.to_cpu(result['pool1'].data)
    assert y3.shape == (1, 64, 40, 30)
    assert y3.dtype == self.dtype
Example #13
Source File: test_vision.py From chainer with MIT License

def check_extract(self):
    x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
    x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)
    result = self.link.extract([x1, x2], layers=['pool5', 'loss3_fc'])
    assert len(result) == 2

    y1 = cuda.to_cpu(result['pool5'].data)
    assert y1.shape == (2, 1024, 1, 1)
    assert y1.dtype == self.dtype

    y2 = cuda.to_cpu(result['loss3_fc'].data)
    assert y2.shape == (2, 1000)
    assert y2.dtype == self.dtype

    x3 = numpy.random.uniform(0, 255, (80, 60)).astype(numpy.uint8)
    result = self.link.extract([x3], layers=['pool1'], size=None)
    assert len(result) == 1

    y3 = cuda.to_cpu(result['pool1'].data)
    assert y3.shape == (1, 64, 20, 15)
    assert y3.dtype == self.dtype
Example #14
Source File: test_spatial_transformer_grid.py From chainer with MIT License

def check_forward(self, theta, output_shape):
    grid = functions.spatial_transformer_grid(theta, output_shape).data
    theta = cuda.to_cpu(theta)

    B = theta.shape[0]
    H, W = output_shape

    expected = []
    for b in range(B):
        for i in numpy.linspace(-1., 1., H):
            for j in numpy.linspace(-1., 1., W):
                coord = numpy.array([j, i, 1])
                expected.append(self.theta[b].dot(coord))
    expected = numpy.array(
        expected).reshape(B, H, W, 2).transpose(0, 3, 1, 2)
    testing.assert_allclose(grid, expected, **self.check_forward_options)
    self.assertEqual(grid.dtype, self.dtype)
Example #15
Source File: test_sigmoid_cross_entropy.py From chainer with MIT License

def check_forward_no_reduction(self, x_data, t_data):
    x_val = chainer.Variable(x_data)
    t_val = chainer.Variable(t_data)
    loss = functions.sigmoid_cross_entropy(
        x_val, t_val, self.normalize, reduce='no')
    self.assertEqual(loss.data.shape, self.x.shape)
    self.assertEqual(loss.data.dtype, self.dtype)
    loss_value = cuda.to_cpu(loss.data)

    # Compute expected value
    if not getattr(self, 'ignore_all', False):
        for i in six.moves.range(self.x.shape[0]):
            for j in six.moves.range(self.x.shape[1]):
                xd, td = self.x[i, j], self.t[i, j]
                if td == -1:
                    loss_expect = 0
                else:
                    loss_expect = -(
                        xd * (td - (xd >= 0)) -
                        math.log(1 + math.exp(-numpy.abs(xd))))
                self.assertAlmostEqual(
                    loss_expect, loss_value[i, j], places=self.places)
Example #16
Source File: test_huber_loss.py From chainer with MIT License

def check_forward(self, x_data, t_data):
    x = chainer.Variable(x_data)
    t = chainer.Variable(t_data)
    loss = functions.huber_loss(x, t, delta=1, reduce=self.reduce)
    self.assertEqual(loss.data.dtype, self.dtype)
    loss_value = cuda.to_cpu(loss.data)

    diff_data = cuda.to_cpu(x_data) - cuda.to_cpu(t_data)
    loss_expect = numpy.zeros(self.shape)
    mask = numpy.abs(diff_data) < 1
    loss_expect[mask] = 0.5 * diff_data[mask] ** 2
    loss_expect[~mask] = numpy.abs(diff_data[~mask]) - 0.5
    if self.reduce == 'sum_along_second_axis':
        loss_expect = numpy.sum(loss_expect, axis=1)
    testing.assert_allclose(
        loss_value, loss_expect, **self.forward_options)
Example #17
Source File: test_link.py From chainer with MIT License

def test_intel64_to_cpu(self):
    link = self.link
    with testing.assert_warns(DeprecationWarning):
        link.to_intel64()
    assert isinstance(link.device, backend.Intel64Device)
    with testing.assert_warns(DeprecationWarning):
        link.to_cpu()
    assert isinstance(link.device, backend.CpuDevice)

    # Arrays should be converted to numpy.ndarray

    # Initialized parameter
    assert isinstance(link.y.data, numpy.ndarray)
    _assert_variable_array_equal(link.y, self.y_array)
    # Uninitialized parameter
    assert link.v.data is None
    # Persistent ndarray
    assert isinstance(link.pa, numpy.ndarray)
    _assert_arrays_equal(link.pa, self.pa_array)
    # Persistent scalar
    assert link.ps == self.ps_scalar
Example #18
Source File: test_triplet.py From chainer with MIT License

def check_forward(self, a_data, p_data, n_data):
    a_val = chainer.Variable(a_data)
    p_val = chainer.Variable(p_data)
    n_val = chainer.Variable(n_data)
    loss = functions.triplet(a_val, p_val, n_val, self.margin, self.reduce)
    if self.reduce == 'mean':
        self.assertEqual(loss.data.shape, ())
    else:
        self.assertEqual(loss.data.shape, (self.batchsize,))
    self.assertEqual(loss.data.dtype, self.dtype)
    loss_value = cuda.to_cpu(loss.data)

    #
    # Compute expected value
    #
    loss_expect = numpy.empty((self.a.shape[0],), dtype=self.dtype)
    for i in six.moves.range(self.a.shape[0]):
        ad, pd, nd = self.a[i], self.p[i], self.n[i]
        dp = numpy.sum((ad - pd) ** 2)
        dn = numpy.sum((ad - nd) ** 2)
        loss_expect[i] = max((dp - dn + self.margin), 0)
    if self.reduce == 'mean':
        loss_expect = loss_expect.mean()
    numpy.testing.assert_allclose(
        loss_expect, loss_value, **self.check_forward_options)
Example #19
Source File: test_vision.py From chainer with MIT License

def check_extract(self):
    x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
    x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)
    with numpy.errstate(divide='ignore'):
        result = self.link.extract([x1, x2], layers=['res3', 'pool5'])
        assert len(result) == 2

        y1 = cuda.to_cpu(result['res3'].data)
        assert y1.shape == (2, 512, 28, 28)
        assert y1.dtype == self.dtype

        y2 = cuda.to_cpu(result['pool5'].data)
        assert y2.shape == (2, 2048)
        assert y2.dtype == self.dtype

        x3 = numpy.random.uniform(0, 255, (80, 60)).astype(numpy.uint8)
        result = self.link.extract([x3], layers=['res2'], size=None)
        assert len(result) == 1

        y3 = cuda.to_cpu(result['res2'].data)
        assert y3.shape == (1, 256, 20, 15)
        assert y3.dtype == self.dtype
Example #20
Source File: test_select_item.py From chainer with MIT License

def check_forward(self, x_data, t_data):
    x = chainer.Variable(x_data)
    t = chainer.Variable(t_data)
    y = functions.select_item(x, t)
    y_exp = cuda.to_cpu(x_data)[range(t_data.size), cuda.to_cpu(t_data)]

    self.assertEqual(y.data.dtype, self.dtype)
    numpy.testing.assert_equal(cuda.to_cpu(y.data), y_exp)
Example #21
Source File: test_space_2_depth.py From chainer with MIT License

def check_forward(self, space_data, depth_data):
    space = chainer.Variable(space_data)
    s2d = functions.space2depth(space, self.r)
    s2d_value = cuda.to_cpu(s2d.data)

    self.assertEqual(s2d_value.dtype, self.dtype)
    self.assertEqual(s2d_value.shape, (2, 8, 3, 2))

    s2d_expect = depth_data
    testing.assert_allclose(s2d_value, s2d_expect)
Example #22
Source File: test_im2col.py From chainer with MIT License

def check_forward(self, x, kh, kw, sy, sx, ph, pw, dy, dx, gpu):
    x = x.copy()
    n, c, h, w = x.shape
    col = functions.im2col(
        x, (kh, kw), (sy, sx), (ph, pw), dilate=(dy, dx)).data
    col_h = get_conv_outsize(h, kh, sy, ph, d=dy)
    col_w = get_conv_outsize(w, kw, sx, pw, d=dx)
    self.assertEqual(col.shape, (n, c * kh * kw, col_h, col_w))

    col = col.reshape(n, c, kh, kw, col_h, col_w)
    col = cuda.to_cpu(col)

    for y in moves.range(col_h):
        for x in moves.range(col_w):
            for ky in moves.range(kh):
                for kx in moves.range(kw):
                    oy = y * sy - ph + ky * dy
                    ox = x * sx - pw + kx * dx
                    if 0 <= oy < h and 0 <= ox < w:
                        testing.assert_allclose(
                            col[:, :, ky, kx, y, x],
                            self.x[:, :, oy, ox])
                    else:
                        testing.assert_allclose(
                            col[:, :, ky, kx, y, x],
                            numpy.zeros((2, 3), self.dtype))
Example #23
Source File: test_ndtri.py From chainer with MIT License

def _ndtri_gpu(x, dtype):
    return cuda.to_gpu(_ndtri_cpu(cuda.to_cpu(x), dtype))
Example #24
Source File: test_trigonometric.py From chainer with MIT License

def check_forward(self, x1_data, x2_data):
    y = F.arctan2(x1_data, x2_data)
    numpy.testing.assert_array_less(
        cuda.to_cpu(y.data),
        numpy.full(y.shape, numpy.pi))
    numpy.testing.assert_array_less(
        numpy.full(y.shape, -numpy.pi),
        cuda.to_cpu(y.data))
    testing.assert_allclose(
        numpy.arctan2(self.x1, self.x2), y.data, atol=1e-4, rtol=1e-4)
Example #25
Source File: test_erfc.py From chainer with MIT License

def _erfc_gpu(x, dtype):
    return cuda.to_gpu(_erfc_cpu(cuda.to_cpu(x), dtype))
Example #26
Source File: test_erfinv.py From chainer with MIT License

def _erfinv_gpu(x, dtype):
    return cuda.to_gpu(_erfinv_cpu(cuda.to_cpu(x), dtype))
Example #27
Source File: test_ndtr.py From chainer with MIT License

def _ndtr_gpu(x, dtype):
    return cuda.to_gpu(_ndtr_cpu(cuda.to_cpu(x), dtype))
Example #28
Source File: test_log_ndtr.py From chainer with MIT License

def _log_ndtr_gpu(x, dtype):
    return cuda.to_gpu(_log_ndtr_cpu(cuda.to_cpu(x), dtype))
Example #29
Source File: test_depth_2_space.py From chainer with MIT License

def check_forward(self, depth_data, space_data):
    depth = chainer.Variable(depth_data)
    d2s = functions.depth2space(depth, self.r)
    d2s_value = cuda.to_cpu(d2s.data)

    self.assertEqual(d2s_value.dtype, self.dtype)
    self.assertEqual(d2s_value.shape, (2, 2, 6, 4))

    d2s_expect = space_data
    testing.assert_allclose(d2s_value, d2s_expect)