Python chainer.backends.cuda.cupy Examples

The following are 30 code examples of chainer.backends.cuda.cupy, Chainer's reference to the CuPy module used for GPU arrays (check cuda.available before relying on it). Each snippet is an excerpt from a larger test class in the chainer project; the source file and license are noted above each example, and the snippets assume surrounding imports such as numpy, chainer, chainerx, and from chainer.backends import cuda. You may also want to check out all available functions/classes of the module chainer.backends.cuda, or try the search function.
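As a primer (a minimal, hypothetical sketch, not taken from the examples below), this is the device-agnostic pattern most of these tests rely on: cuda.cupy refers to the CuPy module when a GPU is available, so the same array code can dispatch between NumPy and CuPy at runtime.

import numpy
from chainer.backends import cuda

x = numpy.arange(6, dtype=numpy.float32)
if cuda.available:
    # Copy the array to the current GPU; the result is a cupy.ndarray.
    x = cuda.to_gpu(x)

# get_array_module returns numpy or cuda.cupy depending on where x lives.
xp = cuda.get_array_module(x)
y = xp.sqrt(x)  # the same call works on either backend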
Example #1
Source File: test_function_node.py    From chainer with MIT License
def _dummy_func(self, bwd_return_data):
        # Create a dummy func that returns `bwd_return_data` in the
        # `backward` method.

        def one(xp):
            return xp.array(1, numpy.float32)

        class DummyFunc(chainer.FunctionNode):
            def forward_cpu(self, inputs):
                return one(numpy),

            def forward_gpu(self, inputs):
                return one(cuda.cupy),

            def backward(self, indexes, grad_outputs):
                return bwd_return_data

        return DummyFunc() 
Example #2
Source File: test_link.py    From chainer with MIT License
def setUp(self):
        self.link = chainer.Link()
        shape = (2, 2)
        dtype = numpy.float32
        y_array = numpy.random.rand(*shape).astype(dtype)
        pa_array = numpy.random.rand(*shape).astype(dtype)
        ps_scalar = 2.4

        with self.link.init_scope():
            # Initialized parameter
            self.link.y = chainer.Parameter(y_array)
            # Uninitialized parameter
            self.link.v = chainer.Parameter()
            # Persistent ndarray
            self.link.add_persistent('pa', pa_array)
            # Persistent scalar
            self.link.add_persistent('ps', ps_scalar)
        self.y_array = y_array
        self.pa_array = pa_array
        self.ps_scalar = ps_scalar

        if cuda.available:
            self.current_device_id = cuda.cupy.cuda.get_device_id() 
Example #3
Source File: test_link.py    From chainer with MIT License
def test_intel64_to_gpu(self):
        link = self.link
        with testing.assert_warns(DeprecationWarning):
            link.to_intel64()
        assert isinstance(link.device, backend.Intel64Device)
        with testing.assert_warns(DeprecationWarning):
            link.to_gpu()
        assert link.device.device == cuda.Device(0)

        # Arrays should be converted to cupy.ndarray

        # Initialized parameter
        assert isinstance(link.y.data, cuda.cupy.ndarray)
        _assert_variable_array_equal(link.y, self.y_array)
        # Uninitialized parameter
        assert link.v.data is None
        # Persistent ndarray
        assert isinstance(link.pa, cuda.ndarray)
        _assert_arrays_equal(link.pa, self.pa_array)
        # Persistent scalar
        assert link.ps == self.ps_scalar 
Example #4
Source File: test_link.py    From chainer with MIT License
def test_to_gpu(self):
        self.set_count_parameters()
        cupy = cuda.cupy
        with testing.assert_warns(DeprecationWarning):
            self.c2.to_gpu()
        self.assertIs(self.c2.xp, cupy)
        self.assertIs(self.c1.xp, cupy)
        self.assertIs(self.l1.xp, cupy)
        self.assertIs(self.l2.xp, cupy)
        self.assertIs(self.l3.xp, cupy)
        self.assertIsInstance(self.l1.x.data, cupy.ndarray)
        self.assertIsInstance(self.l1.x.grad, cupy.ndarray)
        self.assertIsInstance(self.l2.x.data, cupy.ndarray)
        self.assertIsInstance(self.l2.x.grad, cupy.ndarray)
        self.assertIsNone(self.l3.x.data)
        self.assertIsNone(self.l3.x.grad)

        self.l3.x.initialize(3)
        self.assertIsInstance(self.l3.x.data, cupy.ndarray)
        self.assertIsInstance(self.l3.x.grad, cupy.ndarray) 
Example #5
Source File: test_link.py    From chainer with MIT License
def test_to_chx(self, backend_config):
        self.link.to_device(backend_config.device)
        self.link.to_chx()

        source_device = backend_config.device

        self.check_param_init('x', (2, 3), 'd')
        self.check_param_init('y', (2,), 'f')
        self.check_param_uninit('u')

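        # Expected device after to_chx: a ChainerX device is kept as-is,
        # numpy maps to ChainerX 'native:0', and cupy maps to ChainerX
        # 'cuda' on the same GPU id.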
        if source_device.xp is chainerx:
            expected_device = source_device
        elif source_device.xp is numpy:
            expected_device = backend.ChainerxDevice(
                chainerx.get_device('native', 0))
        elif source_device.xp is cuda.cupy:
            expected_device = backend.ChainerxDevice(
                chainerx.get_device('cuda', source_device.device.id))
        else:
            assert False

        self.assertEqual(self.link.device, expected_device) 
Example #6
Source File: test_link.py    From chainer with MIT License
def test_copy_and_to_gpu_uninit_multi_gpu(self):
        cupy = cuda.cupy
        l0 = self.link
        l1 = l0.copy()
        l2 = l0.copy()
        self.assertIsNone(l0.u.data)
        self.assertIsNone(l1.u.data)
        self.assertIsNone(l2.u.data)
        with testing.assert_warns(DeprecationWarning):
            l1.to_gpu()
        l1.u.initialize((2, 3))
        with testing.assert_warns(DeprecationWarning):
            l2.to_gpu()
        l2.u.initialize((2, 3))
        self.assertIsNone(l0.u.data)
        self.assertIsInstance(l1.u.data, cupy.ndarray)
        self.assertIsInstance(l2.u.data, cupy.ndarray)
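        # u was still uninitialized when copied, so each copy allocates its
        # own GPU buffer; the underlying memory pointers must differ.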
        self.assertNotEqual(l1.u.data.data, l2.u.data.data) 
Example #7
Source File: test_zoneoutlstm.py    From chainer with MIT License
def check_to_cpu_to_gpu(self, c, h):
        self.link.c = c
        self.link.h = h
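        # to_gpu must be idempotent, and to_cpu followed by to_gpu must
        # round-trip c and h between numpy and cupy ndarrays.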
        with testing.assert_warns(DeprecationWarning):
            self.link.to_gpu()
        self.assertIs(self.link.xp, cuda.cupy)
        self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
        self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
        with testing.assert_warns(DeprecationWarning):
            self.link.to_gpu()
        self.assertIs(self.link.xp, cuda.cupy)
        self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
        self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
        with testing.assert_warns(DeprecationWarning):
            self.link.to_cpu()
        self.assertIs(self.link.xp, numpy)
        self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
        self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
        with testing.assert_warns(DeprecationWarning):
            self.link.to_gpu()
        self.assertIs(self.link.xp, cuda.cupy)
        self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
        self.assertIsInstance(self.link.h.data, self.link.xp.ndarray) 
Example #8
Source File: test_link.py    From chainer with MIT License
def setUp(self):
        x_shape_0 = 2
        x_shape_1 = numpy.int64(3)
        self.link = chainer.Link(x=((x_shape_0, x_shape_1), 'd'),
                                 u=(None, 'd'))
        with self.link.init_scope():
            self.link.y = chainer.Parameter(shape=(2,))
            self.link.v = chainer.Parameter()
        self.p = numpy.array([1, 2, 3], dtype='f')
        self.link.add_persistent('p', self.p)
        self.link.name = 'a'
        self.link.x.update_rule = chainer.UpdateRule()
        self.link.x.update_rule.enabled = False
        self.link.u.update_rule = chainer.UpdateRule()
        if cuda.available:
            self.current_device_id = cuda.cupy.cuda.get_device_id() 
Example #9
Source File: test_variable.py    From chainer with MIT License
def test_backward_accumulate_gpu(self):
        self._to_gpu()
        self.check_backward_accumulate(cuda.cupy) 
Example #10
Source File: test_variable.py    From chainer with MIT License
def test_debug_print_gpu(self):
        v = chainer.Variable(self.arr)
        v.to_gpu(0)

        result = v.debug_print()
        assert 'device: <CUDA Device 0>' in result
        assert 'cupy.core.core.ndarray' in result

        self.check_debug_print(v, mean=float(cuda.cupy.mean(v.data)),
                               std=float(cuda.cupy.std(v.data))) 
Example #11
Source File: test_variable.py    From chainer with MIT License
def test_backward_gpu(self):
        ret = self.create_linear_chain(2, cuda.cupy)
        self.check_backward((ret[0], ), (ret[1], ), (ret[2], ), False)

    # TODO(kataoka): Variable.backward with ChainerX backend unexpectedly
    # behaves like retain_grad=True 
Example #12
Source File: test_variable.py    From chainer with MIT License
def test_backward_accumulate_gpu(self):
        self.check_backward_accumulate(cuda.cupy) 
Example #13
Source File: test_variable.py    From chainer with MIT License
def test_backward_gpu_retain_grad(self):
        ret = self.create_linear_chain(2, cuda.cupy)
        self.check_backward((ret[0], ), (ret[1], ), (ret[2], ), True) 
Example #14
Source File: test_variable.py    From chainer with MIT License
def test_double_backprop_gpu(self):
        self.check_double_backprop(cuda.cupy) 
Example #15
Source File: test_variable.py    From chainer with MIT License
def test_unchain_backward_gpu(self):
        ret = self.create_linear_chain(3, cuda.cupy)
        ret[1].unchain_backward()
        self.check_backward((ret[1], ), (ret[2], ), (ret[3], ), False) 
Example #16
Source File: test_variable.py    From chainer with MIT License
def test_cleargrad_gpu(self):
        self.check_cleargrad(cuda.cupy.empty(3, dtype=np.float32)) 
Example #17
Source File: test_deconvolution_2d.py    From chainer with MIT License
def before_test(self, test_name):
        # cuDNN 5 and 5.1 results suffer from precision issues
        using_old_cudnn = (self.backend_config.xp is cuda.cupy
                           and self.backend_config.use_cudnn == 'always'
                           and cuda.cuda.cudnn.getVersion() < 6000)
        if using_old_cudnn:
            self.check_backward_options.update({'atol': 3e-2, 'rtol': 5e-2}) 
Example #18
Source File: test_variable.py    From chainer with MIT License
def test_addgrad_to_uninitialized_parameter_gpu_to_another_gpu(self):
        x = chainer.Parameter()
        y = chainer.Parameter(self.a)
        y.grad = self.b
        x.to_gpu(1)
        y.to_gpu(0)
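        # addgrad on the uninitialized x must initialize it from y and put
        # both data and grad on x's device (GPU 1).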
        x.cleargrad()
        x.addgrad(y)
        cp = cuda.cupy
        assert isinstance(x.data, cp.ndarray)
        assert isinstance(x.grad, cp.ndarray)
        assert int(x.data.device) == 1
        assert int(x.grad.device) == 1
        cp.testing.assert_array_equal(x.grad, self.b) 
Example #19
Source File: test_variable.py    From chainer with MIT License
def test_addgrad_to_uninitialized_parameter_gpu_to_gpu(self):
        x = chainer.Parameter()
        y = chainer.Parameter(self.a)
        y.grad = self.b
        x.to_gpu()
        y.to_gpu()
        x.cleargrad()
        x.addgrad(y)
        cp = cuda.cupy
        assert isinstance(x.data, cp.ndarray)
        assert isinstance(x.grad, cp.ndarray)
        cp.testing.assert_array_equal(x.grad, self.b) 
Example #20
Source File: test_variable.py    From chainer with MIT License
def get_array(xp, arr):
    if xp is np:
        return arr
    if xp is cuda.cupy:
        return cuda.to_gpu(arr)
    if xp is chainerx:
        return chainerx.array(arr)
    assert False 
Example #21
Source File: test_variable.py    From chainer with MIT License
def backward_gpu(self, inputs, grad_outputs):
        return tuple(map(cuda.cupy.zeros_like, inputs)) 
Example #22
Source File: test_convert.py    From chainer with MIT License
def get_xp(gpu):
    if gpu:
        return cuda.cupy
    else:
        return numpy 
Example #23
Source File: test_optimizer.py    From chainer with MIT License
def test_update(self, backend_config):
        device = backend_config.device
        override_pattern = self.override_pattern
        optimizer, call_record = self.create(device)

        optimizer.update()

        self.assertEqual(len(call_record), 3)

        # Determine the expected method name that was called.
        if override_pattern == 'generic':
            method_name = 'update_core'
        elif override_pattern == 'cpu_gpu':
            if isinstance(device, backend.ChainerxDevice):
                xp = device.fallback_device.xp
            else:
                xp = device.xp

            if xp is np:
                method_name = 'update_core_cpu'
            else:
                assert xp is cuda.cupy
                method_name = 'update_core_gpu'
        elif override_pattern == 'cpu_gpu_chx':
            if isinstance(device, backend.ChainerxDevice):
                method_name = 'update_core_chainerx'
            elif device.xp is np:
                method_name = 'update_core_cpu'
            else:
                assert device.xp is cuda.cupy
                method_name = 'update_core_gpu'
        else:
            assert False, override_pattern

        # Check call record.
        # TODO(niboshi): Check the param argument as well.
        self.assertEqual(call_record[0][0], method_name)
        self.assertEqual(call_record[1][0], method_name)
        self.assertEqual(call_record[2][0], method_name) 
Example #24
Source File: test_multiprocess_parallel_updater.py    From chainer with MIT License
def test_cuda_init_forkserver(self):
        ret, stdoutdata, stderrdata = _run_test_snippet(
            'cuda_init.py', '@cupy:0', 'forkserver')
        assert ret == 0, (
            '[stdout]:{!r}\n'
            '[stderr]:{!r}'.format(stdoutdata, stderrdata)) 
Example #25
Source File: test_multiprocess_parallel_updater.py    From chainer with MIT License
def test_cuda_init_spawn(self):
        ret, stdoutdata, stderrdata = _run_test_snippet(
            'cuda_init.py', '@cupy:0', 'spawn')
        assert ret == 0, (
            '[stdout]:{!r}\n'
            '[stderr]:{!r}'.format(stdoutdata, stderrdata)) 
Example #26
Source File: test_multiprocess_parallel_updater.py    From chainer with MIT License
def test_cuda_init_fork(self):
        ret, stdoutdata, stderrdata = _run_test_snippet(
            'cuda_init.py', '@cupy:0', 'fork')
        assert ret == 0, (
            '[stdout]:{!r}\n'
            '[stderr]:{!r}'.format(stdoutdata, stderrdata)) 
Example #27
Source File: test_multiprocess_parallel_updater.py    From chainer with MIT License
def check_with_devices(self, n_devices):
        devices_str = ','.join([
            '@cupy:{}'.format(device_id) for device_id in range(n_devices)])
        ret, stdoutdata, stderrdata = _run_test_snippet(
            'child_reporter.py', devices_str)
        assert ret == 0, (
            '[stdout]:{!r}\n'
            '[stderr]:{!r}'.format(stdoutdata, stderrdata)) 
Example #28
Source File: test_multiprocess_parallel_updater.py    From chainer with MIT License
def test_update_uses_raw_array(self):
        ret, stdoutdata, stderrdata = _run_test_snippet(
            'raw_array.py', '@cupy:0')
        assert ret == 0, (
            '[stdout]:{!r}\n'
            '[stderr]:{!r}'.format(stdoutdata, stderrdata)) 
Example #29
Source File: test_backend.py    From chainer with MIT License
def test_chainerx_cuda_to_cupy_multigpu(self):
        orig = self.orig_chainerx('cuda:0')
        converted = self.send_check_equal(orig, '@cupy:1')
        assert isinstance(converted, cuda.ndarray)
        assert converted.device.id == 1

        # memory must not be shared
        converted_copy = converted.copy()
        with cuda.Device(1):
            converted[:] *= 2
        numpy.testing.assert_array_equal(
            backend.CpuDevice().send(orig),
            backend.CpuDevice().send(converted_copy)) 
Example #30
Source File: test_variable.py    From chainer with MIT License
def test_cleargrad_fill_gpu(self):
        self.check_cleargrad(cuda.cupy.empty(3, dtype=np.float32), fill=True)