Python chainer.backends.cuda.available() Examples
The following are 9 code examples of chainer.backends.cuda.available(). You can go to the original project or source file by following the links above each example, and you may also want to check out all available functions and classes of the chainer.backends.cuda module.
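As context for the snippets below, here is a minimal sketch of the pattern they all share: treating chainer.backends.cuda.available as a boolean guard and only touching CuPy when it is True. The helper name, shape, and dtype are illustrative, not part of the Chainer API.

import numpy
from chainer.backends import cuda

def zeros_for_backend(shape):
    # cuda.available is True only when CuPy is installed and a CUDA
    # device can actually be used, so every CuPy access below is
    # guarded by it; otherwise we fall back to NumPy.
    if cuda.available:
        return cuda.cupy.zeros(shape, dtype=numpy.float32)
    return numpy.zeros(shape, dtype=numpy.float32)

print(zeros_for_backend((2, 3)).shape)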
Example #1
Source File: test_link.py From chainer with MIT License
def setUp(self):
    x_shape_0 = 2
    x_shape_1 = numpy.int64(3)
    self.link = chainer.Link(x=((x_shape_0, x_shape_1), 'd'),
                             u=(None, 'd'))
    with self.link.init_scope():
        self.link.y = chainer.Parameter(shape=(2,))
        self.link.v = chainer.Parameter()
    self.p = numpy.array([1, 2, 3], dtype='f')
    self.link.add_persistent('p', self.p)
    self.link.name = 'a'
    self.link.x.update_rule = chainer.UpdateRule()
    self.link.x.update_rule.enabled = False
    self.link.u.update_rule = chainer.UpdateRule()
    if cuda.available:
        self.current_device_id = cuda.cupy.cuda.get_device_id()
Example #2
Source File: test_link.py From chainer with MIT License
def setUp(self):
    self.link = chainer.Link()
    shape = (2, 2)
    dtype = numpy.float32
    y_array = numpy.random.rand(*shape).astype(dtype)
    pa_array = numpy.random.rand(*shape).astype(dtype)
    ps_scalar = 2.4

    with self.link.init_scope():
        # Initialized parameter
        self.link.y = chainer.Parameter(y_array)
        # Uninitialized parameter
        self.link.v = chainer.Parameter()
        # Persistent ndarray
        self.link.add_persistent('pa', pa_array)
        # Persistent scalar
        self.link.add_persistent('ps', ps_scalar)

    self.y_array = y_array
    self.pa_array = pa_array
    self.ps_scalar = ps_scalar

    if cuda.available:
        self.current_device_id = cuda.cupy.cuda.get_device_id()
Example #3
Source File: test_link.py From chainer with MIT License
def tearDown(self):
    if cuda.available \
            and cuda.cupy.cuda.get_device_id() != self.current_device_id:
        cuda.Device(self.current_device_id).use()
Example #4
Source File: conftest.py From chainer with MIT License
def pytest_runtest_teardown(item, nextitem):
    if cuda.available:
        assert cuda.cupy.cuda.runtime.getDevice() == 0

# testing.run_module(__name__, __file__)
Example #5
Source File: test_cuda.py From chainer with MIT License
def test_to_gpu_unavailable(self):
    x = numpy.array([1])
    if not cuda.available:
        with self.assertRaises(RuntimeError):
            cuda.to_gpu(x)
Example #6
Source File: _runtime_info.py From chainer with MIT License
def __init__(self):
    self.chainer_version = chainer.__version__
    self.chainerx_available = chainerx.is_available()
    self.numpy_version = numpy.__version__
    self.platform_version = platform.platform()
    if cuda.available:
        self.cuda_info = cuda.cupyx.get_runtime_info()
    else:
        self.cuda_info = None
    if intel64.is_ideep_available():
        self.ideep_version = intel64.ideep.__version__
    else:
        self.ideep_version = None
Example #7
Source File: backend.py From chainer with MIT License
def get_array_module(*args):
    """Gets an appropriate NumPy-compatible module to process arguments

    This function will return their data arrays' array module for
    :class:`~chainer.Variable` arguments.

    Args:
        args: Values to determine whether NumPy, CuPy, or ChainerX should be
            used.

    Returns:
        module: :mod:`numpy`, :mod:`cupy`, or :mod:`chainerx` is returned
        based on the types of the arguments.
    """
    is_chainerx_available = chainerx.is_available()
    if is_chainerx_available or cuda.available:
        arrays = []
        for arg in args:
            # Unwrap arrays
            if isinstance(arg, chainer.variable.Variable):
                array = arg.data
            else:
                array = arg
            if is_chainerx_available and isinstance(array, chainerx.ndarray):
                return chainerx
            arrays.append(array)
        if cuda.available:
            return cuda.cupy.get_array_module(*arrays)
    return numpy
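A short usage sketch of the function above, assuming the public chainer.backend.get_array_module entry point defined in this backend.py; the variable values are illustrative. The returned module lets the same call operate on NumPy, CuPy, or ChainerX data.

import numpy
import chainer
from chainer import backend

x = chainer.Variable(numpy.arange(6, dtype=numpy.float32).reshape(2, 3))
xp = backend.get_array_module(x)  # numpy here; cupy if x lived on a GPU
y = xp.tanh(x.data)               # backend-agnostic array call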
Example #8
Source File: random.py From chainer with MIT License
def do_setup(deterministic=True):
    if cuda.available:
        cuda.cupy.testing.random.do_setup(deterministic)
    else:
        _numpy_do_setup(deterministic)
Example #9
Source File: random.py From chainer with MIT License
def do_teardown():
    if cuda.available:
        cuda.cupy.testing.random.do_teardown()
    else:
        _numpy_do_teardown()

# In some tests (which utilize condition.repeat or condition.retry),
# setUp/tearDown is nested. _setup_random() and _teardown_random() do their
# work only in the outermost setUp/tearDown pair.
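The _numpy_do_setup and _numpy_do_teardown helpers called in Examples #8 and #9 are not shown on this page. Purely as an assumption about their role, a rough NumPy-only sketch might save the global RNG state, reseed it deterministically, and restore it afterwards:

import numpy

_saved_state = None

def _numpy_do_setup(deterministic=True):
    # Hypothetical fallback: remember NumPy's global RNG state and,
    # when requested, reseed it so the test run is reproducible.
    global _saved_state
    _saved_state = numpy.random.get_state()
    if deterministic:
        numpy.random.seed(100)

def _numpy_do_teardown():
    # Hypothetical fallback: restore whatever RNG state the test found.
    global _saved_state
    if _saved_state is not None:
        numpy.random.set_state(_saved_state)
        _saved_state = None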