Python chainer.cuda.get_device() Examples
The following are 26 code examples of chainer.cuda.get_device(). Each example names its original project and source file, so you can follow the code back to the project it came from. You may also want to check out all other available functions and classes of the chainer.cuda module.
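Before diving into the examples, note how cuda.get_device() is typically called: given a non-negative integer it returns the corresponding cupy.cuda.Device, and given an array it returns the device the array resides on (a dummy device with id -1 for NumPy arrays on the CPU). The returned object can be used either as a context manager or via its .use() method, and both forms appear in the examples below. (In Chainer v3 and later this combined function was deprecated in favor of cuda.get_device_from_id() and cuda.get_device_from_array(); the projects below predate that split.) Here is a minimal sketch of both calling patterns; it is not taken from any project listed here, and the device id is a placeholder:

import numpy as np
from chainer import cuda

gpu_id = 0  # placeholder device id for this sketch

if cuda.available:
    # Pattern 1: select a device by integer id and make it current.
    cuda.get_device(gpu_id).use()
    x = cuda.cupy.zeros((3,), dtype=np.float32)

    # Pattern 2: pass an array; the returned device is the one the
    # array lives on, and the with-block makes it current.
    with cuda.get_device(x):
        y = cuda.cupy.ones_like(x)
else:
    # Without a GPU, get_device() returns a dummy device (id -1),
    # so the same code degrades gracefully on NumPy arrays.
    x = np.zeros((3,), dtype=np.float32)
    with cuda.get_device(x):
        y = np.ones_like(x)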
Example #1
Source File: encoder_decoder.py From knmt with GNU General Public License v3.0

def give_conditionalized_cell(self, src_batch, src_mask,
                              noise_on_prev_word=False, demux=False):
    if self.lexical_probability_dictionary is not None:
        lexicon_probability_matrix = compute_lexicon_matrix(
            src_batch, self.lexical_probability_dictionary, self.Vo)
        if self.xp != np:
            lexicon_probability_matrix = cuda.to_gpu(
                lexicon_probability_matrix,
                cuda.get_device(self.dec.lin_o.W.data))
    else:
        lexicon_probability_matrix = None

    fb_concat = self.enc(src_batch, src_mask)

    mb_size, nb_elems, Hi = fb_concat.data.shape

    return self.dec.give_conditionalized_cell(
        fb_concat, src_mask,
        noise_on_prev_word=noise_on_prev_word,
        lexicon_probability_matrix=lexicon_probability_matrix,
        lex_epsilon=self.lex_epsilon,
        demux=demux)
Example #2
Source File: graph_convolution.py From chainer-graph-cnn with MIT License

def forward_gpu(self, inputs):
    x, W = inputs[:2]
    n_batch, c_in, N = x.shape
    b = inputs[2] if len(inputs) == 3 else None
    xp = cuda.get_array_module(x)
    with cuda.get_device(x.data):
        K = self.K
        LmI_data, LmI_indices, LmI_indptr = self.LmI_tuple
        if x.dtype != LmI_data.dtype:
            LmI_data = LmI_data.astype(x.dtype)
        C = xp.empty((K, N, c_in, n_batch), dtype=x.dtype)
        chebyshev_matvec_gpu(C, x, K, n_batch,
                             LmI_data, LmI_indices, LmI_indptr)
        C = C.transpose((3, 2, 0, 1))
        self.C = C
        y = xp.tensordot(C, W, ((1, 2), (1, 2)))
        if b is not None:
            y += b
        return xp.rollaxis(y, 2, 1),  # y.shape = (n_batch, c_out, N)
Example #3
Source File: graph_convolution.py From chainer-graph-cnn with MIT License

def __call__(self, x):
    """Applies the graph convolutional layer.

    Args:
        x: (~chainer.Variable): Input graph signal.

    Returns:
        ~chainer.Variable: Output of the graph convolution.
    """
    if self.has_uninitialized_params:
        with cuda.get_device(self._device_id):
            self._initialize_params(x.shape[1])
    if self.b is None:
        return self.func(x, self.W)
    else:
        return self.func(x, self.W, self.b)
Example #4
Source File: updater.py From chainer-wasserstein-gan with MIT License

def __init__(self, *, iterator, noise_iterator, optimizer_generator,
             optimizer_critic, device=-1):
    if optimizer_generator.target.name is None:
        optimizer_generator.target.name = 'generator'
    if optimizer_critic.target.name is None:
        optimizer_critic.target.name = 'critic'

    iterators = {'main': iterator, 'z': noise_iterator}
    optimizers = {'generator': optimizer_generator,
                  'critic': optimizer_critic}

    super().__init__(iterators, optimizers, device=device)

    if device >= 0:
        cuda.get_device(device).use()
        [optimizer.target.to_gpu() for optimizer in optimizers.values()]

    self.xp = cuda.cupy if device >= 0 else np
Example #5
Source File: nesterov_ag.py From ram with MIT License

def init_state(self, param, state):
    xp = cuda.get_array_module(param.data)
    with cuda.get_device(param.data):
        state['v'] = xp.zeros_like(param.data)
Example #6
Source File: deel.py From deel with MIT License

def __init__(self, gpu=-1):
    Deel.singleton = self
    Deel.gpu = gpu
    if gpu >= 0:
        cuda.get_device(gpu).use()
    Deel.xp = cuda.cupy if gpu >= 0 else np
Example #7
Source File: weight_clip.py From binary_net with Apache License 2.0

def __call__(self, opt):
    if cuda.available:
        kernel = cuda.elementwise(
            'T low, T high',
            'T p',
            'p = (p < low) ? low : (p > high) ? high : p',
            'weight_clip')

    for param in opt.target.params():
        p = param.data
        with cuda.get_device(p) as dev:
            if int(dev) == -1:
                # Clip in place; without out=p the clipped result
                # would be discarded.
                numpy.clip(p, self.low, self.high, out=p)
            else:
                kernel(self.low, self.high, p)
Example #8
Source File: qlearning.py From malmo-challenge with MIT License

def __init__(self, model, target, device_id=-1,
             learning_rate=0.00025, momentum=.9,
             minibatch_size=32, update_interval=10000):

    assert isinstance(model, ChainerModel), \
        'model should inherit from ChainerModel'

    super(QNeuralNetwork, self).__init__(model.input_shape,
                                         model.output_shape)

    self._gpu_device = None
    self._loss_val = 0

    # Target model update method
    self._steps = 0
    self._target_update_interval = update_interval

    # Setup model and target network
    self._minibatch_size = minibatch_size
    self._model = model
    self._target = target
    self._target.copyparams(self._model)

    # If GPU move to GPU memory
    if device_id >= 0:
        with cuda.get_device(device_id) as device:
            self._gpu_device = device
            self._model.to_gpu(device)
            self._target.to_gpu(device)

    # Setup optimizer
    self._optimizer = Adam(learning_rate, momentum, 0.999)
    self._optimizer.setup(self._model)
Example #9
Source File: graph_max_pooling.py From chainer-graph-cnn with MIT License

def backward_gpu(self, inputs, grad_outputs):
    x = inputs[0]
    xp = cuda.get_array_module(x)
    n_batch, c_in, N = x.shape
    with cuda.get_device(x.data):
        x = x.transpose((2, 1, 0))
        gy = grad_outputs[0]
        N_coarse = gy.shape[2]
        gy = gy.transpose((2, 1, 0))
        gx = xp.zeros((N, c_in, n_batch), dtype=x.dtype)
        gpu_graphpool_bwd(N, N_coarse, self.max_inds, gy, gx)
        gx = gx.transpose((2, 1, 0))
        return gx,
Example #10
Source File: graph_convolution.py From chainer-graph-cnn with MIT License

def backward_gpu(self, inputs, grad_outputs):
    x, W = inputs[:2]
    b = inputs[2] if len(inputs) == 3 else None
    gy = grad_outputs[0]
    xp = cuda.get_array_module(x)
    with cuda.get_device(x.data):
        n_batch, c_in, N = x.shape
        c_out = gy.shape[1]

        gW = xp.tensordot(gy, self.C, ((0, 2), (0, 3))
                          ).astype(W.dtype, copy=False)

        K = self.K
        LmI_data, LmI_indices, LmI_indptr = self.LmI_tuple
        if x.dtype != LmI_data.dtype:
            LmI_data = LmI_data.astype(x.dtype)

        C = xp.empty((K, N, c_out, n_batch), dtype=x.dtype)
        chebyshev_matvec_gpu(C, gy, K, n_batch,
                             LmI_data, LmI_indices, LmI_indptr)
        C = C.transpose((3, 2, 0, 1))
        gx = xp.tensordot(C, W, ((1, 2), (0, 2)))
        gx = xp.rollaxis(gx, 2, 1)

        if b is None:
            return gx, gW
        else:
            gb = gy.sum(axis=(0, 2))
            return gx, gW, gb
Example #11
Source File: graph_convolution.py From chainer-graph-cnn with MIT License

def to_gpu(self, device=None):
    with cuda.get_device(device):
        self.LmI_tuple = tuple(map(cuda.to_gpu, self.LmI_tuple))
Example #12
Source File: graph_convolution.py From chainer-graph-cnn with MIT License

def to_gpu(self, device=None):
    with cuda.get_device(device):
        super(GraphConvolution, self).to_gpu(device)
        self.func.to_gpu(device)
Example #13
Source File: chainer_utility.py From Comicolorization with MIT License

def _concat_arrays(arrays):
    xp = cuda.get_array_module(arrays[0])
    with cuda.get_device(arrays[0]):
        return xp.concatenate([array[None] for array in arrays])
Example #14
Source File: nonbias_weight_decay.py From async-rl with MIT License

def __call__(self, opt):
    if cuda.available:
        kernel = cuda.elementwise(
            'T p, T decay', 'T g', 'g += decay * p', 'weight_decay')

    rate = self.rate
    for name, param in opt.target.namedparams():
        if name == 'b' or name.endswith('/b'):
            continue
        p, g = param.data, param.grad
        with cuda.get_device(p) as dev:
            if int(dev) == -1:
                g += rate * p
            else:
                kernel(p, rate, g)
Example #15
Source File: weight_clip.py From GUINNESS with GNU General Public License v2.0

def __call__(self, opt):
    if cuda.available:
        kernel = cuda.elementwise(
            'T low, T high',
            'T p',
            'p = (p < low) ? low : (p > high) ? high : p',
            'weight_clip')

    for param in opt.target.params():
        p = param.data
        with cuda.get_device(p) as dev:
            if int(dev) == -1:
                # Clip in place; without out=p the clipped result
                # would be discarded.
                numpy.clip(p, self.low, self.high, out=p)
            else:
                kernel(self.low, self.high, p)
Example #16
Source File: lbfgs.py From chainer-dfi with MIT License

def init_state(self, param):
    with cuda.get_device(param.data):
        self.state['s'] = []
Example #17
Source File: chainer_backend.py From Chimp with Apache License 2.0

def set_net(self, net):
    self.source_net = deepcopy(net)
    self.target_net = deepcopy(net)
    if self.gpu:
        cuda.get_device(0).use()
        self.source_net.to_gpu()
        self.target_net.to_gpu()
    self.optimizer.setup(self.source_net)
    self.target_net.train = False
Example #18
Source File: utils.py From imgclsmob with MIT License

def prepare_ch_context(num_gpus):
    use_gpus = (num_gpus > 0)
    if use_gpus:
        cuda.get_device(0).use()
    return use_gpus
Example #19
Source File: eval_ch_in1k-.py From imgclsmob with MIT License

def main():
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    global_config.train = False

    num_gpus = args.num_gpus
    if num_gpus > 0:
        cuda.get_device(0).use()

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_gpus=(num_gpus > 0))
    num_classes = net.classes if hasattr(net, "classes") else 1000
    input_image_size = net.in_size[0] if hasattr(net, "in_size") else args.input_size

    val_iterator, val_dataset_len = get_val_data_iterator(
        data_dir=args.data_dir,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        num_classes=num_classes)

    assert (args.use_pretrained or args.resume.strip())
    test(
        net=net,
        val_iterator=val_iterator,
        val_dataset_len=val_dataset_len,
        num_gpus=num_gpus,
        input_image_size=input_image_size,
        resize_inv_factor=args.resize_inv_factor,
        calc_weight_count=True,
        extended_log=True)
Example #20
Source File: eval_ch_seg-.py From imgclsmob with MIT License

def main():
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    global_config.train = False

    num_gpus = args.num_gpus
    if num_gpus > 0:
        cuda.get_device(0).use()

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        net_extra_kwargs={"aux": False, "fixed_size": False},
        use_gpus=(num_gpus > 0))

    test_dataset = get_test_dataset(
        dataset_name=args.dataset,
        dataset_dir=args.data_dir)

    assert (args.use_pretrained or args.resume.strip())
    test(
        net=net,
        test_dataset=test_dataset,
        num_gpus=num_gpus,
        num_classes=args.num_classes,
        calc_weight_count=True,
        extended_log=True,
        dataset_metainfo=get_metainfo(args.dataset))
Example #21
Source File: eval_ch_cifar-.py From imgclsmob with MIT License

def main():
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    global_config.train = False

    num_gpus = args.num_gpus
    if num_gpus > 0:
        cuda.get_device(0).use()

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_gpus=(num_gpus > 0))

    val_iterator, val_dataset_len = get_val_data_iterator(
        dataset_name=args.dataset,
        batch_size=args.batch_size,
        num_workers=args.num_workers)

    assert (args.use_pretrained or args.resume.strip())
    test(
        net=net,
        val_iterator=val_iterator,
        val_dataset_len=val_dataset_len,
        num_gpus=num_gpus,
        calc_weight_count=True,
        extended_log=True)
Example #22
Source File: scheduled_adam.py From knmt with GNU General Public License v3.0

def init_state(self, param, state):
    xp = cuda.get_array_module(param.data)
    with cuda.get_device(param.data):
        state['m'] = xp.zeros_like(param.data)
        state['v'] = xp.zeros_like(param.data)
Example #23
Source File: encoder_decoder.py From knmt with GNU General Public License v3.0

def compute_lexicon_probability_matrix(self, src_batch):
    if self.lexical_probability_dictionary is not None:
        lexicon_probability_matrix = compute_lexicon_matrix(
            src_batch, self.lexical_probability_dictionary, self.Vo)
        if self.xp != np:
            lexicon_probability_matrix = cuda.to_gpu(
                lexicon_probability_matrix,
                cuda.get_device(self.dec.lin_o.W.data))
    else:
        lexicon_probability_matrix = None
    return lexicon_probability_matrix
Example #24
Source File: evaluate.py From fcn with MIT License

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('model_file')
    parser.add_argument('-g', '--gpu', default=0, type=int,
                        help='if -1, use cpu only (default: 0)')
    args = parser.parse_args()

    dataset = fcn.datasets.VOC2011ClassSeg('seg11valid')
    n_class = len(dataset.class_names)

    basename = osp.basename(args.model_file).lower()
    if basename.startswith('fcn8s-atonce') or \
            basename.startswith('fcn8satonce'):
        model_name = 'FCN8sAtOnce'
    else:
        match = re.match('^fcn(32|16|8)s.*$', basename)
        if match is None:
            print('Unsupported model filename: %s' % args.model_file)
            quit(1)
        model_name = 'FCN%ss' % match.groups()[0]
    model_class = getattr(fcn.models, model_name)
    model = model_class(n_class=n_class)
    chainer.serializers.load_npz(args.model_file, model)

    if args.gpu >= 0:
        cuda.get_device(args.gpu).use()
        model.to_gpu()

    lbl_preds, lbl_trues = [], []
    for i in tqdm.trange(len(dataset)):
        datum, lbl_true = fcn.datasets.transform_lsvrc2012_vgg16(
            dataset.get_example(i))
        x_data = np.expand_dims(datum, axis=0)
        if args.gpu >= 0:
            x_data = cuda.to_gpu(x_data)
        with chainer.no_backprop_mode():
            x = chainer.Variable(x_data)
            with chainer.using_config('train', False):
                model(x)
        lbl_pred = chainer.functions.argmax(model.score, axis=1)[0]
        lbl_pred = chainer.cuda.to_cpu(lbl_pred.data)
        lbl_preds.append(lbl_pred)
        lbl_trues.append(lbl_true)

    acc, acc_cls, mean_iu, fwavacc = \
        fcn.utils.label_accuracy_score(lbl_trues, lbl_preds, n_class)
    print('Accuracy: %.4f' % (100 * acc))
    print('AccClass: %.4f' % (100 * acc_cls))
    print('Mean IoU: %.4f' % (100 * mean_iu))
    print('Fwav Acc: %.4f' % (100 * fwavacc))
Example #25
Source File: train_ch_cifar.py From imgclsmob with MIT License

def main():
    args = parse_args()
    args.seed = init_rand(seed=args.seed)

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    num_gpus = args.num_gpus
    if num_gpus > 0:
        cuda.get_device(0).use()
    batch_size = args.batch_size

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        num_gpus=num_gpus)

    train_iter, val_iter = get_data_iterators(
        batch_size=batch_size,
        num_workers=args.num_workers)

    trainer = prepare_trainer(
        net=net,
        optimizer_name=args.optimizer_name,
        lr=args.lr,
        momentum=args.momentum,
        num_epochs=args.num_epochs,
        train_iter=train_iter,
        val_iter=val_iter,
        logging_dir_path=args.save_dir,
        num_gpus=num_gpus)

    # if args.save_dir and args.save_interval:
    #     lp_saver = TrainLogParamSaver(
    #         checkpoint_file_name_prefix='imagenet_{}'.format(args.model),
    #         last_checkpoint_file_name_suffix="last",
    #         best_checkpoint_file_name_suffix=None,
    #         last_checkpoint_dir_path=args.save_dir,
    #         best_checkpoint_dir_path=None,
    #         last_checkpoint_file_count=2,
    #         best_checkpoint_file_count=2,
    #         checkpoint_file_save_callback=save_params,
    #         checkpoint_file_exts=['.npz', '.states'],
    #         save_interval=args.save_interval,
    #         num_epochs=args.num_epochs,
    #         param_names=['Val.Top1', 'Train.Top1', 'Val.Top5', 'Train.Loss', 'LR'],
    #         acc_ind=2,
    #         # bigger=[True],
    #         # mask=None,
    #         score_log_file_path=os.path.join(args.save_dir, 'score.log'),
    #         score_log_attempt_value=args.attempt,
    #         best_map_log_file_path=os.path.join(args.save_dir, 'best_map.log'))
    # else:
    #     lp_saver = None

    trainer.run()
Example #26
Source File: evaluate.py From Guided-Attention-Inference-Network with MIT License

def evaluate():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--file', type=str, help='model file path')
    args = parser.parse_args()
    file = args.file
    print("evaluating: ", file)

    dataset = fcn.datasets.VOC2011ClassSeg('seg11valid')
    n_class = len(dataset.class_names)

    model = FCN8s()
    chainer.serializers.load_npz(file, model)

    gpu = 0
    if gpu >= 0:
        cuda.get_device(gpu).use()
        model.to_gpu()

    lbl_preds, lbl_trues = [], []
    for i in tqdm.trange(len(dataset)):
        datum, lbl_true = fcn.datasets.transform_lsvrc2012_vgg16(
            dataset.get_example(i))
        x_data = np.expand_dims(datum, axis=0)
        if gpu >= 0:
            x_data = cuda.to_gpu(x_data)
        with chainer.no_backprop_mode():
            x = chainer.Variable(x_data)
            with chainer.using_config('train', False):
                model(x)
        lbl_pred = chainer.functions.argmax(model.score, axis=1)[0]
        lbl_pred = chainer.cuda.to_cpu(lbl_pred.data)
        lbl_preds.append(lbl_pred)
        lbl_trues.append(lbl_true)

    acc, acc_cls, mean_iu, fwavacc = fcn.utils.label_accuracy_score(
        lbl_trues, lbl_preds, n_class)
    print('Accuracy: %.4f' % (100 * acc))
    print('AccClass: %.4f' % (100 * acc_cls))
    print('Mean IoU: %.4f' % (100 * mean_iu))
    print('Fwav Acc: %.4f' % (100 * fwavacc))