Python caffe.set_mode_gpu() Examples
The following are 30 code examples of caffe.set_mode_gpu(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module caffe, or try the search function.
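Before the project examples, here is a minimal, self-contained sketch of the usual mode-selection pattern; the helper name, the gpu_id parameter, and the CPU fallback are illustrative assumptions rather than code from any project below.

import caffe

def select_caffe_mode(gpu_id=None):
    """Pick Caffe's compute mode; pass gpu_id=None to fall back to the CPU."""
    if gpu_id is not None:
        caffe.set_device(gpu_id)  # choose which GPU subsequent Net/Solver calls will use
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()

# e.g. call select_caffe_mode(gpu_id=0) before constructing a caffe.Net or solver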
Example #1
Source File: action_caffe.py From DTPP with BSD 2-Clause "Simplified" License | 6 votes |
def __init__(self, net_proto, net_weights, device_id, input_size=None):
    caffe.set_mode_gpu()
    caffe.set_device(device_id)
    self._net = caffe.Net(net_proto, net_weights, caffe.TEST)

    input_shape = self._net.blobs['data'].data.shape

    if input_size is not None:
        input_shape = input_shape[:2] + input_size

    transformer = caffe.io.Transformer({'data': input_shape})

    #if self._net.blobs['data'].data.shape[1] == 3:
    #printf
    #    transformer.set_transpose('data', (2, 0, 1))  # move image channels to outermost dimension
    #    transformer.set_mean('data', np.array([104, 117, 123]))  # subtract the dataset-mean value in each channel
    #else:
    #    pass  # non RGB data need not use transformer

    self._transformer = transformer
    self._sample_shape = self._net.blobs['data'].data.shape
Example #2
Source File: train.py From Deep-Learning-Based-Structural-Damage-Detection with MIT License | 6 votes |
def solve(proto, snapshot, gpus, timing, uid, rank):
    caffe.set_mode_gpu()
    caffe.set_device(gpus[rank])
    caffe.set_solver_count(len(gpus))
    caffe.set_solver_rank(rank)
    caffe.set_multiprocess(True)

    solver = caffe.SGDSolver(proto)
    if snapshot and len(snapshot) != 0:
        solver.restore(snapshot)

    nccl = caffe.NCCL(solver, uid)
    nccl.bcast()

    if timing and rank == 0:
        time(solver, nccl)
    else:
        solver.add_callback(nccl)

    if solver.param.layer_wise_reduce:
        solver.net.after_backward(nccl)
    solver.step(solver.param.max_iter)
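A solve() function like the one above (the same code recurs in several later examples) is normally spawned once per GPU by a parent process, in the style of BVLC Caffe's python/train.py. The driver below is a hedged sketch of that pattern, assuming a Caffe build with NCCL support where caffe.NCCL.new_uid() and caffe.init_log() are available; it is not part of the project this example comes from.

from multiprocessing import Process

import caffe

def train(proto, snapshot, gpus, timing=False):
    uid = caffe.NCCL.new_uid()  # shared id so the per-GPU solvers can rendezvous
    caffe.init_log()            # initialize glog once in the parent process
    procs = []
    for rank in range(len(gpus)):
        p = Process(target=solve, args=(proto, snapshot, gpus, timing, uid, rank))
        p.daemon = True
        p.start()
        procs.append(p)
    for p in procs:
        p.join()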
Example #3
Source File: action_caffe.py From ECO-pytorch with BSD 2-Clause "Simplified" License | 6 votes |
def __init__(self, net_proto, net_weights, device_id, input_size=None):
    caffe.set_mode_gpu()
    caffe.set_device(device_id)
    self._net = caffe.Net(net_proto, net_weights, caffe.TEST)

    input_shape = self._net.blobs['data'].data.shape

    if input_size is not None:
        input_shape = input_shape[:2] + input_size

    transformer = caffe.io.Transformer({'data': input_shape})

    if self._net.blobs['data'].data.shape[1] == 3:
        transformer.set_transpose('data', (2, 0, 1))  # move image channels to outermost dimension
        transformer.set_mean('data', np.array([104, 117, 123]))  # subtract the dataset-mean value in each channel
    else:
        pass  # non RGB data need not use transformer

    self._transformer = transformer
    self._sample_shape = self._net.blobs['data'].data.shape
Example #4
Source File: colorize.py From reddit_crawlers with BSD 2-Clause "Simplified" License | 6 votes |
def loadDNN(useGpu=False):
    global net, W_in, H_in, H_out, W_out, lm_lab_l_rs

    if useGpu:
        gpu_id = 0
        caffe.set_mode_gpu()
        caffe.set_device(gpu_id)

    net = caffe.Net('colorization_deploy_v0.prototxt', 'colorization_release_v0.caffemodel', caffe.TEST)
    print '\n done loading network! \n'

    (H_in, W_in) = net.blobs['data_l'].data.shape[2:]     # get input shape
    (H_out, W_out) = net.blobs['class8_ab'].data.shape[2:]  # get output shape
    net.blobs['Trecip'].data[...] = 6/np.log(10)  # 1/T, set annealing temperature

    l_mean = sio.loadmat('ilsvrc_2012_mean.mat')
    lm = np.array(l_mean['mean_data'])
    lm = lm/np.max(lm)
    lm_lab = color.rgb2lab(lm)
    lm_lab_l = lm_lab[:, :, 0]
    lm_lab_l = lm_lab_l - np.mean(np.mean(lm_lab_l)) + 50
    lm_lab_l = Image.fromarray(lm_lab_l)
    lm_lab_l_rs = lm_lab_l.resize((W_in, H_in), Image.ANTIALIAS)
Example #5
Source File: train.py From tripletloss with MIT License | 6 votes |
def __init__(self, solver_prototxt, output_dir, pretrained_model=None):
    """Initialize the SolverWrapper."""
    self.output_dir = output_dir

    caffe.set_mode_gpu()
    caffe.set_device(0)

    self.solver = caffe.SGDSolver(solver_prototxt)
    if pretrained_model is not None:
        print ('Loading pretrained model '
               'weights from {:s}').format(pretrained_model)
        self.solver.net.copy_from(pretrained_model)

    self.solver_param = caffe_pb2.SolverParameter()
    with open(solver_prototxt, 'rt') as f:
        pb2.text_format.Merge(f.read(), self.solver_param)
Example #6
Source File: predict.py From cloudless with Apache License 2.0 | 6 votes |
def _initialize_caffe(deploy_file, input_weight_file, training_mean_pickle,
                      inference_width, inference_height):
    """
    Initializes Caffe to prepare to run some data through the model for inference.
    """
    caffe.set_mode_gpu()
    net = caffe.Net(deploy_file, input_weight_file, caffe.TEST)

    # input preprocessing: 'data' is the name of the input blob == net.inputs[0]
    transformer = caffe.io.Transformer({"data": net.blobs["data"].data.shape})
    # PIL.Image loads the data with the channel last.
    transformer.set_transpose("data", (2, 0, 1))
    # Mean pixel.
    transformer.set_mean("data", np.load(training_mean_pickle).mean(1).mean(1))
    # The reference model operates on images in [0, 255] range instead of [0, 1].
    transformer.set_raw_scale("data", 255)
    # The reference model has channels in BGR order instead of RGB.
    transformer.set_channel_swap("data", (2, 1, 0))

    net.blobs["data"].reshape(1, 3, inference_height, inference_width)

    return (net, transformer)
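The (net, transformer) pair returned above is typically consumed by a small forward-pass helper. The sketch below is an assumption, not code from the cloudless project: it presumes the input blob is named "data", as in this example, and reads an RGB image in [0, 1] with caffe.io.load_image before the transformer rescales and reorders it.

import caffe

def classify_one(net, transformer, image_path):
    image = caffe.io.load_image(image_path)  # HxWx3 float array in [0, 1], RGB order
    net.blobs["data"].data[...] = transformer.preprocess("data", image)
    output = net.forward()                   # dict mapping output blob names to arrays
    return output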
Example #7
Source File: ssd_net.py From Hand-Keypoint-Detection with GNU General Public License v3.0 | 6 votes |
def __init__(self, model_weights, model_def, threshold=0.5, GPU_MODE=False):
    if GPU_MODE:
        caffe.set_device(0)
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()

    self.net = caffe.Net(model_def,      # defines the structure of the model
                         model_weights,  # contains the trained weights
                         caffe.TEST)     # use test mode (e.g., don't perform dropout)
    self.threshold = threshold
    self.transformer = caffe.io.Transformer({'data': self.net.blobs['data'].data.shape})
    self.transformer.set_transpose('data', (2, 0, 1))
    self.transformer.set_mean('data', np.array([127.0, 127.0, 127.0]))  # mean pixel
    self.transformer.set_raw_scale('data', 255)  # the reference model operates on images in [0,255] range instead of [0,1]
    self.transformer.set_channel_swap('data', (2, 1, 0))  # the reference model has channels in BGR order instead of RGB
    image_resize = 300
    self.net.blobs['data'].reshape(1, 3, image_resize, image_resize)
Example #8
Source File: makeNet.py From calc with BSD 3-Clause "New" or "Revised" License | 6 votes |
def train(solver_proto_path, snapshot_solver_path=None, init_weights=None, GPU_ID=0):
    """
    Train the defined net. While we did not use this function for our final net,
    we used the caffe executable for multi-gpu use, this was used for prototyping
    """
    import time
    t0 = time.time()
    caffe.set_mode_gpu()
    caffe.set_device(GPU_ID)
    solver = caffe.get_solver(solver_proto_path)
    if snapshot_solver_path is not None:
        solver.solve(snapshot_solver_path)  # train from previous solverstate
    else:
        if init_weights is not None:
            solver.net.copy_from(init_weights)  # for copying weights from a model without solverstate
        solver.solve()  # train from scratch
    t1 = time.time()
    print 'Total training time: ', t1-t0, ' sec'
    model_dir = "calc_" + time.strftime("%d-%m-%Y_%I%M%S")
    moveModel(model_dir=model_dir)  # move all the model files to a directory
    print "Moved model to model/"+model_dir
Example #9
Source File: example.py From hocrux with Apache License 2.0 | 6 votes |
def get_net(caffemodel, deploy_file, use_gpu=True):
    """
    Returns an instance of caffe.Net

    Arguments:
    caffemodel -- path to a .caffemodel file
    deploy_file -- path to a .prototxt file

    Keyword arguments:
    use_gpu -- if True, use the GPU for inference
    """
    if use_gpu:
        caffe.set_mode_gpu()

    # load a new model
    return caffe.Net(deploy_file, caffemodel, caffe.TEST)
Example #10
Source File: rasterize_jon_scratch.py From hocrux with Apache License 2.0 | 6 votes |
def get_net(caffemodel, deploy_file, use_gpu=True):
    """
    Returns an instance of caffe.Net

    Arguments:
    caffemodel -- path to a .caffemodel file
    deploy_file -- path to a .prototxt file

    Keyword arguments:
    use_gpu -- if True, use the GPU for inference
    """
    if use_gpu:
        caffe.set_mode_gpu()

    # load a new model
    return caffe.Net(deploy_file, caffemodel, caffe.TEST)
Example #11
Source File: example.py From hocrux with Apache License 2.0 | 6 votes |
def get_net(caffemodel, deploy_file, use_gpu=True):
    """
    Returns an instance of caffe.Net

    Arguments:
    caffemodel -- path to a .caffemodel file
    deploy_file -- path to a .prototxt file

    Keyword arguments:
    use_gpu -- if True, use the GPU for inference
    """
    if use_gpu:
        caffe.set_mode_gpu()

    # load a new model
    return caffe.Net(deploy_file, caffemodel, caffe.TEST)
Example #12
Source File: rasterize.py From hocrux with Apache License 2.0 | 6 votes |
def get_net(caffemodel, deploy_file, use_gpu=True):
    """
    Returns an instance of caffe.Net

    Arguments:
    caffemodel -- path to a .caffemodel file
    deploy_file -- path to a .prototxt file

    Keyword arguments:
    use_gpu -- if True, use the GPU for inference
    """
    if use_gpu:
        caffe.set_mode_gpu()

    # load a new model
    return caffe.Net(deploy_file, caffemodel, caffe.TEST)
Example #13
Source File: rasterize_jon_scratch.py From hocrux with Apache License 2.0 | 6 votes |
def get_net(caffemodel, deploy_file, use_gpu=True):
    """
    Returns an instance of caffe.Net

    Arguments:
    caffemodel -- path to a .caffemodel file
    deploy_file -- path to a .prototxt file

    Keyword arguments:
    use_gpu -- if True, use the GPU for inference
    """
    if use_gpu:
        caffe.set_mode_gpu()

    # load a new model
    return caffe.Net(deploy_file, caffemodel, caffe.TEST)
Example #14
Source File: features.py From retrieval-2016-deepvision with MIT License | 6 votes |
def __init__(self, params):
    self.dimension = params['dimension']
    self.dataset = params['dataset']
    self.pooling = params['pooling']

    # Read image lists
    with open(params['query_list'], 'r') as f:
        self.query_names = f.read().splitlines()

    with open(params['frame_list'], 'r') as f:
        self.database_list = f.read().splitlines()

    # Parameters needed
    self.layer = params['layer']
    self.save_db_feats = params['database_feats']

    # Init network
    if params['gpu']:
        caffe.set_mode_gpu()
        caffe.set_device(0)
    else:
        caffe.set_mode_cpu()
    print "Extracting from:", params['net_proto']
    cfg.TEST.HAS_RPN = True
    self.net = caffe.Net(params['net_proto'], params['net'], caffe.TEST)
Example #15
Source File: colorize_image.py From interactive-deep-colorization with MIT License | 6 votes |
def prep_net(self, gpu_id, prototxt_path='', caffemodel_path=''):
    import caffe
    print('gpu_id = %d, net_path = %s, model_path = %s' % (gpu_id, prototxt_path, caffemodel_path))
    if gpu_id == -1:
        caffe.set_mode_cpu()
    else:
        caffe.set_device(gpu_id)
        caffe.set_mode_gpu()
    self.gpu_id = gpu_id
    self.net = caffe.Net(prototxt_path, caffemodel_path, caffe.TEST)
    self.net_set = True

    # automatically set cluster centers
    if len(self.net.params[self.pred_ab_layer][0].data[...].shape) == 4 and self.net.params[self.pred_ab_layer][0].data[...].shape[1] == 313:
        print('Setting ab cluster centers in layer: %s' % self.pred_ab_layer)
        self.net.params[self.pred_ab_layer][0].data[:, :, 0, 0] = self.pts_in_hull.T

    # automatically set upsampling kernel
    for layer in self.net._layer_names:
        if layer[-3:] == '_us':
            print('Setting upsampling layer kernel: %s' % layer)
            self.net.params[layer][0].data[:, 0, :, :] = np.array(((.25, .5, .25, 0), (.5, 1., .5, 0), (.25, .5, .25, 0), (0, 0, 0, 0)))[np.newaxis, :, :]

# ***** Call forward *****
Example #16
Source File: train_large_file.py From uai-sdk with Apache License 2.0 | 6 votes |
def solve(proto, snapshot, gpus, timing, uid, rank):
    caffe.set_mode_gpu()
    caffe.set_device(gpus[rank])
    caffe.set_solver_count(len(gpus))
    caffe.set_solver_rank(rank)
    caffe.set_multiprocess(True)

    solver = caffe.SGDSolver(proto)
    if snapshot and len(snapshot) != 0:
        solver.restore(snapshot)

    nccl = caffe.NCCL(solver, uid)
    nccl.bcast()

    if timing and rank == 0:
        time(solver, nccl)
    else:
        solver.add_callback(nccl)

    if solver.param.layer_wise_reduce:
        solver.net.after_backward(nccl)
    solver.step(solver.param.max_iter)
Example #17
Source File: multigpu.py From DTPP with BSD 2-Clause "Simplified" License | 6 votes |
def solve(proto, snapshot, gpus, timing, uid, rank):
    caffe.set_mode_gpu()
    caffe.set_device(gpus[rank])
    caffe.set_solver_count(len(gpus))
    caffe.set_solver_rank(rank)
    caffe.set_multiprocess(True)

    solver = caffe.SGDSolver(proto)
    if snapshot and len(snapshot) != 0:
        solver.restore(snapshot)

    nccl = caffe.NCCL(solver, uid)
    nccl.bcast()

    if timing and rank == 0:
        time(solver, nccl)
    else:
        solver.add_callback(nccl)

    if solver.param.layer_wise_reduce:
        solver.net.after_backward(nccl)
    solver.step(solver.param.max_iter)
Example #18
Source File: train.py From uai-sdk with Apache License 2.0 | 6 votes |
def solve(proto, snapshot, gpus, timing, uid, rank):
    caffe.set_mode_gpu()
    caffe.set_device(gpus[rank])
    caffe.set_solver_count(len(gpus))
    caffe.set_solver_rank(rank)
    caffe.set_multiprocess(True)

    solver = caffe.SGDSolver(proto)
    if snapshot and len(snapshot) != 0:
        solver.restore(snapshot)

    nccl = caffe.NCCL(solver, uid)
    nccl.bcast()

    if timing and rank == 0:
        time(solver, nccl)
    else:
        solver.add_callback(nccl)

    if solver.param.layer_wise_reduce:
        solver.net.after_backward(nccl)
    solver.step(solver.param.max_iter)
Example #19
Source File: end2end_test_caffe.py From TPN with MIT License | 6 votes |
def load_models(args):
    # load rnn model
    caffe.set_mode_gpu()
    if args.gpus is None:
        caffe.set_device(args.job_id - 1)
    else:
        assert args.job_id <= len(args.gpus)
        caffe.set_device(args.gpus[args.job_id-1])
    if args.lstm_param is not '':
        rnn_net = caffe.Net(args.lstm_def, args.lstm_param, caffe.TEST)
        print 'Loaded RNN network from {:s}.'.format(args.lstm_def)
    else:
        rnn_net = caffe.Net(args.lstm_def, caffe.TEST)
        print 'WARNING: dummy RNN network created.'

    # load feature model
    feature_net = caffe.Net(args.def_file, args.param, caffe.TEST)
    print 'Loaded feature network from {:s}.'.format(args.def_file)

    return feature_net, rnn_net
Example #20
Source File: sequence_roi_train.py From TPN with MIT License | 6 votes |
def load_nets(args, cur_gpu):
    # initialize solver and feature net,
    # RNN should be initialized before CNN, because CNN cudnn conv layers
    # may assume using all available memory
    caffe.set_mode_gpu()
    caffe.set_device(cur_gpu)
    solver = caffe.SGDSolver(args.solver)
    if args.snapshot:
        print "Restoring history from {}".format(args.snapshot)
        solver.restore(args.snapshot)
    net = solver.net
    if args.weights:
        print "Copying weights from {}".format(args.weights)
        net.copy_from(args.weights)
    return solver, net
Example #21
Source File: tpn_train.py From TPN with MIT License | 6 votes |
def load_nets(args, cur_gpu):
    # initialize solver and feature net,
    # RNN should be initialized before CNN, because CNN cudnn conv layers
    # may assume using all available memory
    caffe.set_mode_gpu()
    caffe.set_device(cur_gpu)
    solver = caffe.SGDSolver(args.solver)
    if args.snapshot:
        print "Restoring history from {}".format(args.snapshot)
        solver.restore(args.snapshot)
    rnn = solver.net
    if args.weights:
        rnn.copy_from(args.weights)
    feature_net = caffe.Net(args.feature_net, args.feature_param, caffe.TEST)

    # apply bbox regression normalization on the net weights
    with open(args.bbox_mean, 'rb') as f:
        bbox_means = cPickle.load(f)
    with open(args.bbox_std, 'rb') as f:
        bbox_stds = cPickle.load(f)
    feature_net.params['bbox_pred_vid'][0].data[...] = \
        feature_net.params['bbox_pred_vid'][0].data * bbox_stds[:, np.newaxis]
    feature_net.params['bbox_pred_vid'][1].data[...] = \
        feature_net.params['bbox_pred_vid'][1].data * bbox_stds + bbox_means

    return solver, feature_net, rnn, bbox_means, bbox_stds
Example #22
Source File: train.py From uai-sdk with Apache License 2.0 | 6 votes |
def solve(proto, snapshot, gpus, timing, uid, rank):
    caffe.set_mode_gpu()
    caffe.set_device(gpus[rank])
    caffe.set_solver_count(len(gpus))
    caffe.set_solver_rank(rank)
    caffe.set_multiprocess(True)

    solver = caffe.SGDSolver(proto)
    if snapshot and len(snapshot) != 0:
        solver.restore(snapshot)

    nccl = caffe.NCCL(solver, uid)
    nccl.bcast()

    if timing and rank == 0:
        time(solver, nccl)
    else:
        solver.add_callback(nccl)

    if solver.param.layer_wise_reduce:
        solver.net.after_backward(nccl)
    solver.step(solver.param.max_iter)
Example #23
Source File: train.py From uai-sdk with Apache License 2.0 | 6 votes |
def solve(proto, snapshot, gpus, timing, uid, rank):
    caffe.set_mode_gpu()
    caffe.set_device(gpus[rank])
    caffe.set_solver_count(len(gpus))
    caffe.set_solver_rank(rank)
    caffe.set_multiprocess(True)

    solver = caffe.SGDSolver(proto)
    if snapshot and len(snapshot) != 0:
        solver.restore(snapshot)

    nccl = caffe.NCCL(solver, uid)
    nccl.bcast()

    if timing and rank == 0:
        time(solver, nccl)
    else:
        solver.add_callback(nccl)

    if solver.param.layer_wise_reduce:
        solver.net.after_backward(nccl)
    solver.step(solver.param.max_iter)
Example #24
Source File: action_caffe.py From temporal-segment-networks with BSD 2-Clause "Simplified" License | 6 votes |
def __init__(self, net_proto, net_weights, device_id, input_size=None):
    caffe.set_mode_gpu()
    caffe.set_device(device_id)
    self._net = caffe.Net(net_proto, net_weights, caffe.TEST)

    input_shape = self._net.blobs['data'].data.shape

    if input_size is not None:
        input_shape = input_shape[:2] + input_size

    transformer = caffe.io.Transformer({'data': input_shape})

    if self._net.blobs['data'].data.shape[1] == 3:
        transformer.set_transpose('data', (2, 0, 1))  # move image channels to outermost dimension
        transformer.set_mean('data', np.array([104, 117, 123]))  # subtract the dataset-mean value in each channel
    else:
        pass  # non RGB data need not use transformer

    self._transformer = transformer
    self._sample_shape = self._net.blobs['data'].data.shape
Example #25
Source File: example.py From hocrux with Apache License 2.0 | 6 votes |
def get_net(caffemodel, deploy_file, use_gpu=True):
    """
    Returns an instance of caffe.Net

    Arguments:
    caffemodel -- path to a .caffemodel file
    deploy_file -- path to a .prototxt file

    Keyword arguments:
    use_gpu -- if True, use the GPU for inference
    """
    if use_gpu:
        caffe.set_mode_gpu()

    # load a new model
    return caffe.Net(deploy_file, caffemodel, caffe.TEST)
Example #26
Source File: rfcn_inference.py From uai-sdk with Apache License 2.0 | 5 votes |
def load_model(self):
    caffe.set_mode_gpu()
    prototxt = 'models/test_agnostic.prototxt'
    caffemodel = "models/resnet101_rfcn_final.caffemodel"
    cfg.TEST.HAS_RPN = True
    self.net = caffe.Net(prototxt, caffemodel, caffe.TEST)
Example #27
Source File: train_faster_rcnn_alt_opt.py From uai-sdk with Apache License 2.0 | 5 votes |
def _init_caffe(cfg):
    """Initialize pycaffe in a training process.
    """
    import caffe
    # fix the random seeds (numpy and caffe) for reproducibility
    np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)
    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(cfg.GPU_ID)
Example #28
Source File: style_transfer.py From style_transfer with MIT License | 5 votes |
def run(self):
    """This method runs in the new process."""
    global logger
    setup_exceptions()
    logger = log_utils.setup_logger('tile_worker')

    if self.caffe_path is not None:
        sys.path.append(self.caffe_path + '/python')
    if self.device >= 0:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(self.device)
    import caffe
    if self.device >= 0:
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()
    caffe.set_random_seed(0)
    np.random.seed(0)

    self.model = CaffeModel(*self.model_info)
    self.model.img = np.zeros((3, 1, 1), dtype=np.float32)

    while True:
        try:
            self.process_one_request()
        except KeyboardInterrupt:
            break
Example #29
Source File: train_faster_rcnn_alt_opt.py From uai-sdk with Apache License 2.0 | 5 votes |
def _init_caffe(cfg):
    """Initialize pycaffe in a training process.
    """
    import caffe
    # fix the random seeds (numpy and caffe) for reproducibility
    np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)
    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(cfg.GPU_ID)
Example #30
Source File: sequence_roi_val.py From TPN with MIT License | 5 votes |
def load_nets(args, cur_gpu):
    # initialize solver and feature net,
    # RNN should be initialized before CNN, because CNN cudnn conv layers
    # may assume using all available memory
    caffe.set_mode_gpu()
    caffe.set_device(cur_gpu)
    net = caffe.Net(args.model, args.weights, caffe.TEST)
    return net