Python torch.multiprocessing.get_context() Examples
The following are 20 code examples of torch.multiprocessing.get_context(), collected from open-source projects. The source file, project, and license are noted above each example.
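torch.multiprocessing is a drop-in wrapper around the standard multiprocessing module, so get_context() behaves the same way: it returns a context object bound to a single start method ('fork', 'spawn', or 'forkserver') whose Process, Queue, Pipe, Pool, and friends all use that method, without changing the global default. A minimal sketch of the basic usage (the worker function and tensor payload are illustrative, not taken from any of the projects below):

import torch
import torch.multiprocessing as mp

def send_ones(queue):
    # tensors put into a torch.multiprocessing queue are passed
    # through shared memory rather than copied byte-by-byte
    queue.put(torch.ones(3))

if __name__ == '__main__':
    ctx = mp.get_context('spawn')      # local context; global default untouched
    queue = ctx.Queue()
    proc = ctx.Process(target=send_ones, args=(queue,))
    proc.start()
    print(queue.get())                 # tensor([1., 1., 1.])
    proc.join()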
Example #1
Source File: evaluator.py From TreeFilter-Torch with MIT License
def __init__(self, dataset, class_num, image_mean, image_std, network,
             multi_scales, is_flip, devices,
             verbose=False, save_path=None, show_image=False):
    self.dataset = dataset
    self.ndata = self.dataset.get_length()
    self.class_num = class_num
    self.image_mean = image_mean
    self.image_std = image_std
    self.multi_scales = multi_scales
    self.is_flip = is_flip
    self.network = network
    self.devices = devices

    self.context = mp.get_context('spawn')
    self.val_func = None
    self.results_queue = self.context.Queue(self.ndata)

    self.verbose = verbose
    self.save_path = save_path
    if save_path is not None:
        ensure_dir(save_path)
    self.show_image = show_image
Example #2
Source File: evaluator.py From TorchSeg with MIT License
def __init__(self, dataset, class_num, image_mean, image_std, network,
             multi_scales, is_flip, devices,
             verbose=False, save_path=None, show_image=False):
    self.dataset = dataset
    self.ndata = self.dataset.get_length()
    self.class_num = class_num
    self.image_mean = image_mean
    self.image_std = image_std
    self.multi_scales = multi_scales
    self.is_flip = is_flip
    self.network = network
    self.devices = devices

    self.context = mp.get_context('spawn')
    self.val_func = None
    self.results_queue = self.context.Queue(self.ndata)

    self.verbose = verbose
    self.save_path = save_path
    if save_path is not None:
        ensure_dir(save_path)
    self.show_image = show_image
Example #3
Source File: dist_test.py From TorchSeg with MIT License
def __init__(self, dataset, class_num, image_mean, image_std, network,
             multi_scales, is_flip, devices,
             verbose=False, save_path=None, show_image=False):
    self.dataset = dataset
    self.ndata = self.dataset.get_length()
    self.class_num = class_num
    self.image_mean = image_mean
    self.image_std = image_std
    self.multi_scales = multi_scales
    self.is_flip = is_flip
    self.network = network
    self.devices = devices

    self.context = mp.get_context('spawn')
    self.val_func = None
    self.results_queue = self.context.Queue(self.ndata)

    self.verbose = verbose
    self.save_path = save_path
    if save_path is not None:
        ensure_dir(save_path)
    self.show_image = show_image
Example #4
Source File: evaluator.py From FNA with Apache License 2.0
def __init__(self, dataset, class_num, image_mean, image_std, network,
             multi_scales, is_flip, devices,
             verbose=False, save_path=None, show_image=False):
    self.dataset = dataset
    self.ndata = self.dataset.get_length()
    self.class_num = class_num
    self.image_mean = image_mean
    self.image_std = image_std
    self.multi_scales = multi_scales
    self.is_flip = is_flip
    self.network = network
    self.devices = devices

    self.context = mp.get_context('spawn')
    self.val_func = None
    self.results_queue = self.context.Queue(self.ndata)

    self.verbose = verbose
    self.save_path = save_path
    if save_path is not None:
        ensure_dir(save_path)
    self.show_image = show_image
Example #5
Source File: batcher.py From ByteCup2018 with MIT License
def __call__(self, batch_size: int):
    def get_batches(hyper_batch):
        indexes = list(range(0, len(hyper_batch), batch_size))
        if not self._single_run:
            # random shuffle for training batches
            random.shuffle(hyper_batch)
            random.shuffle(indexes)
        hyper_batch.sort(key=self._sort_key)
        for i in indexes:
            batch = self._batchify(hyper_batch[i:i+batch_size])
            yield batch

    if self._queue is not None:
        ctx = mp.get_context('forkserver')
        self._process = ctx.Process(
            target=_batch2q,
            args=(self._loader, self._prepro, self._queue, self._single_run)
        )
        self._process.start()
        while True:
            d = self._queue.get()
            if d is None:
                break
            if isinstance(d, int):
                print('\nepoch {} done'.format(d))
                continue
            yield from get_batches(d)
        self._process.join()
    else:
        i = 0
        while True:
            for batch in self._loader:
                yield from get_batches(self._prepro(batch))
            if self._single_run:
                break
            i += 1
            print('\nepoch {} done'.format(i))
Example #6
Source File: train.py From Street-fighter-A3C-ICM-pytorch with MIT License
def train(opt):
    torch.manual_seed(123)
    if os.path.isdir(opt.log_path):
        shutil.rmtree(opt.log_path)
    os.makedirs(opt.log_path)
    if not os.path.isdir(opt.saved_path):
        os.makedirs(opt.saved_path)
    mp = _mp.get_context("spawn")
    global_model = ActorCritic(num_inputs=3, num_actions=90)
    global_icm = IntrinsicCuriosityModule(num_inputs=3, num_actions=90)
    if opt.use_gpu:
        global_model.cuda()
        global_icm.cuda()
    global_model.share_memory()
    global_icm.share_memory()
    optimizer = GlobalAdam(list(global_model.parameters()) + list(global_icm.parameters()), lr=opt.lr)
    processes = []
    for index in range(opt.num_processes):
        if index == 0:
            process = mp.Process(target=local_train, args=(index, opt, global_model, global_icm, optimizer, True))
        else:
            process = mp.Process(target=local_train, args=(index, opt, global_model, global_icm, optimizer))
        process.start()
        processes.append(process)
    for process in processes:
        process.join()
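The core pattern in this example is calling share_memory() on the global model before handing it to spawn-started workers, so that every process updates one set of parameters. A stripped-down sketch of just that mechanism (the linear model and in-place update are illustrative, not from the project):

import torch
import torch.multiprocessing as _mp

def bump(shared_model):
    # the child mutates the same shared-memory storage the parent sees
    with torch.no_grad():
        shared_model.weight.add_(1.0)

if __name__ == '__main__':
    mp = _mp.get_context('spawn')
    model = torch.nn.Linear(2, 2)
    model.share_memory()               # move parameter storages to shared memory
    before = model.weight.clone()
    p = mp.Process(target=bump, args=(model,))
    p.start()
    p.join()
    assert torch.allclose(model.weight, before + 1.0)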
Example #7
Source File: batcher.py From ByteCup2018 with MIT License
def __init__(self, loader, prepro, sort_key, batchify,
             single_run=True, queue_size=8, fork=True):
    self._loader = loader
    self._prepro = prepro
    self._sort_key = sort_key
    self._batchify = batchify
    self._single_run = single_run
    if fork:
        ctx = mp.get_context('forkserver')
        self._queue = ctx.Queue(queue_size)
    else:
        # for easier debugging
        self._queue = None
    self._process = None
Example #8
Source File: vec_env.py From SLM-Lab with MIT License
def __init__(self, env_fns, context='spawn'):
    ctx = mp.get_context(context)
    dummy = env_fns[0]()
    observation_space, action_space = dummy.observation_space, dummy.action_space
    self.spec = dummy.spec
    dummy.close()
    del dummy
    VecEnv.__init__(self, len(env_fns), observation_space, action_space)
    self.obs_keys, self.obs_shapes, self.obs_dtypes = obs_space_info(observation_space)
    self.obs_bufs = [
        {k: ctx.Array(_NP_TO_CT[self.obs_dtypes[k].type], int(np.prod(self.obs_shapes[k])))
         for k in self.obs_keys}
        for _ in env_fns]
    self.parent_pipes = []
    self.procs = []
    with clear_mpi_env_vars():
        for env_fn, obs_buf in zip(env_fns, self.obs_bufs):
            wrapped_fn = CloudpickleWrapper(env_fn)
            parent_pipe, child_pipe = ctx.Pipe()
            proc = ctx.Process(
                target=subproc_worker,
                args=(child_pipe, parent_pipe, wrapped_fn, obs_buf,
                      self.obs_shapes, self.obs_dtypes, self.obs_keys))
            proc.daemon = True
            self.procs.append(proc)
            self.parent_pipes.append(parent_pipe)
            proc.start()
            child_pipe.close()
    self.waiting_step = False
    self.viewer = None
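The obs_bufs above are per-environment ctypes arrays created through the context, so workers write observations straight into shared memory instead of pickling them through pipes. A reduced sketch of just that buffer mechanism (the dtype and size are illustrative):

import ctypes
import numpy as np
import torch.multiprocessing as mp

def fill(buf, n):
    # view the shared ctypes buffer as a numpy array and write in place
    arr = np.frombuffer(buf.get_obj(), dtype=np.float32, count=n)
    arr[:] = 1.0

if __name__ == '__main__':
    ctx = mp.get_context('spawn')
    n = 4
    buf = ctx.Array(ctypes.c_float, n)   # synchronized shared-memory array
    p = ctx.Process(target=fill, args=(buf, n))
    p.start()
    p.join()
    print(np.frombuffer(buf.get_obj(), dtype=np.float32, count=n))  # [1. 1. 1. 1.]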
Example #9
Source File: train.py From Super-mario-bros-A3C-pytorch with MIT License
def train(opt):
    torch.manual_seed(123)
    if os.path.isdir(opt.log_path):
        shutil.rmtree(opt.log_path)
    os.makedirs(opt.log_path)
    if not os.path.isdir(opt.saved_path):
        os.makedirs(opt.saved_path)
    mp = _mp.get_context("spawn")
    env, num_states, num_actions = create_train_env(opt.world, opt.stage, opt.action_type)
    global_model = ActorCritic(num_states, num_actions)
    if opt.use_gpu:
        global_model.cuda()
    global_model.share_memory()
    if opt.load_from_previous_stage:
        if opt.stage == 1:
            previous_world = opt.world - 1
            previous_stage = 4
        else:
            previous_world = opt.world
            previous_stage = opt.stage - 1
        file_ = "{}/a3c_super_mario_bros_{}_{}".format(opt.saved_path, previous_world, previous_stage)
        if os.path.isfile(file_):
            global_model.load_state_dict(torch.load(file_))
    optimizer = GlobalAdam(global_model.parameters(), lr=opt.lr)
    processes = []
    for index in range(opt.num_processes):
        if index == 0:
            process = mp.Process(target=local_train, args=(index, opt, global_model, optimizer, True))
        else:
            process = mp.Process(target=local_train, args=(index, opt, global_model, optimizer))
        process.start()
        processes.append(process)
    process = mp.Process(target=local_test, args=(opt.num_processes, opt, global_model))
    process.start()
    processes.append(process)
    for process in processes:
        process.join()
Example #10
Source File: multiprocess_input_pipeline.py From transformer-kernel-ranking with Apache License 2.0
def get_multiprocess_batch_queue(name_prefix: str, target_function, files, conf, _logger, queue_size=100) -> Tuple[mp.Queue, List[mp.Process], mp.Event]:
    ctx = mp.get_context('spawn')  # also set so that windows & linux behave the same
    _processes = []
    _finish_notification = ctx.Event()

    if len(files) == 0:
        _logger.error("No files for multiprocess loading specified, for: " + name_prefix)
        exit(1)
    else:
        _logger.info("Starting " + str(len(files)) + " data loader processes, for:" + name_prefix)

    if conf["token_embedder_type"] == "fasttext":
        global fasttext_vocab_cached_mapping
        global fasttext_vocab_cached_data
        if fasttext_vocab_cached_data is None:
            fasttext_vocab_cached_mapping, fasttext_vocab_cached_data = FastTextVocab.load_ids(conf["fasttext_vocab_mapping"], conf["fasttext_max_subwords"])
            fasttext_vocab_cached_data.share_memory_()

    _queue_list = []
    #_queue = ctx.Queue(queue_size)
    for proc_number, file in enumerate(files):
        _queue = ctx.Queue(queue_size)
        process = ctx.Process(name=name_prefix + "-" + str(proc_number),
                              target=target_function,
                              args=(proc_number, conf, _queue, _finish_notification, file,
                                    fasttext_vocab_cached_mapping, fasttext_vocab_cached_data))
        process.start()
        _processes.append(process)
        _queue_list.append(_queue)

    return DeterministicQueue(_queue_list), _processes, _finish_notification
    #return _queue, _processes, _finish_notification

#
# training instance generator
#  - filling the _queue with ready to run training batches
#  - everything is thread local
#
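A design note on this variant: unlike the single shared queue in Example #12 below (the sigir19-neural-ir version of the same function), each loader process here gets its own queue, and the queues are wrapped in a DeterministicQueue; the commented-out lines show the old single-queue code. Presumably this makes batch consumption order reproducible across runs instead of depending on which loader process happens to enqueue first.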
Example #11
Source File: local_elastic_agent.py From elastic with BSD 3-Clause "New" or "Revised" License
def __init__(
    self, spec: WorkerSpec, start_method="spawn", exit_barrier_timeout: float = 300
):
    super().__init__(spec, exit_barrier_timeout)
    self._start_method = start_method
    # pyre-fixme[8]: Attribute has type `ProcessContext`; used as `None`.
    self._process_context: mp.ProcessContext = None
    # a map that holds return values for each worker fn
    # ret_val[0] holds the return value for worker_0 (global rank 0)
    self._manager = mp.get_context(start_method).Manager()
    self._ret_vals = self._manager.dict()
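The Manager obtained from the spawn context backs _ret_vals with a server process, so worker processes can report return values through a dict proxy. A minimal sketch of that mechanism in isolation (the worker and payload are illustrative, not from the elastic project):

import torch.multiprocessing as mp

def report(ret_vals, rank):
    # writes go through the proxy to the manager's server process
    ret_vals[rank] = rank * rank

if __name__ == '__main__':
    ctx = mp.get_context('spawn')
    with ctx.Manager() as manager:
        ret_vals = manager.dict()
        procs = [ctx.Process(target=report, args=(ret_vals, r)) for r in range(3)]
        for p in procs:
            p.start()
        for p in procs:
            p.join()
        print(dict(ret_vals))  # {0: 0, 1: 1, 2: 4} (key order may vary)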
Example #12
Source File: multiprocess_input_pipeline.py From sigir19-neural-ir with Apache License 2.0
def get_multiprocess_batch_queue(name_prefix: str, target_function, files, conf, _logger, queue_size=100) -> Tuple[mp.Queue, List[mp.Process], mp.Event]:
    ctx = mp.get_context('spawn')  # also set so that windows & linux behave the same
    _queue = ctx.Queue(queue_size)
    _processes = []
    _finish_notification = ctx.Event()

    if len(files) == 0:
        _logger.error("No files for multiprocess loading specified, for: " + name_prefix)
        exit(1)
    else:
        _logger.info("Starting " + str(len(files)) + " data loader processes, for:" + name_prefix)

    if conf["token_embedder_type"] == "fasttext":
        global fasttext_vocab_cached_mapping
        global fasttext_vocab_cached_data
        if fasttext_vocab_cached_data is None:
            fasttext_vocab_cached_mapping, fasttext_vocab_cached_data = FastTextVocab.load_ids(conf["fasttext_vocab_mapping"], conf["fasttext_max_subwords"])
            fasttext_vocab_cached_data.share_memory_()

    for proc_number, file in enumerate(files):
        process = ctx.Process(name=name_prefix + "-" + str(proc_number),
                              target=target_function,
                              args=(proc_number, conf, _queue, _finish_notification, file,
                                    fasttext_vocab_cached_mapping, fasttext_vocab_cached_data))
        process.start()
        _processes.append(process)

    return _queue, _processes, _finish_notification

#
# training instance generator
#  - filling the _queue with ready to run training batches
#  - everything is thread local
#
Example #13
Source File: batcher.py From fast_abs_rl with MIT License
def __call__(self, batch_size: int):
    def get_batches(hyper_batch):
        indexes = list(range(0, len(hyper_batch), batch_size))
        if not self._single_run:
            # random shuffle for training batches
            random.shuffle(hyper_batch)
            random.shuffle(indexes)
        hyper_batch.sort(key=self._sort_key)
        for i in indexes:
            batch = self._batchify(hyper_batch[i:i+batch_size])
            yield batch

    if self._queue is not None:
        ctx = mp.get_context('forkserver')
        self._process = ctx.Process(
            target=_batch2q,
            args=(self._loader, self._prepro, self._queue, self._single_run)
        )
        self._process.start()
        while True:
            d = self._queue.get()
            if d is None:
                break
            if isinstance(d, int):
                print('\nepoch {} done'.format(d))
                continue
            yield from get_batches(d)
        self._process.join()
    else:
        i = 0
        while True:
            for batch in self._loader:
                yield from get_batches(self._prepro(batch))
            if self._single_run:
                break
            i += 1
            print('\nepoch {} done'.format(i))
Example #14
Source File: batcher.py From fast_abs_rl with MIT License
def __init__(self, loader, prepro, sort_key, batchify,
             single_run=True, queue_size=8, fork=True):
    self._loader = loader
    self._prepro = prepro
    self._sort_key = sort_key
    self._batchify = batchify
    self._single_run = single_run
    if fork:
        ctx = mp.get_context('forkserver')
        self._queue = ctx.Queue(queue_size)
    else:
        # for easier debugging
        self._queue = None
    self._process = None
Example #15
Source File: uisrnn.py From uis-rnn with Apache License 2.0
def parallel_predict(model, test_sequences, args, num_processes=4):
    """Run prediction in parallel using torch.multiprocessing.

    This is a beta feature. It makes prediction slower on CPU. But it's
    reported that it makes prediction faster on GPU.

    Args:
      model: instance of UISRNN model
      test_sequences: a list of test sequences, or a single test
        sequence. Each test sequence is a 2-dim numpy array of real numbers.
        See `predict_single()` for details.
      args: Inference configurations. See `arguments.py` for details.
      num_processes: number of parallel processes.

    Returns:
      a list of the same size as test_sequences, where each element
      being a 1-dim list of strings.

    Raises:
      TypeError: If test_sequences is of wrong type.
    """
    if not isinstance(test_sequences, list):
        raise TypeError('test_sequences must be a list.')
    ctx = multiprocessing.get_context('forkserver')
    model.rnn_model.share_memory()
    pool = ctx.Pool(num_processes)
    results = pool.map(
        functools.partial(model.predict_single, args=args),
        test_sequences)
    pool.close()
    return results
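Stripped of the model specifics, the pattern here is a forkserver Pool mapping a partially applied callable over the inputs. A minimal sketch of the same pattern (the function and inputs are illustrative; forkserver is POSIX-only):

import functools
import torch.multiprocessing as multiprocessing

def scale(x, factor):
    return x * factor

if __name__ == '__main__':
    ctx = multiprocessing.get_context('forkserver')
    pool = ctx.Pool(4)
    # fix the keyword argument, map over the remaining positional one
    results = pool.map(functools.partial(scale, factor=10), [1, 2, 3])
    pool.close()
    pool.join()
    print(results)  # [10, 20, 30]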
Example #16
Source File: test_sized_dict.py From espnet with Apache License 2.0
def test_SizedDict_shared():
    d = SizedDict(shared=True)
    x = torch.randn(10)
    d["a"] = x

    mp = multiprocessing.get_context("forkserver")
    p = mp.Process(target=_set, args=(d,))
    p.start()
    p.join()
    assert d["a"][0] == 10
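The _set helper is defined elsewhere in the espnet test module and is not shown in this snippet; a plausible stand-in consistent with the final assertion (an assumption, not the actual source) would be:

def _set(d):
    # runs in the child process; mutates the shared tensor in place,
    # so the parent's assertion d["a"][0] == 10 can observe the change
    d["a"][0] = 10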
Example #17
Source File: AsynTrainEpoch.py From DeepRL with MIT License
def __init__(
        self,
        _agent: AgentAbstract,
        _env: EnvAbstract,
        _epoch_max: int,
        _epoch_train: int,
        _train_update_target: int,
        _train_save: int,
        _process_core: int = None,
        _save_path: str = './save',
        _use_cmd: bool = True,
):
    self.agent: AgentAbstract = _agent
    self.agent.training()
    self.env: EnvAbstract = _env

    # multiprocessing for sampling
    self.mp = mp.get_context('spawn')
    self.process_core = _process_core
    self.pool = self.mp.Pool(self.process_core)

    # training control
    self.epoch = 0
    self.train_times = 0
    self.epoch_max = _epoch_max
    self.epoch_train = _epoch_train
    self.train_update_target = _train_update_target
    self.train_save = _train_save
    self.total_reward_buf = []

    self.save_path = _save_path

    self.use_cmd = _use_cmd
    if self.use_cmd:
        self.shell = TrainShell(self)
Example #18
Source File: evaluator.py From FasterSeg with MIT License
def __init__(self, dataset, class_num, image_mean, image_std, network,
             multi_scales, is_flip, devices=0, out_idx=0, threds=3,
             config=None, logger=None,
             verbose=False, save_path=None, show_image=False, show_prediction=False):
    self.dataset = dataset
    self.ndata = self.dataset.get_length()
    self.class_num = class_num
    self.image_mean = image_mean
    self.image_std = image_std
    self.multi_scales = multi_scales
    self.is_flip = is_flip
    self.network = network
    self.devices = devices
    if type(self.devices) == int:
        self.devices = [self.devices]
    self.out_idx = out_idx
    self.threds = threds
    self.config = config
    self.logger = logger

    self.context = mp.get_context('spawn')
    self.val_func = None
    self.results_queue = self.context.Queue(self.ndata)

    self.verbose = verbose
    self.save_path = save_path
    if save_path is not None:
        ensure_dir(save_path)
    self.show_image = show_image
    self.show_prediction = show_prediction
Example #19
Source File: tester.py From FasterSeg with MIT License
def __init__(self, dataset, class_num, image_mean, image_std, network,
             multi_scales, is_flip, devices=0, out_idx=0, threds=3,
             config=None, logger=None,
             verbose=False, save_path=None, show_prediction=False):
    self.dataset = dataset
    self.ndata = self.dataset.get_length()
    self.class_num = class_num
    self.image_mean = image_mean
    self.image_std = image_std
    self.multi_scales = multi_scales
    self.is_flip = is_flip
    self.network = network
    self.devices = devices
    if type(self.devices) == int:
        self.devices = [self.devices]
    self.out_idx = out_idx
    self.threds = threds
    self.config = config
    self.logger = logger

    self.context = mp.get_context('spawn')
    self.val_func = None
    self.results_queue = self.context.Queue(self.ndata)

    self.verbose = verbose
    self.save_path = save_path
    if save_path is not None:
        ensure_dir(save_path)
    self.show_prediction = show_prediction
Example #20
Source File: image2skeleton.py From mmskeleton with Apache License 2.0
def inference(
        detection_cfg,
        skeleton_cfg,
        dataset_cfg,
        gpus=1,
        worker_per_gpu=1,
):
    # get frame num
    video_file = dataset_cfg.video_file
    video_name = video_file.strip('\n').split('/')[-1]
    video_frames = mmcv.VideoReader(video_file)
    num_frames = len(video_frames)
    del video_frames

    data_cfg = skeleton_cfg.data_cfg
    if data_cfg.save_video:
        data_cfg.img_dir = os.path.join(data_cfg.save_dir,
                                        '{}.img'.format(video_name))
        if os.path.exists(data_cfg.img_dir):
            import shutil
            shutil.rmtree(data_cfg.img_dir)
        os.makedirs(data_cfg.img_dir)

    # cache model checkpoints
    cache_checkpoint(detection_cfg.checkpoint_file)
    cache_checkpoint(skeleton_cfg.checkpoint_file)

    # multiprocess settings
    context = mp.get_context('spawn')
    result_queue = context.Queue(num_frames)
    procs = []
    for w in range(gpus * worker_per_gpu):
        shred_list = list(range(w, num_frames, gpus * worker_per_gpu))
        p = context.Process(target=worker,
                            args=(video_file, shred_list, detection_cfg,
                                  skeleton_cfg, data_cfg, w % gpus,
                                  result_queue))
        p.start()
        procs.append(p)

    all_result = []
    print('\nPose estimation start:')
    prog_bar = ProgressBar(num_frames)
    for i in range(num_frames):
        t = result_queue.get()
        all_result.append(t)
        prog_bar.update()
    for p in procs:
        p.join()

    if len(all_result) == num_frames and data_cfg.save_video:
        print('\n\nGenerate video:')
        video_path = os.path.join(data_cfg.save_dir, video_name)
        mmcv.frames2video(data_cfg.img_dir, video_path,
                          filename_tmpl='{:01d}.png')
        print('Video was saved to {}'.format(video_path))