Python multiprocessing.get_context() Examples
The following are 30 code examples of multiprocessing.get_context(). The source project and file are noted above each example. You may also want to check out all available functions/classes of the multiprocessing module, or try the search function.
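Before diving into the project examples, here is a minimal, self-contained sketch of the typical get_context() pattern. The worker function and queue are illustrative placeholders, not taken from any of the projects below:

import multiprocessing

def _echo(q):
    # Runs in the child; with the 'spawn' start method the child
    # re-imports this module, so _echo must be defined at top level.
    q.put("hello from child")

if __name__ == "__main__":
    # get_context() returns a context object that exposes the same API
    # as the multiprocessing module, but pinned to one start method.
    ctx = multiprocessing.get_context("spawn")
    q = ctx.Queue()
    p = ctx.Process(target=_echo, args=(q,))
    p.start()
    print(q.get(timeout=5))  # -> "hello from child"
    p.join()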
Example #1
Source File: local_timer_example.py From elastic with BSD 3-Clause "New" or "Revised" License
def test_torch_mp_example(self):
    # in practice set the max_interval to a larger value (e.g. 60 seconds)
    mp_queue = mp.get_context("spawn").Queue()
    server = timer.LocalTimerServer(mp_queue, max_interval=0.01)
    server.start()

    world_size = 8

    # all processes should complete successfully
    # since start_process does NOT take a context parameter yet,
    # this method WILL FAIL (hence the test is disabled)
    torch_mp.spawn(
        fn=_happy_function, args=(mp_queue,), nprocs=world_size, join=True
    )

    with self.assertRaises(Exception):
        # torch.multiprocessing.spawn kills all sub-procs
        # if one of them gets killed
        torch_mp.spawn(
            fn=_stuck_function, args=(mp_queue,), nprocs=world_size, join=True
        )

    server.stop()
Example #2
Source File: _test_multiprocessing.py From ironpython3 with Apache License 2.0
def test_set_get(self):
    multiprocessing.set_forkserver_preload(PRELOAD)
    count = 0
    old_method = multiprocessing.get_start_method()
    try:
        for method in ('fork', 'spawn', 'forkserver'):
            try:
                multiprocessing.set_start_method(method, force=True)
            except ValueError:
                continue
            self.assertEqual(multiprocessing.get_start_method(), method)
            ctx = multiprocessing.get_context()
            self.assertEqual(ctx.get_start_method(), method)
            self.assertTrue(type(ctx).__name__.lower().startswith(method))
            self.assertTrue(
                ctx.Process.__name__.lower().startswith(method))
            self.check_context(multiprocessing)
            count += 1
    finally:
        multiprocessing.set_start_method(old_method, force=True)
    self.assertGreaterEqual(count, 1)
Example #3
Source File: test_operator_gpu.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_kernel_error_checking():
    # Running tests that may throw exceptions out of worker threads will stop CI testing
    # if not run in a separate process (with its own address space for CUDA compatibility).
    try:
        mpctx = mp.get_context('spawn')
    except:
        print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %
              sys.version_info[0:2], file=sys.stderr, end='')
    else:
        with discard_stderr():
            for f in [kernel_error_check_imperative, kernel_error_check_symbolic]:
                p = mpctx.Process(target=f)
                p.start()
                p.join()
                assert p.exitcode != 0, \
                    "Expected a synchronous kernel error from %s(), none seen." % f.__name__
Example #4
Source File: test_multiprocessing.py From loguru with MIT License
def test_apply_async_spawn(monkeypatch):
    ctx = multiprocessing.get_context("spawn")
    monkeypatch.setattr(loguru._handler, "multiprocessing", ctx)
    writer = Writer()
    logger.add(writer, format="{message}", enqueue=True, catch=False)

    with ctx.Pool(1, set_logger, [logger]) as pool:
        for i in range(3):
            result = pool.apply_async(do_something, (i,))
            result.get()
        pool.close()
        pool.join()

    logger.info("Done!")
    logger.remove()

    assert writer.read() == "#0\n#1\n#2\nDone!\n"
Example #5
Source File: test_multiprocessing.py From loguru with MIT License
def test_apply_spawn(monkeypatch):
    ctx = multiprocessing.get_context("spawn")
    monkeypatch.setattr(loguru._handler, "multiprocessing", ctx)
    writer = Writer()
    logger.add(writer, format="{message}", enqueue=True, catch=False)

    with ctx.Pool(1, set_logger, [logger]) as pool:
        for i in range(3):
            pool.apply(do_something, (i,))
        pool.close()
        pool.join()

    logger.info("Done!")
    logger.remove()

    assert writer.read() == "#0\n#1\n#2\nDone!\n"
Example #6
Source File: local_timer_example.py From elastic with BSD 3-Clause "New" or "Revised" License
def _run_example_with(self, start_method):
    spawn_ctx = mp.get_context(start_method)
    mp_queue = spawn_ctx.Queue()
    server = timer.LocalTimerServer(mp_queue, max_interval=0.01)
    server.start()

    world_size = 8
    processes = []
    for i in range(0, world_size):
        if i % 2 == 0:
            p = spawn_ctx.Process(target=_stuck_function, args=(i, mp_queue))
        else:
            p = spawn_ctx.Process(target=_happy_function, args=(i, mp_queue))
        p.start()
        processes.append(p)

    for i in range(0, world_size):
        p = processes[i]
        p.join()
        if i % 2 == 0:
            self.assertEqual(-signal.SIGKILL, p.exitcode)
        else:
            self.assertEqual(0, p.exitcode)

    server.stop()
Example #7
Source File: test_rpc.py From dgl with Apache License 2.0
def test_multi_client():
    ip_config = open("rpc_ip_config_mul_client.txt", "w")
    ip_addr = get_local_usable_addr()
    ip_config.write('%s 1\n' % ip_addr)
    ip_config.close()
    ctx = mp.get_context('spawn')
    pserver = ctx.Process(target=start_server,
                          args=(10, "rpc_ip_config_mul_client.txt"))
    pclient_list = []
    for i in range(10):
        pclient = ctx.Process(target=start_client,
                              args=("rpc_ip_config_mul_client.txt",))
        pclient_list.append(pclient)
    pserver.start()
    time.sleep(1)
    for i in range(10):
        pclient_list[i].start()
    for i in range(10):
        pclient_list[i].join()
    pserver.join()
Example #8
Source File: _test_multiprocessing.py From Project-New-Reign---Nemesis-Main with GNU General Public License v3.0
def test_set_get(self):
    multiprocessing.set_forkserver_preload(PRELOAD)
    count = 0
    old_method = multiprocessing.get_start_method()
    try:
        for method in ('fork', 'spawn', 'forkserver'):
            try:
                multiprocessing.set_start_method(method, force=True)
            except ValueError:
                continue
            self.assertEqual(multiprocessing.get_start_method(), method)
            ctx = multiprocessing.get_context()
            self.assertEqual(ctx.get_start_method(), method)
            self.assertTrue(type(ctx).__name__.lower().startswith(method))
            self.assertTrue(
                ctx.Process.__name__.lower().startswith(method))
            self.check_context(multiprocessing)
            count += 1
    finally:
        multiprocessing.set_start_method(old_method, force=True)
    self.assertGreaterEqual(count, 1)
Example #9
Source File: test_new_kvstore.py From dgl with Apache License 2.0
def test_kv_store():
    # start 10 servers and 10 clients
    ip_config = open("kv_ip_config.txt", "w")
    ip_addr = get_local_usable_addr()
    ip_config.write('%s 10\n' % ip_addr)
    ip_config.close()
    ctx = mp.get_context('spawn')
    pserver_list = []
    pclient_list = []
    for i in range(10):
        pserver = ctx.Process(target=start_server, args=(i, 10))
        pserver.start()
        pserver_list.append(pserver)
    time.sleep(2)
    for i in range(10):
        pclient = ctx.Process(target=start_client, args=(10,))
        pclient.start()
        pclient_list.append(pclient)
    for i in range(10):
        pclient_list[i].join()
    for i in range(10):
        pserver_list[i].join()
Example #10
Source File: test_coroutine_sink.py From loguru with MIT License
def test_complete_with_sub_processes(monkeypatch, capsys):
    ctx = multiprocessing.get_context("spawn")
    monkeypatch.setattr(loguru._handler, "multiprocessing", ctx)

    loop = asyncio.new_event_loop()
    writer = Writer()
    logger.add(writer.write, format="{message}", enqueue=True, loop=loop)

    process = ctx.Process(target=subworker, args=[logger])
    process.start()
    process.join()

    async def complete():
        await logger.complete()

    loop.run_until_complete(complete())

    out, err = capsys.readouterr()
    assert out == err == ""
    assert writer.output == "Child\n"
Example #11
Source File: context.py From loky with BSD 3-Clause "New" or "Revised" License
def get_context(method=None):
    # Try to overload the default context
    method = method or _DEFAULT_START_METHOD or "loky"
    if method == "fork":
        # If 'fork' is explicitly requested, warn user about potential
        # issues.
        warnings.warn("`fork` start method should not be used with "
                      "`loky` as it does not respect POSIX. Try using "
                      "`spawn` or `loky` instead.", UserWarning)
    try:
        context = mp_get_context(method)
    except ValueError:
        raise ValueError("Unknown context '{}'. Value should be in {}."
                         .format(method, START_METHODS))

    return context
Example #12
Source File: parallel_history.py From rltime with Apache License 2.0
def __init__(self, history_cls, history_args):
    super().__init__()
    self._last_needed_feed_count = 0
    self.results = {}
    self.pending_counts = {}

    # Make sure to use 'spawn' and not 'fork' to allow shared CUDA tensors
    # on linux
    ctx = mp.get_context('spawn')
    self.close_event = ctx.Event()
    self.qevent = ctx.Event()
    # Queue for requests, such as getting training data
    self.request_queue = ctx.Queue(10)
    # Queue for updates like new acting samples and priority updates
    self.update_queue = ctx.Queue(10)
    # Queue for sending back request results
    self.result_queue = ctx.Queue()
    self._process = ctx.Process(
        target=self.run,
        args=(history_cls, cloudpickle.dumps(history_args)))
    self._process.start()
Example #13
Source File: test_habitat_env.py From habitat-api with MIT License
def test_vectorized_envs(multiprocessing_start_method, gpu2gpu):
    import habitat_sim

    if gpu2gpu and not habitat_sim.cuda_enabled:
        pytest.skip("GPU-GPU requires CUDA")

    configs, datasets = _load_test_data()
    if multiprocessing_start_method == "fork":
        if gpu2gpu:
            pytest.skip("Fork does not support gpu2gpu")

        # 'fork' works in a process that has yet to use the GPU;
        # this test spawns a new python instance, which allows us to fork
        mp_ctx = mp.get_context("spawn")
        p = mp_ctx.Process(
            target=_vec_env_test_fn,
            args=(configs, datasets, multiprocessing_start_method, gpu2gpu),
        )
        p.start()
        p.join()
        assert p.exitcode == 0
    else:
        _vec_env_test_fn(
            configs, datasets, multiprocessing_start_method, gpu2gpu
        )
Example #14
Source File: scheduler_job.py From airflow with Apache License 2.0
def start(self):
    """
    Launch the process and start processing the DAG.
    """
    start_method = self._get_multiprocessing_start_method()
    context = multiprocessing.get_context(start_method)

    self._parent_channel, _child_channel = context.Pipe()
    self._process = context.Process(
        target=type(self)._run_file_processor,
        args=(
            _child_channel,
            self.file_path,
            self._pickle_dags,
            self._dag_ids,
            "DagFileProcessor{}".format(self._instance_id),
            self._failure_callback_requests
        ),
        name="DagFileProcessor{}-Process".format(self._instance_id)
    )
    self._start_time = timezone.utcnow()
    self._process.start()
Example #15
Source File: test_multiprocessing.py From loguru with MIT License
def test_process_spawn(monkeypatch):
    ctx = multiprocessing.get_context("spawn")
    monkeypatch.setattr(loguru._handler, "multiprocessing", ctx)
    writer = Writer()
    logger.add(writer, format="{message}", enqueue=True, catch=False)

    process = ctx.Process(target=subworker, args=(logger,))
    process.start()
    process.join()

    assert process.exitcode == 0

    logger.info("Main")
    logger.remove()

    assert writer.read() == "Child\nMain\n"
Example #16
Source File: face_recognition_cli.py From face_recognition with MIT License
def process_images_in_process_pool(images_to_check, known_names, known_face_encodings, number_of_cpus, tolerance, show_distance):
    if number_of_cpus == -1:
        processes = None
    else:
        processes = number_of_cpus

    # macOS will crash due to a bug in libdispatch if you don't use 'forkserver'
    context = multiprocessing
    if "forkserver" in multiprocessing.get_all_start_methods():
        context = multiprocessing.get_context("forkserver")

    pool = context.Pool(processes=processes)

    function_parameters = zip(
        images_to_check,
        itertools.repeat(known_names),
        itertools.repeat(known_face_encodings),
        itertools.repeat(tolerance),
        itertools.repeat(show_distance)
    )

    pool.starmap(test_image, function_parameters)
Example #17
Source File: face_detection_cli.py From face_recognition with MIT License
def process_images_in_process_pool(images_to_check, number_of_cpus, model):
    if number_of_cpus == -1:
        processes = None
    else:
        processes = number_of_cpus

    # macOS will crash due to a bug in libdispatch if you don't use 'forkserver'
    context = multiprocessing
    if "forkserver" in multiprocessing.get_all_start_methods():
        context = multiprocessing.get_context("forkserver")

    pool = context.Pool(processes=processes)

    function_parameters = zip(
        images_to_check,
        itertools.repeat(model),
    )

    pool.starmap(test_image, function_parameters)
Example #18
Source File: _test_multiprocessing.py From Fluid-Designer with GNU General Public License v3.0
def test_set_get(self):
    multiprocessing.set_forkserver_preload(PRELOAD)
    count = 0
    old_method = multiprocessing.get_start_method()
    try:
        for method in ('fork', 'spawn', 'forkserver'):
            try:
                multiprocessing.set_start_method(method, force=True)
            except ValueError:
                continue
            self.assertEqual(multiprocessing.get_start_method(), method)
            ctx = multiprocessing.get_context()
            self.assertEqual(ctx.get_start_method(), method)
            self.assertTrue(type(ctx).__name__.lower().startswith(method))
            self.assertTrue(
                ctx.Process.__name__.lower().startswith(method))
            self.check_context(multiprocessing)
            count += 1
    finally:
        multiprocessing.set_start_method(old_method, force=True)
    self.assertGreaterEqual(count, 1)
Example #19
Source File: test_server.py From aiotools with MIT License
def test_server_multiproc(mocker, set_timeout, restore_signal, start_method):
    mpctx = mp.get_context(start_method)
    mocker.patch('aiotools.server.mp', mpctx)

    started = mpctx.Value('i', 0)
    terminated = mpctx.Value('i', 0)
    proc_idxs = mpctx.Array('i', 3)

    set_timeout(0.2, interrupt)
    aiotools.start_server(myserver_multiproc,
                          num_workers=3,
                          args=(started, terminated, proc_idxs))

    assert started.value == 3
    assert terminated.value == 3
    assert list(proc_idxs) == [0, 1, 2]
    assert len(mp.active_children()) == 0
Example #20
Source File: test_server.py From aiotools with MIT License
def test_server_multiproc_custom_stop_signals(
        mocker, set_timeout, restore_signal, start_method):
    mpctx = mp.get_context(start_method)
    mocker.patch('aiotools.server.mp', mpctx)

    started = mpctx.Value('i', 0)
    terminated = mpctx.Value('i', 0)
    received_signals = mpctx.Array('i', 2)
    proc_idxs = mpctx.Array('i', 2)

    set_timeout(0.2, interrupt_usr1)
    aiotools.start_server(myserver_multiproc_custom_stop_signals,
                          num_workers=2,
                          stop_signals={signal.SIGUSR1},
                          args=(started, terminated, received_signals, proc_idxs))

    assert started.value == 2
    assert terminated.value == 2
    assert list(received_signals) == [signal.SIGUSR1, signal.SIGUSR1]
    assert list(proc_idxs) == [0, 1]
    assert len(mpctx.active_children()) == 0
Example #21
Source File: workflow.py From oggm with BSD 3-Clause "New" or "Revised" License
def init_mp_pool(reset=False):
    """Necessary because at import time, cfg might be uninitialized"""
    global _mp_pool
    if _mp_pool and not reset:
        return _mp_pool
    cfg.CONFIG_MODIFIED = False
    if _mp_pool and reset:
        _mp_pool.terminate()
        _mp_pool = None
    if cfg.PARAMS['use_mp_spawn']:
        mp = multiprocessing.get_context('spawn')
    else:
        mp = multiprocessing
    cfg_contents = cfg.pack_config()
    global_lock = mp.Manager().Lock()
    mpp = cfg.PARAMS['mp_processes']
    _mp_pool = mp.Pool(mpp, initializer=_init_pool_globals,
                       initargs=(cfg_contents, global_lock))
    return _mp_pool
Example #22
Source File: test_pickle_dataset.py From chainer with MIT License
def test_after_fork(self):
    writer = datasets.PickleDatasetWriter(self.io)
    writer.write(1)
    writer.flush()

    reader = ReaderMock(self.io)
    # Assign to avoid destruction of the instance
    # before creating a child process
    dataset = datasets.PickleDataset(reader)

    assert reader.n_hook_called == 0
    ctx = multiprocessing.get_context('fork')
    p = ctx.Process()
    p.start()
    p.join()
    assert reader.n_hook_called == 1
    assert reader.last_caller_pid == p.pid

    # Touch to suppress "unused variable" warning
    del dataset
Example #23
Source File: test_column.py From hangar-py with Apache License 2.0
def test_external_multi_process_pool_fails_on_write_enabled_checkout(self, repo, backend):
    from multiprocessing import get_context

    co = repo.checkout(write=True)
    co.add_ndarray_column(name='writtenaset', shape=(20, 20), dtype=np.float32, backend=backend)
    with co.columns['writtenaset'] as d:
        for sIdx in range(20):
            d[sIdx] = np.random.randn(20, 20).astype(np.float32) * 100
        assert d.backend == backend
    co.commit(f'master commit number 1')
    co.close()

    nco = repo.checkout(write=True)
    ds = nco.columns['writtenaset']
    keys = [i for i in range(20)]
    with pytest.raises(PermissionError):
        with get_context().Pool(2) as P:
            cmtData = P.map(ds.get, keys)
    nco.close()
Example #24
Source File: gpu_utils.py From torchgpipe with Apache License 2.0
def track_gpu_utils(device_ids: List[int],
                    interval: float = 0.05,
                    ) -> Generator[List[float], None, None]:
    # Spawn a worker.
    ctx = mp.get_context('spawn')
    conn, conn_worker = ctx.Pipe(duplex=True)
    p = ctx.Process(target=_worker, args=(device_ids, interval, conn_worker))
    p.start()
    conn.recv()

    # GPU% will be filled to this.
    gpu_utils: List[float] = []
    yield gpu_utils

    # Stop the worker and receive the timeline.
    conn.send(None)
    gpu_timeline = conn.recv()
    p.join()

    # Fill the GPU%.
    if gpu_timeline:
        gpu_utils.extend(sum(t) / len(t) / 100 for t in zip(*gpu_timeline))
    else:
        gpu_utils.extend(0.0 for _ in device_ids)
Example #25
Source File: dag_processing.py From airflow with Apache License 2.0
def start(self):
    """
    Launch DagFileProcessorManager processor and start DAG parsing loop in manager.
    """
    mp_start_method = self._get_multiprocessing_start_method()
    context = multiprocessing.get_context(mp_start_method)

    self._parent_signal_conn, child_signal_conn = context.Pipe()
    self._process = context.Process(
        target=type(self)._run_processor_manager,
        args=(
            self._dag_directory,
            self._max_runs,
            # getattr prevents error while pickling an instance method.
            getattr(self, "_processor_factory"),
            self._processor_timeout,
            child_signal_conn,
            self._dag_ids,
            self._pickle_dags,
            self._async_mode
        )
    )
    self._process.start()
    self.log.info("Launched DagFileProcessorManager with pid: %s", self._process.pid)
Example #26
Source File: test_server.py From aiotools with MIT License
def test_server_worker_init_error_multi(
        mocker, restore_signal, use_threading, start_method):
    mpctx = mp.get_context(start_method)
    mocker.patch('aiotools.server.mp', mpctx)

    started = mpctx.Value('i', 0)
    terminated = mpctx.Value('i', 0)
    log_queue = mpctx.Queue()

    aiotools.start_server(myserver_worker_init_error_multi,
                          num_workers=3,
                          use_threading=use_threading,
                          args=(started, terminated, log_queue))
    # it should automatically shut down!

    # reset logging
    logging.shutdown()

    assert started.value >= 1
    # non-errored workers should have been terminated normally.
    assert terminated.value >= 1
    # there is one worker remaining -- which is "cancelled"!
    # just ensure that all workers have terminated now.
    assert len(mpctx.active_children()) == 0

    assert not log_queue.empty()
    has_error_log = False
    while not log_queue.empty():
        rec = log_queue.get()
        if rec.levelname == 'ERROR':
            has_error_log = True
            assert 'initialization' in rec.message
            # exception info is logged to the console,
            # but we cannot access it here because exceptions
            # are not picklable.
            assert rec.exc_info is None
    assert has_error_log
Example #27
Source File: test_server.py From aiotools with MIT License
def test_server_worker_init_error(
        mocker, restore_signal, use_threading, start_method):
    mpctx = mp.get_context(start_method)
    mocker.patch('aiotools.server.mp', mpctx)

    started = mpctx.Value('i', 0)
    terminated = mpctx.Value('i', 0)
    log_queue = mpctx.Queue()

    aiotools.start_server(myserver_worker_init_error,
                          num_workers=3,
                          use_threading=use_threading,
                          args=(started, terminated, log_queue))
    # it should automatically shut down!

    # reset logging
    logging.shutdown()

    assert started.value == 3
    # workers that did not raise errors have already started,
    # and they should have terminated normally
    # when the erroneous worker interrupted the main loop.
    assert terminated.value == 2
    assert len(mp.active_children()) == 0

    assert not log_queue.empty()
    has_error_log = False
    while not log_queue.empty():
        rec = log_queue.get()
        if rec.levelname == 'ERROR':
            has_error_log = True
            assert 'initialization' in rec.message
            # exception info is logged to the console,
            # but we cannot access it here because exceptions
            # are not picklable.
            assert rec.exc_info is None
    assert has_error_log
Example #28
Source File: _test_multiprocessing.py From Fluid-Designer with GNU General Public License v3.0
def test_context(self):
    for method in ('fork', 'spawn', 'forkserver'):
        try:
            ctx = multiprocessing.get_context(method)
        except ValueError:
            continue
        self.assertEqual(ctx.get_start_method(), method)
        self.assertIs(ctx.get_context(), ctx)
        self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
        self.assertRaises(ValueError, ctx.set_start_method, None)
        self.check_context(ctx)
Example #29
Source File: __init__.py From dagster with Apache License 2.0
def get_multiprocessing_context():
    # Set execution method to spawn, to avoid fork and to have same behavior between platforms.
    # Older versions are stuck with whatever is the default on their platform (fork on
    # Unix-like and spawn on windows)
    #
    # https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context
    if hasattr(multiprocessing, 'get_context'):
        return multiprocessing.get_context('spawn')
    else:
        return multiprocessing
Example #30
Source File: multiprocessing.py From kitty with GNU General Public License v3.0
def test_spawn() -> None:
    monkey_patch_multiprocessing()
    try:
        from multiprocessing import get_context
        ctx = get_context('spawn')
        q = ctx.Queue()
        p = ctx.Process(target=q.put, args=('hello',))
        p.start()
        x = q.get(timeout=2)
        assert x == 'hello'
        p.join()
    finally:
        unmonkey_patch_multiprocessing()