Python multiprocessing.set_start_method() Examples
The following are 26 code examples of multiprocessing.set_start_method(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module multiprocessing, or try the search function.
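Before the examples, a minimal, self-contained sketch of typical usage (the worker function here is illustrative, not taken from any project below): set_start_method() should be called at most once per program, before any processes or pools are created, and under the if __name__ == '__main__' guard, because with 'spawn' or 'forkserver' the entry module is re-imported in each child.

import multiprocessing as mp

def work(x):
    return x * x  # illustrative worker

if __name__ == '__main__':
    # must run before any Process or Pool is created;
    # a second call raises RuntimeError unless force=True is passed
    mp.set_start_method('spawn')
    with mp.Pool(2) as pool:
        print(pool.map(work, range(4)))  # [0, 1, 4, 9]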
Example #1
Source File: main.py From conditional-motion-propagation with MIT License | 6 votes |
def main(args):
    with open(args.config) as f:
        if version.parse(yaml.__version__) >= version.parse("5.1"):
            config = yaml.load(f, Loader=yaml.FullLoader)
        else:
            config = yaml.load(f)
    for k, v in config.items():
        setattr(args, k, v)

    # exp path
    if not hasattr(args, 'exp_path'):
        args.exp_path = os.path.dirname(args.config)

    # dist init
    if mp.get_start_method(allow_none=True) != 'spawn':
        mp.set_start_method('spawn', force=True)
    dist_init(args.launcher, backend='nccl')

    # train
    trainer = Trainer(args)
    trainer.run()
Example #2
Source File: main.py From CrossHair with MIT License | 6 votes |
def watch(args: argparse.Namespace, options: AnalysisOptions) -> int:
    # Avoid fork() because we've already imported the code we're watching:
    multiprocessing.set_start_method('spawn')

    if not args.files:
        print('No files or directories given to watch', file=sys.stderr)
        return 1
    try:
        with StateUpdater() as state_updater:
            watcher = Watcher(options, args.files, state_updater)
            watcher.check_changed()
            watcher.run_watch_loop()
    except KeyboardInterrupt:
        watcher._pool.terminate()
        print()
        print('I enjoyed working with you today!')
    return 0
Example #3
Source File: utils.py From hfsoftmax with MIT License | 6 votes |
def init_processes(addr, port, gpu_num, backend):
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    rank = comm.Get_rank()
    print(rank, size)
    if mp.get_start_method(allow_none=True) != 'spawn':
        mp.set_start_method('spawn')
    torch.cuda.set_device(rank % gpu_num)
    os.environ['MASTER_ADDR'] = addr
    os.environ['MASTER_PORT'] = port
    os.environ['WORLD_SIZE'] = str(size)
    os.environ['RANK'] = str(rank)
    dist.init_process_group(backend)
    print('initialize {} successfully (rank {})'.format(backend, rank))
    return rank, size
Example #4
Source File: _test_multiprocessing.py From ironpython3 with Apache License 2.0 | 6 votes |
def test_set_get(self):
    multiprocessing.set_forkserver_preload(PRELOAD)
    count = 0
    old_method = multiprocessing.get_start_method()
    try:
        for method in ('fork', 'spawn', 'forkserver'):
            try:
                multiprocessing.set_start_method(method, force=True)
            except ValueError:
                continue
            self.assertEqual(multiprocessing.get_start_method(), method)
            ctx = multiprocessing.get_context()
            self.assertEqual(ctx.get_start_method(), method)
            self.assertTrue(type(ctx).__name__.lower().startswith(method))
            self.assertTrue(
                ctx.Process.__name__.lower().startswith(method))
            self.check_context(multiprocessing)
            count += 1
    finally:
        multiprocessing.set_start_method(old_method, force=True)
    self.assertGreaterEqual(count, 1)
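A note on the force=True flag this test leans on: once a start method has been set (explicitly, or implicitly by creating a process), a second plain call raises RuntimeError; force=True overrides the existing choice. A minimal sketch, assuming a Unix-like platform where both 'spawn' and 'fork' are available:

import multiprocessing as mp

if __name__ == '__main__':
    mp.set_start_method('spawn')
    try:
        mp.set_start_method('fork')          # plain second call
    except RuntimeError:
        print('context has already been set')
    mp.set_start_method('fork', force=True)  # explicit override is allowed
    print(mp.get_start_method())             # fork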
Example #5
Source File: _test_multiprocessing.py From Fluid-Designer with GNU General Public License v3.0 | 6 votes |
def test_set_get(self):
    multiprocessing.set_forkserver_preload(PRELOAD)
    count = 0
    old_method = multiprocessing.get_start_method()
    try:
        for method in ('fork', 'spawn', 'forkserver'):
            try:
                multiprocessing.set_start_method(method, force=True)
            except ValueError:
                continue
            self.assertEqual(multiprocessing.get_start_method(), method)
            ctx = multiprocessing.get_context()
            self.assertEqual(ctx.get_start_method(), method)
            self.assertTrue(type(ctx).__name__.lower().startswith(method))
            self.assertTrue(
                ctx.Process.__name__.lower().startswith(method))
            self.check_context(multiprocessing)
            count += 1
    finally:
        multiprocessing.set_start_method(old_method, force=True)
    self.assertGreaterEqual(count, 1)
Example #6
Source File: _test_multiprocessing.py From Project-New-Reign---Nemesis-Main with GNU General Public License v3.0 | 6 votes |
def test_set_get(self):
    multiprocessing.set_forkserver_preload(PRELOAD)
    count = 0
    old_method = multiprocessing.get_start_method()
    try:
        for method in ('fork', 'spawn', 'forkserver'):
            try:
                multiprocessing.set_start_method(method, force=True)
            except ValueError:
                continue
            self.assertEqual(multiprocessing.get_start_method(), method)
            ctx = multiprocessing.get_context()
            self.assertEqual(ctx.get_start_method(), method)
            self.assertTrue(type(ctx).__name__.lower().startswith(method))
            self.assertTrue(
                ctx.Process.__name__.lower().startswith(method))
            self.check_context(multiprocessing)
            count += 1
    finally:
        multiprocessing.set_start_method(old_method, force=True)
    self.assertGreaterEqual(count, 1)
Example #7
Source File: main.py From Pythonic with GNU General Public License v3.0 | 5 votes |
def __init__(self, app):
    super(MainWindow, self).__init__()
    self.app = app
    self.threadpool = QThreadPool()
    self.initUI()
    self.setAttribute(Qt.WA_DeleteOnClose)
    mp.set_start_method('spawn')
Example #8
Source File: data_utils_test.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes |
def use_spawn(func):
    """Decorator to test both Unix (fork) and Windows (spawn)"""
    @six.wraps(func)
    def wrapper(*args, **kwargs):
        out = func(*args, **kwargs)
        if sys.version_info > (3, 4):
            mp.set_start_method('spawn', force=True)
            func(*args, **kwargs)
            mp.set_start_method('fork', force=True)
        return out
    return wrapper
Example #9
Source File: running.py From pytracking with GNU General Public License v3.0 | 5 votes |
def run_dataset(dataset, trackers, debug=False, threads=0, visdom_info=None):
    """Runs a list of trackers on a dataset.
    args:
        dataset: List of Sequence instances, forming a dataset.
        trackers: List of Tracker instances.
        debug: Debug level.
        threads: Number of threads to use (default 0).
        visdom_info: Dict containing information about the server for visdom
    """
    multiprocessing.set_start_method('spawn', force=True)

    print('Evaluating {:4d} trackers on {:5d} sequences'.format(len(trackers), len(dataset)))

    visdom_info = {} if visdom_info is None else visdom_info

    if threads == 0:
        mode = 'sequential'
    else:
        mode = 'parallel'

    if mode == 'sequential':
        for seq in dataset:
            for tracker_info in trackers:
                run_sequence(seq, tracker_info, debug=debug, visdom_info=visdom_info)
    elif mode == 'parallel':
        param_list = [(seq, tracker_info, debug, visdom_info)
                      for seq, tracker_info in product(dataset, trackers)]
        with multiprocessing.Pool(processes=threads) as pool:
            pool.starmap(run_sequence, param_list)
    print('Done')
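Because the example above forces 'spawn', run_sequence and everything passed through pool.starmap must be importable and picklable in a fresh interpreter — which is why run_sequence is a module-level function. A minimal sketch of the same starmap pattern under that constraint (run_pair is an illustrative stand-in, not part of pytracking):

import multiprocessing as mp
from itertools import product

def run_pair(a, b):  # must live at module level to be picklable under spawn
    return a * b

if __name__ == '__main__':
    mp.set_start_method('spawn', force=True)
    param_list = list(product([1, 2], [10, 20]))
    with mp.Pool(processes=2) as pool:
        print(pool.starmap(run_pair, param_list))  # [10, 20, 20, 40]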
Example #10
Source File: _test_multiprocessing.py From Project-New-Reign---Nemesis-Main with GNU General Public License v3.0 | 5 votes |
def test_context(self):
    for method in ('fork', 'spawn', 'forkserver'):
        try:
            ctx = multiprocessing.get_context(method)
        except ValueError:
            continue
        self.assertEqual(ctx.get_start_method(), method)
        self.assertIs(ctx.get_context(), ctx)
        self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
        self.assertRaises(ValueError, ctx.set_start_method, None)
        self.check_context(ctx)
Example #11
Source File: _test_multiprocessing.py From Project-New-Reign---Nemesis-Main with GNU General Public License v3.0 | 5 votes |
def test_semaphore_tracker(self):
    import subprocess
    cmd = '''if 1:
        import multiprocessing as mp, time, os
        mp.set_start_method("spawn")
        lock1 = mp.Lock()
        lock2 = mp.Lock()
        os.write(%d, lock1._semlock.name.encode("ascii") + b"\\n")
        os.write(%d, lock2._semlock.name.encode("ascii") + b"\\n")
        time.sleep(10)
    '''
    r, w = os.pipe()
    p = subprocess.Popen([sys.executable, '-c', cmd % (w, w)],
                         pass_fds=[w],
                         stderr=subprocess.PIPE)
    os.close(w)
    with open(r, 'rb', closefd=True) as f:
        name1 = f.readline().rstrip().decode('ascii')
        name2 = f.readline().rstrip().decode('ascii')
    _multiprocessing.sem_unlink(name1)
    p.terminate()
    p.wait()
    time.sleep(2.0)
    with self.assertRaises(OSError) as ctx:
        _multiprocessing.sem_unlink(name2)
    # docs say it should be ENOENT, but OSX seems to give EINVAL
    self.assertIn(ctx.exception.errno, (errno.ENOENT, errno.EINVAL))
    err = p.stderr.read().decode('utf-8')
    p.stderr.close()
    expected = 'semaphore_tracker: There appear to be 2 leaked semaphores'
    self.assertRegex(err, expected)
    self.assertRegex(err, 'semaphore_tracker: %r: \[Errno' % name1)
Example #12
Source File: main_daemon.py From Pythonic with GNU General Public License v3.0 | 5 votes |
def __init__(self, app):
    super(MainWorker, self).__init__()
    self.app = app
    mp.set_start_method('spawn')
    self.stdinReader = stdinReader()
    self.stdinReader.print_procs.connect(self.printProcessList)
    self.stdinReader.quit_app.connect(self.exitApp)
    self.update_logdate.connect(self.stdinReader.updateLogDate)
    self.grd_ops_arr = []
    self.fd = sys.stdin.fileno()
    if os.isatty(sys.stdin.fileno()):
        self.orig_tty_settings = termios.tcgetattr(self.fd)

    self.logger = logging.getLogger()
    self.logger.setLevel(self.log_level)
    self.log_date = datetime.datetime.now()

    log_date_str = self.log_date.strftime('%Y_%m_%d')
    month = self.log_date.strftime('%b')
    year = self.log_date.strftime('%Y')
    home_dict = str(Path.home())
    file_path = '{}/PythonicDaemon_{}/{}/log_{}.txt'.format(home_dict, year, month, log_date_str)
    self.ensure_file_path(file_path)

    file_handler = logging.FileHandler(file_path)
    file_handler.setLevel(self.log_level)
    file_handler.setFormatter(self.formatter)

    self.logger.addHandler(file_handler)
    self.update_logdate.emit(log_date_str)  # forward log_date_str to instance of stdinReader

    logging.debug('MainWorker::__init__() called')
Example #13
Source File: whdutil.py From cc-utils with Apache License 2.0 | 5 votes |
def start_whd(
    cfg_set_name: str,
    port: int = 5000,
    production: bool = False,
    workers: int = 4,
):
    import whd.server

    cfg_factory = ci.util.ctx().cfg_factory()
    cfg_set = cfg_factory.cfg_set(cfg_set_name)
    webhook_dispatcher_cfg = cfg_set.webhook_dispatcher()

    app = whd.server.webhook_dispatcher_app(
        cfg_set=cfg_set,
        whd_cfg=webhook_dispatcher_cfg
    )

    # allow external connections
    any_interface = '0.0.0.0'

    if production:
        import bjoern
        multiprocessing.set_start_method('fork')

        def serve():
            bjoern.run(app, any_interface, port, reuse_port=True)

        for _ in range(workers - 1):
            p = multiprocessing.Process(target=serve)
            p.start()
        serve()
    else:
        import werkzeug.serving
        werkzeug.serving.run_simple(
            hostname=any_interface,
            port=port,
            application=app,
            use_reloader=True,
            use_debugger=True,
        )
Example #14
Source File: pghoard.py From pghoard with Apache License 2.0 | 5 votes |
def main(args=None):
    if args is None:
        args = sys.argv[1:]

    parser = argparse.ArgumentParser(
        prog="pghoard",
        description="postgresql automatic backup daemon")
    parser.add_argument("-D", "--debug", help="Enable debug logging", action="store_true")
    parser.add_argument("--version", action="version", help="show program version",
                        version=version.__version__)
    parser.add_argument("-s", "--short-log", help="use non-verbose logging format", action="store_true")
    parser.add_argument("--config", help="configuration file path", default=os.environ.get("PGHOARD_CONFIG"))
    parser.add_argument("config_file", help="configuration file path (for backward compatibility)", nargs="?")
    arg = parser.parse_args(args)

    config_path = arg.config or arg.config_file
    if not config_path:
        print("pghoard: config file path must be given with --config or via env PGHOARD_CONFIG")
        return 1

    if not os.path.exists(config_path):
        print("pghoard: {!r} doesn't exist".format(config_path))
        return 1

    logutil.configure_logging(short_log=arg.short_log, level=logging.DEBUG if arg.debug else logging.INFO)

    multiprocessing.set_start_method("forkserver")

    try:
        pghoard = PGHoard(config_path)
    except InvalidConfigurationError as ex:
        print("pghoard: failed to load config {}: {}".format(config_path, ex))
        return 1

    return pghoard.run()
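pghoard picks 'forkserver', which forks workers from a small dedicated server process instead of from the full daemon. If every worker needs the same expensive modules, they can be imported once in the server via set_forkserver_preload(); a minimal sketch (Unix-only, and the preloaded module name is illustrative):

import multiprocessing as mp

def work(x):
    return x + 1  # illustrative worker

if __name__ == '__main__':
    mp.set_start_method('forkserver')    # not available on Windows
    mp.set_forkserver_preload(['json'])  # imported once in the fork server
    with mp.Pool(2) as pool:
        print(pool.map(work, [1, 2, 3]))  # [2, 3, 4]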
Example #15
Source File: QtTestCase.py From urh with GNU General Public License v3.0 | 5 votes |
def setUpClass(cls):
    import multiprocessing as mp
    try:
        mp.set_start_method("spawn")
    except RuntimeError:
        pass
    assert mp.get_start_method() == "spawn"

    write_settings()
    cls.app = QApplication([cls.__name__])
Example #16
Source File: a2c.py From gymexperiments with MIT License | 5 votes |
def run(args):
    # create dummy environment to be able to create model
    env = gym.make(args.environment)
    assert isinstance(env.observation_space, Box)
    assert isinstance(env.action_space, Discrete)
    print("Observation space:", env.observation_space)
    print("Action space:", env.action_space)

    # create main model
    model = create_model(env, args)
    model.summary()
    env.close()

    # for better compatibility with Theano and Tensorflow
    multiprocessing.set_start_method('spawn')

    # create shared buffer for sharing weights
    blob = pickle.dumps(model.get_weights(), pickle.HIGHEST_PROTOCOL)
    shared_buffer = Array('c', len(blob))
    shared_buffer.raw = blob

    # force runner processes to use cpu
    os.environ["CUDA_VISIBLE_DEVICES"] = ""

    # create fifos and threads for all runners
    fifos = []
    for i in range(args.num_runners):
        fifo = Queue(args.queue_length)
        fifos.append(fifo)
        process = Process(target=runner, args=(shared_buffer, fifo, args))
        process.start()

    # start trainer in main thread
    trainer(model, fifos, shared_buffer, args)
Example #17
Source File: a2c_atari.py From gymexperiments with MIT License | 5 votes |
def run(args):
    # create dummy environment to be able to create model
    env = create_env(args.env_id)
    assert isinstance(env.observation_space, Box)
    assert isinstance(env.action_space, Discrete)
    print("Observation space: " + str(env.observation_space))
    print("Action space: " + str(env.action_space))

    # create main model
    model = create_model(env, batch_size=args.num_runners, num_steps=args.num_local_steps)
    model.summary()
    env.close()

    # for better compatibility with Theano and Tensorflow
    multiprocessing.set_start_method('spawn')

    # create shared buffer for sharing weights
    blob = pickle.dumps(model.get_weights(), pickle.HIGHEST_PROTOCOL)
    shared_buffer = Array('c', len(blob))
    shared_buffer.raw = blob

    # force runner processes to use cpu, child processes inherit environment variables
    os.environ["CUDA_VISIBLE_DEVICES"] = ""

    # create fifos and processes for all runners
    fifos = []
    for i in range(args.num_runners):
        fifo = Queue(args.queue_length)
        fifos.append(fifo)
        process = Process(target=runner,
                          args=(shared_buffer, fifo, args.num_timesteps // args.num_runners,
                                args.monitor and i == 0, args))
        process.start()

    # start trainer in main thread
    trainer(model, fifos, shared_buffer, args)

    print("All done")
Example #18
Source File: worker.py From steemdata-mongo with MIT License | 5 votes |
def run_multi():
    multiprocessing.set_start_method('spawn')
    workers = [
        'scrape_all_users',
        'scrape_operations',
    ]
    with Pool(len(workers)) as p:
        p.map(run, workers)
Example #19
Source File: base_options.py From deepsaber with GNU General Public License v3.0 | 5 votes |
def parse(self):
    opt = self.gather_options()
    opt.is_train = self.is_train  # train or test

    # check options:
    if opt.loss_weight:
        opt.loss_weight = [float(w) for w in opt.loss_weight.split(',')]
        if len(opt.loss_weight) != opt.num_class:
            raise ValueError("Given {} weights, when {} classes are expected".format(
                len(opt.loss_weight), opt.num_class))
        else:
            opt.loss_weight = torch.tensor(opt.loss_weight)

    self.print_options(opt)

    # set gpu ids
    str_ids = opt.gpu_ids.split(',')
    opt.gpu_ids = []
    for str_id in str_ids:
        id = int(str_id)
        if id >= 0:
            opt.gpu_ids.append(id)
    if len(opt.gpu_ids) > 0:
        torch.cuda.set_device(opt.gpu_ids[0])

    # set multiprocessing
    if opt.workers > 0 and not opt.fork_processes:
        mp.set_start_method('spawn', force=True)

    self.opt = opt
    return self.opt
Example #20
Source File: layer_manager.py From pyrealtime with MIT License | 5 votes |
def __init__(self):
    # multiprocessing.set_start_method('spawn')
    self.layers = {}
    self.stop_event = multiprocessing.get_context('spawn').Event()
    self.pause_event = multiprocessing.get_context('spawn').Event()
    self.input_prompts = multiprocessing.get_context('spawn').Queue()
    self.show_monitor = False
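The example above (note the commented-out set_start_method() call) sidesteps the global setting entirely: multiprocessing.get_context('spawn') returns a context object whose Process, Event, and Queue use that start method without mutating the process-wide default, which is useful in library code that must not fight the host application over global state. A minimal sketch (names illustrative):

import multiprocessing as mp

def work(q):
    q.put('hello from child')  # illustrative worker

if __name__ == '__main__':
    ctx = mp.get_context('spawn')  # local context; global default left untouched
    q = ctx.Queue()
    p = ctx.Process(target=work, args=(q,))
    p.start()
    print(q.get())
    p.join()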
Example #21
Source File: start_server_and_client.py From reversi_ai with MIT License | 5 votes |
def run():
    # ask user for difficulty
    q_app = QtWidgets.QApplication([])
    q_widget = QtWidgets.QWidget()
    dialog = QtWidgets.QMessageBox(q_widget)
    dialog.addButton('Easy', QtWidgets.QMessageBox.ActionRole)
    dialog.addButton('Medium', QtWidgets.QMessageBox.ActionRole)
    dialog.addButton('Hard', QtWidgets.QMessageBox.ActionRole)
    dialog.addButton('Impossible', QtWidgets.QMessageBox.ActionRole)
    dialog.setText('Choose difficulty:')
    ret = dialog.exec_()

    easy, medium, hard, impossible = range(4)
    sim_time = None
    if ret == easy:
        sim_time = 1
    elif ret == medium:
        sim_time = 3
    elif ret == hard:
        sim_time = 5
    elif ret == impossible:
        sim_time = 8

    mp.set_start_method('spawn')
    gui_process = mp.Process(target=start_client.main)
    gui_process.start()

    run_game.main(BlackAgent='human', WhiteAgent='monte_carlo', sim_time=sim_time, gui=True)
Example #22
Source File: HogwildSparkModel.py From sparkflow with MIT License | 5 votes |
def start_server(self, tg, optimizer, port):
    """
    Starts the server with a copy of the argument for weird tensorflow multiprocessing issues
    """
    try:
        multiprocessing.set_start_method('spawn')
    except Exception:
        pass
    self.server = Process(target=self.start_service, args=(tg, optimizer, port))
    self.server.daemon = True
    self.server.start()
Example #23
Source File: utils.py From bob with GNU General Public License v3.0 | 5 votes |
def __init__(self):
    import asyncio
    import multiprocessing
    import signal
    import concurrent.futures

    if sys.platform == 'win32':
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
        multiprocessing.set_start_method('spawn')
        executor = concurrent.futures.ProcessPoolExecutor()
    else:
        # The ProcessPoolExecutor is barely usable for our interactive use
        # case. On SIGINT any busy executor should stop. The only way this
        # does not explode is that we ignore SIGINT before spawning the process
        # pool and re-enable SIGINT in every executor. In the main process we
        # have to ignore BrokenProcessPool errors as we will likely hit them.
        # To "prime" the process pool a dummy workload must be executed because
        # the processes are spawned lazily.
        loop = asyncio.get_event_loop()
        origSigInt = signal.getsignal(signal.SIGINT)
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        # fork early before process gets big
        if sys.platform == 'msys':
            multiprocessing.set_start_method('fork')
        else:
            multiprocessing.set_start_method('forkserver')
        executor = concurrent.futures.ProcessPoolExecutor()
        executor.submit(dummy).result()
        signal.signal(signal.SIGINT, origSigInt)

    loop.set_default_executor(executor)
    self.__loop = loop
    self.__executor = executor
Example #24
Source File: distributed_utils.py From conditional-motion-propagation with MIT License | 5 votes |
def dist_init(launcher, backend='nccl', **kwargs):
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    if launcher == 'pytorch':
        _init_dist_pytorch(backend, **kwargs)
    elif launcher == 'mpi':
        _init_dist_mpi(backend, **kwargs)
    elif launcher == 'slurm':
        _init_dist_slurm(backend, **kwargs)
    else:
        raise ValueError('Invalid launcher type: {}'.format(launcher))
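The guard above relies on get_start_method(allow_none=True): with allow_none=True the call reports None when no method has been fixed yet, instead of initializing (and thereby locking in) the platform default the way a plain get_start_method() would. A minimal sketch of the same idempotent setup:

import multiprocessing as mp

if __name__ == '__main__':
    # the allow_none=True probe does not itself lock in a start method
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    print(mp.get_start_method())  # spawn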
Example #25
Source File: main.py From clickhouse-mysql-data-reader with MIT License | 5 votes |
def __init__(self):
    # append 'converter' folder into sys.path
    # this helps to load custom modules
    converter_folder = os.path.dirname(os.path.realpath(__file__)) + '/converter'
    if converter_folder not in sys.path:
        sys.path.insert(0, converter_folder)

    # parse CLI options
    self.config = Config()

    # first action after config available - setup requested logging level
    logging.basicConfig(
        filename=self.config.log_file(),
        level=self.config.log_level(),
        format='%(asctime)s/%(created)f:%(levelname)s:%(message)s'
    )

    # and call parent
    super().__init__(pidfile=self.config.pid_file())

    # some verbosity
    logging.info('Starting')
    logging.debug(self.config)
    logging.info("sys.path")
    logging.info(pprint.pformat(sys.path))

    # mp.set_start_method('forkserver')
Example #26
Source File: conftest.py From chainer with MIT License | 5 votes |
def scope_session():
    if int(platform.python_version_tuple()[0]) >= 3:
        multiprocessing.set_start_method('forkserver')
        p = multiprocessing.Process(target=dummy_func)
        p.start()
        p.join()
    yield