Python multiprocessing.managers.SyncManager() Examples

The following are 22 code examples of multiprocessing.managers.SyncManager(), drawn from open source projects; the source project and file are noted above each example.
Example #1
Source File: distributed_factor.py From code-for-blog with The Unlicense | 5 votes |
def make_client_manager(ip, port, authkey):
    class ServerQueueManager(SyncManager):
        pass

    ServerQueueManager.register('get_job_q')
    ServerQueueManager.register('get_result_q')

    manager = ServerQueueManager(address=(ip, port), authkey=authkey)
    manager.connect()

    print('Client connected to %s:%s' % (ip, port))
    return manager
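For context, a minimal usage sketch of the client side once it is connected; the address, port, authkey, and job payload below are placeholders, not values from the original project.

    # Illustrative only: placeholder address, key, and job handling.
    manager = make_client_manager('192.168.1.100', 5000, b'secret')
    job_q = manager.get_job_q()        # proxy for the server's job queue
    result_q = manager.get_result_q()  # proxy for the server's result queue

    n = job_q.get()            # pull a job from the server
    result_q.put((n, n * n))   # push a (hypothetical) result back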
Example #2
Source File: __init__.py From ironpython2 with Apache License 2.0 | 5 votes |
def Manager():
    '''
    Returns a manager associated with a running server process

    The managers methods such as `Lock()`, `Condition()` and `Queue()`
    can be used to create shared objects.
    '''
    from multiprocessing.managers import SyncManager
    m = SyncManager()
    m.start()
    return m
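A brief usage sketch for the wrapper above; everything called on m here is a standard SyncManager method (Queue, Lock, dict, shutdown), so this is generic illustration rather than project code.

    m = Manager()
    shared_queue = m.Queue()  # proxy to a queue living in the manager's server process
    shared_lock = m.Lock()    # proxy to a shared lock
    shared_dict = m.dict()    # proxy to a shared dict

    with shared_lock:
        shared_dict['status'] = 'ready'

    m.shutdown()  # stop the manager's server process when finished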
Example #3
Source File: worker.py From get-weather-data with MIT License | 5 votes |
def make_worker_manager(ip, port, authkey):
    """
    Create a manager for a client. This manager connects to a server
    on the given address and exposes the get_job_q and get_result_q
    methods for accessing the shared queues from the server.
    Return a manager object.
    """
    class ServerQueueManager(SyncManager):
        pass

    ServerQueueManager.register('get_job_q')
    ServerQueueManager.register('get_result_q')

    manager = ServerQueueManager(address=(ip, port), authkey=authkey)
    manager.connect()

    logging.info("Worker connected to {:s}:{:d}".format(ip, port))
    return manager
Example #4
Source File: manticore.py From manticore with GNU Affero General Public License v3.0 | 5 votes |
def _manticore_multiprocessing(self):
    def raise_signal():
        signal.signal(signal.SIGINT, signal.SIG_IGN)

    self._worker_type = WorkerProcess
    # This is the global manager that will handle all shared memory access
    # See. https://docs.python.org/3/library/multiprocessing.html#multiprocessing.managers.SyncManager
    self._manager = SyncManager()
    self._manager.start(raise_signal)
    # The main manticore lock. Acquire this for accessing shared objects
    # THINKME: we use the same lock to access states lists and shared contexts
    self._lock = self._manager.Condition()
    self._killed = self._manager.Value(bool, False)
    self._running = self._manager.Value(bool, False)
    # List of state ids of States on storage
    self._ready_states = self._manager.list()
    self._terminated_states = self._manager.list()
    self._busy_states = self._manager.list()
    self._killed_states = self._manager.list()
    self._shared_context = self._manager.dict()
    self._context_value_types = {list: self._manager.list, dict: self._manager.dict}

# Decorators added first for convenience.
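A rough, hypothetical sketch of how a worker might touch this manager-backed state; the attribute names mirror the snippet above, but the pattern itself is illustrative and not taken from Manticore.

    # Illustrative pattern only (m is an object set up by _manticore_multiprocessing).
    state_id = 42  # hypothetical state identifier
    with m._lock:                       # the same Condition guards lists and contexts
        m._busy_states.remove(state_id)
        m._ready_states.append(state_id)
        m._lock.notify_all()            # wake workers waiting on the condition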
Example #5
Source File: __init__.py From jawfish with MIT License | 5 votes |
def Manager():
    '''
    Returns a manager associated with a running server process

    The managers methods such as `Lock()`, `Condition()` and `Queue()`
    can be used to create shared objects.
    '''
    from multiprocessing.managers import SyncManager
    m = SyncManager()
    m.start()
    return m

#brython fix me
#def Pipe(duplex=True):
#    '''
#    Returns two connection object connected by a pipe
#    '''
#    from multiprocessing.connection import Pipe
#    return Pipe(duplex)
Example #6
Source File: brain_multiprocessing.py From pySINDy with MIT License | 4 votes |
def _multiprocessing_transform():
    module = astroid.parse(
        """
    from multiprocessing.managers import SyncManager
    def Manager():
        return SyncManager()
    """
    )
    if not PY34:
        return module

    # On Python 3.4, multiprocessing uses a getattr lookup inside contexts,
    # in order to get the attributes they need. Since it's extremely
    # dynamic, we use this approach to fake it.
    node = astroid.parse(
        """
    from multiprocessing.context import DefaultContext, BaseContext
    default = DefaultContext()
    base = BaseContext()
    """
    )
    try:
        context = next(node["default"].infer())
        base = next(node["base"].infer())
    except exceptions.InferenceError:
        return module

    for node in (context, base):
        for key, value in node.locals.items():
            if key.startswith("_"):
                continue

            value = value[0]
            if isinstance(value, astroid.FunctionDef):
                # We need to rebound this, since otherwise
                # it will have an extra argument (self).
                value = astroid.BoundMethod(value, node)
            module[key] = value
    return module
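For context, astroid brain modules like this one typically register their transforms at import time; below is a sketch of the usual registration, assuming the module also defines the _multiprocessing_managers_transform shown in Examples #19 to #22.

    import astroid

    astroid.register_module_extender(
        astroid.MANAGER, "multiprocessing.managers", _multiprocessing_managers_transform
    )
    astroid.register_module_extender(
        astroid.MANAGER, "multiprocessing", _multiprocessing_transform
    )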
Example #7
Source File: __init__.py From unity-python with MIT License | 4 votes |
def Manager():
    '''
    Returns a manager associated with a running server process

    The managers methods such as `Lock()`, `Condition()` and `Queue()`
    can be used to create shared objects.
    '''
    from multiprocessing.managers import SyncManager
    m = SyncManager()
    m.start()
    return m
Example #8
Source File: __init__.py From PokemonGo-DesktopMap with MIT License | 4 votes |
def Manager():
    '''
    Returns a manager associated with a running server process

    The managers methods such as `Lock()`, `Condition()` and `Queue()`
    can be used to create shared objects.
    '''
    from multiprocessing.managers import SyncManager
    m = SyncManager()
    m.start()
    return m
Example #9
Source File: distributed_factor.py From code-for-blog with The Unlicense | 4 votes |
def make_server_manager(port, authkey):
    job_q = queue.Queue()
    result_q = queue.Queue()

    class JobQueueManager(SyncManager):
        pass

    JobQueueManager.register('get_job_q', callable=lambda: job_q)
    JobQueueManager.register('get_result_q', callable=lambda: result_q)

    manager = JobQueueManager(address=('', port), authkey=authkey)
    manager.start()
    print('Server started at port %s' % port)
    return manager
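A hedged sketch of how this server manager pairs with the client manager from Example #1; the port, authkey, and job values are placeholders.

    # Illustrative only: placeholder port, key, and jobs.
    manager = make_server_manager(5000, b'secret')
    job_q = manager.get_job_q()
    result_q = manager.get_result_q()

    for n in (101, 102, 103):  # hypothetical work items
        job_q.put(n)

    results = [result_q.get() for _ in range(3)]  # blocks until clients reply
    manager.shutdown()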
Example #10
Source File: executor.py From bionic with Apache License 2.0 | 4 votes |
def _receive(self):
    while True:
        # If we don't have any listeners, we don't want to be checking
        # the queue. In particular, since this is a daemon thread, it
        # will keep running up until the process exits, and at that time
        # the queue object can become unreliable because it's managed by
        # a SyncManager that depends on a separate process. This listener
        # check makes sure we're only using the queue during an actual
        # Flow.get call.
        # See here for what happens if we don't have this check:
        # https://github.com/square/bionic/issues/161
        self._event_has_listeners.wait()

        if self._queue.empty():
            self._event_queue_is_empty.set()
        else:
            self._event_queue_is_empty.clear()

        try:
            record = self._queue.get(timeout=0.05)
        except queue.Empty:
            # Nothing to receive from the queue.
            continue

        logger = logging.getLogger(record.name)
        try:
            if logger.isEnabledFor(record.levelno):
                logger.handle(record)
        except (BrokenPipeError, EOFError):
            break
        except Exception as e:
            logger = logging.getLogger()
            try:
                logger.warn("exception while logging ", e)
            except (BrokenPipeError, EOFError):
                break
            except Exception:
                traceback.print_exc(file=sys.stderr)
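The loop above is the consuming half of a log-forwarding pattern; the sketch below shows what the producing half in a worker process typically looks like, using the standard-library QueueHandler. It is a generic illustration, not bionic's actual code, and _init_worker_logging is a hypothetical helper name.

    import logging
    from logging.handlers import QueueHandler

    def _init_worker_logging(manager_queue):
        # Forward every record logged in this worker process into the
        # SyncManager-backed queue that _receive() drains on the main process.
        root = logging.getLogger()
        root.setLevel(logging.DEBUG)
        root.addHandler(QueueHandler(manager_queue))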
Example #11
Source File: samplers.py From mici with MIT License | 4 votes |
def _ignore_sigint_manager():
    """Context-managed SyncManager which ignores SIGINT interrupt signals."""
    manager = SyncManager()
    try:
        manager.start(_ignore_sigint_initializer)
        yield manager
    finally:
        manager.shutdown()
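The bare yield suggests this generator is wrapped with contextlib.contextmanager in the mici source, so it would be used as a with-block; a brief sketch (the queue use is illustrative):

    # Assumes _ignore_sigint_manager is decorated with @contextlib.contextmanager.
    with _ignore_sigint_manager() as manager:
        results_queue = manager.Queue()  # shared queue whose server process ignores SIGINT
        # ... hand results_queue to worker processes and collect results ...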
Example #12
Source File: __init__.py From Splunking-Crime with GNU Affero General Public License v3.0 | 4 votes |
def Manager():
    '''
    Returns a manager associated with a running server process

    The managers methods such as `Lock()`, `Condition()` and `Queue()`
    can be used to create shared objects.
    '''
    from multiprocessing.managers import SyncManager
    m = SyncManager()
    m.start()
    return m
Example #13
Source File: local_executor.py From airflow with Apache License 2.0 | 4 votes |
def __init__(self, parallelism: int = PARALLELISM):
    super().__init__(parallelism=parallelism)
    self.manager: Optional[SyncManager] = None
    self.result_queue: Optional['Queue[TaskInstanceStateType]'] = None
    self.workers: List[QueuedLocalWorker] = []
    self.workers_used: int = 0
    self.workers_active: int = 0
    self.impl: Optional[Union['LocalExecutor.UnlimitedParallelism',
                              'LocalExecutor.LimitedParallelism']] = None
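The constructor above only declares the fields; below is a hedged sketch of how they would typically be filled in later (roughly the shape of LocalExecutor.start, though the real Airflow implementation may differ).

    # Illustrative sketch of the executor's start(), not the verbatim Airflow code.
    def start(self) -> None:
        self.manager = SyncManager()
        self.manager.start()                      # spawn the manager's server process
        self.result_queue = self.manager.Queue()  # shared queue for worker results
        self.impl = (LocalExecutor.UnlimitedParallelism(self)
                     if self.parallelism == 0
                     else LocalExecutor.LimitedParallelism(self))
        self.impl.start()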
Example #14
Source File: __init__.py From oss-ftp with MIT License | 4 votes |
def Manager():
    '''
    Returns a manager associated with a running server process

    The managers methods such as `Lock()`, `Condition()` and `Queue()`
    can be used to create shared objects.
    '''
    from multiprocessing.managers import SyncManager
    m = SyncManager()
    m.start()
    return m
Example #15
Source File: __init__.py From BinderFilter with MIT License | 4 votes |
def Manager():
    '''
    Returns a manager associated with a running server process

    The managers methods such as `Lock()`, `Condition()` and `Queue()`
    can be used to create shared objects.
    '''
    from multiprocessing.managers import SyncManager
    m = SyncManager()
    m.start()
    return m
Example #16
Source File: brain_multiprocessing.py From python-netsurv with MIT License | 4 votes |
def _multiprocessing_transform():
    module = astroid.parse(
        """
    from multiprocessing.managers import SyncManager
    def Manager():
        return SyncManager()
    """
    )
    if not PY34:
        return module

    # On Python 3.4, multiprocessing uses a getattr lookup inside contexts,
    # in order to get the attributes they need. Since it's extremely
    # dynamic, we use this approach to fake it.
    node = astroid.parse(
        """
    from multiprocessing.context import DefaultContext, BaseContext
    default = DefaultContext()
    base = BaseContext()
    """
    )
    try:
        context = next(node["default"].infer())
        base = next(node["base"].infer())
    except exceptions.InferenceError:
        return module

    for node in (context, base):
        for key, value in node.locals.items():
            if key.startswith("_"):
                continue

            value = value[0]
            if isinstance(value, astroid.FunctionDef):
                # We need to rebound this, since otherwise
                # it will have an extra argument (self).
                value = astroid.BoundMethod(value, node)
            module[key] = value
    return module
Example #17
Source File: brain_multiprocessing.py From python-netsurv with MIT License | 4 votes |
def _multiprocessing_transform():
    module = astroid.parse(
        """
    from multiprocessing.managers import SyncManager
    def Manager():
        return SyncManager()
    """
    )
    if not PY34:
        return module

    # On Python 3.4, multiprocessing uses a getattr lookup inside contexts,
    # in order to get the attributes they need. Since it's extremely
    # dynamic, we use this approach to fake it.
    node = astroid.parse(
        """
    from multiprocessing.context import DefaultContext, BaseContext
    default = DefaultContext()
    base = BaseContext()
    """
    )
    try:
        context = next(node["default"].infer())
        base = next(node["base"].infer())
    except exceptions.InferenceError:
        return module

    for node in (context, base):
        for key, value in node.locals.items():
            if key.startswith("_"):
                continue

            value = value[0]
            if isinstance(value, astroid.FunctionDef):
                # We need to rebound this, since otherwise
                # it will have an extra argument (self).
                value = astroid.BoundMethod(value, node)
            module[key] = value
    return module
Example #18
Source File: brain_multiprocessing.py From linter-pylama with MIT License | 4 votes |
def _multiprocessing_transform():
    module = astroid.parse('''
    from multiprocessing.managers import SyncManager
    def Manager():
        return SyncManager()
    ''')
    if not PY34:
        return module

    # On Python 3.4, multiprocessing uses a getattr lookup inside contexts,
    # in order to get the attributes they need. Since it's extremely
    # dynamic, we use this approach to fake it.
    node = astroid.parse('''
    from multiprocessing.context import DefaultContext, BaseContext
    default = DefaultContext()
    base = BaseContext()
    ''')
    try:
        context = next(node['default'].infer())
        base = next(node['base'].infer())
    except exceptions.InferenceError:
        return module

    for node in (context, base):
        for key, value in node.locals.items():
            if key.startswith("_"):
                continue

            value = value[0]
            if isinstance(value, astroid.FunctionDef):
                # We need to rebound this, since otherwise
                # it will have an extra argument (self).
                value = astroid.BoundMethod(value, node)
            module[key] = value
    return module
Example #19
Source File: brain_multiprocessing.py From pySINDy with MIT License | 3 votes |
def _multiprocessing_managers_transform():
    return astroid.parse(
        """
    import array
    import threading
    import multiprocessing.pool as pool

    import six

    class Namespace(object):
        pass

    class Value(object):
        def __init__(self, typecode, value, lock=True):
            self._typecode = typecode
            self._value = value
        def get(self):
            return self._value
        def set(self, value):
            self._value = value
        def __repr__(self):
            return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
        value = property(get, set)

    def Array(typecode, sequence, lock=True):
        return array.array(typecode, sequence)

    class SyncManager(object):
        Queue = JoinableQueue = six.moves.queue.Queue
        Event = threading.Event
        RLock = threading.RLock
        BoundedSemaphore = threading.BoundedSemaphore
        Condition = threading.Condition
        Barrier = threading.Barrier
        Pool = pool.Pool
        list = list
        dict = dict
        Value = Value
        Array = Array
        Namespace = Namespace
        __enter__ = lambda self: self
        __exit__ = lambda *args: args

        def start(self, initializer=None, initargs=None):
            pass
        def shutdown(self):
            pass
    """
    )
Example #20
Source File: brain_multiprocessing.py From python-netsurv with MIT License | 3 votes |
def _multiprocessing_managers_transform():
    return astroid.parse(
        """
    import array
    import threading
    import multiprocessing.pool as pool

    import six

    class Namespace(object):
        pass

    class Value(object):
        def __init__(self, typecode, value, lock=True):
            self._typecode = typecode
            self._value = value
        def get(self):
            return self._value
        def set(self, value):
            self._value = value
        def __repr__(self):
            return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
        value = property(get, set)

    def Array(typecode, sequence, lock=True):
        return array.array(typecode, sequence)

    class SyncManager(object):
        Queue = JoinableQueue = six.moves.queue.Queue
        Event = threading.Event
        RLock = threading.RLock
        BoundedSemaphore = threading.BoundedSemaphore
        Condition = threading.Condition
        Barrier = threading.Barrier
        Pool = pool.Pool
        list = list
        dict = dict
        Value = Value
        Array = Array
        Namespace = Namespace
        __enter__ = lambda self: self
        __exit__ = lambda *args: args

        def start(self, initializer=None, initargs=None):
            pass
        def shutdown(self):
            pass
    """
    )
Example #21
Source File: brain_multiprocessing.py From python-netsurv with MIT License | 3 votes |
def _multiprocessing_managers_transform():
    return astroid.parse(
        """
    import array
    import threading
    import multiprocessing.pool as pool

    import six

    class Namespace(object):
        pass

    class Value(object):
        def __init__(self, typecode, value, lock=True):
            self._typecode = typecode
            self._value = value
        def get(self):
            return self._value
        def set(self, value):
            self._value = value
        def __repr__(self):
            return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
        value = property(get, set)

    def Array(typecode, sequence, lock=True):
        return array.array(typecode, sequence)

    class SyncManager(object):
        Queue = JoinableQueue = six.moves.queue.Queue
        Event = threading.Event
        RLock = threading.RLock
        BoundedSemaphore = threading.BoundedSemaphore
        Condition = threading.Condition
        Barrier = threading.Barrier
        Pool = pool.Pool
        list = list
        dict = dict
        Value = Value
        Array = Array
        Namespace = Namespace
        __enter__ = lambda self: self
        __exit__ = lambda *args: args

        def start(self, initializer=None, initargs=None):
            pass
        def shutdown(self):
            pass
    """
    )
Example #22
Source File: brain_multiprocessing.py From linter-pylama with MIT License | 3 votes |
def _multiprocessing_managers_transform():
    return astroid.parse('''
    import array
    import threading
    import multiprocessing.pool as pool

    import six

    class Namespace(object):
        pass

    class Value(object):
        def __init__(self, typecode, value, lock=True):
            self._typecode = typecode
            self._value = value
        def get(self):
            return self._value
        def set(self, value):
            self._value = value
        def __repr__(self):
            return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
        value = property(get, set)

    def Array(typecode, sequence, lock=True):
        return array.array(typecode, sequence)

    class SyncManager(object):
        Queue = JoinableQueue = six.moves.queue.Queue
        Event = threading.Event
        RLock = threading.RLock
        BoundedSemaphore = threading.BoundedSemaphore
        Condition = threading.Condition
        Barrier = threading.Barrier
        Pool = pool.Pool
        list = list
        dict = dict
        Value = Value
        Array = Array
        Namespace = Namespace
        __enter__ = lambda self: self
        __exit__ = lambda *args: args

        def start(self, initializer=None, initargs=None):
            pass
        def shutdown(self):
            pass
    ''')