Python multiprocessing.Semaphore() Examples

The following are 30 code examples of multiprocessing.Semaphore(), collected from open-source projects. You can go to the original project or source file by following the links above each example, or browse the other available functions and classes of the multiprocessing module.
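Before the project examples, here is a minimal, self-contained sketch of the primitive itself (all names below are illustrative): multiprocessing.Semaphore(n) wraps a counter that at most n processes can hold at once; acquire() decrements it, blocking at zero, and release() increments it.

import multiprocessing
import time

def worker(sem, i):
    with sem:  # acquire on entry, release on exit
        print("worker", i, "is inside the gate")
        time.sleep(0.1)

if __name__ == "__main__":
    sem = multiprocessing.Semaphore(2)  # at most two workers inside at once
    procs = [multiprocessing.Process(target=worker, args=(sem, i)) for i in range(5)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()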
Example #1
Source File: _test_multiprocessing.py    From Fluid-Designer with GNU General Public License v3.0
def test_timeout(self):
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        sem = self.Semaphore(0)
        acquire = TimingWrapper(sem.acquire)

        self.assertEqual(acquire(False), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)

        self.assertEqual(acquire(False, None), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)

        self.assertEqual(acquire(False, TIMEOUT1), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0)

        self.assertEqual(acquire(True, TIMEOUT2), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)

        self.assertEqual(acquire(timeout=TIMEOUT3), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3) 
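The assertions above pin down the acquire(block, timeout) semantics: with block=False the call returns immediately and any timeout is ignored, while with block=True and a timeout the call waits up to that long before giving up. The same behavior in isolation (a sketch, not part of the test suite):

import multiprocessing

sem = multiprocessing.Semaphore(0)  # starts exhausted, so every acquire fails
sem.acquire(False)                  # non-blocking: returns False immediately
sem.acquire(False, 5.0)             # timeout is ignored when block=False
sem.acquire(True, 0.2)              # blocks for ~0.2 s, then returns False
sem.acquire(timeout=0.2)            # keyword form of the same call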
Example #2
Source File: network_semaphore.py    From ACE with Apache License 2.0
def initialize_fallback_semaphores():
    """This needs to be called once at the very beginning of starting ACE."""

    # we need some fallback functionality for when the network semaphore server is down
    # these semaphores serve that purpose
    global_engine_instance_count = saq.CONFIG['global'].getint('global_engine_instance_count')
    for key in saq.CONFIG['network_semaphore'].keys():
        if key.startswith('semaphore_'):
            semaphore_name = key[len('semaphore_'):]
            # the configuration settings for the network semaphore specify how many connections
            # are allowed to a specific resource at once, globally
            # so if we are unable to coordinate globally, the fallback is to divide
            # the available number of resources evenly between all the engines
            # that's what the next computation does
            fallback_limit = int(floor(saq.CONFIG['network_semaphore'].getfloat(key) / float(global_engine_instance_count)))
            # we allow a minimum of one per engine
            if fallback_limit < 1:
                fallback_limit = 1

            logging.debug("fallback semaphore count for {0} is {1}".format(semaphore_name, fallback_limit))
            fallback_semaphores[semaphore_name] = LoggingSemaphore(fallback_limit)
            #fallback_semaphores[semaphore_name] = multiprocessing.Semaphore(fallback_limit)
            #fallback_semaphores[semaphore_name].semaphore_name = 'fallback {0}'.format(semaphore_name) 
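To make the arithmetic concrete (the numbers below are hypothetical, not taken from an ACE config): a resource that allows 10 concurrent connections globally, shared by 4 engine instances, falls back to floor(10 / 4) = 2 local slots per engine, and anything that would round down to zero is bumped up to 1.

from math import floor

global_limit = 10.0   # hypothetical per-resource limit from the config
engine_count = 4      # hypothetical global_engine_instance_count
fallback_limit = max(int(floor(global_limit / float(engine_count))), 1)  # -> 2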
Example #3
Source File: dist_manager.py    From autogluon with Apache License 2.0
def _request(cls, resource):
        """ResourceManager; we recommend using the scheduler instead of
        creating your own resource manager.
        """
        assert cls.check_possible(resource), \
            'Requested num_cpu={} and num_gpu={} should be less than or equal to ' \
            'largest node availability CPUs={}, GPUs={}'.format(
                resource.num_cpus, resource.num_gpus, cls.MAX_CPU_COUNT, cls.MAX_GPU_COUNT)
       
        with cls.LOCK:
            node = cls.check_availability(resource)
            if node is not None:
                cls.NODE_RESOURCE_MANAGER[node]._request(node, resource)
                return

        logger.debug('Appending {} to Request Stack'.format(resource))
        request_semaphore = mp.Semaphore(0)
        with cls.LOCK:
            cls.REQUESTING_STACK.append((resource, request_semaphore))
        request_semaphore.acquire()
        return 
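The snippet shows only the blocking side: a request that cannot be satisfied immediately parks itself on a fresh Semaphore(0). The counterpart, inferred from this code rather than copied from autogluon, is that whatever frees capacity pops a pending request and releases its semaphore:

@classmethod
def _grant_pending(cls):
    # Hypothetical helper: wake the most recently queued requester, if any.
    with cls.LOCK:
        if cls.REQUESTING_STACK:
            resource, request_semaphore = cls.REQUESTING_STACK.pop()
            request_semaphore.release()  # unblocks the acquire() in _request()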
Example #4
Source File: async_rl.py    From rlpyt with MIT License
def build_ctrl(self, world_size):
        """
        Builds several parallel communication mechanisms for controlling the
        workflow across processes.
        """
        opt_throttle = (mp.Barrier(world_size) if world_size > 1 else
            None)
        return AttrDict(
            quit=mp.Value('b', lock=True),
            quit_opt=mp.RawValue('b'),
            sample_ready=[mp.Semaphore(0) for _ in range(2)],  # Double buffer.
            sample_copied=[mp.Semaphore(1) for _ in range(2)],
            sampler_itr=mp.Value('l', lock=True),
            opt_throttle=opt_throttle,
            eval_time=mp.Value('d', lock=True),
        ) 
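The initial values encode the protocol: each sample_copied starts at 1 (its buffer begins free to write) while each sample_ready starts at 0 (nothing to read yet). A sketch of how the two sides of one buffer j would alternate (illustrative, not rlpyt's actual loop):

def sampler_side(ctrl, j, write_batch):
    ctrl.sample_copied[j].acquire()  # wait until buffer j has been drained
    write_batch(j)                   # fill buffer j
    ctrl.sample_ready[j].release()   # announce that buffer j is full

def optimizer_side(ctrl, j, copy_batch):
    ctrl.sample_ready[j].acquire()   # wait until buffer j is full
    copy_batch(j)                    # drain buffer j
    ctrl.sample_copied[j].release()  # hand buffer j back to the sampler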
Example #5
Source File: local_timer_test.py    From elastic with BSD 3-Clause "New" or "Revised" License
def test_get_size(self):
        """
        Creates a "producer" process that enqueues ``n`` elements
        every ``interval`` seconds. Asserts that a ``get(n, timeout=n*interval+delta)``
        yields all ``n`` elements.
        """
        mp_queue = mp.Queue()
        request_queue = MultiprocessingRequestQueue(mp_queue)
        n = 10
        interval = 0.1
        sem = mp.Semaphore(0)

        p = mp.Process(target=_enqueue_on_interval, args=(mp_queue, n, interval, sem))
        p.start()

        sem.acquire()  # blocks until the process has started to run the function
        timeout = interval * (n + 1)
        start = time.time()
        requests = request_queue.get(n, timeout=timeout)
        self.assertLessEqual(time.time() - start, timeout + interval)
        self.assertEqual(n, len(requests)) 
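The _enqueue_on_interval helper is not reproduced on this page. Inferred from how it is called here and in Example #7, it releases the semaphore once the child is running and then enqueues n items, one every interval seconds, roughly:

import time

def _enqueue_on_interval(mp_queue, n, interval, sem):
    sem.release()  # signal the parent that the producer has started
    for i in range(n):
        mp_queue.put(i)
        time.sleep(interval)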
Example #6
Source File: _test_multiprocessing.py    From ironpython3 with Apache License 2.0
(Same test_timeout implementation as in Example #1.)
Example #7
Source File: local_timer_test.py    From elastic with BSD 3-Clause "New" or "Revised" License
def test_get_less_than_size(self):
        """
        Tests slow producer.
        Creates a "producer" process that enqueues ``n`` elements
        every ``interval`` seconds. Asserts that a ``get(n, timeout=(interval * n/2))``
        yields at least ``n/2`` elements, matching the assertion below.
        """
        mp_queue = mp.Queue()
        request_queue = MultiprocessingRequestQueue(mp_queue)
        n = 10
        interval = 0.1
        sem = mp.Semaphore(0)

        p = mp.Process(target=_enqueue_on_interval, args=(mp_queue, n, interval, sem))
        p.start()

        sem.acquire()  # blocks until the process has started to run the function
        requests = request_queue.get(n, timeout=(interval * (n / 2)))
        self.assertLessEqual(n / 2, len(requests)) 
Example #8
Source File: _test_multiprocessing.py    From Project-New-Reign---Nemesis-Main with GNU General Public License v3.0
(Same test_timeout implementation as in Example #1.)
Example #9
Source File: lock.py    From tatk with Apache License 2.0
def create_th_sem(value):
        try:
            th_lock = th.Semaphore(value)  # thread lock
        except OSError:  # pragma: no cover
            th_lock = None
        return th_lock 
Example #10
Source File: lock.py    From tatk with Apache License 2.0
def create_mp_sem(value):
        try:
            mp_lock = mp.Semaphore(value)  # multiprocessing lock
        except ImportError:  # pragma: no cover
            mp_lock = None
        except OSError:  # pragma: no cover
            mp_lock = None
        return mp_lock 
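Both factories return None instead of raising on platforms where the primitive is unavailable (multiprocessing semaphores require working OS-level semaphore support, which some systems lack). A caller can then degrade gracefully; one possible usage (a sketch, not tatk's actual call site):

mp_lock = create_mp_sem(4)
th_lock = create_th_sem(4)
lock = mp_lock if mp_lock is not None else th_lock  # prefer the cross-process lock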
Example #11
Source File: ProcessControl.py    From codex-backend with MIT License
def __init__(self, forks_number):
        self.forks_number = forks_number
        self.semaphore = Semaphore(self.forks_number) 
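A semaphore sized to forks_number is the usual way to cap concurrent children: acquire a slot before forking and release it when a child finishes. How this class would typically be driven (hypothetical; the rest of ProcessControl is not shown here):

control = ProcessControl(forks_number=4)

def run_job(job):
    control.semaphore.acquire()      # wait until one of the 4 slots frees up
    try:
        job()
    finally:
        control.semaphore.release()  # return the slot for the next fork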
Example #12
Source File: sampler.py    From rlpyt with MIT License
def _build_parallel_ctrl(self, n_worker):
        super()._build_parallel_ctrl(n_worker)
        self.sync.obs_ready = [mp.Semaphore(0) for _ in range(n_worker)]
        self.sync.act_ready = [mp.Semaphore(0) for _ in range(n_worker)] 
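obs_ready and act_ready form a per-worker handshake: each worker announces when its observations are written, then blocks until the master has written actions back. Illustratively (not rlpyt's exact worker loop):

def worker_step(sync, rank, step_envs, read_actions):
    step_envs()                     # write observations into the shared buffer
    sync.obs_ready[rank].release()  # tell the master these obs are ready
    sync.act_ready[rank].acquire()  # wait until the master has written actions
    read_actions()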
Example #13
Source File: gpu_sampler.py    From rlpyt with MIT License
def launch_workers(self, double_buffer_slice, affinity, seed, n_envs_list):
        self.n_worker = n_worker = len(n_envs_list)
        # A little sleight-of-hand to make a 2-level signal:
        self.ctrl.stop_eval = self.sync.stop_eval
        self.sync = AttrDict(
            obs_ready=[mp.Semaphore(0) for _ in range(n_worker)],
            act_ready=[mp.Semaphore(0) for _ in range(n_worker)],
            stop_eval=mp.RawValue(ctypes.c_bool, False),  # Overwrite.
            # stop_eval=self.ctrl.stop_eval,  # No, make 2-level signal.
            db_idx=self.ctrl.db_idx,  # Copy into sync which passes to Collector.
        )
        self.step_buffer_pyt, self.step_buffer_np = build_step_buffer(
            self.examples, sum(n_envs_list))
        self.agent_inputs = AgentInputs(self.step_buffer_pyt.observation,
            self.step_buffer_pyt.action, self.step_buffer_pyt.reward)

        if self.eval_n_envs > 0:
            eval_n_envs = self.eval_n_envs_per * n_worker
            eval_step_buffer_pyt, eval_step_buffer_np = build_step_buffer(
                self.examples, eval_n_envs)
            self.eval_step_buffer_pyt = eval_step_buffer_pyt
            self.eval_step_buffer_np = eval_step_buffer_np
            self.eval_agent_inputs = AgentInputs(
                self.eval_step_buffer_pyt.observation,
                self.eval_step_buffer_pyt.action,
                self.eval_step_buffer_pyt.reward,
            )
            # eval_max_T already made in earlier initialize.

        self.double_buffer = double_buffer_slice  # Now only see my part.
        common_kwargs = self._assemble_common_kwargs(affinity)
        common_kwargs["agent"] = None  # Remove.
        workers_kwargs = self._assemble_workers_kwargs(affinity, seed,
            n_envs_list)

        # Yes, fork again.
        self.workers = [mp.Process(target=sampling_process,
            kwargs=dict(common_kwargs=common_kwargs, worker_kwargs=w_kwargs))
            for w_kwargs in workers_kwargs]
        for w in self.workers:
            w.start() 
Example #14
Source File: shared_memory.py    From tensorflow-rl with Apache License 2.0
def __init__(self, n):
        self.n = n
        self.counter = SharedCounter(0)
        self.barrier = Semaphore(0) 
Example #15
Source File: _test_multiprocessing.py    From Project-New-Reign---Nemesis-Main with GNU General Public License v3.0
def test_semaphore(self):
        sem = self.Semaphore(2)
        self._test_semaphore(sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(3, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(4, get_value, sem) 
Example #16
Source File: _test_multiprocessing.py    From Project-New-Reign---Nemesis-Main with GNU General Public License v3.0
def test_waitfor_timeout(self):
        # based on test in test/lock_tests.py
        cond = self.Condition()
        state = self.Value('i', 0)
        success = self.Value('i', False)
        sem = self.Semaphore(0)

        p = self.Process(target=self._test_waitfor_timeout_f,
                         args=(cond, state, success, sem))
        p.daemon = True
        p.start()
        self.assertTrue(sem.acquire(timeout=10))

        # Only increment 3 times, so state == 4 is never reached.
        for i in range(3):
            time.sleep(0.01)
            with cond:
                state.value += 1
                cond.notify()

        p.join(5)
        self.assertTrue(success.value) 
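_test_waitfor_timeout_f is not shown on this page. From how it is driven, it must release sem once it is waiting (so the parent's sem.acquire(timeout=10) returns) and then call cond.wait_for() with a timeout that necessarily expires, since state never reaches 4. A sketch under those assumptions:

@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
    with cond:
        sem.release()  # let the parent proceed past sem.acquire()
        result = cond.wait_for(lambda: state.value == 4, timeout=1)
        if not result:     # state stops at 3, so the wait must time out
            success.value = True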
Example #17
Source File: syncer.py    From private-file-saver with MIT License
def __init__(self, bucket_name=None, target_path=None, max_workers=None, s3_client=None):
        self._target_path = target_path or configs.TARGET_PATH
        self._bucket_name = bucket_name or configs.DEFAULT_BUCKET_NAME

        assert max_workers is None or max_workers > 0, "max_workers must not be less than 1"
        self._max_workers = max_workers or configs.MAX_CONCURRENCY

        self._client = s3_client or S3Client(self._bucket_name)
        self._meta_store = MetaStore(db_path=self._target_path,
                                     target_path=self._target_path)

        self._dry_run = False

        self._upload_queue = multiprocessing.JoinableQueue()  # Files to be uploaded
        self._files_to_be_uploaded = multiprocessing.Semaphore(value=0) 
Example #18
Source File: downloader.py    From private-file-saver with MIT License
def __init__(self, bucket_name=None, target_path=None, max_worker=None, s3_client=None):
        self._target_path = target_path or configs.TARGET_PATH
        self._max_workers = max_worker or configs.MAX_CONCURRENCY
        self._bucket_name = bucket_name or configs.DEFAULT_BUCKET_NAME
        self._s3_client = s3_client or S3Client(bucket_name=self._bucket_name)

        self._dry_run = True

        self._download_queue = multiprocessing.JoinableQueue()  # Objects to be downloaded
        self._files_to_be_downloaded = multiprocessing.Semaphore(value=0) 
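In both classes the Semaphore(0) shadows the queue's length, so worker processes can block on acquire() instead of polling. A sketch of the consuming side this sets up (the worker and the download call are hypothetical):

def download_worker(download_queue, files_to_be_downloaded, client):
    while True:
        files_to_be_downloaded.acquire()  # wait for at least one queued object
        key = download_queue.get()
        try:
            client.download_file(key)     # hypothetical download call
        finally:
            download_queue.task_done()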
Example #19
Source File: communicator.py    From redqueen with GNU Affero General Public License v3.0
def __init__(self, num_processes=1, tasks_per_requests=1, bitmap_size=(64 << 10)):
        self.to_update_queue = multiprocessing.Queue()
        self.to_master_queue = multiprocessing.Queue()
        self.to_master_from_mapserver_queue = multiprocessing.Queue()
        self.to_master_from_slave_queue = multiprocessing.Queue()
        self.to_mapserver_queue = multiprocessing.Queue()

        self.to_slave_queues = []
        for i in range(num_processes):
            self.to_slave_queues.append(multiprocessing.Queue())

        self.slave_locks_A = []
        self.slave_locks_B = []
        for i in range(num_processes):
            self.slave_locks_A.append(multiprocessing.Lock())
            self.slave_locks_B.append(multiprocessing.Lock())
            self.slave_locks_B[i].acquire()

        self.reload_semaphore = multiprocessing.Semaphore(multiprocessing.cpu_count() // 2)  # floor division: Semaphore() needs an int
        self.num_processes = num_processes
        self.tasks_per_requests = tasks_per_requests

        self.stage_abortion_notifier = multiprocessing.Value('b', False)
        self.slave_termination = multiprocessing.Value('b', False, lock=False)
        self.sampling_failed_notifier = multiprocessing.Value('b', False)
        self.effector_mode = multiprocessing.Value('b', False)
        self.effector_mode_hash_a = multiprocessing.Value('l', 0)
        self.effector_mode_hash_b = multiprocessing.Value('l', 0)


        self.files = ["/dev/shm/kafl_fuzzer_master_", "/dev/shm/kafl_fuzzer_mapserver_", "/dev/shm/kafl_fuzzer_bitmap_"]
        self.sizes = [(65 << 10), (65 << 10), bitmap_size]
        self.tmp_shm = [{}, {}, {}] 
Example #20
Source File: cxxd_callbacks.py    From cxxd with GNU General Public License v3.0
def __init__(self):
        self.type = {
            'type_deduction'     : TypeDeductionCallbackResult(),
            'go_to_definition'   : GoToDefinitionCallbackResult(),
            'go_to_include'      : GoToIncludeCallbackResult(),
            'semantic_syntax_hl' : SemanticSyntaxHighlightCallbackResult(),
            'diagnostics'        : DiagnosticsCallbackResult(),
            'indexer'            : IndexerCallbackResult()
        }
        self.wait_on_completion = multiprocessing.Semaphore(0) 
Example #21
Source File: cxxd_callbacks.py    From cxxd with GNU General Public License v3.0
def __init__(self):
        self.wait_on_completion = multiprocessing.Semaphore(0)
        self.clang_format_status = multiprocessing.Value(ctypes.c_bool, False) 
Example #22
Source File: cxxd_callbacks.py    From cxxd with GNU General Public License v3.0
def __init__(self):
        self.wait_on_completion = multiprocessing.Semaphore(0)
        self.clang_tidy_status = multiprocessing.Value(ctypes.c_bool, False)
        self.clang_tidy_output = multiprocessing.Array(ctypes.c_char, ClangTidyCallbackResult.OUTPUT_FILENAME_LENGTH_MAX) 
Example #23
Source File: cxxd_callbacks.py    From cxxd with GNU General Public License v3.0
def __init__(self):
        self.wait_on_completion = multiprocessing.Semaphore(0)
        self.project_builder_status = multiprocessing.Value(ctypes.c_bool, False)
        self.project_builder_output = multiprocessing.Array(ctypes.c_char, ProjectBuilderCallbackResult.OUTPUT_FILENAME_LENGTH_MAX) 
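Across these cxxd callback classes, Semaphore(0) serves as a one-shot completion event: the service side fills in the status/output values and releases the semaphore, while the caller blocks on acquire() until then. Schematically (illustrative, not cxxd's actual call sites):

def service_done(result, success):
    result.project_builder_status.value = success
    result.wait_on_completion.release()  # wake the waiting caller

def wait_for_build(result):
    result.wait_on_completion.acquire()  # blocks until service_done() runs
    return result.project_builder_status.value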
Example #24
Source File: reporter.py    From autogluon with Apache License 2.0
def __init__(self, dict_path=None):#, result_queue, continue_semaphore):
        self._queue = mp.Queue(1)
        self._stop = mp.Value('i', 0)
        self._last_report_time = None
        self._continue_semaphore = mp.Semaphore(0)
        self._last_report_time = time.time()
        self._save_dict = False
        self.dict_path = dict_path 
Example #25
Source File: _test_multiprocessing.py    From Fluid-Designer with GNU General Public License v3.0
(Same test_semaphore implementation as in Example #15.)
Example #26
Source File: tool.py    From Bowler with MIT License
def __init__(
        self,
        fixers: Fixers,
        *args,
        interactive: bool = True,
        write: bool = False,
        silent: bool = False,
        in_process: Optional[bool] = None,
        hunk_processor: Processor = None,
        filename_matcher: Optional[FilenameMatcher] = None,
        **kwargs,
    ) -> None:
        options = kwargs.pop("options", {})
        super().__init__(fixers, *args, options=options, **kwargs)
        self.queue_count = 0
        self.queue = multiprocessing.JoinableQueue()  # type: ignore
        self.results = multiprocessing.Queue()  # type: ignore
        self.semaphore = multiprocessing.Semaphore(self.NUM_PROCESSES)
        self.interactive = interactive
        self.write = write
        self.silent = silent
        if in_process is None:
            in_process = self.IN_PROCESS
        # pick the most restrictive of flags; we can pickle fixers when
        # using spawn.
        if sys.platform == "win32" or sys.version_info > (3, 7):
            in_process = True
        self.in_process = in_process
        self.exceptions: List[BowlerException] = []
        if hunk_processor is not None:
            self.hunk_processor = hunk_processor
        else:
            self.hunk_processor = lambda f, h: True
        self.filename_matcher = filename_matcher or filename_endswith(".py") 
Example #27
Source File: shared_utils.py    From async-deep-rl with Apache License 2.0
(Same __init__ implementation as in Example #14.)
Example #28
Source File: test_process_utils.py    From airflow with Apache License 2.0
def _parent_of_ignores_sigterm(parent_pid, child_pid, setup_done):
        def signal_handler(unused_signum, unused_frame):
            pass
        os.setsid()
        signal.signal(signal.SIGTERM, signal_handler)
        child_setup_done = multiprocessing.Semaphore(0)
        child = multiprocessing.Process(target=TestReapProcessGroup._ignores_sigterm,
                                        args=[child_pid, child_setup_done])
        child.start()
        child_setup_done.acquire(timeout=5.0)
        parent_pid.value = os.getpid()
        setup_done.release()
        while True:
            time.sleep(1) 
Example #29
Source File: test_process_utils.py    From airflow with Apache License 2.0
def test_reap_process_group(self):
        """
        Spin up a process that can't be killed by SIGTERM and make sure
        it gets killed anyway.
        """
        parent_setup_done = multiprocessing.Semaphore(0)
        parent_pid = multiprocessing.Value('i', 0)
        child_pid = multiprocessing.Value('i', 0)
        args = [parent_pid, child_pid, parent_setup_done]
        parent = multiprocessing.Process(target=TestReapProcessGroup._parent_of_ignores_sigterm, args=args)
        try:
            parent.start()
            self.assertTrue(parent_setup_done.acquire(timeout=5.0))
            self.assertTrue(psutil.pid_exists(parent_pid.value))
            self.assertTrue(psutil.pid_exists(child_pid.value))

            process_utils.reap_process_group(parent_pid.value, logging.getLogger(), timeout=1)

            self.assertFalse(psutil.pid_exists(parent_pid.value))
            self.assertFalse(psutil.pid_exists(child_pid.value))
        finally:
            try:
                os.kill(parent_pid.value, signal.SIGKILL)  # terminate doesn't work here
                os.kill(child_pid.value, signal.SIGKILL)  # terminate doesn't work here
            except OSError:
                pass 
Example #30
Source File: datagen_para_branch.py    From Convolutional-Pose-Machine-tf with GNU Lesser General Public License v3.0
def generator(self, *args, **kwargs):
        """ Wrap the underlying generator in ParaWrapper's generator, which is
            capable of multi-processing.
            Once the generator function is set up, we dispatch a worker with
            the task and let it run at full load until it hits the buff_size
            limit.

            The worker's job is to feed the list and keep it stocked with up
            to <buff_size> batches.
        """
        #   Initialization semaphores and numbering
        buff_count = Semaphore(value=0)
        target_remain = Semaphore(value=self.buff_size)
        number = str(self.gen_num)
        self.gen_num += 1

        #   Initializing list
        self.batch_list[number] = self.manager.list()

        #   Assign work and send worker
        gen = self.datagen.generator(*args, **kwargs)
        worker = Process(target=self.task, args=(gen, number, target_remain, buff_count))
        worker.start()

        while True:
            buff_count.acquire(block=True)
            ret = self.batch_list[number].pop()
            target_remain.release()
            yield ret
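The two semaphores implement a classic bounded buffer: target_remain counts free slots (starting at buff_size) and buff_count counts filled ones (starting at 0). The task method the worker runs is not shown here; inferred from the generator above, its loop is the mirror image:

def task(self, gen, number, target_remain, buff_count):
    # A sketch inferred from the generator, not the file's actual code.
    for batch in gen:
        target_remain.acquire(block=True)      # wait for a free buffer slot
        self.batch_list[number].append(batch)
        buff_count.release()                   # announce one more ready batch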