Python concurrent.futures.FIRST_COMPLETED Examples
The following are 30 code examples of concurrent.futures.FIRST_COMPLETED, the module-level constant passed as the return_when argument to concurrent.futures.wait(). The originating project, source file, and license are noted above each example.
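Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the typical pattern: submit several tasks to an executor, then loop on wait(..., return_when=FIRST_COMPLETED) to handle each result as soon as it becomes available.

import time
from concurrent.futures import FIRST_COMPLETED, ThreadPoolExecutor, wait

def slow_square(n):
    time.sleep(n)  # simulate work of varying duration
    return n * n

with ThreadPoolExecutor(max_workers=3) as pool:
    pending = {pool.submit(slow_square, n) for n in (3, 1, 2)}
    while pending:
        # wait() returns as soon as at least one future is done or cancelled
        done, pending = wait(pending, return_when=FIRST_COMPLETED)
        for future in done:
            print('finished:', future.result())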
Example #1
Source File: _base.py From loky with BSD 3-Clause "New" or "Revised" License | 6 votes |
def _create_and_install_waiters(fs, return_when):
    if return_when == _AS_COMPLETED:
        waiter = _AsCompletedWaiter()
    elif return_when == FIRST_COMPLETED:
        waiter = _FirstCompletedWaiter()
    else:
        pending_count = sum(
            f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)

        if return_when == FIRST_EXCEPTION:
            waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True)
        elif return_when == ALL_COMPLETED:
            waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False)
        else:
            raise ValueError("Invalid return condition: %r" % return_when)

    for f in fs:
        f._waiters.append(waiter)

    return waiter
Example #2
Source File: dry_run.py From batch-scoring with BSD 3-Clause "New" or "Revised" License | 6 votes |
def perform_requests(self):
    signal.signal(signal.SIGINT, self.exit_fast)
    signal.signal(signal.SIGTERM, self.exit_fast)

    self.state = b'E'
    for q_batch in self.get_batch():
        for (_, _) in self.split_batch(q_batch):
            if self.state != b"R":
                self.state = b'R'
            yield
            continue

    # wait for all batches to finish before returning
    self.state = b'W'
    while self.futures:
        f_len = len(self.futures)
        self.futures = [i for i in self.futures if not i.done()]
        if f_len != len(self.futures):
            self.ui.debug('Waiting for final requests to finish. '
                          'remaining requests: {}'
                          ''.format(len(self.futures)))
        wait(self.futures, return_when=FIRST_COMPLETED)
    self.state = b'D'
    yield True
Example #3
Source File: anim.py From uchroma with GNU Lesser General Public License v3.0 | 6 votes |
async def _get_layers(self):
    """
    Wait for renderers to produce new layers, yields until at least one
    layer is active.
    """
    # schedule tasks to wait on each renderer queue
    for r_idx, _ in enumerate(self.layers):
        if _.waiter is None or _.waiter.done():
            _.waiter = ensure_future(self._dequeue(r_idx))

    # async wait for at least one completion
    waiters = [layer.waiter for layer in self.layers]
    if not waiters:
        return

    await asyncio.wait(waiters, return_when=futures.FIRST_COMPLETED)

    # check the rest without waiting
    for r_idx, _ in enumerate(self.layers):
        if _.waiter is not None and not _.waiter.done():
            self._dequeue_nowait(r_idx)
Example #4
Source File: runner.py From adaptive with BSD 3-Clause "New" or "Revised" License | 6 votes |
def _run(self):
    first_completed = concurrent.FIRST_COMPLETED

    if self._get_max_tasks() < 1:
        raise RuntimeError("Executor has no workers")

    try:
        while not self.goal(self.learner):
            futures = self._get_futures()
            done, _ = concurrent.wait(futures, return_when=first_completed)
            self._process_futures(done)
    finally:
        remaining = self._remove_unfinished()
        if remaining:
            concurrent.wait(remaining)
        self._cleanup()
Example #5
Source File: runner.py From adaptive with BSD 3-Clause "New" or "Revised" License | 6 votes |
async def _run(self):
    first_completed = asyncio.FIRST_COMPLETED

    if self._get_max_tasks() < 1:
        raise RuntimeError("Executor has no workers")

    try:
        while not self.goal(self.learner):
            futures = self._get_futures()
            done, _ = await asyncio.wait(
                futures, return_when=first_completed, loop=self.ioloop
            )
            self._process_futures(done)
    finally:
        remaining = self._remove_unfinished()
        if remaining:
            await asyncio.wait(remaining)
        self._cleanup()
Example #6
Source File: deriver.py From bionic with Apache License 2.0 | 5 votes |
def _wait_on_in_progress_entries(self):
    "Waits on any in-progress entry to finish."
    futures = [entry.future for entry in self._in_progress_entries.values()]
    finished_futures, _ = wait(futures, return_when=FIRST_COMPLETED)
    for finished_future in finished_futures:
        task_key = finished_future.result()
        entry = self._entries_by_task_key[task_key]
        entry.state.sync_after_subprocess_completion()
        self._mark_entry_completed(entry)
Example #7
Source File: _test_process_executor.py From loky with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_first_completed(self):
    future1 = self.executor.submit(mul, 21, 2)
    future2 = self.executor.submit(time.sleep, 1.5)

    done, not_done = futures.wait([CANCELLED_FUTURE, future1, future2],
                                  return_when=futures.FIRST_COMPLETED)

    assert set([future1]) == done
    assert set([CANCELLED_FUTURE, future2]) == not_done
Example #8
Source File: _test_process_executor.py From loky with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_first_completed_some_already_completed(self):
    future1 = self.executor.submit(time.sleep, 1.5)

    finished, pending = futures.wait([CANCELLED_AND_NOTIFIED_FUTURE,
                                      SUCCESSFUL_FUTURE, future1],
                                     return_when=futures.FIRST_COMPLETED)

    assert (set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE])
            == finished)
    assert set([future1]) == pending
Example #9
Source File: executor.py From plugin.video.openmeta with GNU General Public License v3.0 | 5 votes |
def _batched_pool_runner(pool, batch_size, f, iterable):
    it = iter(iterable)
    futures = set(pool.submit(f, x) for x in islice(it, batch_size))
    while futures:
        done, futures = wait(futures, return_when=FIRST_COMPLETED)
        futures.update(pool.submit(f, x) for x in islice(it, len(done)))
        for d in done:
            yield d
Example #10
Source File: test_concurrent_futures.py From Project-New-Reign---Nemesis-Main with GNU General Public License v3.0 | 5 votes |
def test_first_completed(self):
    future1 = self.executor.submit(mul, 21, 2)
    future2 = self.executor.submit(time.sleep, 1.5)

    done, not_done = futures.wait(
            [CANCELLED_FUTURE, future1, future2],
            return_when=futures.FIRST_COMPLETED)

    self.assertEqual(set([future1]), done)
    self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
Example #11
Source File: test_concurrent_futures.py From Project-New-Reign---Nemesis-Main with GNU General Public License v3.0 | 5 votes |
def test_first_completed_some_already_completed(self):
    future1 = self.executor.submit(time.sleep, 1.5)

    finished, pending = futures.wait(
            [CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
            return_when=futures.FIRST_COMPLETED)

    self.assertEqual(
            set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
            finished)
    self.assertEqual(set([future1]), pending)
Example #12
Source File: map_async_iterator.py From graphql-core with MIT License | 5 votes |
async def __anext__(self) -> Any:
    if self.is_closed:
        if not isasyncgen(self.iterator):
            raise StopAsyncIteration
        value = await self.iterator.__anext__()
        result = self.callback(value)
    else:
        aclose = ensure_future(self._close_event.wait())
        anext = ensure_future(self.iterator.__anext__())

        pending: Set[Future] = (
            await wait([aclose, anext], return_when=FIRST_COMPLETED)
        )[1]
        for task in pending:
            task.cancel()

        if aclose.done():
            raise StopAsyncIteration

        error = anext.exception()
        if error:
            if not self.reject_callback or isinstance(
                error, (StopAsyncIteration, GeneratorExit)
            ):
                raise error
            result = self.reject_callback(error)
        else:
            value = anext.result()
            result = self.callback(value)

    return await result if isawaitable(result) else result
Example #13
Source File: util.py From rep0st with MIT License | 5 votes |
def batched_pool_runner(f, iterable, pool, batch_size):
    # http://code.activestate.com/lists/python-list/666786/
    it = iter(iterable)
    # Submit the first batch of tasks.
    futures = set(pool.submit(f, x) for x in islice(it, batch_size))
    while futures:
        done, futures = wait(futures, return_when=FIRST_COMPLETED)
        # Replenish submitted tasks up to the number that completed.
        futures.update(pool.submit(f, x) for x in islice(it, len(done)))
        yield from done
Example #14
Source File: background_tasks.py From PerfKitBenchmarker with Apache License 2.0 | 5 votes |
def AwaitAnyTask(self):
    completed_tasks = None
    while not completed_tasks:
        completed_tasks, _ = futures.wait(
            self._active_futures, timeout=_LONG_TIMEOUT,
            return_when=futures.FIRST_COMPLETED)
    future = completed_tasks.pop()
    task_id = self._active_futures.pop(future)
    task = self.tasks[task_id]
    task.return_value, task.traceback = future.result()
    return task_id
Example #15
Source File: proxy.py From get_jobs with MIT License | 5 votes |
def thread_exector(thread, res):
    """
    Start the thread pool.
    :param thread: thread pool object
    :param res: custom ThreadProxy object
    :return:
    """
    tasks = [thread.submit(res.get_test_proxy, proxy) for proxy in res.proxy_list]
    # wait(tasks, return_when=FIRST_COMPLETED)
    thread.shutdown()
    result = [obj for obj in as_completed(tasks)]
    return result
Example #16
Source File: test_concurrent_futures.py From android_universal with MIT License | 5 votes |
def test_first_completed(self):
    future1 = self.executor.submit(mul, 21, 2)
    future2 = self.executor.submit(time.sleep, 1.5)

    done, not_done = futures.wait(
            [CANCELLED_FUTURE, future1, future2],
            return_when=futures.FIRST_COMPLETED)

    self.assertEqual(set([future1]), done)
    self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
Example #17
Source File: test_concurrent_futures.py From android_universal with MIT License | 5 votes |
def test_first_completed_some_already_completed(self):
    future1 = self.executor.submit(time.sleep, 1.5)

    finished, pending = futures.wait(
            [CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
            return_when=futures.FIRST_COMPLETED)

    self.assertEqual(
            set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
            finished)
    self.assertEqual(set([future1]), pending)
Example #18
Source File: core.py From moneywagon with MIT License | 5 votes |
def _get_results(FetcherClass, services, kwargs, num_results=None, fast=0,
                 verbose=False, timeout=None):
    """
    Does the fetching in multiple threads if needed. Used by paranoid and fast mode.
    """
    results = []
    if not num_results or fast:
        num_results = len(services)

    with futures.ThreadPoolExecutor(max_workers=len(services)) as executor:
        fetches = {}
        for service in services[:num_results]:
            tail = [x for x in services if x is not service]
            random.shuffle(tail)
            srv = FetcherClass(services=[service] + tail, verbose=verbose, timeout=timeout)
            fetches[executor.submit(srv.action, **kwargs)] = srv

        if fast == 1:
            raise NotImplementedError
            # this code is a work in progress. futures.FIRST_COMPLETED works
            # differently than I thought...
            to_iterate, still_going = futures.wait(fetches, return_when=futures.FIRST_COMPLETED)
            for x in still_going:
                try:
                    x.result(timeout=1.001)
                except futures._base.TimeoutError:
                    pass
        elif fast > 1:
            raise Exception("fast level greater than 1 not yet implemented")
        else:
            to_iterate = futures.as_completed(fetches)

        for future in to_iterate:
            service = fetches[future]
            results.append([service, future.result()])

    return results
Example #19
Source File: threads.py From hiku with BSD 3-Clause "New" or "Revised" License | 5 votes |
def process(self, queue, workflow):
    while queue.__futures__:
        done, _ = wait(queue.__futures__, return_when=FIRST_COMPLETED)
        queue.progress(done)
    return workflow.result()
Example #20
Source File: waiters.py From futurist with Apache License 2.0 | 5 votes |
def wait_for_any(fs, timeout=None):
    """Wait for one (**any**) of the futures to complete.

    Works correctly with both green and non-green futures (but not both
    together, since this can't be guaranteed to avoid dead-lock due to how
    the waiting implementations are different when green threads are being
    used).

    Returns pair (done futures, not done futures).
    """
    return _wait_for(fs, futures.FIRST_COMPLETED, _wait_for_any_green,
                     'wait_for_any', timeout=timeout)
Example #21
Source File: gthread.py From Flask-P2P with MIT License | 5 votes |
def run(self):
    # init listeners, add them to the event loop
    for s in self.sockets:
        s.setblocking(False)
        self.poller.register(s, selectors.EVENT_READ, self.accept)

    timeout = self.cfg.timeout or 0.5

    while self.alive:
        # notify the arbiter we are alive
        self.notify()

        # can we accept more connections?
        if self.nr < self.worker_connections:
            # wait for an event
            events = self.poller.select(0.02)
            for key, mask in events:
                callback = key.data
                callback(key.fileobj)

        if not self.is_parent_alive():
            break

        # handle keepalive timeouts
        self.murder_keepalived()

        # if the number of connections is < to the max we can handle at
        # the same time there is no need to wait for one
        if len(self.futures) < self.cfg.threads:
            continue

        result = futures.wait(self.futures, timeout=timeout,
                              return_when=futures.FIRST_COMPLETED)

        if not result.done:
            break
        else:
            [self.futures.remove(f) for f in result.done]

    self.tpool.shutdown(False)
    self.poller.close()
Example #22
Source File: test_concurrent_futures.py From ironpython3 with Apache License 2.0 | 5 votes |
def test_first_completed_some_already_completed(self):
    future1 = self.executor.submit(time.sleep, 1.5)

    finished, pending = futures.wait(
            [CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
            return_when=futures.FIRST_COMPLETED)

    self.assertEqual(
            set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
            finished)
    self.assertEqual(set([future1]), pending)
Example #23
Source File: test_concurrent_futures.py From ironpython3 with Apache License 2.0 | 5 votes |
def test_first_completed(self):
    future1 = self.executor.submit(mul, 21, 2)
    future2 = self.executor.submit(time.sleep, 1.5)

    done, not_done = futures.wait(
            [CANCELLED_FUTURE, future1, future2],
            return_when=futures.FIRST_COMPLETED)

    self.assertEqual(set([future1]), done)
    self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
Example #24
Source File: test_polling.py From civis-python with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_wait(self):
    done, not_done = futures.wait([QUEUED_RESULT, FINISHED_RESULT],
                                  return_when=futures.FIRST_COMPLETED)
    self.assertEqual(set([FINISHED_RESULT]), done)
    self.assertEqual(set([QUEUED_RESULT]), not_done)
Example #25
Source File: test_concurrent_futures.py From Fluid-Designer with GNU General Public License v3.0 | 5 votes |
def test_first_completed_some_already_completed(self):
    future1 = self.executor.submit(time.sleep, 1.5)

    finished, pending = futures.wait(
            [CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
            return_when=futures.FIRST_COMPLETED)

    self.assertEqual(
            set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
            finished)
    self.assertEqual(set([future1]), pending)
Example #26
Source File: test_concurrent_futures.py From Fluid-Designer with GNU General Public License v3.0 | 5 votes |
def test_first_completed(self):
    future1 = self.executor.submit(mul, 21, 2)
    future2 = self.executor.submit(time.sleep, 1.5)

    done, not_done = futures.wait(
            [CANCELLED_FUTURE, future1, future2],
            return_when=futures.FIRST_COMPLETED)

    self.assertEqual(set([future1]), done)
    self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
Example #27
Source File: async_arctic.py From arctic with GNU Lesser General Public License v2.1 | 5 votes |
def wait_any_request(requests, do_raise=False, timeout=None):
    if not AsyncArctic._wait_until_scheduled(requests, timeout):
        raise AsyncArcticException("Timed-out while waiting for request to be scheduled")
    while requests and not any(r.is_completed for r in requests):
        AsyncArctic.wait_tasks(
            tuple(r.future for r in requests
                  if not r.is_completed and r.future is not None),
            timeout=timeout, return_when=FIRST_COMPLETED,
            raise_exceptions=do_raise)
Example #28
Source File: engine_v2.py From misp42splunk with GNU Lesser General Public License v3.0 | 5 votes |
def start(self, jobs=None):
    """
    Engine starts to run jobs

    :param jobs: A list contains at least one job
    :return:
    """
    try:
        if not jobs:
            logger.warning("CloudConnectEngine just exits with no jobs to run")
            return
        for job in jobs:
            self._add_job(job)
        while not self._shutdown:
            logger.info("CloudConnectEngine starts to run...")
            if not self._pending_job_results:
                logger.info("CloudConnectEngine has no more jobs to run")
                break
            # check the intermediate results to find the done jobs and not
            # done jobs
            done_and_not_done_jobs = cf.wait(self._pending_job_results,
                                             return_when=cf.FIRST_COMPLETED)
            self._pending_job_results = done_and_not_done_jobs.not_done
            done_job_results = done_and_not_done_jobs.done
            for future in done_job_results:
                # get the result of each done jobs and add new jobs to the
                # engine if the result spawns more jobs
                result = future.result()
                if result:
                    if isinstance(result, Iterable):
                        for temp in result:
                            self._add_job(temp)
                    else:
                        self._add_job(result)
    except:
        logger.exception("CloudConnectEngine encountered exception")
    finally:
        self._teardown()
Example #29
Source File: engine_v2.py From misp42splunk with GNU Lesser General Public License v3.0 | 5 votes |
def start(self, jobs=None):
    """
    Engine starts to run jobs

    :param jobs: A list contains at least one job
    :return:
    """
    try:
        if not jobs:
            logger.warning("CloudConnectEngine just exits with no jobs to run")
            return
        for job in jobs:
            self._add_job(job)
        while not self._shutdown:
            logger.info("CloudConnectEngine starts to run...")
            if not self._pending_job_results:
                logger.info("CloudConnectEngine has no more jobs to run")
                break
            # check the intermediate results to find the done jobs and not
            # done jobs
            done_and_not_done_jobs = cf.wait(self._pending_job_results,
                                             return_when=cf.FIRST_COMPLETED)
            self._pending_job_results = done_and_not_done_jobs.not_done
            done_job_results = done_and_not_done_jobs.done
            for future in done_job_results:
                # get the result of each done jobs and add new jobs to the
                # engine if the result spawns more jobs
                result = future.result()
                if result:
                    if isinstance(result, Iterable):
                        for temp in result:
                            self._add_job(temp)
                    else:
                        self._add_job(result)
    except:
        logger.exception("CloudConnectEngine encountered exception")
    finally:
        self._teardown()
Example #30
Source File: _base.py From loky with BSD 3-Clause "New" or "Revised" License | 4 votes |
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
    """Wait for the futures in the given sequence to complete.

    Args:
        fs: The sequence of Futures (possibly created by different
            Executors) to wait upon.
        timeout: The maximum number of seconds to wait. If None, then there
            is no limit on the wait time.
        return_when: Indicates when this function should return. The options
            are:

            FIRST_COMPLETED - Return when any future finishes or is
                              cancelled.
            FIRST_EXCEPTION - Return when any future finishes by raising an
                              exception. If no future raises an exception
                              then it is equivalent to ALL_COMPLETED.
            ALL_COMPLETED -   Return when all futures finish or are cancelled.

    Returns:
        A named 2-tuple of sets. The first set, named 'done', contains the
        futures that completed (is finished or cancelled) before the wait
        completed. The second set, named 'not_done', contains uncompleted
        futures.
    """
    with _AcquireFutures(fs):
        done = set(f for f in fs
                   if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
        not_done = set(fs) - done

        if (return_when == FIRST_COMPLETED) and done:
            return DoneAndNotDoneFutures(done, not_done)
        elif (return_when == FIRST_EXCEPTION) and done:
            if any(f for f in done
                   if not f.cancelled() and f.exception() is not None):
                return DoneAndNotDoneFutures(done, not_done)

        if len(done) == len(fs):
            return DoneAndNotDoneFutures(done, not_done)

        waiter = _create_and_install_waiters(fs, return_when)

    waiter.event.wait(timeout)
    for f in fs:
        with f._condition:
            f._waiters.remove(waiter)

    done.update(waiter.finished_futures)
    return DoneAndNotDoneFutures(done, set(fs) - done)
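To tie the docstring above back to usage: the returned value is a named 2-tuple, so its two sets can be read either by unpacking or through the done and not_done attributes. A minimal hedged sketch (the sleep durations and timeout below are illustrative only):

import time
from concurrent.futures import FIRST_COMPLETED, ThreadPoolExecutor, wait

with ThreadPoolExecutor() as pool:
    futures = [pool.submit(time.sleep, d) for d in (0.1, 5.0)]
    # Returns once the 0.1 s task finishes; the 5.0 s task is still pending.
    result = wait(futures, timeout=2.0, return_when=FIRST_COMPLETED)
    print(len(result.done), 'done,', len(result.not_done), 'not done')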