Python asyncio.PriorityQueue() Examples
The following are 19 code examples of asyncio.PriorityQueue(). You can go to the original project or source file by following the links above each example. You may also want to check out all other available functions and classes of the asyncio module.
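Before the project examples, here is a minimal self-contained sketch (not taken from any of the projects below) of the basic workflow: items are retrieved smallest-first, and (priority, payload) tuples are a common way to attach a priority to arbitrary data.

import asyncio

async def main():
    q = asyncio.PriorityQueue()

    # Lower priority values come out first.
    await q.put((2, "low priority"))
    await q.put((1, "high priority"))

    while not q.empty():
        priority, payload = await q.get()
        print(priority, payload)
        q.task_done()

asyncio.run(main())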
Example #1
Source File: punish.py From calebj-cogs with GNU General Public License v3.0 | 6 votes |
def __init__(self, bot):
    self.bot = bot
    self.json = compat_load(JSON)

    # queue variables
    self.queue = asyncio.PriorityQueue(loop=bot.loop)
    self.queue_lock = asyncio.Lock(loop=bot.loop)
    self.pending = {}
    self.enqueued = set()

    try:
        self.analytics = CogAnalytics(self)
    except Exception as error:
        self.bot.logger.exception(error)
        self.analytics = None

    self.task = bot.loop.create_task(self.on_load())
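Note: the loop= argument used here (and in several later examples) was deprecated in Python 3.8 and removed in Python 3.10. A minimal sketch of the equivalent queue setup on current Python (the class name and the omission of the bot-specific attributes are illustrative assumptions, not the cog's actual code):

import asyncio

class Punish:  # hypothetical class name, for this sketch only
    def __init__(self):
        # On Python 3.10+ the queue and lock bind to the running event loop when first used.
        self.queue = asyncio.PriorityQueue()
        self.queue_lock = asyncio.Lock()
        self.pending = {}
        self.enqueued = set()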
Example #2
Source File: scheduler.py From cocrawler with Apache License 2.0 | 6 votes |
def __init__(self, robots, resolver):
    self.robots = robots
    self.resolver = resolver
    self.q = asyncio.PriorityQueue()
    self.ridealong = {}
    self.awaiting_work = 0
    self.maxhostqps = None
    self.delta_t = None
    self.next_fetch = cachetools.ttl.TTLCache(10000, 10)  # 10 seconds good enough for QPS=0.1 and up
    self.frozen_until = cachetools.ttl.TTLCache(10000, 10)  # 10 seconds is longer than our typical delay
    self.maxhostqps = float(config.read('Crawl', 'MaxHostQPS'))
    self.delta_t = 1./self.maxhostqps
    self.initialize_budgets()

    _, prefetch_dns = fetcher.global_policies()
    self.use_ip_key = prefetch_dns
    memory.register_debug(self.memory)
Example #3
Source File: peers.py From trinity with MIT License | 6 votes |
def __init__(
        self,
        response_command_type: Union[Type[CommandAPI[Any]], Sequence[Type[CommandAPI[Any]]]],
        sort_key: Callable[[PerformanceAPI], float] = _items_per_second) -> None:
    """
    :param sort_key: how should we sort the peers to get the fastest? low score means top-ranked
    """
    self._waiting_peers = PriorityQueue()
    if isinstance(response_command_type, type):
        self._response_command_type = (response_command_type,)
    elif isinstance(response_command_type, collections.abc.Sequence):
        self._response_command_type = tuple(response_command_type)
    else:
        raise TypeError(f"Unsupported value: {response_command_type}")
    self._peer_wrapper = SortableTask.orderable_by_func(self._get_peer_rank)
    self._sort_key = sort_key
Example #4
Source File: pool.py From aioelasticsearch with MIT License | 6 votes |
def __init__(
    self,
    connections,
    dead_timeout=60,
    timeout_cutoff=5,
    selector_class=RoundRobinSelector,
    randomize_hosts=True,
    *,
    loop,
    **kwargs
):
    self._dead_timeout = dead_timeout
    self.timeout_cutoff = timeout_cutoff
    self.connection_opts = connections
    self.connections = [c for (c, _) in connections]
    self.orig_connections = set(self.connections)
    self.dead = asyncio.PriorityQueue(len(self.connections), loop=loop)
    self.dead_count = collections.Counter()
    self.loop = loop

    if randomize_hosts:
        random.shuffle(self.connections)

    self.selector = selector_class(dict(connections))
Example #5
Source File: queue.py From Amipy with MIT License | 5 votes |
def __init__(self, maxsize=0):
    super(PriorityQueue, self).__init__(maxsize)
Example #6
Source File: test_queues.py From android_universal with MIT License | 5 votes |
def test_order(self):
    q = asyncio.PriorityQueue(loop=self.loop)
    for i in [1, 3, 2]:
        q.put_nowait(i)

    items = [q.get_nowait() for _ in range(3)]
    self.assertEqual([1, 2, 3], items)
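The test above queues plain integers, which are directly comparable. When the queued items themselves are not comparable (dicts, custom objects), a common pattern, not specific to any project here, is to enqueue (priority, tie_breaker, item) tuples so that equal priorities never fall through to comparing the payload:

import asyncio
import itertools

async def main():
    q = asyncio.PriorityQueue()
    counter = itertools.count()  # monotonically increasing tie-breaker

    for priority, job in [(3, {"task": "c"}), (1, {"task": "a"}), (1, {"task": "b"})]:
        # The counter guarantees the dict payloads are never compared to each other.
        await q.put((priority, next(counter), job))

    while not q.empty():
        priority, _, job = await q.get()
        print(priority, job)

asyncio.run(main())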
Example #7
Source File: scheduler.py From cocrawler with Apache License 2.0 | 5 votes |
def load(self, crawler, f):
    header = pickle.load(f)  # XXX check that this is a good header... log it
    self.ridealong = pickle.load(f)
    crawler._seeds = pickle.load(f)
    self.q = asyncio.PriorityQueue()
    count = pickle.load(f)
    for _ in range(0, count):
        work = pickle.load(f)
        self.q.put_nowait(work)
Example #8
Source File: eventqueue.py From presso with GNU General Public License v3.0 | 5 votes |
def __init__(self):
    self.__locker = {}
    self.__queue = PriorityQueue()
Example #9
Source File: test_queues.py From Project-New-Reign---Nemesis-Main with GNU General Public License v3.0 | 5 votes |
def test_order(self):
    q = asyncio.PriorityQueue(loop=self.loop)
    for i in [1, 3, 2]:
        q.put_nowait(i)

    items = [q.get_nowait() for _ in range(3)]
    self.assertEqual([1, 2, 3], items)
Example #10
Source File: prioqueue.py From mp with Apache License 2.0 | 5 votes |
async def run():
    queue = asyncio.PriorityQueue()
    consumer = asyncio.ensure_future(consume(queue))
    await produce(queue)
    await queue.join()
    consumer.cancel()
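The produce and consume coroutines are not shown in this snippet; a plausible sketch of what they might look like (names kept from the call sites above, but the bodies are illustrative assumptions, not the project's actual code):

import asyncio
import random

async def produce(queue):
    # Illustrative only: enqueue a handful of (priority, payload) items.
    for i in range(5):
        await queue.put((random.randint(0, 10), f"item-{i}"))

async def consume(queue):
    # Illustrative only: drain items smallest-priority-first, marking each done
    # so that queue.join() in run() can return.
    while True:
        priority, payload = await queue.get()
        print(priority, payload)
        queue.task_done()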
Example #11
Source File: test_queues.py From annotated-py-projects with MIT License | 5 votes |
def test_order(self):
    q = asyncio.PriorityQueue(loop=self.loop)
    for i in [1, 3, 2]:
        q.put_nowait(i)

    items = [q.get_nowait() for _ in range(3)]
    self.assertEqual([1, 2, 3], items)
Example #12
Source File: queue_usage.py From Daniel-Arbuckles-Mastering-Python with MIT License | 5 votes |
async def using_queues():
    q = asyncio.Queue()

    q.put_nowait('Hello')
    await q.get()

    await q.put('world')
    q.get_nowait()

    pq = asyncio.PriorityQueue()
    stack = asyncio.LifoQueue()
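The snippet above only constructs the three queue types; a short sketch (not from the book's code) of how their retrieval order differs:

import asyncio

async def main():
    fifo, prio, lifo = asyncio.Queue(), asyncio.PriorityQueue(), asyncio.LifoQueue()

    for q in (fifo, prio, lifo):
        for item in (3, 1, 2):
            q.put_nowait(item)

    print([fifo.get_nowait() for _ in range(3)])  # [3, 1, 2]  insertion order
    print([prio.get_nowait() for _ in range(3)])  # [1, 2, 3]  smallest first
    print([lifo.get_nowait() for _ in range(3)])  # [2, 1, 3]  last in, first out

asyncio.run(main())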
Example #13
Source File: test_queues.py From ironpython3 with Apache License 2.0 | 5 votes |
def test_order(self):
    q = asyncio.PriorityQueue(loop=self.loop)
    for i in [1, 3, 2]:
        q.put_nowait(i)

    items = [q.get_nowait() for _ in range(3)]
    self.assertEqual([1, 2, 3], items)
Example #14
Source File: test_queues.py From Fluid-Designer with GNU General Public License v3.0 | 5 votes |
def test_order(self):
    q = asyncio.PriorityQueue(loop=self.loop)
    for i in [1, 3, 2]:
        q.put_nowait(i)

    items = [q.get_nowait() for _ in range(3)]
    self.assertEqual([1, 2, 3], items)
Example #15
Source File: scheduler.py From Squid-Plugins with MIT License | 5 votes |
def __init__(self, bot):
    self.bot = bot
    self.events = fileIO('data/scheduler/events.json', 'load')
    self.queue = asyncio.PriorityQueue(loop=self.bot.loop)
    self.queue_lock = asyncio.Lock()
    self.to_kill = {}
    self._load_events()
Example #16
Source File: scheduler.py From calebj-cogs with GNU General Public License v3.0 | 5 votes |
def __init__(self, bot):
    self.bot = bot
    self.events = dataIO.load_json(JSON)
    self.queue = asyncio.PriorityQueue(loop=self.bot.loop)
    self.queue_lock = asyncio.Lock()
    self.pending = {}
    self.pending_by_event = defaultdict(lambda: list())
    self._load_events()
    self.task = bot.loop.create_task(self.queue_manager())
Example #17
Source File: pool.py From aioes with Apache License 2.0 | 5 votes |
def __init__(self, connections, *, dead_timeout=60, timeout_cutoff=5,
             selector_factory=RoundRobinSelector, loop):
    self._dead_timeout = dead_timeout
    self._timeout_cutoff = timeout_cutoff
    self._selector = selector_factory()
    self._dead = asyncio.PriorityQueue(len(connections), loop=loop)
    self._dead_count = collections.Counter()
    self._connections = connections
    self._loop = loop
Example #18
Source File: Blockfetcher.py From pycoinnet with MIT License | 5 votes |
def __init__(self, max_q_size=0):
    # this queue accepts tuples of the form:
    #   (block_index, InvItem(ITEM_TYPE_BLOCK, block_hash), future)
    self.block_hash_priority_queue = asyncio.PriorityQueue(max_q_size)
Example #19
Source File: inv_batcher.py From spruned with MIT License | 4 votes |
def __init__(self, target_batch_time=10, max_batch_size=500, inv_item_future_q_maxsize=1000):
    self._is_closing = False
    self._inv_item_future_queue = asyncio.PriorityQueue(maxsize=inv_item_future_q_maxsize)

    async def batch_getdata_fetches(peer_batch_tuple, q):
        peer, desired_batch_size = peer_batch_tuple
        batch = []
        skipped = []
        logger.info("peer %s trying to build batch up to size %d", peer, desired_batch_size)
        while len(batch) == 0 or (
                len(batch) < desired_batch_size and not self._inv_item_future_queue.empty()):
            item = await self._inv_item_future_queue.get()
            (priority, inv_item, f, peers_tried) = item
            if f.done():
                continue
            if peer in peers_tried:
                skipped.append(item)
            else:
                batch.append(item)
        if len(batch) > 0:
            await q.put((peer, batch, desired_batch_size))
        for item in skipped:
            if not item[2].done():
                await self._inv_item_future_queue.put(item)

    async def fetch_batch(peer_batch, q):
        loop = asyncio.get_event_loop()
        peer, batch, prior_max = peer_batch
        inv_items = [inv_item for (priority, inv_item, f, peers_tried) in batch]
        peer.send_msg("getdata", items=inv_items)
        start_time = loop.time()
        futures = [f for (priority, bh, f, peers_tried) in batch]
        await asyncio.wait(futures, timeout=target_batch_time)
        end_time = loop.time()
        batch_time = end_time - start_time
        logger.info("completed batch size of %d with time %f", len(inv_items), batch_time)
        completed_count = sum([1 for f in futures if f.done()])
        item_per_unit_time = completed_count / batch_time
        new_batch_size = min(prior_max * 4, int(target_batch_time * item_per_unit_time + 0.5))
        new_batch_size = min(max(1, new_batch_size), max_batch_size)
        logger.info("new batch size for %s is %d", peer, new_batch_size)
        for (priority, inv_item, f, peers_tried) in batch:
            if not f.done():
                peers_tried.add(peer)
                await self._inv_item_future_queue.put((priority, inv_item, f, peers_tried))
        await self._peer_batch_queue.put((peer, new_batch_size))

    self._peer_batch_queue = MappingQueue(
        dict(callback_f=batch_getdata_fetches),
        dict(callback_f=fetch_batch, input_q_maxsize=2),
    )

    self._inv_item_hash_to_future = dict()