Python contextlib.AsyncExitStack() Examples

The following are 21 code examples of contextlib.AsyncExitStack(), collected from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out the other available functions and classes of the contextlib module.
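Before the project-specific examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the pattern they all share: enter a variable number of async context managers on one AsyncExitStack and let the stack unwind them in reverse order when the block exits.

import asyncio
import contextlib


class Resource:
    """Toy async context manager standing in for a connection, session, etc."""

    def __init__(self, name):
        self.name = name

    async def __aenter__(self):
        print("open", self.name)
        return self

    async def __aexit__(self, exc_type, exc, tb):
        print("close", self.name)


async def main():
    async with contextlib.AsyncExitStack() as stack:
        # Contexts are closed in reverse order, even if an exception is raised.
        resources = [
            await stack.enter_async_context(Resource(name))
            for name in ("a", "b", "c")
        ]
        print([r.name for r in resources])


asyncio.run(main())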
Example #1
Source File: dataflow.py    From dffml with MIT License
async def run(self):
        # The merged dataflow
        merged: Dict[str, Any] = {}
        # For entering ConfigLoader contexts
        async with contextlib.AsyncExitStack() as exit_stack:
            # Load config loaders we'll need as we see their file types
            parsers: Dict[str, BaseConfigLoader] = {}
            for path in self.dataflows:
                _, exported = await BaseConfigLoader.load_file(
                    parsers, exit_stack, path
                )
                merge(merged, exported, list_append=True)
        # Export the dataflow
        dataflow = DataFlow._fromdict(**merged)
        async with self.configloader(BaseConfig()) as configloader:
            async with configloader() as loader:
                exported = dataflow.export(linked=not self.not_linked)
                print((await loader.dumpb(exported)).decode()) 
Example #2
Source File: adapters.py    From ptadapter with GNU General Public License v3.0
def __init__(
            self,
            pt_exec: Union[List[str], List[bytes]],
            state: Union[str, bytes, os.PathLike],
            *,
            exit_on_stdin_close: bool = True,
    ) -> None:
        """Create the adapter.

        Args:
            pt_exec: The pluggable transport command line to execute. This has
                to be a list of str / bytes, since
                :func:`asyncio.create_subprocess_exec` does not accept an
                entire command line as a string. On non-Windows platforms
                :func:`shlex.split` can be used to split a command line string
                into a list, while on Windows it's a bit more complicated.
            state: The state directory. This is a directory where the PT is
                allowed to store state. Either specify a path (which
                is not required to exist, in which case the PT will create
                the directory), or specify ``None`` to use a temporary
                directory created using :mod:`tempfile`.
            exit_on_stdin_close: Whether closing the PT's STDIN indicates the
                PT should gracefully exit.
        """
        if isinstance(pt_exec, (str, bytes)):
            self._pt_args = [pt_exec]
        else:
            self._pt_args = list(pt_exec)
        if state is not None:
            self._state = os.path.abspath(state)
        else:
            self._state = None
        self._exit_on_stdin_close = exit_on_stdin_close

        self._process: asyncio.subprocess.Process = None
        self._stdout_task: asyncio.Task = None
        self._ready = asyncio.Future()
        self._accepted_version: str = None
        self._transports: Dict[str, asyncio.Future] = {}
        self._stopping = False
        self._stack = contextlib.AsyncExitStack() 
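As the docstring above points out, pt_exec has to be a list because asyncio.create_subprocess_exec does not accept a whole command line as a single string. A small sketch of preparing it on a non-Windows platform (the obfs4proxy path and flag are only illustrative placeholders):

import shlex

# POSIX-style splitting; not appropriate on Windows.
pt_exec = shlex.split("/usr/bin/obfs4proxy -enableLogging=true")
# pt_exec == ['/usr/bin/obfs4proxy', '-enableLogging=true']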
Example #3
Source File: logic.py    From trinity with MIT License
@contextlib.asynccontextmanager
async def apply(self, connection: ConnectionAPI) -> AsyncIterator[asyncio.Future[None]]:
        """
        See LogicAPI.apply()

        The future returned here will be done when the first of the futures obtained from applying
        all behaviors of this application is done.
        """
        self.connection = connection

        async with contextlib.AsyncExitStack() as stack:
            futures: List[asyncio.Future[None]] = []
            # First apply all the child behaviors
            for behavior in self._behaviors:
                if behavior.should_apply_to(connection):
                    fut = await stack.enter_async_context(behavior.apply(connection))
                    futures.append(fut)

            # If none of our behaviors were applied, use a never-ending Future so that callsites
            # can wait on it like when behaviors are applied.
            if not futures:
                futures.append(asyncio.Future())

            # Now register ourselves with the connection.
            with connection.add_logic(self.name, self):
                yield asyncio.create_task(wait_first(futures)) 
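wait_first is a trinity helper that is not shown on this page. A rough stand-in with the semantics the docstring describes (the result is ready as soon as the first future finishes) might look like the sketch below; the real helper may differ, for example in how it cancels or reports the remaining futures.

import asyncio
from typing import Sequence


async def wait_first(futures: Sequence["asyncio.Future[None]"]) -> None:
    # Wait until the first future completes, then cancel the rest.
    done, pending = await asyncio.wait(futures, return_when=asyncio.FIRST_COMPLETED)
    for fut in pending:
        fut.cancel()
    for fut in done:
        fut.result()  # re-raise the winner's exception, if any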
Example #4
Source File: console_script.py    From ptadapter with GNU General Public License v3.0
async def run_client(conf: configparser.ConfigParser) -> None:
    pt_exec, state, tunnels = get_common_options_from_section(conf['client'])
    proxy = conf['client'].get('proxy', None)
    if not proxy:
        proxy = None
    transports = set()
    handler_confs = []

    for t in tunnels:
        section = conf[t]
        transport = section['transport']
        listen_host, listen_port = str_utils.parse_hostport(section['listen'])
        upstream_host, upstream_port = str_utils.parse_hostport(
            section['upstream'])
        args = {key[8:]: value
                for key, value in section.items()
                if key.startswith('options-')}
        transports.add(transport)
        handler_confs.append((
            (listen_host, listen_port),
            (transport, upstream_host, upstream_port, args),
        ))

    adapter = adapters.ClientAdapter(
        pt_exec, state, list(transports), proxy)

    async with contextlib.AsyncExitStack() as stack:
        await stack.enter_async_context(adapter)
        for listen_args, handler_args in handler_confs:
            handler = functools.partial(
                handle_client_connection, adapter, *handler_args)
            server = await asyncio.start_server(handler, *listen_args)
            await stack.enter_async_context(server)

        await adapter.wait()
        raise RuntimeError('PT process exited unexpectedly') 
Example #5
Source File: services.py    From trinity with MIT License
async def _run_background_services(
        services: Sequence[ServiceAPI],
        runner: Callable[[ServiceAPI], AsyncContextManager[ManagerAPI]]
) -> None:
    async with contextlib.AsyncExitStack() as stack:
        managers = tuple([
            await stack.enter_async_context(runner(service))
            for service in services
        ])
        # If any of the services terminate, we do so as well.
        await wait_first([
            asyncio.create_task(manager.wait_finished())
            for manager in managers
        ]) 
Example #6
Source File: memory.py    From dffml with MIT License
def __aenter__(self) -> "BaseOrchestratorContext":
        # TODO(subflows) In all of these contexts we are about to enter, they
        # all reach into their parents and store things in the parents memory
        # (or similar). What should be done is to have them create their own
        # storage space, so that each context is unique (which seems quite
        # unsurprising now, not sure what I was thinking before). If an
        # operation wants to initiate a subflow, it will need to call a method
        # we have yet to write within the orchestrator context which will reach
        # up to the parent of that orchestrator context and create a new
        # orchestrator context, thus triggering this __aenter__ method for the
        # new context. The only case where an operation will not want to reach
        # up to the parent to get all new contexts, is when it's an output
        # operation which desires to execute a subflow. If the output operation
        # created new contexts, then there would be no inputs in them, so that
        # would be pointless.
        enter = {
            "rctx": self.parent.rchecker,
            "ictx": self.parent.input_network,
            "octx": self.parent.operation_network,
            "lctx": self.parent.lock_network,
            "nctx": self.parent.opimp_network,
        }
        # If we were told to reuse a context, don't enter it. Just set the
        # attribute now.
        for name, ctx in self.config.reuse.items():
            if name in enter:
                self.logger.debug("Reusing %s: %s", name, ctx)
                del enter[name]
                setattr(self, name, ctx)
        # Create the exit stack and enter all the contexts we won't be reusing
        self._stack = AsyncExitStack()
        self._stack = await aenter_stack(self, enter)
        # Ensure that we can run the dataflow
        await self.initialize_dataflow(self.config.dataflow)
        return self 
Example #7
Source File: memory.py    From dffml with MIT License
async def __aenter__(
        self,
    ) -> "MemoryOperationImplementationNetworkContext":
        self._stack = AsyncExitStack()
        await self._stack.__aenter__()
        self.operations = {
            opimp.op.name: await self._stack.enter_async_context(opimp)
            for opimp in self.opimps.values()
        }
        return self 
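The __aenter__ examples above and below only show the stack being built. The matching __aexit__ methods are not part of these excerpts, but with an AsyncExitStack stored on self._stack they would typically just delegate to it, along the lines of this sketch, so every entered context is unwound in reverse order:

async def __aexit__(self, exc_type, exc_value, traceback):
    # Unwind everything entered in __aenter__, in reverse order,
    # letting the stack handle exception propagation as usual.
    await self._stack.__aexit__(exc_type, exc_value, traceback)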
Example #8
Source File: memory.py    From dffml with MIT License
async def __aenter__(self) -> "MemoryRedundancyCheckerContext":
        self.__stack = AsyncExitStack()
        self.__exit_stack = ExitStack()
        self.__exit_stack.__enter__()
        await self.__stack.__aenter__()
        self.kvstore = await self.__stack.enter_async_context(
            self.config.kvstore
        )
        self.loop = asyncio.get_event_loop()
        self.pool = self.__exit_stack.enter_context(
            concurrent.futures.ThreadPoolExecutor()
        )
        return self 
Example #9
Source File: memory.py    From dffml with MIT License
async def __aenter__(self) -> "MemoryRedundancyCheckerContext":
        self.__stack = AsyncExitStack()
        await self.__stack.__aenter__()
        self.kvctx = await self.__stack.enter_async_context(
            self.parent.kvstore()
        )
        return self 
Example #10
Source File: asynctestcase.py    From dffml with MIT License
async def setUp(self):
        super().setUp()
        self._stack = contextlib.ExitStack().__enter__()
        self._astack = await contextlib.AsyncExitStack().__aenter__() 
Example #11
Source File: asynchelper.py    From dffml with MIT License
async def aenter_stack(
    obj: Any,
    context_managers: Dict[str, AsyncContextManager],
    call: bool = True,
) -> AsyncExitStack:
    """
    Create a :py:class:`contextlib.AsyncExitStack` then go through each key,
    value pair in the dict of async context managers. Enter the context of each
    async context manager and call setattr on ``obj`` to set the attribute by
    the name of ``key`` to the value yielded by the async context manager.

    If ``call`` is true then the context entered will be the context returned by
    calling each context manager respectively.
    """
    stack = AsyncExitStack()
    await stack.__aenter__()
    if context_managers is not None:
        for key, ctxmanager in context_managers.items():
            if call:
                if inspect.isfunction(ctxmanager):
                    ctxmanager = ctxmanager.__get__(obj, obj.__class__)
                setattr(
                    obj, key, await stack.enter_async_context(ctxmanager())
                )
            else:
                setattr(obj, key, await stack.enter_async_context(ctxmanager))
    return stack 
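A hedged usage sketch for aenter_stack (the Database and Worker names are made up for illustration): passing a dict of context-manager factories sets the matching attributes on obj, and the caller is responsible for closing the returned stack later.

class Database:
    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc, tb):
        pass


class Worker:
    pass


async def example():
    worker = Worker()
    # call=True (the default): Database is called first, then entered,
    # and worker.db is bound to whatever Database().__aenter__ returns.
    stack = await aenter_stack(worker, {"db": Database})
    try:
        print(worker.db)
    finally:
        await stack.aclose()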
Example #12
Source File: asynchelper.py    From dffml with MIT License
async def __aenter__(self):
        self.clear()
        self.__stack = AsyncExitStack()
        await self.__stack.__aenter__()
        for item in self.parent.data:
            # Equivalent to entering the Object context then calling the object
            # to get the ObjectContext and entering that context. We then
            # return a list of all the inner contexts
            # >>> async with BaseDataFlowObject() as obj:
            # >>>     async with obj() as ctx:
            # >>>         clist.append(ctx)
            citem = item()
            self.logger.debug("Entering context: %r", citem)
            self.data.append(await self.__stack.enter_async_context(citem))
        return self 
Example #13
Source File: configloader.py    From dffml with MIT License
@classmethod
async def load_file(
        cls,
        parsers: Dict[str, "BaseConfigLoader"],
        exit_stack: contextlib.AsyncExitStack,
        path: pathlib.Path,
        *,
        base_dir: Optional[pathlib.Path] = None,
    ) -> Dict:
        async def _get_config(temp_filepath):
            if not isinstance(temp_filepath, pathlib.Path):
                temp_filepath = pathlib.Path(temp_filepath)
            config_path, loaded = await BaseConfigLoader.load_single_file(
                parsers, exit_stack, temp_filepath, base_dir=base_dir
            )
            return config_path, loaded

        async def _get_config_aux(temp_filepath):
            _, loaded = await _get_config(temp_filepath)
            return loaded

        if len(path.suffixes) >= 2 and path.suffixes[-2] == ".dirconf":
            dir_name = path.parts[-1].split(".")[0]
            dir_path = os.path.join(*(path.parts[:-1] + (dir_name,)))

            temp_conf_dict = {dir_name: dir_path}
            config_path, conf_dict = await _get_config(path)
            explored = explore_directories(temp_conf_dict)
            explored = await nested_apply(explored, _get_config_aux)
            conf_dict.update(explored[dir_name])
        else:
            config_path, conf_dict = await _get_config(path)
        return config_path, conf_dict 
Example #14
Source File: configloader.py    From dffml with MIT License
@classmethod
async def load_single_file(
        cls,
        parsers: Dict[str, "BaseConfigLoader"],
        exit_stack: contextlib.AsyncExitStack,
        path: pathlib.Path,
        *,
        base_dir: Optional[pathlib.Path] = None,
    ) -> Dict:
        """
        Load one file and load the ConfigLoader for it if necessary, using the
        AsyncExitStack provided.
        """
        filetype = path.suffix.replace(".", "")
        # Load the parser for the filetype if it isn't already loaded
        if filetype not in parsers:
            # TODO Get configs for loaders from somewhere, probably the
            # config of the multicomm
            loader_cls = cls.load(filetype)
            loader = await exit_stack.enter_async_context(
                loader_cls(BaseConfig())
            )
            parsers[filetype] = await exit_stack.enter_async_context(loader())
        # The config will be stored by its unique filepath split on dirs
        config_path = list(
            path.parts[len(base_dir.parts) :]
            if base_dir is not None
            else path.parts
        )
        # Get rid of suffix for last member of path
        if config_path:
            config_path[-1] = path.stem
        config_path = tuple(config_path)
        # Load the file
        return config_path, await parsers[filetype].loadb(path.read_bytes()) 
Example #15
Source File: feature.py    From dffml with MIT License
async def __aenter__(self):
        self._stack = AsyncExitStack()
        await self._stack.__aenter__()
        for item in self.data:
            await self._stack.enter_async_context(item)
        return self 
Example #16
Source File: testing.py    From dffml with MIT License
async def setUp(self):
        self.exit_stack = contextlib.AsyncExitStack()
        await self.exit_stack.__aenter__()
        self.tserver = await self.exit_stack.enter_async_context(
            ServerRunner.patch(Server)
        )
        self.cli = Server(port=0, insecure=True)
        await self.tserver.start(self.cli.run())
        # Set up client
        self.session = await self.exit_stack.enter_async_context(
            aiohttp.ClientSession()
        ) 
Example #17
Source File: peer.py    From trinity with MIT License
async def run(self) -> None:
        self._start_time = time.monotonic()
        self.connection.add_command_handler(Disconnect, cast(HandlerFn, self._handle_disconnect))
        try:
            async with contextlib.AsyncExitStack() as stack:
                fut = await stack.enter_async_context(P2PAPI().as_behavior().apply(self.connection))
                futures = [fut]
                self.p2p_api = self.connection.get_logic('p2p', P2PAPI)

                for behavior in self.get_behaviors():
                    if behavior.should_apply_to(self.connection):
                        future = await stack.enter_async_context(behavior.apply(self.connection))
                        futures.append(future)

                self.connection.add_msg_handler(self._handle_subscriber_message)

                self.setup_protocol_handlers()

                # The `boot` process is run in the background to allow the `run` loop
                # to continue so that all of the Peer APIs can be used within the
                # `boot` task.
                self.manager.run_child_service(self.boot_manager)

                # Trigger the connection to start feeding messages though the handlers
                self.connection.start_protocol_streams()
                self.ready.set()

                try:
                    await wait_first(futures)
                except asyncio.CancelledError:
                    raise
                except BaseException:
                    self.logger.exception("Behavior finished before us, cancelling ourselves")
                    self.manager.cancel()
        finally:
            for callback in self._finished_callbacks:
                callback(self)
            if (self.p2p_api.local_disconnect_reason is None and
                    self.p2p_api.remote_disconnect_reason is None):
                self._send_disconnect(DisconnectReason.CLIENT_QUITTING)
            # We run as a child service of the connection, but we don't want to leave a connection
            # open if somebody cancels just us, so this ensures the connection gets closed as well.
            if not self.connection.get_manager().is_cancelled:
                self.logger.debug("Connection hasn't been cancelled yet, doing so now")
                self.connection.get_manager().cancel() 
Example #18
Source File: test_requests.py    From trinity with MIT License
async def test_proxy_peer_requests(
        request,
        event_bus,
        other_event_bus,
        event_loop,
        chaindb_20,
        client_and_server):
    server_event_bus = event_bus
    client_event_bus = other_event_bus
    client_peer, server_peer = client_and_server

    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=server_event_bus)

    async with contextlib.AsyncExitStack() as stack:
        await stack.enter_async_context(run_peer_pool_event_server(
            client_event_bus, client_peer_pool, handler_type=ETHPeerPoolEventServer
        ))

        await stack.enter_async_context(run_peer_pool_event_server(
            server_event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ))

        await stack.enter_async_context(background_asyncio_service(ETHRequestServer(
            server_event_bus,
            TO_NETWORKING_BROADCAST_CONFIG,
            AsyncChainDB(chaindb_20.db)
        )))

        client_proxy_peer_pool = ETHProxyPeerPool(client_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(background_asyncio_service(client_proxy_peer_pool))

        proxy_peer_pool = ETHProxyPeerPool(server_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(background_asyncio_service(proxy_peer_pool))

        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(client_peer.session)

        headers = await proxy_peer.eth_api.get_block_headers(0, 1, 0, False)

        assert len(headers) == 1
        block_header = headers[0]
        assert block_header.block_number == 0

        receipts = await proxy_peer.eth_api.get_receipts(headers)
        assert len(receipts) == 1
        receipt = receipts[0]
        assert receipt[1][0] == block_header.receipt_root

        block_bundles = await proxy_peer.eth_api.get_block_bodies(headers)
        assert len(block_bundles) == 1
        first_bundle = block_bundles[0]
        assert first_bundle[1][0] == block_header.transaction_root

        node_data = await proxy_peer.eth_api.get_node_data((block_header.state_root,))
        assert node_data[0][0] == block_header.state_root 
Example #19
Source File: test_requests.py    From trinity with MIT License
async def test_get_pooled_transactions_request(
        request,
        event_bus,
        other_event_bus,
        event_loop,
        chaindb_20,
        client_and_server):
    server_event_bus = event_bus
    client_event_bus = other_event_bus
    client_peer, server_peer = client_and_server

    if get_highest_eth_protocol_version(client_peer) < ETHProtocolV65.version:
        pytest.skip("Test not applicable below eth/65")

    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=server_event_bus)

    async with contextlib.AsyncExitStack() as stack:
        await stack.enter_async_context(run_peer_pool_event_server(
            client_event_bus, client_peer_pool, handler_type=ETHPeerPoolEventServer
        ))

        await stack.enter_async_context(run_peer_pool_event_server(
            server_event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ))

        client_proxy_peer_pool = ETHProxyPeerPool(client_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(background_asyncio_service(client_proxy_peer_pool))

        proxy_peer_pool = ETHProxyPeerPool(server_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(background_asyncio_service(proxy_peer_pool))

        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(client_peer.session)

        # The reason we run this test separately from the other request tests is because
        # GetPooledTransactions requests should be answered from the tx pool which the previous
        # test does not depend on.
        await stack.enter_async_context(background_asyncio_service(TxPool(
            server_event_bus,
            proxy_peer_pool,
            lambda _: True
        )))

        # The tx pool always answers these with an empty response
        txs = await proxy_peer.eth_api.get_pooled_transactions(
            (decode_hex('0x9ea39df6210064648ecbc465cd628fe52f69af53792e1c2f27840133435159d4'),)
        )
        assert len(txs) == 0 
Example #20
Source File: test_requests.py    From trinity with MIT License
async def test_requests_when_peer_in_client_vanishs(
        request,
        event_bus,
        other_event_bus,
        event_loop,
        chaindb_20,
        client_and_server):

    server_event_bus = event_bus
    client_event_bus = other_event_bus
    client_peer, server_peer = client_and_server

    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=server_event_bus)

    async with contextlib.AsyncExitStack() as stack:
        await stack.enter_async_context(run_peer_pool_event_server(
            client_event_bus, client_peer_pool, handler_type=ETHPeerPoolEventServer
        ))
        await stack.enter_async_context(run_peer_pool_event_server(
            server_event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ))

        await stack.enter_async_context(background_asyncio_service(ETHRequestServer(
            server_event_bus,
            TO_NETWORKING_BROADCAST_CONFIG,
            AsyncChainDB(chaindb_20.db)
        )))
        client_proxy_peer_pool = ETHProxyPeerPool(client_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(background_asyncio_service(client_proxy_peer_pool))

        server_proxy_peer_pool = ETHProxyPeerPool(server_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(background_asyncio_service(server_proxy_peer_pool))

        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(client_peer.session)

        # We remove the peer from the client and assume to see PeerConnectionLost exceptions raised
        client_peer_pool.connected_nodes.pop(client_peer.session)

        with pytest.raises(PeerConnectionLost):
            await proxy_peer.eth_api.get_block_headers(0, 1, 0, False)

        with pytest.raises(PeerConnectionLost):
            await proxy_peer.eth_api.get_receipts(())

        with pytest.raises(PeerConnectionLost):
            await proxy_peer.eth_api.get_block_bodies(())

        with pytest.raises(PeerConnectionLost):
            await proxy_peer.eth_api.get_node_data(())


Example #21
Source File: test_tx_pool.py    From trinity with MIT License
async def two_connected_tx_pools(
        event_bus,
        other_event_bus,
        event_loop,
        funded_address_private_key,
        chain_with_block_validation,
        tx_validator,
        client_and_server):

    alice_event_bus = event_bus
    bob_event_bus = other_event_bus
    bob, alice = client_and_server

    bob_peer_pool = MockPeerPoolWithConnectedPeers([bob], event_bus=bob_event_bus)
    alice_peer_pool = MockPeerPoolWithConnectedPeers([alice], event_bus=alice_event_bus)

    async with contextlib.AsyncExitStack() as stack:
        await stack.enter_async_context(run_peer_pool_event_server(
            bob_event_bus, bob_peer_pool, handler_type=ETHPeerPoolEventServer
        ))

        await stack.enter_async_context(run_peer_pool_event_server(
            alice_event_bus, alice_peer_pool, handler_type=ETHPeerPoolEventServer
        ))

        bob_proxy_peer_pool = ETHProxyPeerPool(bob_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(background_asyncio_service(bob_proxy_peer_pool))

        alice_proxy_peer_pool = ETHProxyPeerPool(alice_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(background_asyncio_service(alice_proxy_peer_pool))

        alice_tx_pool = TxPool(
            alice_event_bus,
            alice_proxy_peer_pool,
            tx_validator,
        )
        await stack.enter_async_context(background_asyncio_service(alice_tx_pool))

        bob_tx_pool = TxPool(
            bob_event_bus,
            bob_proxy_peer_pool,
            tx_validator,
        )
        await stack.enter_async_context(background_asyncio_service(bob_tx_pool))

        yield (alice, alice_event_bus, alice_tx_pool, ), (bob, bob_event_bus, bob_tx_pool)