Python aiohttp.client() Examples
The following are 17 code examples of aiohttp.client().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module aiohttp, or try the search function.
Example #1
Source File: test_utils.py From lambda-text-extractor with Apache License 2.0 | 6 votes |
def close(self): """Close all fixtures created by the test client. After that point, the TestClient is no longer usable. This is an idempotent function: running close multiple times will not have any additional effects. close is also run on exit when used as a(n) (asynchronous) context manager. """ if not self._closed: for resp in self._responses: resp.close() for ws in self._websockets: yield from ws.close() self._session.close() yield from self._server.close() self._closed = True
Example #2
Source File: test_utils.py From lambda-text-extractor with Apache License 2.0 | 6 votes |
def close(self): """Close all fixtures created by the test client. After that point, the TestClient is no longer usable. This is an idempotent function: running close multiple times will not have any additional effects. close is also run when the object is garbage collected, and on exit when used as a context manager. """ if self.started and not self.closed: self.server.close() yield from self.server.wait_closed() self._root = None self.port = None yield from self._close_hook() self._closed = True
Example #3
Source File: test_utils.py From lambda-text-extractor with Apache License 2.0 | 6 votes |
def close(self): """Close all fixtures created by the test client. After that point, the TestClient is no longer usable. This is an idempotent function: running close multiple times will not have any additional effects. close is also run when the object is garbage collected, and on exit when used as a context manager. """ if self.started and not self.closed: self.server.close() yield from self.server.wait_closed() self._root = None self.port = None yield from self._close_hook() self._closed = True
Example #4
Source File: test_utils.py From lambda-text-extractor with Apache License 2.0 | 6 votes |
def close(self): """Close all fixtures created by the test client. After that point, the TestClient is no longer usable. This is an idempotent function: running close multiple times will not have any additional effects. close is also run on exit when used as a(n) (asynchronous) context manager. """ if not self._closed: for resp in self._responses: resp.close() for ws in self._websockets: yield from ws.close() self._session.close() yield from self._server.close() self._closed = True
Example #5
Source File: client.py From aiohttp-xmlrpc with MIT License | 5 votes |
def __init__(self, url, client=None, headers=None, encoding=None, **kwargs):
    """Set up an XML-RPC client endpoint.

    :param url: server endpoint; stored as a string.
    :param client: optional existing aiohttp client session; when omitted
        a new ClientSession is created from ``**kwargs``.
    :param headers: optional extra HTTP headers (Content-Type and
        User-Agent are filled in only when absent).
    :param encoding: XML encoding passed through to serialization.
    """
    header_map = MultiDict(headers or {})
    # Only provide defaults; caller-supplied values win.
    header_map.setdefault('Content-Type', 'text/xml')
    header_map.setdefault('User-Agent', self.USER_AGENT)
    self.headers = header_map
    self.encoding = encoding
    self.url = str(url)
    self.client = client or aiohttp.client.ClientSession(**kwargs)
Example #6
Source File: test_utils.py From lambda-text-extractor with Apache License 2.0 | 5 votes |
def setUp(self):
    """Build the event loop, application and test client for one test."""
    loop = setup_test_loop()
    self.loop = loop
    # The application and client factories are coroutines, so each step
    # is driven to completion on the freshly created loop.
    self.app = loop.run_until_complete(self.get_application())
    self.client = loop.run_until_complete(self._get_client(self.app))
    loop.run_until_complete(self.client.start_server())
Example #7
Source File: test_utils.py From lambda-text-extractor with Apache License 2.0 | 5 votes |
def session(self): """An internal aiohttp.ClientSession. Unlike the methods on the TestClient, client session requests do not automatically include the host in the url queried, and will require an absolute path to the resource. """ return self._session
Example #8
Source File: test_utils.py From lambda-text-extractor with Apache License 2.0 | 5 votes |
def tearDown(self):
    """Shut down the test client, then dispose of the event loop."""
    # Client teardown is a coroutine; drive it on the test loop first
    # so the loop is still alive while the client closes.
    self.loop.run_until_complete(self.client.close())
    teardown_test_loop(self.loop)
Example #9
Source File: test_utils.py From lambda-text-extractor with Apache License 2.0 | 5 votes |
def setUp(self):
    """Build the event loop, application and test client for one test."""
    loop = setup_test_loop()
    self.loop = loop
    # The application and client factories are coroutines, so each step
    # is driven to completion on the freshly created loop.
    self.app = loop.run_until_complete(self.get_application())
    self.client = loop.run_until_complete(self._get_client(self.app))
    loop.run_until_complete(self.client.start_server())
Example #10
Source File: test_utils.py From lambda-text-extractor with Apache License 2.0 | 5 votes |
def session(self): """An internal aiohttp.ClientSession. Unlike the methods on the TestClient, client session requests do not automatically include the host in the url queried, and will require an absolute path to the resource. """ return self._session
Example #11
Source File: client.py From rally with Apache License 2.0 | 5 votes |
def wait_for_rest_layer(es, max_attempts=40):
    """Waits for ``max_attempts`` until Elasticsearch's REST API is available.

    :param es: Elasticsearch client to use for connecting.
    :param max_attempts: The maximum number of attempts to check whether the REST API is available.
    :return: True iff Elasticsearch's REST API is available.
    :raises exceptions.SystemSetupError: when an http client hits an https endpoint.
    """
    # assume that at least the hosts that we expect to contact should be available. Note that this is not 100%
    # bullet-proof as a cluster could have e.g. dedicated masters which are not contained in our list of target hosts
    # but this is still better than just checking for any random node's REST API being reachable.
    expected_node_count = len(es.transport.hosts)
    logger = logging.getLogger(__name__)
    # fix: hoisted out of the retry loop — the import is loop-invariant
    import elasticsearch
    for attempt in range(max_attempts):
        # fix: the original debug message claimed "REST API is available"
        # at the top of every attempt, before availability was determined
        logger.debug("Checking REST API availability, attempt [%s]...", attempt)
        try:
            # see also WaitForHttpResource in Elasticsearch tests. Contrary to the ES tests we consider the API also
            # available when the cluster status is RED (as long as all required nodes are present)
            es.cluster.health(wait_for_nodes=">={}".format(expected_node_count))
            logger.info("REST API is available for >= [%s] nodes after [%s] attempts.", expected_node_count, attempt)
            return True
        except elasticsearch.ConnectionError as e:
            if "SSL: UNKNOWN_PROTOCOL" in str(e):
                raise exceptions.SystemSetupError("Could not connect to cluster via https. Is this an https endpoint?", e)
            else:
                logger.debug("Got connection error on attempt [%s]. Sleeping...", attempt)
                time.sleep(3)
        except elasticsearch.TransportError as e:
            # cluster block, x-pack not initialized yet, our wait condition is not reached
            if e.status_code in (503, 401, 408):
                logger.debug("Got status code [%s] on attempt [%s]. Sleeping...", e.status_code, attempt)
                time.sleep(3)
            else:
                logger.warning("Got unexpected status code [%s] on attempt [%s].", e.status_code, attempt)
                raise e
    return False
Example #12
Source File: client.py From aiohttp-xmlrpc with MIT License | 5 votes |
def close(self):
    """Close the underlying HTTP client session.

    Delegates to the aiohttp session; the session's return value is
    passed straight through to the caller.
    """
    session = self.client
    return session.close()
Example #13
Source File: client.py From aiohttp-xmlrpc with MIT License | 5 votes |
async def __remote_call(self, method_name, *args, **kwargs):
    """POST one XML-RPC call and return the parsed result.

    Fix: the body uses ``async with``/``await``, so the function must be
    declared ``async def`` (the plain ``def`` header was a syntax error).

    :param method_name: remote procedure name to invoke.
    :param args: positional arguments serialized into the request body.
    :param kwargs: keyword arguments serialized into the request body.
    :return: the value decoded from the server's XML-RPC response.
    :raises aiohttp.ClientResponseError: on a non-2xx HTTP status
        (via ``raise_for_status``).
    """
    # Serialize the call up front so a serialization error surfaces
    # before any network traffic happens.
    payload = etree.tostring(
        self._make_request(method_name, *args, **kwargs),
        xml_declaration=True,
        encoding=self.encoding,
    )
    async with self.client.post(
        str(self.url), data=payload, headers=self.headers,
    ) as response:
        response.raise_for_status()
        body = await response.read()
        return self._parse_response(body, method_name)
Example #14
Source File: endpoint.py From aiobotocore with Apache License 2.0 | 5 votes |
async def _send(self, request):
    """Send a prepared botocore request and return the aiohttp response.

    Fix: the body uses ``await``, so the function must be declared
    ``async def`` (the plain ``def`` header was a syntax error).

    :param request: botocore prepared request (method, url, headers, body).
    :return: the aiohttp response; for non-streaming requests the body is
             pre-read so timeout errors can be retried by the caller.
    """
    # Note: When using aiobotocore with dynamodb, requests fail on crc32
    # checksum computation as soon as the response data reaches ~5KB.
    # When AWS response is gzip compressed:
    # 1. aiohttp is automatically decompressing the data
    #    (http://aiohttp.readthedocs.io/en/stable/client.html#binary-response-content)
    # 2. botocore computes crc32 on the uncompressed data bytes and fails
    #    cause crc32 has been computed on the compressed data
    # The following line forces aws not to use gzip compression,
    # if there is a way to configure aiohttp not to perform decompression,
    # we can remove the following line and take advantage of
    # aws gzip compression.
    # https://github.com/boto/botocore/issues/1255
    url = request.url
    headers = request.headers
    data = request.body
    headers['Accept-Encoding'] = 'identity'
    headers_ = MultiDict(
        (z[0], _text(z[1], encoding='utf-8')) for z in headers.items())

    # botocore does this during the request so we do this here as well
    # TODO: this should be part of the ClientSession, perhaps make wrapper
    proxy = self.proxies.get(urlparse(url.lower()).scheme)

    if isinstance(data, io.IOBase):
        data = _IOBaseWrapper(data)

    # `encoded=True` keeps the signed URL byte-for-byte intact.
    url = URL(url, encoded=True)
    resp = await self.http_session.request(
        request.method, url=url, headers=headers_, data=data, proxy=proxy)

    # If we're not streaming, read the content so we can retry any timeout
    # errors, see:
    # https://github.com/boto/botocore/blob/develop/botocore/vendored/requests/sessions.py#L604
    if not request.stream_output:
        await resp.read()
    return resp
Example #15
Source File: endpoint.py From lambda-text-extractor with Apache License 2.0 | 4 votes |
def _request(self, method, url, headers, data):
    """Issue one HTTP request on the shared aiohttp session, following
    up to MAX_REDIRECTS redirects manually.

    Generator-based coroutine (pre async/await style): must be driven
    with ``yield from``.
    """
    # Note: When using aiobotocore with dynamodb, requests fail on crc32
    # checksum computation as soon as the response data reaches ~5KB.
    # When AWS response is gzip compressed:
    # 1. aiohttp is automatically decompressing the data
    #    (http://aiohttp.readthedocs.io/en/stable/client.html#binary-response-content)
    # 2. botocore computes crc32 on the uncompressed data bytes and fails
    #    cause crc32 has been computed on the compressed data
    # The following line forces aws not to use gzip compression,
    # if there is a way to configure aiohttp not to perform decompression,
    # we can remove the following line and take advantage of
    # aws gzip compression.
    # See: https://github.com/aio-libs/aiohttp/issues/1992
    headers['Accept-Encoding'] = 'identity'
    headers_ = MultiDict(
        (z[0], text_(z[1], encoding='utf-8')) for z in headers.items())

    # botocore does this during the request so we do this here as well
    proxy = self.proxies.get(urlparse(url.lower()).scheme)

    if AIOHTTP_2 and isinstance(data, io.IOBase):
        data = _IOBaseWrapper(data)

    # `encoded=True` keeps the pre-signed URL byte-for-byte intact.
    url = URL(url, encoded=True)
    # See https://github.com/aio-libs/aiobotocore/issues/267 for details
    for i in range(MAX_REDIRECTS):
        # Redirects are handled manually (allow_redirects=False) so each
        # hop can be re-signed / rewritten by _aiohttp_do_redirect.
        resp = yield from self._aio_session.request(method, url=url,
                                                    headers=headers_,
                                                    data=data,
                                                    proxy=proxy,
                                                    timeout=None,
                                                    allow_redirects=False)
        if resp.status in {301, 302, 303, 307}:
            redir_arr = _aiohttp_do_redirect(self._aio_session, method,
                                             url, headers, data, resp)
            if redir_arr is None:
                break
            # NOTE(review): `params` is unpacked here but never used.
            method, url, headers, params, data = redir_arr
        else:
            break
    return resp
Example #16
Source File: endpoint.py From lambda-text-extractor with Apache License 2.0 | 4 votes |
def __init__(self, host, endpoint_prefix, event_emitter, proxies=None,
             verify=True, timeout=DEFAULT_TIMEOUT,
             response_parser_factory=None,
             max_pool_connections=MAX_POOL_CONNECTIONS,
             loop=None, connector_args=None):
    """Construct the async endpoint and its backing aiohttp session.

    ``timeout`` may be a single value (used for both connect and read)
    or a (connect, read) pair. ``connector_args`` are forwarded to
    aiohttp.TCPConnector.
    """
    super().__init__(host, endpoint_prefix, event_emitter, proxies=proxies,
                     verify=verify, timeout=timeout,
                     response_parser_factory=response_parser_factory,
                     max_pool_connections=max_pool_connections)

    # A (conn, read) tuple splits the timeouts; a scalar applies to both.
    if isinstance(timeout, (list, tuple)):
        self._conn_timeout, self._read_timeout = timeout
    else:
        self._conn_timeout = self._read_timeout = timeout

    self._loop = loop or asyncio.get_event_loop()
    if connector_args is None:
        # AWS has a 20 second idle timeout:
        # https://forums.aws.amazon.com/message.jspa?messageID=215367
        # aiohttp default timeout is 30s so set something reasonable here
        connector_args = dict(keepalive_timeout=12)

    connector = aiohttp.TCPConnector(loop=self._loop,
                                     limit=max_pool_connections,
                                     verify_ssl=self.verify,
                                     **connector_args)

    # This begins the journey into our replacement of aiohttp's
    # `read_timeout`. Their implementation represents an absolute time
    # from the initial request, to the last read. So if the client delays
    # reading the body for long enough the request would be cancelled.
    # See https://github.com/aio-libs/aiobotocore/issues/245
    # NOTE(review): this reaches into aiohttp's private `_factory`
    # attribute and will break if aiohttp changes its internals.
    assert connector._factory.func == ResponseHandler

    connector._factory = functools.partial(
        WrappedResponseHandler,
        wrapped_read_timeout=self._read_timeout,
        *connector._factory.args,
        **connector._factory.keywords)

    self._aio_session = aiohttp.ClientSession(
        connector=connector,
        read_timeout=None,
        conn_timeout=self._conn_timeout,
        skip_auto_headers={'CONTENT-TYPE'},
        response_class=ClientResponseProxy,
        loop=self._loop)
Example #17
Source File: endpoint.py From lambda-text-extractor with Apache License 2.0 | 4 votes |
def _request(self, method, url, headers, data):
    """Issue one HTTP request on the shared aiohttp session, following
    up to MAX_REDIRECTS redirects manually.

    Generator-based coroutine (pre async/await style): must be driven
    with ``yield from``.
    """
    # Note: When using aiobotocore with dynamodb, requests fail on crc32
    # checksum computation as soon as the response data reaches ~5KB.
    # When AWS response is gzip compressed:
    # 1. aiohttp is automatically decompressing the data
    #    (http://aiohttp.readthedocs.io/en/stable/client.html#binary-response-content)
    # 2. botocore computes crc32 on the uncompressed data bytes and fails
    #    cause crc32 has been computed on the compressed data
    # The following line forces aws not to use gzip compression,
    # if there is a way to configure aiohttp not to perform decompression,
    # we can remove the following line and take advantage of
    # aws gzip compression.
    # See: https://github.com/aio-libs/aiohttp/issues/1992
    headers['Accept-Encoding'] = 'identity'
    headers_ = MultiDict(
        (z[0], text_(z[1], encoding='utf-8')) for z in headers.items())

    # botocore does this during the request so we do this here as well
    proxy = self.proxies.get(urlparse(url.lower()).scheme)

    if AIOHTTP_2 and isinstance(data, io.IOBase):
        data = _IOBaseWrapper(data)

    # `encoded=True` keeps the pre-signed URL byte-for-byte intact.
    url = URL(url, encoded=True)
    # See https://github.com/aio-libs/aiobotocore/issues/267 for details
    for i in range(MAX_REDIRECTS):
        # Redirects are handled manually (allow_redirects=False) so each
        # hop can be re-signed / rewritten by _aiohttp_do_redirect.
        resp = yield from self._aio_session.request(method, url=url,
                                                    headers=headers_,
                                                    data=data,
                                                    proxy=proxy,
                                                    timeout=None,
                                                    allow_redirects=False)
        if resp.status in {301, 302, 303, 307}:
            redir_arr = _aiohttp_do_redirect(self._aio_session, method,
                                             url, headers, data, resp)
            if redir_arr is None:
                break
            # NOTE(review): `params` is unpacked here but never used.
            method, url, headers, params, data = redir_arr
        else:
            break
    return resp