Python botocore.exceptions.ConnectionError() Examples
The following are 7 code examples of botocore.exceptions.ConnectionError(), drawn from open-source projects. The originating source file, project, and license are noted above each example.
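As context for the examples, here is a minimal sketch of the usual pattern (written for this page, not taken from the projects below; the S3 call and the helper name are illustrative choices). botocore.exceptions.ConnectionError is botocore's base class for network-level failures such as EndpointConnectionError, raised when the HTTP request to an AWS endpoint cannot complete. It is distinct both from ClientError, which means the service responded with an error, and from Python's built-in ConnectionError.

import boto3
# Note: this import shadows Python's built-in ConnectionError;
# botocore's class is a different type.
from botocore.exceptions import ClientError, ConnectionError


def list_buckets_or_empty():
    client = boto3.client('s3')  # assumes credentials/region are configured
    try:
        return client.list_buckets()['Buckets']
    except ConnectionError as ex:
        # Network-level failure: endpoint unreachable, DNS error, etc.
        print('Could not reach the S3 endpoint: %s' % ex)
        return []
    except ClientError as ex:
        # The service answered, but with an error (e.g. AccessDenied).
        print('S3 returned an error response: %s' % ex)
        return []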
Example #1
Source File: efs.py From awslimitchecker with GNU Affero General Public License v3.0
def find_usage(self):
    """
    Determine the current usage for each limit of this service,
    and update corresponding Limit via
    :py:meth:`~.AwsLimit._add_current_usage`.
    """
    logger.debug("Checking usage for service %s", self.service_name)
    self.connect()
    for lim in self.limits.values():
        lim._reset_usage()
    try:
        self._find_usage_filesystems()
    except (ConnectionError, ClientError) as ex:
        logger.warning(
            'Caught exception when trying to use EFS ('
            'perhaps the EFS service is not available in this '
            'region?): %s', ex
        )
    self._have_usage = True
    logger.debug("Done checking usage.")
Example #2
Source File: test_efs.py From awslimitchecker with GNU Affero General Public License v3.0
def test_find_usage_no_endpoint(self):
    exc = ConnectionError(error='foo')
    mock_conn = Mock()
    with patch('%s.connect' % pb) as mock_connect:
        with patch('%s.paginate_dict' % pbm) as mock_paginate:
            mock_paginate.side_effect = exc
            cls = _EfsService(21, 43, {}, None)
            cls.conn = mock_conn
            assert cls._have_usage is False
            cls.find_usage()
    assert cls._have_usage is True
    assert mock_connect.mock_calls == [call()]
    assert mock_paginate.mock_calls == [
        call(
            mock_conn.describe_file_systems,
            alc_marker_path=['NextMarker'],
            alc_data_path=['FileSystems'],
            alc_marker_param='Marker'
        )
    ]
    assert len(cls.limits) == 1
    usage = cls.limits['File systems'].get_current_usage()
    assert len(usage) == 0
Example #3
Source File: discovery.py From deepWordBug with Apache License 2.0
def _refresh_current_endpoints(self, **kwargs):
    cache_key = self._create_cache_key(**kwargs)
    try:
        response = self._describe_endpoints(**kwargs)
        endpoints = self._parse_endpoints(response)
        self._cache[cache_key] = endpoints
        self._failed_attempts.pop(cache_key, None)
        return endpoints
    except (ConnectionError, HTTPClientError):
        self._failed_attempts[cache_key] = self._time() + 60
        return None
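This method is botocore's own endpoint-discovery refresher; the projects in Examples #4, #5, and #7 bundle (vendor) a copy of botocore, which is why the same function reappears verbatim below. The design choice worth noting is the failure path: on ConnectionError or HTTPClientError, the method records self._time() + 60 in _failed_attempts, putting the cache key on a 60-second cooldown before discovery is retried, and returns None instead of raising.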
Example #4
Source File: discovery.py From bash-lambda-layer with MIT License
def _refresh_current_endpoints(self, **kwargs):
    cache_key = self._create_cache_key(**kwargs)
    try:
        response = self._describe_endpoints(**kwargs)
        endpoints = self._parse_endpoints(response)
        self._cache[cache_key] = endpoints
        self._failed_attempts.pop(cache_key, None)
        return endpoints
    except (ConnectionError, HTTPClientError):
        self._failed_attempts[cache_key] = self._time() + 60
        return None
Example #5
Source File: discovery.py From AWS-Transit-Gateway-Demo-MultiAccount with MIT License
def _refresh_current_endpoints(self, **kwargs):
    cache_key = self._create_cache_key(**kwargs)
    try:
        response = self._describe_endpoints(**kwargs)
        endpoints = self._parse_endpoints(response)
        self._cache[cache_key] = endpoints
        self._failed_attempts.pop(cache_key, None)
        return endpoints
    except (ConnectionError, HTTPClientError):
        self._failed_attempts[cache_key] = self._time() + 60
        return None
Example #6
Source File: logs.py From cli with MIT License
def run(self) -> None:
    """
    Watch for new logs and pass each log entry to the "consumer" function.
    """
    # Track the last timestamp we see. When we fetch_stream() again on the
    # next iteration, we'll start from that timestamp onwards to avoid
    # fetching every single page again. The last event or two will still
    # be in the response, but our de-duping will ignore those.
    last_timestamp = None

    # Keep track of what log entries we've consumed so that we suppress
    # duplicates. Duplicates will arise in our stream due to the way we
    # watch for new entries.
    consumed = set()  # type: MutableSet

    # How many successful vs failed fetch_stream calls. If we consistently
    # see failures but never see a successful attempt, we should raise an
    # exception and stop.
    success_count = 0
    failure_count = 0

    while not self.stopped.wait(0.2):
        try:
            for entry in fetch_stream(self.stream, start_time=last_timestamp):
                if entry["eventId"] not in consumed:
                    consumed.add(entry["eventId"])
                    last_timestamp = entry["timestamp"]
                    self.consumer(entry)
        except (ClientError, BotocoreConnectionError):
            failure_count += 1
            if failure_count > MAX_FAILURES and not success_count:
                raise
        else:
            success_count += 1
Example #7
Source File: discovery.py From aws-builders-fair-projects with Apache License 2.0
def _refresh_current_endpoints(self, **kwargs):
    cache_key = self._create_cache_key(**kwargs)
    try:
        response = self._describe_endpoints(**kwargs)
        endpoints = self._parse_endpoints(response)
        self._cache[cache_key] = endpoints
        self._failed_attempts.pop(cache_key, None)
        return endpoints
    except (ConnectionError, HTTPClientError):
        self._failed_attempts[cache_key] = self._time() + 60
        return None