Python elasticsearch.exceptions.ConnectionTimeout() Examples
The following are 15 code examples of elasticsearch.exceptions.ConnectionTimeout().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module elasticsearch.exceptions, or try the search function.
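Before the project examples, here is a minimal sketch of the basic pattern (the endpoint and timeout values are illustrative, not taken from any example below): the client raises ConnectionTimeout when a request exceeds its timeout, and you typically catch it around the individual call.

from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionTimeout

es = Elasticsearch(['http://localhost:9200'])  # illustrative endpoint

try:
    # request_timeout bounds this single call; a slow cluster raises ConnectionTimeout
    health = es.cluster.health(request_timeout=5)
except ConnectionTimeout:
    health = None  # degrade gracefully instead of crashing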
Example #1
Source File: __init__.py From prometheus-es-exporter with MIT License
def collect(self):
    try:
        response = self.es_client.cluster.health(level=self.level, request_timeout=self.timeout)
        metrics = cluster_health_parser.parse_response(response, self.metric_name_list)
        metric_dict = group_metrics(metrics)
    except ConnectionTimeout:
        log.warning('Timeout while fetching %(description)s (timeout %(timeout_s)ss).',
                    {'description': self.description, 'timeout_s': self.timeout})
        yield collector_up_gauge(self.metric_name_list, self.description, succeeded=False)
    except Exception:
        log.exception('Error while fetching %(description)s.', {'description': self.description})
        yield collector_up_gauge(self.metric_name_list, self.description, succeeded=False)
    else:
        yield from gauge_generator(metric_dict)
        yield collector_up_gauge(self.metric_name_list, self.description)
Example #2
Source File: __init__.py From prometheus-es-exporter with MIT License
def collect(self):
    try:
        response = self.es_client.nodes.stats(metric=self.metrics, request_timeout=self.timeout)
        metrics = nodes_stats_parser.parse_response(response, self.metric_name_list)
        metric_dict = group_metrics(metrics)
    except ConnectionTimeout:
        log.warning('Timeout while fetching %(description)s (timeout %(timeout_s)ss).',
                    {'description': self.description, 'timeout_s': self.timeout})
        yield collector_up_gauge(self.metric_name_list, self.description, succeeded=False)
    except Exception:
        log.exception('Error while fetching %(description)s.', {'description': self.description})
        yield collector_up_gauge(self.metric_name_list, self.description, succeeded=False)
    else:
        yield from gauge_generator(metric_dict)
        yield collector_up_gauge(self.metric_name_list, self.description)
Example #3
Source File: __init__.py From prometheus-es-exporter with MIT License
def collect(self):
    try:
        response = self.es_client.indices.get_alias(request_timeout=self.timeout)
        metrics = indices_aliases_parser.parse_response(response, self.metric_name_list)
        metric_dict = group_metrics(metrics)
    except ConnectionTimeout:
        log.warning('Timeout while fetching %(description)s (timeout %(timeout_s)ss).',
                    {'description': self.description, 'timeout_s': self.timeout})
        yield collector_up_gauge(self.metric_name_list, self.description, succeeded=False)
    except Exception:
        log.exception('Error while fetching %(description)s.', {'description': self.description})
        yield collector_up_gauge(self.metric_name_list, self.description, succeeded=False)
    else:
        yield from gauge_generator(metric_dict)
        yield collector_up_gauge(self.metric_name_list, self.description)
Example #4
Source File: __init__.py From prometheus-es-exporter with MIT License
def collect(self):
    try:
        response = self.es_client.indices.get_mapping(request_timeout=self.timeout)
        metrics = indices_mappings_parser.parse_response(response, self.metric_name_list)
        metric_dict = group_metrics(metrics)
    except ConnectionTimeout:
        log.warning('Timeout while fetching %(description)s (timeout %(timeout_s)ss).',
                    {'description': self.description, 'timeout_s': self.timeout})
        yield collector_up_gauge(self.metric_name_list, self.description, succeeded=False)
    except Exception:
        log.exception('Error while fetching %(description)s.', {'description': self.description})
        yield collector_up_gauge(self.metric_name_list, self.description, succeeded=False)
    else:
        yield from gauge_generator(metric_dict)
        yield collector_up_gauge(self.metric_name_list, self.description)
Example #5
Source File: __init__.py From prometheus-es-exporter with MIT License
def collect(self):
    try:
        response = self.es_client.indices.stats(metric=self.metrics, fields=self.fields,
                                                request_timeout=self.timeout)
        metrics = indices_stats_parser.parse_response(response, self.parse_indices, self.metric_name_list)
        metric_dict = group_metrics(metrics)
    except ConnectionTimeout:
        log.warning('Timeout while fetching %(description)s (timeout %(timeout_s)ss).',
                    {'description': self.description, 'timeout_s': self.timeout})
        yield collector_up_gauge(self.metric_name_list, self.description, succeeded=False)
    except Exception:
        log.exception('Error while fetching %(description)s.', {'description': self.description})
        yield collector_up_gauge(self.metric_name_list, self.description, succeeded=False)
    else:
        yield from gauge_generator(metric_dict)
        yield collector_up_gauge(self.metric_name_list, self.description)
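Examples #1 through #5 share the same collector skeleton and differ only in which Elasticsearch API they call and which parser they apply. A condensed sketch of the shared pattern (fetch and parse are hypothetical stand-ins for the per-collector call and parser; the other helpers are the ones used above):

def collect(self):
    try:
        response = self.fetch()          # the per-collector ES call, e.g. cluster.health
        metrics = self.parse(response)   # the matching response parser
        metric_dict = group_metrics(metrics)
    except ConnectionTimeout:
        # A timeout is an expected failure mode: warn and report the collector as down.
        log.warning('Timeout while fetching %s.', self.description)
        yield collector_up_gauge(self.metric_name_list, self.description, succeeded=False)
    except Exception:
        log.exception('Error while fetching %s.', self.description)
        yield collector_up_gauge(self.metric_name_list, self.description, succeeded=False)
    else:
        yield from gauge_generator(metric_dict)
        yield collector_up_gauge(self.metric_name_list, self.description)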
Example #6
Source File: views.py From autocompeter with Mozilla Public License 2.0
def es_retry(callable, *args, **kwargs):
    sleep_time = kwargs.pop('_sleep_time', 1)
    attempts = kwargs.pop('_attempts', 10)
    verbose = kwargs.pop('_verbose', False)
    try:
        return callable(*args, **kwargs)
    except (ConnectionTimeout,) as exception:
        if attempts:
            attempts -= 1
            if verbose:
                print("ES Retrying ({} {}) {}".format(
                    attempts, sleep_time, exception
                ))
            time.sleep(sleep_time)
        else:
            raise
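A possible way to call it (the client, index name, and document are hypothetical): the underscore-prefixed keywords are popped off inside es_retry, so only the real arguments reach the wrapped client method.

es = Elasticsearch()  # assumed client instance

result = es_retry(
    es.index,                    # any client callable works
    index='titles',              # hypothetical index name
    body={'title': 'Example'},
    _attempts=5,                 # consumed by es_retry, never passed to es.index
    _sleep_time=2,
    _verbose=True,
)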
Example #7
Source File: variant_search_api.py From seqr with GNU Affero General Public License v3.0
def query_variants_handler(request, search_hash):
    """Search variants."""
    page = int(request.GET.get('page') or 1)
    per_page = int(request.GET.get('per_page') or 100)
    sort = request.GET.get('sort') or XPOS_SORT_KEY
    if sort == PATHOGENICTY_SORT_KEY and request.user.is_staff:
        sort = PATHOGENICTY_HGMD_SORT_KEY

    try:
        results_model = _get_or_create_results_model(search_hash, json.loads(request.body or '{}'), request.user)
    except Exception as e:
        logger.error(e)
        return create_json_response({'error': str(e)}, status=400, reason=str(e))

    _check_results_permission(results_model, request.user)

    try:
        variants, total_results = get_es_variants(results_model, sort=sort, page=page, num_results=per_page)
    except InvalidIndexException as e:
        logger.error('InvalidIndexException: {}'.format(e))
        return create_json_response({'error': str(e)}, status=400, reason=str(e))
    except ConnectionTimeout:
        return create_json_response({}, status=504, reason='Query Time Out')

    response = _process_variants(variants or [], results_model.families.all(), request.user)
    response['search'] = _get_search_context(results_model)
    response['search']['totalResults'] = total_results

    return create_json_response(response)
Example #8
Source File: views_test.py From bouncer with BSD 2-Clause "Simplified" License
def test_failed_es_request(self):
    request = mock_request()
    exc = es_exceptions.ConnectionTimeout()
    request.es.cluster.health.side_effect = exc

    with pytest.raises(views.FailedHealthcheck) as e:
        views.healthcheck(request)

    assert e.value.__cause__ == exc
Example #9
Source File: base.py From osint-combiner with MIT License
def exists_es_index(str_valid_index):
    """Returns if given index exists in Elasticsearch cluster"""
    connection_attempts = 0
    while connection_attempts < 3:
        try:
            es = get_es_object()
            es_indices = es.indices
            return es_indices.exists(index=str_valid_index)
        except exceptions.ConnectionTimeout:
            connection_attempts += 1
    sys.exit('Elasticsearch connection timeout, exiting now...')
Example #10
Source File: connection.py From elasticsearch-py-async with Apache License 2.0
def perform_request(self, method, url, params=None, body=None, timeout=None, ignore=(), headers=None):
    url_path = url
    if params:
        url_path = '%s?%s' % (url, urlencode(params or {}))
    url = self.base_url + url_path

    start = self.loop.time()
    response = None
    try:
        with async_timeout.timeout(timeout or self.timeout, loop=self.loop):
            response = yield from self.session.request(method, url, data=body, headers=headers)
            raw_data = yield from response.text()
        duration = self.loop.time() - start

    except asyncio.CancelledError:
        raise

    except Exception as e:
        self.log_request_fail(method, url, url_path, body, self.loop.time() - start, exception=e)
        if isinstance(e, ServerFingerprintMismatch):
            raise SSLError('N/A', str(e), e)
        if isinstance(e, asyncio.TimeoutError):
            raise ConnectionTimeout('TIMEOUT', str(e), e)
        raise ConnectionError('N/A', str(e), e)

    finally:
        if response is not None:
            yield from response.release()

    # raise errors based on http status codes, let the client handle those if needed
    if not (200 <= response.status < 300) and response.status not in ignore:
        self.log_request_fail(method, url, url_path, body, duration,
                              status_code=response.status, response=raw_data)
        self._raise_error(response.status, raw_data)

    self.log_request_success(method, url, url_path, body, response.status, raw_data, duration)

    return response.status, response.headers, raw_data
Example #11
Source File: geoparse.py From mordecai with MIT License
def proc_lookup(self, loc):
    try:
        loc = self.query_geonames(loc['word'])
    except ConnectionTimeout:
        loc = ""
    return loc
Example #12
Source File: geoparse.py From mordecai with MIT License
def simple_lookup(self, word):
    try:
        loc = self.query_geonames(word)
    except ConnectionTimeout:
        loc = ""
    return loc
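Both mordecai helpers swallow the timeout and return an empty string, so a caller has to treat a falsy result as "no lookup". A hypothetical caller (geo, tokens, and process are illustrative names, not from the project):

for word in tokens:
    loc = geo.simple_lookup(word)  # '' when the GeoNames query timed out
    if not loc:
        continue                   # skip unresolved tokens rather than failing
    process(loc)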
Example #13
Source File: es_utils_2_3_tests.py From seqr with GNU Affero General Public License v3.0
def test_invalid_get_es_variants(self):
    search_model = VariantSearch.objects.create(search={})
    results_model = VariantSearchResults.objects.create(variant_search=search_model)
    results_model.families.set(Family.objects.filter(family_id='no_individuals'))

    self.mock_liftover.side_effect = Exception()
    self.assertIsNone(_liftover_grch38_to_grch37())
    self.assertIsNone(_liftover_grch37_to_grch38())

    with self.assertRaises(InvalidIndexException) as cm:
        get_es_variants(results_model)
    self.assertEqual(str(cm.exception), 'No es index found')

    results_model.families.set(self.families)

    with self.assertRaises(Exception) as cm:
        get_es_variants(results_model, page=200)
    self.assertEqual(str(cm.exception), 'Unable to load more than 10000 variants (20000 requested)')

    search_model.search = {'inheritance': {'mode': 'compound_het'}}
    search_model.save()
    with self.assertRaises(Exception) as cm:
        get_es_variants(results_model)
    self.assertEqual(
        str(cm.exception),
        'This search returned too many compound heterozygous variants. Please add stricter filters')

    with self.assertRaises(Exception) as cm:
        get_es_variant_gene_counts(results_model)
    self.assertEqual(str(cm.exception), 'This search returned too many genes')

    search_model.search = {'qualityFilter': {'min_gq': 7}}
    search_model.save()
    with self.assertRaises(Exception) as cm:
        get_es_variants(results_model)
    self.assertEqual(str(cm.exception), 'Invalid gq filter 7')

    search_model.search = {}
    search_model.save()

    self.mock_multi_search.side_effect = ConnectionTimeout()
    self.mock_es_client.tasks.list.return_value = {'tasks': {
        123: {'running_time_in_nanos': 10},
        456: {'running_time_in_nanos': 10 ** 12},
    }}
    with self.assertRaises(ConnectionTimeout):
        get_es_variants(results_model)
    self.assertEqual(self.mock_es_client.tasks.cancel.call_count, 1)
    self.mock_es_client.tasks.cancel.assert_called_with(parent_task_id=456)

    _set_cache('index_metadata__test_index,test_index_sv', None)
    self.mock_es_client.indices.get_mapping.side_effect = Exception('Connection error')
    with self.assertRaises(InvalidIndexException) as cm:
        get_es_variants(results_model)
    self.assertEqual(str(cm.exception), 'Error accessing index "test_index,test_index_sv": Connection error')

    self.mock_es_client.indices.get_mapping.side_effect = lambda **kwargs: {}
    with self.assertRaises(InvalidIndexException) as cm:
        get_es_variants(results_model)
    self.assertEqual(str(cm.exception), 'Could not find expected indices: test_index_sv, test_index')
Example #14
Source File: async_connection.py From rally with Apache License 2.0
def perform_request(self, method, url, params=None, body=None, timeout=None, ignore=(), headers=None):
    url_path = url
    if params:
        query_string = urlencode(params)
    else:
        query_string = ""
    # Provide correct URL object to avoid string parsing in low-level code
    url = yarl.URL.build(scheme=self.scheme, host=self.hostname, port=self.port, path=url,
                         query_string=query_string, encoded=True)

    start = self.loop.time()
    response = None
    try:
        request_timeout = timeout or self.timeout.total
        with async_timeout.timeout(request_timeout, loop=self.loop):
            # override the default session timeout explicitly
            response = yield from self.session.request(method, url, data=body, headers=headers,
                                                       timeout=request_timeout)
            raw_data = yield from response.text()
        duration = self.loop.time() - start

    except asyncio.CancelledError:
        raise

    except Exception as e:
        self.log_request_fail(method, url, url_path, body, self.loop.time() - start, exception=e)
        if isinstance(e, ServerFingerprintMismatch):
            raise SSLError('N/A', str(e), e)
        if isinstance(e, asyncio.TimeoutError):
            raise ConnectionTimeout('TIMEOUT', str(e), e)
        raise ConnectionError('N/A', str(e), e)

    finally:
        if response is not None:
            yield from response.release()

    # raise errors based on http status codes, let the client handle those if needed
    if not (200 <= response.status < 300) and response.status not in ignore:
        self.log_request_fail(method, url, url_path, body, duration,
                              status_code=response.status, response=raw_data)
        self._raise_error(response.status, raw_data)

    self.log_request_success(method, url, url_path, body, response.status, raw_data, duration)

    return response.status, response.headers, raw_data
Example #15
Source File: document_logs_model.py From quay with Apache License 2.0
def yield_logs_for_export(
    self,
    start_datetime,
    end_datetime,
    repository_id=None,
    namespace_id=None,
    max_query_time=None,
):
    max_query_time = max_query_time.total_seconds() if max_query_time is not None else 300
    search = self._base_query_date_range(
        start_datetime, end_datetime, None, repository_id, namespace_id, None
    )

    def raise_on_timeout(batch_generator):
        start = time()
        for batch in batch_generator:
            elapsed = time() - start
            if elapsed > max_query_time:
                logger.error(
                    "Retrieval of logs `%s/%s` timed out with time of `%s`",
                    namespace_id,
                    repository_id,
                    elapsed,
                )
                raise LogsIterationTimeout()

            yield batch
            start = time()

    def read_batch(scroll):
        batch = []
        for log in scroll:
            batch.append(log)
            if len(batch) == DEFAULT_RESULT_WINDOW:
                yield _for_elasticsearch_logs(
                    batch, repository_id=repository_id, namespace_id=namespace_id
                )
                batch = []

        if batch:
            yield _for_elasticsearch_logs(
                batch, repository_id=repository_id, namespace_id=namespace_id
            )

    search = search.params(size=DEFAULT_RESULT_WINDOW, request_timeout=max_query_time)

    try:
        with CloseForLongOperation(config.app_config):
            for batch in raise_on_timeout(read_batch(search.scan())):
                yield batch
    except ConnectionTimeout:
        raise LogsIterationTimeout()