Python elasticsearch.exceptions.TransportError() Examples
The following are 23 code examples of elasticsearch.exceptions.TransportError().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module
elasticsearch.exceptions, or try the search function.
Example #1
Source File: esbtc.py From blockchain-elasticsearch with Apache License 2.0 | 6 votes |
def add_transaction(self, tx):
    """Queue one transaction for bulk indexing, flushing once more than
    200 actions are pending.

    Args:
        tx (dict): Transaction data. Must contain 'tx' (transaction id)
            and 'n' (output index), which together form the document id.
    """
    action = {
        '_type': 'doc',
        '_op_type': 'update',
        '_index': "btc-opreturn",
        '_id': "%s-%s" % (tx['tx'], tx['n']),
        'doc_as_upsert': True,
        'doc': tx,
    }
    self.transactions.append(action)
    # Flush in batches so the in-memory buffer stays bounded.
    if len(self.transactions) > 200:
        try:
            self.es_handle.add_bulk_tx(self)
        except TransportError:
            # BUG FIX: the original referenced pdb.set_trace without
            # calling it, silently discarding the error. Actually drop
            # into the debugger as apparently intended.
            import pdb
            pdb.set_trace()
        # Reset the buffer whether or not the bulk write succeeded.
        self.transactions = []
        self.current = -1
Example #2
Source File: initialize.py From cmdb with GNU Lesser General Public License v3.0 | 6 votes |
def add_viewset(table):
    """Dynamically build and register a read-only DRF viewset for *table*.

    The generated viewset exposes a single ``retrieve`` action that looks up
    the change history of one data row in Elasticsearch.

    Args:
        table: a CMDB table model; ``table.name`` is used both as the
            Elasticsearch index base name and as the viewset class name.

    Returns:
        The dynamically created viewset class (also attached to ``views``).
    """
    # Index naming convention: "<name>" holds current data, "<name>." holds
    # change records, "<name>.." holds deleted records.
    # NOTE(review): data_index and deleted_data_index are computed but never
    # used in this function — presumably kept for symmetry; confirm.
    data_index = table.name
    record_data_index = "{}.".format(table.name)
    deleted_data_index = "{}..".format(table.name)

    def retrieve(self, request, *args, **kwargs):
        # Fetch all change records for the row id (kwargs["pk"]), newest first.
        try:
            res = es.search(index=record_data_index, doc_type="record-data",
                            body={"query": {"term": {"S-data-id": kwargs["pk"]}}},
                            sort="S-update-time:desc")
        except NotFoundError as exc:
            # Index/document missing -> 404 to the API client.
            raise exceptions.NotFound("Document {} was not found in Type data of Index {}".format(kwargs["pk"], record_data_index))
        except TransportError as exc:
            # Any other ES transport failure degrades to an empty result set.
            return Response([])
        return Response(res["hits"])

    # Create the viewset class at runtime, named after the table, and
    # register it on the views module so URL routing can find it.
    viewset = type(table.name, (mixins.RetrieveModelMixin, viewsets.GenericViewSet), dict(
        permission_classes=(permissions.IsAuthenticated, ),
        retrieve=retrieve))
    setattr(views, table.name, viewset)
    return viewset
Example #3
Source File: test_commands.py From elasticsearch-django with MIT License | 6 votes |
def test_rebuild_search_index(self, mock_update, mock_create, mock_delete):
    """Test the rebuild_search_index command."""
    # NOTE(review): mock_update/mock_create/mock_delete are injected by
    # @mock.patch decorators outside this view (reverse decorator order).
    cmd = rebuild_search_index.Command()
    result = cmd.do_index_command(
        "foo", interactive=False
    )  # True would hang the tests
    # A rebuild is delete -> create -> update, each invoked exactly once
    # with the index name.
    mock_delete.assert_called_once_with("foo")
    mock_create.assert_called_once_with("foo")
    mock_update.assert_called_once_with("foo")
    # The command reports each step's raw return value.
    self.assertEqual(result["delete"], mock_delete.return_value)
    self.assertEqual(result["create"], mock_create.return_value)
    self.assertEqual(result["update"], mock_update.return_value)
    # check that the delete is handled if the index does not exist:
    # a TransportError from delete must not abort the rebuild, and the
    # "delete" result degrades to an empty dict.
    mock_delete.side_effect = TransportError("Index not found")
    result = cmd.do_index_command(
        "foo", interactive=False
    )  # True would hang the tests
    self.assertEqual(result["delete"], {})
Example #4
Source File: elasticsearch.py From py-timeexecution with Apache License 2.0 | 6 votes |
def __init__(
    self, hosts=None, index="metrics", doc_type="metric", index_pattern="{index}-{date:%Y.%m.%d}", *args, **kwargs
):
    """Elasticsearch metrics backend.

    Args:
        hosts: forwarded verbatim to the Elasticsearch client.
        index (str): base index name for metric documents.
        doc_type (str): document type used when writing metrics.
        index_pattern (str): pattern used to derive the dated index name.

    Extra positional/keyword arguments are passed to ``Elasticsearch``.
    """
    # These settings are read again on every metric write, so keep them
    # on the backend instance.
    self.index = index
    self.doc_type = doc_type
    self.index_pattern = index_pattern

    # Low-level Elasticsearch client.
    self.client = Elasticsearch(hosts=hosts, *args, **kwargs)

    # Index and mapping creation are best-effort: failures are logged and
    # swallowed so the application can still start without a healthy ES.
    try:
        self._setup_index()
    except TransportError as err:
        logger.error('index setup error %r', err)
    try:
        self._setup_mapping()
    except TransportError as err:
        logger.error('mapping setup error %r', err)
Example #5
Source File: elasticsearch.py From py-timeexecution with Apache License 2.0 | 6 votes |
def write(self, name, **data):
    """Write one metric document to Elasticsearch.

    Args:
        name (str): metric name, stored in the document's ``name`` field.
        **data: additional fields to store with the metric; a ``timestamp``
            is added automatically when the caller does not supply one.

    Indexing failures are logged as warnings rather than raised.
    """
    data["name"] = name
    # Stamp the metric only if the caller did not provide a timestamp.
    if "timestamp" not in data:
        data["timestamp"] = datetime.utcnow()
    try:
        self.client.index(index=self.get_index(), doc_type=self.doc_type, id=None, body=data)
    except TransportError as exc:
        logger.warning('writing metric %r failure %r', data, exc)
Example #6
Source File: elasticsearch.py From py-timeexecution with Apache License 2.0 | 6 votes |
def bulk_write(self, metrics):
    """Write multiple metrics to Elasticsearch in a single bulk request.

    Args:
        metrics (list): metric documents to index.

    Transport failures are logged as warnings rather than raised.
    """
    target_index = self.get_index()
    # The bulk API expects an action header line before every document.
    payload = [
        entry
        for metric in metrics
        for entry in ({'index': {'_index': target_index, '_type': self.doc_type}}, metric)
    ]
    try:
        self.client.bulk(payload)
    except TransportError as exc:
        logger.warning('bulk_write metrics %r failure %r', metrics, exc)
Example #7
Source File: rebuild_search_index.py From elasticsearch-django with MIT License | 6 votes |
def do_index_command(self, index: str, **options: Any) -> CommandReturnType:
    """Rebuild search index."""
    # In interactive mode the user must confirm before we destroy the index.
    if options["interactive"]:
        logger.warning("This will permanently delete the index '%s'.", index)
        confirmed = self._confirm_action()
        if not confirmed:
            logger.warning(
                "Aborting rebuild of index '%s' at user's request.", index
            )
            return None

    # Deleting a missing index raises TransportError; treat that as a no-op.
    try:
        delete = delete_index(index)
    except TransportError:
        delete = {}
        logger.info("Index %s does not exist, cannot be deleted.", index)

    create = create_index(index)
    update = update_index(index)
    return {"delete": delete, "create": create, "update": update}
Example #8
Source File: test_elasticsearch.py From py-timeexecution with Apache License 2.0 | 5 votes |
def test_error_warning(self, mocked_logger):
    """A TransportError while writing a metric is logged, not raised."""
    transport_error = TransportError('mocked error')
    # Force Elasticsearch.index to fail so the backend's except-path runs.
    es_index_error_ctx = mock.patch(
        'time_execution.backends.elasticsearch.Elasticsearch.index',
        side_effect=transport_error
    )
    # Freeze time so the auto-generated 'timestamp' field is predictable.
    frozen_time_ctx = freeze_time('2016-07-13')
    with es_index_error_ctx, frozen_time_ctx:
        self.backend.write(name='test:metric', value=None)
        # write() must swallow the error and emit exactly this warning,
        # including the enriched document it failed to index.
        mocked_logger.warning.assert_called_once_with(
            'writing metric %r failure %r',
            {'timestamp': datetime(2016, 7, 13), 'value': None, 'name': 'test:metric'},
            transport_error,
        )
Example #9
Source File: entity_resolver.py From mindmeld with Apache License 2.0 | 5 votes |
def load(self):
    """Loads the trained entity resolution model from disk."""
    try:
        if self._use_text_rel:
            # Text-relevance resolver: model state lives in a scoped ES index.
            scoped_index_name = get_scoped_index_name(
                self._app_namespace, self._es_index_name
            )
            # Only (re)build the index when it does not already exist.
            if not self._es_client.indices.exists(index=scoped_index_name):
                self.fit()
        else:
            # Non-ES resolver: always (re)fit on load.
            self.fit()
    except EsConnectionError as e:
        # Connection problems get their own error type so callers can
        # distinguish "ES is down" from other failures.
        logger.error(
            "Unable to connect to Elasticsearch: %s details: %s", e.error, e.info
        )
        raise EntityResolverConnectionError(es_host=self._es_client.transport.hosts)
    except TransportError as e:
        logger.error(
            "Unexpected error occurred when sending requests to Elasticsearch: %s "
            "Status code: %s details: %s",
            e.error,
            e.status_code,
            e.info,
        )
        raise EntityResolverError
    except ElasticsearchException:
        # Any other Elasticsearch failure is surfaced as a generic resolver error.
        raise EntityResolverError
Example #10
Source File: test_aws_elasticsearch_connection.py From edx-analytics-pipeline with GNU Affero General Public License v3.0 | 5 votes |
def test_boto_service_unavailable(self, mock_make_request):
    """A BotoServerError from the HTTP layer surfaces as a TransportError
    carrying the original HTTP status code."""
    connection = AwsHttpConnection(aws_access_key_id='access_key', aws_secret_access_key='secret')
    # Simulate AWS returning 503 Service Unavailable at the boto layer.
    mock_make_request.side_effect = BotoServerError(503, 'Service Unavailable')
    try:
        connection.perform_request('get', 'http://example.com')
    except TransportError as transport_error:
        # The boto status code must be preserved on the wrapped error.
        self.assertEqual(transport_error.status_code, 503)
    else:
        self.fail('Expected a transport error to be raised.')
Example #11
Source File: search.py From elasticsearch-dsl-py with Apache License 2.0 | 5 votes |
def execute(self, ignore_cache=False, raise_on_error=True):
    """
    Execute the multi search request and return a list of search results.
    """
    # Serve the cached response unless the caller explicitly opts out.
    if not ignore_cache and hasattr(self, '_response'):
        return self._response

    es = get_connection(self._using)
    responses = es.msearch(
        index=self._index, body=self.to_dict(), **self._params
    )

    results = []
    # msearch returns one raw result per sub-search, in request order.
    for search, raw in zip(self._searches, responses['responses']):
        if raw.get('error', False):
            if raise_on_error:
                raise TransportError('N/A', raw['error']['type'], raw['error'])
            # Failed sub-searches are reported as None when not raising.
            results.append(None)
        else:
            results.append(Response(search, raw))

    self._response = results
    return self._response
Example #12
Source File: test_commands.py From elasticsearch-django with MIT License | 5 votes |
def test_handle(self, mock_do, mock_log):
    """Test the main handle method calls do_index_command."""
    obj = BaseSearchCommand()
    obj.handle(indexes=["foo", "bar"])
    # this should have called the do_index_command twice
    mock_do.assert_has_calls([mock.call("foo"), mock.call("bar")])

    # A TransportError from do_index_command must be caught and logged as
    # a warning, not propagated out of handle().
    mock_do.reset_mock()
    mock_do.side_effect = TransportError(
        123, "oops", {"error": {"reason": "no idea"}}
    )
    obj.handle(indexes=["baz"])
    mock_do.assert_called_once_with("baz")
    mock_log.warning.assert_called_once()
Example #13
Source File: __init__.py From elasticsearch-django with MIT License | 5 votes |
def handle(self, *args: Any, **options: Any) -> None:
    """Run do_index_command on each specified index and log the output."""
    for index in options.pop("indexes"):
        # BUG FIX: initialise `data` up-front. Previously, if
        # do_index_command raised anything other than TransportError,
        # the `finally` clause referenced an unbound local and raised
        # UnboundLocalError, masking the real failure.
        data: Any = None
        try:
            data = self.do_index_command(index, **options)
        except TransportError as ex:
            # Known ES failures are logged and summarised, not re-raised.
            logger.warning("ElasticSearch threw an error: %s", ex)
            data = {"index": index, "status": ex.status_code, "reason": ex.error}
        finally:
            # Always log the outcome (None if an unexpected error escaped).
            logger.info(data)
Example #14
Source File: test_elasticsearch.py From py-timeexecution with Apache License 2.0 | 5 votes |
def test_bulk_write_error(self, mocked_logger):
    """A TransportError during a bulk write is logged as a warning, not raised."""
    transport_error = TransportError('mocked error')
    # Force Elasticsearch.bulk to fail so bulk_write's except-path runs.
    es_index_error_ctx = mock.patch(
        'time_execution.backends.elasticsearch.Elasticsearch.bulk',
        side_effect=transport_error
    )
    metrics = [1, 2, 3]
    with es_index_error_ctx:
        self.backend.bulk_write(metrics)
        # bulk_write must swallow the error and log the original metrics
        # alongside the exception.
        mocked_logger.warning.assert_called_once_with('bulk_write metrics %r failure %r', metrics, transport_error)
Example #15
Source File: search.py From Mastering-Elasticsearch-7.0 with MIT License | 5 votes |
def execute(self, ignore_cache=False, raise_on_error=True):
    """
    Execute the multi search request and return a list of search results.
    """
    # Return the memoised response unless the caller forces a re-run.
    if not ignore_cache and hasattr(self, '_response'):
        return self._response

    es = connections.get_connection(self._using)
    responses = es.msearch(
        index=self._index, body=self.to_dict(), **self._params
    )

    # One raw result per sub-search, paired in request order.
    results = []
    for search, raw in zip(self._searches, responses['responses']):
        if raw.get('error', False):
            if raise_on_error:
                raise TransportError('N/A', raw['error']['type'], raw['error'])
            # When not raising, a failed sub-search yields None.
            results.append(None)
        else:
            results.append(Response(search, raw))

    self._response = results
    return self._response
Example #16
Source File: utils.py From searchlight with Apache License 2.0 | 5 votes |
def create_new_index(group):
    """Create a new index for a specific Resource Type Group.

    Upon exit of this method, the index is still not ready to be used.
    The index still needs to have the settings/mappings set for each
    plugin (Document Type).
    """
    es_engine = searchlight.elasticsearch.get_api()

    # Apply operator-configured index settings, if any.
    kwargs = {}
    index_settings = _get_index_settings_from_config()
    if index_settings:
        kwargs = {'body': {'index': index_settings}}

    # Name the index after the group plus a UTC timestamp; retry with a
    # fresh timestamp until we win the (unlikely) naming race.
    while True:
        now = oslo_utils.timeutils.utcnow()
        index_name = (group + '-' + now.strftime(FORMAT))
        try:
            es_engine.indices.create(index=index_name, **kwargs)
        except es_exc.TransportError as e:
            already_exists = (
                e.error.startswith("IndexAlreadyExistsException") or
                e.error.startswith("index_already_exists_exception"))
            if already_exists:
                # This index already exists! Try again with a new timestamp.
                continue
            raise
        return index_name
Example #17
Source File: __init__.py From elastalert with Apache License 2.0 | 5 votes |
def es_version(self):
    """ Returns the reported version from the Elasticsearch server. """
    # Serve the cached value when we already asked the cluster once.
    if self._es_version is not None:
        return self._es_version

    # Query the cluster, retrying transient transport failures twice
    # (three attempts total, 3s apart) before giving up.
    final_attempt = 2
    for attempt in range(3):
        try:
            self._es_version = self.info()['version']['number']
        except TransportError:
            if attempt == final_attempt:
                raise
            time.sleep(3)
        else:
            break
    return self._es_version
Example #18
Source File: test_elasticsearch.py From nefertari with Apache License 2.0 | 5 votes |
def test_perform_request_no_index(self, mock_log):
    """A 404 'IndexMissingException' TransportError is translated into the
    connection's IndexNotFoundException."""
    mock_log.level = logging.DEBUG
    # NOTE(review): the error is injected via log.debug's side_effect, so it
    # fires when perform_request logs the request — confirm against es module.
    mock_log.debug.side_effect = TransportError(
        404, 'IndexMissingException')
    conn = es.ESHttpConnection()
    with pytest.raises(es.IndexNotFoundException):
        conn.perform_request('POST', 'http://localhost:9200')
Example #19
Source File: test_elasticsearch.py From nefertari with Apache License 2.0 | 5 votes |
def test_perform_request_exception(self):
    """A generic TransportError from the connection pool is re-raised as
    JHTTPBadRequest."""
    conn = es.ESHttpConnection()
    # Replace the urllib3 pool so the underlying HTTP call fails.
    conn.pool = Mock()
    conn.pool.urlopen.side_effect = TransportError('N/A', '')
    with pytest.raises(JHTTPBadRequest):
        conn.perform_request('POST', 'http://localhost:9200')
Example #20
Source File: es_backup.py From elasticsearch-snapshots with MIT License | 5 votes |
def take_snapshot(options):
    """Create an Elasticsearch snapshot and prune old ones.

    Args:
        options: parsed CLI options; uses .snapshot, .indices, .repository,
            .wait and .keep.
    """
    esm = ElasticsearchSnapshotManager(options)
    sh = esm.sh

    # Default the snapshot name to an hourly timestamp when not given.
    # (Simplified from the old `a and a or b` idiom — same semantics.)
    snapshot = options.snapshot or 'all_' + time.strftime('%Y%m%d%H')

    snapdef = {
        "include_global_state": True
    }
    if options.indices:
        snapdef['indices'] = ','.join(options.indices)

    try:
        sh.create(repository=options.repository, snapshot=snapshot, body=json.dumps(snapdef), wait_for_completion=options.wait, request_timeout=7200)

        # Housekeeping - delete old snapshots, keeping the newest
        # options.keep entries.
        snapshots = sh.get(repository=options.repository, snapshot="_all", request_timeout=120)['snapshots']
        num_snaps = len(snapshots)
        if num_snaps > options.keep:
            up_to = num_snaps - options.keep
            logger.info('TOTAL: %d - Will delete 1 -> %d' % (num_snaps, up_to + 1))

            for snap in snapshots[0:up_to]:
                sh.delete(repository=options.repository, snapshot=snap['snapshot'], request_timeout=3600)
                logger.info('Deleted snapshot %s' % snap['snapshot'])
    except exceptions.TransportError as e:
        # BUG FIX: this was a silent `pass`, hiding every transport failure.
        # Keep the best-effort behaviour (don't crash) but record the error.
        logger.error('Snapshot operation failed: %s' % e)
Example #21
Source File: search.py From Mastering-Elasticsearch-7.0 with MIT License | 5 votes |
def execute(self, ignore_cache=False, raise_on_error=True):
    """
    Execute the multi search request and return a list of search results.
    """
    # Reuse the previously computed response unless explicitly bypassed.
    if not ignore_cache and hasattr(self, '_response'):
        return self._response

    es = connections.get_connection(self._using)
    responses = es.msearch(
        index=self._index, body=self.to_dict(), **self._params
    )

    collected = []
    for search, raw in zip(self._searches, responses['responses']):
        if raw.get('error', False):
            # Either surface the sub-search failure or record it as None.
            if raise_on_error:
                raise TransportError('N/A', raw['error']['type'], raw['error'])
            collected.append(None)
        else:
            collected.append(Response(search, raw))

    self._response = collected
    return self._response
Example #22
Source File: es_restore.py From elasticsearch-snapshots with MIT License | 4 votes |
def restore_snapshot(options):
    """List available snapshots, or restore one, then exit the process.

    Python 2 code (uses `print` statements). With options.list set, prints a
    summary of every snapshot in the repository; otherwise restores either
    the named snapshot (options.snapshot) or the most recent one, and calls
    sys.exit with a failure flag.
    """
    esm = ElasticsearchSnapshotManager(options)
    sh = esm.sh

    if options.list:
        # Listing mode: dump a one-line summary per snapshot.
        snapshots = sh.get(repository=options.repository, snapshot="_all", request_timeout=120)['snapshots']
        print 'Total number of snapshots: %d\n' % len(snapshots)
        for i, snap in enumerate(snapshots):
            # 'end_time' is absent for in-flight snapshots, shown as '...'.
            print '[%4d] NAME: %14s | START/END: %s -> %s | STATE: %7s | INDICES: %s' % (i + 1, snap['snapshot'], snap['start_time'], 'end_time' in snap and snap['end_time'] or '...', snap['state'], ', '.join(snap['indices']))
    else:
        fail = True

        snapdef = {
            "include_global_state": True
        }
        if options.indices:
            snapdef['indices'] = ','.join(options.indices)

        if options.snapshot:
            # Look up the explicitly requested snapshot.
            try:
                snapshot = sh.get(repository=options.repository, snapshot=options.snapshot)['snapshots'][0]
            # NOTE(review): bare except hides the real failure type (network
            # vs missing snapshot); narrowing it would be safer.
            except:
                logger.error('Snapshot "%s" not found in S3 bucket "%s" for repository "%s"' % (options.snapshot, options.bucket, options.repository))
                snapshot = None
        else:
            # No name given: fall back to the most recent snapshot.
            try:
                snapshot = sh.get(repository=options.repository, snapshot="_all", request_timeout=120)['snapshots'][-1]
            # NOTE(review): bare except — see note above.
            except:
                snapshot = None

        if snapshot:
            try:
                # Perform the restore
                logger.info('Starting restore of snapshot "%s" from bucket "%s"' % (snapshot['snapshot'], options.bucket))
                try:
                    sh.restore(repository=options.repository, snapshot=snapshot['snapshot'], body=json.dumps(snapdef), wait_for_completion=options.wait, request_timeout=7200)
                    logger.info('Restore of snapshot "%s" has started' % snapshot['snapshot'])
                    fail = False
                    # Optional notifications once the restore is underway.
                    if options.slackurl:
                        post_to_slack(url=options.slackurl, snapshot=snapshot['snapshot'], prefix=options.prefix, channel=options.slackchan)
                    if options.flowdock:
                        post_to_flowdock(options.flowdock, snapshot=snapshot['snapshot'], prefix=options.prefix)
                except exceptions.TransportError as e:
                    logger.warning('Unable to restore snapshot "%s": %s' % (snapshot['snapshot'], e.error))
            # NOTE(review): NameError should not occur here since `snapshot`
            # is always bound on this path — this handler looks vestigial.
            except NameError as e:
                logger.warning('No snapshots found for bucket "%s" with prefix "%s"' % (options.bucket, options.prefix))
        else:
            logger.error('Unable to fetch a snapshot to restore from')

        # Exit code 1 on failure, 0 on success.
        # NOTE(review): placement inferred from the mangled source — `fail`
        # is only bound on the non-list path, so sys.exit must be inside
        # this else branch; confirm against the original es_restore.py.
        sys.exit(fail)
Example #23
Source File: elasticsearch_load.py From edx-analytics-pipeline with GNU Affero General Public License v3.0 | 4 votes |
def send_bulk_action_batch(self, elasticsearch_client, bulk_action_batch):
    """
    Given a batch of actions, transmit them in bulk to the elasticsearch cluster.

    This method handles back-pressure from the elasticsearch cluster which queues up writes. When the queue is full
    the cluster will start rejecting additional bulk indexing requests. This method implements an exponential
    back-off, allowing the cluster to catch-up with the client.

    Arguments:
        elasticsearch_client (elasticsearch.Elasticsearch): A reference to an elasticsearch client.
        bulk_action_batch (list of dicts): A list of bulk actions followed by their respective documents.

    Raises:
        IndexingError: If a record cannot be indexed by elasticsearch this method assumes that is a fatal error
            and it immediately raises this exception. If we try to transmit a batch repeatedly and it is
            continually rejected by the cluster, this method will give up after `max_attempts` and raise this
            error.

    Returns:
        True iff the batch of actions was successfully transmitted to and acknowledged by the elasticsearch cluster.
    """
    attempts = 0
    batch_written_successfully = False
    while True:
        try:
            resp = elasticsearch_client.bulk(bulk_action_batch, index=self.index, doc_type=self.doc_type)
        except TransportError as transport_error:
            # Only queue-full (rejected) and 503 responses are retryable;
            # anything else is a genuine failure and is re-raised.
            if transport_error.status_code not in (REJECTED_REQUEST_STATUS, HTTP_SERVICE_UNAVAILABLE_STATUS_CODE):
                raise transport_error
        else:
            # The bulk call succeeded at the transport level; now inspect
            # each per-item status for individual indexing failures.
            num_errors = 0
            for raw_data in resp['items']:
                # Each item is a single-key dict: {op_type: result}.
                _op_type, item = raw_data.popitem()
                successful = 200 <= item.get('status', 500) < 300
                if not successful:
                    log.error('Failed to index: %s', str(item))
                    num_errors += 1

            if num_errors == 0:
                batch_written_successfully = True
                break
            else:
                # Per-record failures are treated as fatal, not retryable.
                raise IndexingError('Failed to index {0} records. Aborting.'.format(num_errors))

        # Reached only after a retryable TransportError: back off
        # exponentially (2, 4, 8, ... seconds) up to max_attempts.
        attempts += 1
        if attempts < self.max_attempts:
            sleep_duration = 2 ** attempts
            self.incr_counter('Elasticsearch', 'Rejected Batches', 1)
            log.warn(
                'Batch of records rejected. Sleeping for %d seconds before retrying.',
                sleep_duration
            )
            time.sleep(sleep_duration)
        else:
            batch_written_successfully = False
            break

    return batch_written_successfully