Python elasticsearch.exceptions.NotFoundError() Examples
The following are 30 code examples of elasticsearch.exceptions.NotFoundError(). You can go to the original project or source file by following the link above each example, or browse the other available functions and classes of the elasticsearch.exceptions module.
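Before the examples, a quick orientation: NotFoundError is the exception the official elasticsearch Python client raises when a request targets an index or document that does not exist (an HTTP 404 response). Below is a minimal sketch of the usual catch-and-handle pattern, assuming a cluster reachable on localhost and a hypothetical index name and document id:

from elasticsearch import Elasticsearch
from elasticsearch.exceptions import NotFoundError

es = Elasticsearch()  # assumes a cluster reachable at localhost:9200

try:
    doc = es.get(index="my-index", id="42")  # hypothetical index and id
except NotFoundError:
    # Raised when the index or the document id does not exist (HTTP 404)
    doc = None

Most of the examples below follow this shape: attempt a get, search, or delete, and translate NotFoundError into a default value, a log message, or a framework-level 404.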
Example #1
Source File: fake_elasticsearch.py From airflow with Apache License 2.0 | 7 votes |
def delete(self, index, doc_type, id, params=None):
    found = False

    if index in self.__documents_dict:
        for document in self.__documents_dict[index]:
            if document.get('_type') == doc_type and document.get('_id') == id:
                found = True
                self.__documents_dict[index].remove(document)
                break

    result_dict = {
        'found': found,
        '_index': index,
        '_type': doc_type,
        '_id': id,
        '_version': 1,
    }

    if found:
        return result_dict
    else:
        raise NotFoundError(404, json.dumps(result_dict))
Example #2
Source File: fake_elasticsearch.py From elasticmock with MIT License | 6 votes |
def suggest(self, body, index=None, params=None, headers=None):
    if index is not None and index not in self.__documents_dict:
        raise NotFoundError(404, 'IndexMissingException[[{0}] missing]'.format(index))

    result_dict = {}
    for key, value in body.items():
        text = value.get('text')
        suggestion = int(text) + 1 if isinstance(text, int) else '{0}_suggestion'.format(text)
        result_dict[key] = [
            {
                'text': text,
                'length': 1,
                'options': [
                    {
                        'text': suggestion,
                        'freq': 1,
                        'score': 1.0
                    }
                ],
                'offset': 0
            }
        ]
    return result_dict
Example #3
Source File: utils.py From searchlight with Apache License 2.0 | 6 votes |
def find_missing_types(index_type_mapping):
    """Find doc types that do not exist in the given indices."""
    missing_index, missing_type = [], []
    if not index_type_mapping:
        return missing_index, missing_type

    es_engine = searchlight.elasticsearch.get_api()
    for index in index_type_mapping.keys():
        for doc_type in index_type_mapping[index]:
            try:
                mapping = es_engine.indices.get_mapping(index, doc_type)
                if not mapping:
                    missing_type.append(doc_type)
            except es_exc.NotFoundError:
                missing_index.append(index)

    return set(missing_index), set(missing_type)
Example #4
Source File: indexing_api_test.py From micromasters with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_delete_percolate_queries(self):
    """Test that we delete the percolate query from the index"""
    query = {"query": {"match": {"profile.first_name": "here"}}}
    with patch('search.signals.transaction', on_commit=lambda callback: callback()):
        percolate_query = PercolateQueryFactory.create(query=query, original_query="original")
        assert es.get_percolate_query(percolate_query.id) == {
            '_id': str(percolate_query.id),
            '_index': es.get_default_backing_index(PERCOLATE_INDEX_TYPE),
            '_source': query,
            '_seq_no': 0,
            '_primary_term': 1,
            '_type': GLOBAL_DOC_TYPE,
            '_version': 1,
            'found': True,
        }
        delete_percolate_query(percolate_query.id)
        with self.assertRaises(NotFoundError):
            es.get_percolate_query(percolate_query.id)
        # If we delete it again there should be no exception
        delete_percolate_query(percolate_query.id)
        with self.assertRaises(NotFoundError):
            es.get_percolate_query(percolate_query.id)
Example #5
Source File: notification_handlers.py From searchlight with Apache License 2.0 | 6 votes |
def delete(self, event_type, payload, timestamp):
    zone_id = payload['id']
    version = self.get_version(payload, timestamp,
                               preferred_date_field='deleted_at')
    delete_recordset = self.recordset_helper.delete_documents_with_parent(
        zone_id, version=version)

    items = [pipeline.DeleteItem(self.recordset_helper.plugin,
                                 event_type,
                                 payload,
                                 rs['_id']) for rs in delete_recordset]
    try:
        self.index_helper.delete_document(
            {'_id': zone_id, '_version': version})
        items.append(pipeline.DeleteItem(self.index_helper.plugin,
                                         event_type, payload, zone_id))
    except exceptions.NotFoundError:
        msg = "Zone %s not found when deleting"
        LOG.error(msg, zone_id)

    return items
Example #6
Source File: utils.py From bitshares-explorer-api with MIT License | 6 votes |
def needs_es(index_name=None):
    def inner_function(function=None):
        @wraps(function)
        def wrapper(*args, **kwargs):
            if index_name is not None:
                # TODO pre-check, might not be necessary
                pass
            try:
                return function(*args, **kwargs)
            except NotFoundError as e:
                not_found = getattr(e, "info", {}).get("error", {}).get("root_cause", [{}])[0].get("resource.id", None)
                message = "The required index does not exist in this ElasticSearch database"
                if not_found is not None:
                    message = message + " (" + str(not_found) + ")"
                abort(404, message)
        return wrapper
    if index_name is not None and not isinstance(index_name, str):
        return inner_function(index_name)
    else:
        return inner_function
Example #7
Source File: views.py From cmdb with GNU Lesser General Public License v3.0 | 6 votes |
def destroy(self, request, *args, **kwargs):
    try:
        res = es.get(index="test_12", doc_type="one", id=kwargs["pk"])
        data = res["_source"]
        data["S_delete_time"] = datetime.datetime.now().isoformat()
        data["S_delete_people"] = request.user.username
        res = es.create(index="test_32", doc_type="one", id=kwargs["pk"], body=data)
        es.delete(index="test_12", doc_type="one", id=kwargs["pk"])
        es.delete_by_query(index="test_22", doc_type="one",
                           body={"query": {"match": {"S_data_id": kwargs["pk"]}}})
    except NotFoundError as exc:
        raise exceptions.ParseError("Document {} was not found in Type one of Index test_12".format(kwargs["pk"]))
    except Exception as exc:
        raise exceptions.APIException("Internal error, error type: {}".format(type(exc)))
    return Response(res, status=status.HTTP_204_NO_CONTENT)

# t = TestViewset()
# t.paginate_queryset()
Example #8
Source File: esbtc.py From blockchain-elasticsearch with Apache License 2.0 | 6 votes |
def add_block(self, block, force_add=False):
    "Add a block. Do nothing if the block already exists"
    read_index = "btc-blocks-*"
    the_index = "btc-blocks"

    exists = False
    try:
        #self.es.get(index=the_index, doc_type="doc", id=block['hash'])
        self.es.get(index=read_index, id=block['hash'])
        exists = True
    except NotFoundError:
        # We need to add this block
        exists = False

    if exists is False or force_add is True:
        #self.es.update(id=block['hash'], index=the_index, doc_type='doc', body={'doc': block, 'doc_as_upsert': True}, request_timeout=30)
        self.es.update(id=block['hash'], index=the_index,
                       body={'doc': block, 'doc_as_upsert': True}, request_timeout=30)
Example #9
Source File: fake_elasticsearch.py From airflow with Apache License 2.0 | 6 votes |
def _normalize_index_to_list(self, index):
    # Ensure to have a list of index
    if index is None:
        searchable_indexes = self.__documents_dict.keys()
    elif isinstance(index, str):
        searchable_indexes = [index]
    elif isinstance(index, list):
        searchable_indexes = index
    else:
        # Is it the correct exception to use ?
        raise ValueError("Invalid param 'index'")

    # Check index(es) exists
    for searchable_index in searchable_indexes:
        if searchable_index not in self.__documents_dict:
            raise NotFoundError(404, 'IndexMissingException[[{0}] missing]'
                                .format(searchable_index))

    return searchable_indexes
Example #10
Source File: fake_elasticsearch.py From airflow with Apache License 2.0 | 6 votes |
def suggest(self, body, index=None):
    if index is not None and index not in self.__documents_dict:
        raise NotFoundError(404, 'IndexMissingException[[{0}] missing]'.format(index))

    result_dict = {}
    for key, value in body.items():
        text = value.get('text')
        suggestion = int(text) + 1 if isinstance(text, int) \
            else '{0}_suggestion'.format(text)
        result_dict[key] = [
            {
                'text': text,
                'length': 1,
                'options': [
                    {
                        'text': suggestion,
                        'freq': 1,
                        'score': 1.0
                    }
                ],
                'offset': 0
            }
        ]
    return result_dict
Example #11
Source File: fake_elasticsearch.py From airflow with Apache License 2.0 | 6 votes |
def get(self, index, id, doc_type='_all', params=None):
    result = None
    if index in self.__documents_dict:
        result = self.find_document(doc_type, id, index, result)

    if result:
        result['found'] = True
    else:
        error_data = {
            '_index': index,
            '_type': doc_type,
            '_id': id,
            'found': False
        }
        raise NotFoundError(404, json.dumps(error_data))

    return result
Example #12
Source File: initialize.py From cmdb with GNU Lesser General Public License v3.0 | 6 votes |
def add_viewset(table):
    data_index = table.name
    record_data_index = "{}.".format(table.name)
    deleted_data_index = "{}..".format(table.name)

    def retrieve(self, request, *args, **kwargs):
        try:
            res = es.search(index=record_data_index, doc_type="record-data",
                            body={"query": {"term": {"S-data-id": kwargs["pk"]}}},
                            sort="S-update-time:desc")
        except NotFoundError as exc:
            raise exceptions.NotFound("Document {} was not found in Type data of Index {}".format(kwargs["pk"], record_data_index))
        except TransportError as exc:
            return Response([])
        return Response(res["hits"])

    viewset = type(table.name, (mixins.RetrieveModelMixin, viewsets.GenericViewSet), dict(
        permission_classes=(permissions.IsAuthenticated, ),
        retrieve=retrieve))
    setattr(views, table.name, viewset)
    return viewset
Example #13
Source File: views.py From cmdb with GNU Lesser General Public License v3.0 | 6 votes |
def create(self, request, *args, **kwargs):
    serializer = self.get_serializer(data=request.data)
    serializer.is_valid(raise_exception=True)
    data = serializer.validated_data
    # indices = data["indices"] if data["indices"] else "_all"
    indices = self.get_indices(data["indices"])
    sort = ",".join(reversed(list(map(lambda i: ":".join(i), data["sort"].items()))))
    try:
        res = es.search(index=indices, doc_type=self._doc_type,
                        from_=data["page_size"] * (data["page"] - 1),
                        size=data["page_size"],
                        sort=sort,
                        q=data["query"],
                        analyze_wildcard=True)
    except NotFoundError as exc:
        return Response({
            "hits": [],
            "max_score": None,
            "total": 0
        })
    except RequestError as exc:
        raise exceptions.ParseError("Search statement error: " + str(exc))
    return Response(res["hits"])
Example #14
Source File: views.py From cmdb with GNU Lesser General Public License v3.0 | 6 votes |
def create(self, request, *args, **kwargs):
    serializer = self.get_serializer(data=request.data)
    serializer.is_valid(raise_exception=True)
    data = serializer.validated_data
    # indices = data["indices"] if data["indices"] else "_all"
    indices = self.get_indices(data["indices"])
    sort = ",".join(reversed(list(map(lambda i: ":".join(i), data["sort"].items()))))
    try:
        res = es.search(index=indices, doc_type=self._doc_type,
                        from_=data["page_size"] * (data["page"] - 1),
                        size=data["page_size"],
                        sort=sort,
                        body=data["body"],
                        analyze_wildcard=True)
    except NotFoundError as exc:
        return Response({
            "hits": [],
            "max_score": None,
            "total": 0
        })
    except RequestError as exc:
        raise exceptions.ParseError("Search statement error: " + str(exc))
    return Response(res["hits"])
Example #15
Source File: es-s3-snapshot.py From elasticsearch-s3-backup with MIT License | 6 votes |
def reopen_indices(es, index_list):
    """
    Re-open indices (used to ensure indices are re-opened after any restore operation)
    Parameters:
        es         : ElasticSearch connection object
        index_list : List of ElasticSearch indices that need to be open
    """
    try:
        for index in index_list:
            print("[INFO] reopen_indices(): Opening index: '%s'" % (index))
            es.indices.open(index=index, ignore_unavailable=True)
    except NotFoundError:
        print("\n\n[WARN] Could not reopen missing index on Target ES cluster: '%s'" % (index))
    except Exception as e:
        print("\n\n[ERROR] Unexpected error in reopen_indices(): %s" % (str(e)))
Example #16
Source File: fake_elasticsearch.py From elasticmock with MIT License | 6 votes |
def get(self, index, id, doc_type='_all', params=None, headers=None):
    result = None
    if index in self.__documents_dict:
        for document in self.__documents_dict[index]:
            if document.get('_id') == id:
                if doc_type == '_all':
                    result = document
                    break
                else:
                    if document.get('_type') == doc_type:
                        result = document
                        break

    if result:
        result['found'] = True
    else:
        error_data = {
            '_index': index,
            '_type': doc_type,
            '_id': id,
            'found': False
        }
        raise NotFoundError(404, json.dumps(error_data))

    return result
Example #17
Source File: categories.py From richie with MIT License | 6 votes |
def retrieve(self, request, pk, version, kind):
    """
    Return a single item by ID
    """
    # Wrap the ES get in a try/catch so we control the exception we emit — it would
    # raise and end up in a 500 error otherwise
    try:
        query_response = ES_CLIENT.get(
            index=self._meta.indexer.index_name,
            doc_type=self._meta.indexer.document_type,
            id=pk,
        )
    except NotFoundError:
        raise NotFound

    # Format a clean category object as a response
    return Response(
        self._meta.indexer.format_es_object_for_api(
            query_response,
            # Get the best language we can return multilingual fields in
            get_language_from_request(request),
        )
    )
Example #18
Source File: persons.py From richie with MIT License | 6 votes |
def retrieve(self, request, pk, version):
    """
    Return a single person by ID
    """
    # Wrap the ES get in a try/catch so we control the exception we emit — it would
    # raise and end up in a 500 error otherwise
    try:
        query_response = ES_CLIENT.get(
            index=self._meta.indexer.index_name,
            doc_type=self._meta.indexer.document_type,
            id=pk,
        )
    except NotFoundError:
        return Response(status=404)

    # Format a clean person object as a response
    return Response(
        self._meta.indexer.format_es_object_for_api(
            query_response,
            # Get the best language we can return multilingual fields in
            get_language_from_request(request),
        )
    )
Example #19
Source File: organizations.py From richie with MIT License | 6 votes |
def retrieve(self, request, pk, version):
    """
    Return a single organization by ID
    """
    # Wrap the ES get in a try/catch so we control the exception we emit — it would
    # raise and end up in a 500 error otherwise
    try:
        query_response = ES_CLIENT.get(
            index=self._meta.indexer.index_name,
            doc_type=self._meta.indexer.document_type,
            id=pk,
        )
    except NotFoundError:
        return Response(status=404)

    # Format a clean organization object as a response
    return Response(
        self._meta.indexer.format_es_object_for_api(
            query_response,
            # Get the best language we can return multilingual fields in
            get_language_from_request(request),
        )
    )
Example #20
Source File: courses.py From richie with MIT License | 6 votes |
def retrieve(self, request, pk, version):
    """
    Return a single course by ID
    """
    # Wrap the ES get in a try/catch so we control the exception we emit — it would
    # raise and end up in a 500 error otherwise
    try:
        query_response = ES_CLIENT.get(
            index=self._meta.indexer.index_name,
            doc_type=self._meta.indexer.document_type,
            id=pk,
        )
    except NotFoundError:
        return Response(status=404)

    # Format a clean course object as a response
    return Response(self._meta.indexer.format_es_object_for_api(query_response))
Example #21
Source File: elastic.py From timesketch with Apache License 2.0 | 6 votes |
def count(self, indices):
    """Count number of documents.

    Args:
        indices: List of indices.

    Returns:
        Number of documents.
    """
    if not indices:
        return 0
    try:
        result = self.client.count(index=indices)
    except (NotFoundError, RequestError) as e:
        es_logger.error(
            'Unable to count indexes (index not found), with '
            'error: {0!s}'.format(e))
        return 0
    return result.get('count', 0)
Example #22
Source File: rest.py From memex-explorer with BSD 2-Clause "Simplified" License | 6 votes |
def get(self, request, format=None):
    # TODO: catch all exceptions. At the very least, deal with 404 not found and
    # connection refused exceptions.
    # Temporarily remove exceptions for debugging.
    try:
        trail_ids = [x["key"] for x in self.es.search(index=self.index, body={
            "aggs": {
                "trail_id": {
                    "terms": {"field": "trail_id"}
                }
            }
        })["aggregations"]["trail_id"]["buckets"]]
        response = self.create_trails(trail_ids)
    except ConnectionError as e:
        raise OSError("Failed to connect to local elasticsearch instance.")
    except NotFoundError:
        raise DataWakeIndexUnavailable
    return Response(response)
Example #23
Source File: utils.py From bungiesearch with BSD 3-Clause "New" or "Revised" License | 6 votes |
def delete_index_item(item, model_name, refresh=True):
    '''
    Deletes an item from the index.
    :param item: must be a serializable object.
    :param model_name: doctype, which must also be the model name.
    :param refresh: a boolean that determines whether to refresh the index, making all
        operations performed since the last refresh immediately available for search,
        instead of needing to wait for the scheduled Elasticsearch execution. Defaults to True.
    '''
    src = Bungiesearch()

    logger.info('Getting index for model {}.'.format(model_name))
    for index_name in src.get_index(model_name):
        index_instance = src.get_model_index(model_name)
        item_es_id = index_instance.fields['_id'].value(item)
        try:
            src.get_es_instance().delete(index_name, model_name, item_es_id)
        except NotFoundError as e:
            logger.warning('NotFoundError: could not delete {}.{} from index {}: {}.'.format(model_name, item_es_id, index_name, str(e)))

        if refresh:
            src.get_es_instance().indices.refresh(index=index_name)
Example #24
Source File: document_logs_model.py From quay with Apache License 2.0 | 6 votes |
def _load_latest_logs(self, performer_id, repository_id, account_id, filter_kinds, size):
    """
    Return the latest logs from Elasticsearch.

    Look at indices up to the logrotateworker threshold, or up to 30 days if not defined.
    """
    # Set the last index to check to be the logrotateworker threshold, or 30 days
    end_datetime = datetime.now()
    start_datetime = end_datetime - timedelta(days=DATE_RANGE_LIMIT)

    latest_logs = []
    for day in _date_range_descending(start_datetime, end_datetime, includes_end_datetime=True):
        try:
            logs = self._load_logs_for_day(
                day, performer_id, repository_id, account_id, filter_kinds, size=size
            )
            latest_logs.extend(logs)
        except NotFoundError:
            continue

        if len(latest_logs) >= size:
            break

    return _for_elasticsearch_logs(latest_logs[:size], repository_id, account_id)
Example #25
Source File: initialize.py From cmdb with GNU Lesser General Public License v3.0 | 6 votes |
def add_viewset(table):
    data_index = table.name
    record_data_index = "{}.".format(table.name)
    deleted_data_index = "{}..".format(table.name)

    def list(self, request, *args, **kwargs):
        page = int(request.query_params.get("page", 1))
        page_size = int(request.query_params.get("page_size", 10))
        try:
            res = es.search(index=deleted_data_index, doc_type="deleted-data",
                            size=page_size, from_=(page - 1) * page_size)
        except Exception as exc:
            raise exceptions.APIException("Internal error, error type: {}".format(type(exc)))
        return Response(res["hits"])

    def retrieve(self, request, *args, **kwargs):
        try:
            res = es.get(index=deleted_data_index, doc_type="data", id=kwargs["pk"])
        except NotFoundError as exc:
            raise exceptions.NotFound("Document {} was not found in Type {} of Index {}".format(kwargs["pk"], "data", table.name))

    viewset = type(table.name, (mixins.ListModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet), dict(
        permission_classes=(c_permissions.TableLevelPermission, ),
        list=list,
        retrieve=retrieve))
    setattr(views, table.name, viewset)
    return viewset
Example #26
Source File: fake_elasticsearch.py From elasticmock with MIT License | 6 votes |
def delete(self, index, doc_type, id, params=None, headers=None):
    found = False

    if index in self.__documents_dict:
        for document in self.__documents_dict[index]:
            if document.get('_type') == doc_type and document.get('_id') == id:
                found = True
                self.__documents_dict[index].remove(document)
                break

    result_dict = {
        'found': found,
        '_index': index,
        '_type': doc_type,
        '_id': id,
        '_version': 1,
    }

    if found:
        return result_dict
    else:
        raise NotFoundError(404, json.dumps(result_dict))
Example #27
Source File: fake_elasticsearch.py From elasticmock with MIT License | 6 votes |
def _normalize_index_to_list(self, index):
    # Ensure to have a list of index
    if index is None:
        searchable_indexes = self.__documents_dict.keys()
    elif isinstance(index, str) or isinstance(index, unicode):
        searchable_indexes = [index]
    elif isinstance(index, list):
        searchable_indexes = index
    else:
        # Is it the correct exception to use ?
        raise ValueError("Invalid param 'index'")

    # Check index(es) exists
    for searchable_index in searchable_indexes:
        if searchable_index not in self.__documents_dict:
            raise NotFoundError(404, 'IndexMissingException[[{0}] missing]'.format(searchable_index))

    return searchable_indexes
Example #28
Source File: indexing_api_test.py From micromasters with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_index_percolate_query(self):
    """Test that we index the percolate query"""
    query = {"query": {"match": {"profile.first_name": "here"}}}
    percolate_query = PercolateQueryFactory.create(query=query, original_query="original")
    percolate_query_id = 123
    percolate_query.id = percolate_query_id
    # Don't save since that will trigger a signal which will update the index
    with self.assertRaises(NotFoundError):
        es.get_percolate_query(percolate_query_id)

    index_percolate_queries([percolate_query])
    assert es.get_percolate_query(percolate_query_id) == {
        '_id': str(percolate_query_id),
        '_index': es.get_default_backing_index(PERCOLATE_INDEX_TYPE),
        '_source': query,
        '_seq_no': 0,
        '_primary_term': 1,
        '_type': GLOBAL_DOC_TYPE,
        '_version': 1,
        'found': True,
    }
Example #29
Source File: crawl.py From weapp-zhihulive with Apache License 2.0 | 5 votes |
async def parse_zhuanlan_link(self, response):
    posts = await response.json()

    if response.status == 200 and posts:
        for post in posts:
            cover = post['titleImage']
            if not cover:
                continue
            s = Live.search()
            title = post['title']
            for sep in ('-', '—'):
                if sep in title:
                    title = title.split(sep)[-1].strip()
            speaker_id = post['author']['hash']
            zid = post['url'].split('/')[-1]
            s = s.query(Q('match_phrase', subject=title))
            lives = await s.execute()
            for live in lives:
                if live.speaker and live.speaker.speaker_id == speaker_id:
                    await self.update_live(zid, cover, live)
                    break
            else:
                match = LIVE_REGEX.search(post['content'])
                if match:
                    live_id = match.group(2)
                    try:
                        live = await Live.get(live_id)
                    except NotFoundError:
                        pass
                    else:
                        await self.update_live(zid, cover, live)

    return get_next_url(response.url)
Example #30
Source File: search_controller.py From cccatalog-api with MIT License | 5 votes |
def get_sources(index):
    """
    Given an index, find all available data sources and return their counts.

    :param index: An Elasticsearch index, such as `'image'`.
    :return: A dictionary mapping sources to the count of their images.
    """
    source_cache_name = 'sources-' + index
    sources = cache.get(key=source_cache_name)
    if type(sources) == list:
        # Invalidate old provider format.
        cache.delete(key=source_cache_name)
    if not sources:
        # Don't increase `size` without reading this issue first:
        # https://github.com/elastic/elasticsearch/issues/18838
        size = 100
        agg_body = {
            'aggs': {
                'unique_sources': {
                    'terms': {
                        'field': 'source.keyword',
                        'size': size,
                        "order": {
                            "_key": "desc"
                        }
                    }
                }
            }
        }
        try:
            results = es.search(index=index, body=agg_body, request_cache=True)
            buckets = results['aggregations']['unique_sources']['buckets']
        except NotFoundError:
            buckets = [{'key': 'none_found', 'doc_count': 0}]
        sources = {result['key']: result['doc_count'] for result in buckets}
        cache.set(
            key=source_cache_name,
            timeout=CACHE_TIMEOUT,
            value=sources
        )
    return sources