Java Code Examples for org.elasticsearch.search.SearchHit#getId()
The following examples show how to use
org.elasticsearch.search.SearchHit#getId().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: HashJoinElasticExecutor.java From elasticsearch-sql with Apache License 2.0 | 6 votes |
/**
 * Loads every hit of the join's first table and inserts each one into the hash-join
 * comparison structure, keyed by its join-field values, so second-table hits can later
 * be matched against them. Also feeds the per-comparison terms-filter optimization map.
 *
 * @param optimizationTermsFilterStructure comparison-id -> (field -> collected values),
 *        filled by getComparisonKey as a side effect for later terms-filter pushdown
 * @param firstTableRequest request builder for the first (hashed) table
 */
private void createKeyToResultsAndFillOptimizationStructure(
        Map<String, Map<String, List<Object>>> optimizationTermsFilterStructure,
        TableInJoinRequestBuilder firstTableRequest) {
    List<SearchHit> firstTableHits = fetchAllHits(firstTableRequest);
    // Synthetic doc ids for the copied hits start at 1.
    int resultIds = 1;
    for (SearchHit hit : firstTableHits) {
        HashMap<String, List<Map.Entry<Field, Field>>> comparisons =
                this.hashJoinComparisonStructure.getComparisons();
        // A hit is inserted once per comparison (OR'd join conditions each get a key).
        for (Map.Entry<String, List<Map.Entry<Field, Field>>> comparison : comparisons.entrySet()) {
            String comparisonID = comparison.getKey();
            List<Map.Entry<Field, Field>> t1ToT2FieldsComparison = comparison.getValue();
            String key = getComparisonKey(t1ToT2FieldsComparison, hit, true,
                    optimizationTermsFilterStructure.get(comparisonID));
            //int docid , id
            SearchHit searchHit = new SearchHit(resultIds, hit.getId(),
                    new Text(hit.getType()), hit.getFields(), null);
            searchHit.sourceRef(hit.getSourceRef());
            // Trim the copied source down to the fields the SELECT asked for.
            onlyReturnedFields(searchHit.getSourceAsMap(), firstTableRequest.getReturnedFields(),
                    firstTableRequest.getOriginalSelect().isSelectAll());
            resultIds++;
            this.hashJoinComparisonStructure.insertIntoComparisonHash(comparisonID, key, searchHit);
        }
    }
}
Example 2
Source File: IntersectExecutor.java From elasticsearch-sql with Apache License 2.0 | 6 votes |
/**
 * Builds the final INTERSECT hit list from the de-duplicated comparable results.
 * Each original hit is copied into a new SearchHit with a fresh sequential doc id,
 * its flattened source fields are renamed according to the first table's aliases,
 * and the copies are stored in {@code this.intersectHits}.
 */
private void fillIntersectHitsFromResults(Set<ComperableHitResult> comparableHitResults) {
    int nextDocId = 1;
    List<SearchHit> hits = new ArrayList<>(comparableHitResults.size());
    Set<Map.Entry<String, String>> fieldToAliasEntries =
            this.builder.getFirstTableFieldToAlias().entrySet();
    for (ComperableHitResult comparableResult : comparableHitResults) {
        SearchHit source = comparableResult.getOriginalHit();
        SearchHit copy = new SearchHit(nextDocId, source.getId(),
                new Text(source.getType()), source.getFields(), null);
        copy.sourceRef(source.getSourceRef());
        copy.getSourceAsMap().clear();

        // Rename every flattened field that has an alias defined for the first table.
        Map<String, Object> flattened = comparableResult.getFlattenMap();
        for (Map.Entry<String, String> fieldToAlias : fieldToAliasEntries) {
            String field = fieldToAlias.getKey();
            if (flattened.containsKey(field)) {
                Object fieldValue = flattened.get(field);
                flattened.remove(field);
                flattened.put(fieldToAlias.getValue(), fieldValue);
            }
        }
        copy.getSourceAsMap().putAll(flattened);

        nextDocId++;
        hits.add(copy);
    }
    int hitCount = nextDocId - 1;
    SearchHit[] hitArray = hits.toArray(new SearchHit[hitCount]);
    this.intersectHits = new SearchHits(hitArray,
            new TotalHits(hitCount, TotalHits.Relation.EQUAL_TO), 1.0f);
}
Example 3
Source File: KibanaUtils.java From openshift-elasticsearch-plugin with Apache License 2.0 | 6 votes |
/**
 * Determine a set of projects based on the index patterns that
 * were generated by this plugin. Any index patterns that are user
 * generated which do not match the format 'project.$NAME.$UID.*'
 * are ignored
 *
 * @param context The OpenshiftRequestContext for this request
 * @return a set of projects
 */
public Set<Project> getProjectsFromIndexPatterns(OpenshiftRequestContext context) {
    LOGGER.trace("Getting projects from indexPatterns...");
    Set<Project> patterns = new HashSet<>();
    // Pull up to 1000 index-pattern documents from the user's kibana index.
    SearchResponse response = pluginClient.search(context.getKibanaIndex(), INDICIES_TYPE, 1000, false);
    if (response.getHits() != null && response.getHits().getTotalHits() > 0) {
        for (SearchHit hit : response.getHits().getHits()) {
            String id = hit.getId();
            LOGGER.trace("Evaluating pattern '{}'", id);
            Project project = getProjectFromIndexPattern(id);
            // A parsed name different from the raw id means the pattern followed the
            // plugin's 'project.$NAME.$UID.*' format; ALL_ALIAS is always accepted.
            if (!project.getName().equals(id) || project.equals(ALL_ALIAS)) {
                LOGGER.trace("Adding project '{}'", project);
                patterns.add(project);
            } // else we found a user created index-pattern. Ignore
        }
    } else {
        LOGGER.debug("No index-mappings found in the kibana index '{}'", context.getKibanaIndex());
    }
    return patterns;
}
Example 4
Source File: NestedLoopsElasticExecutor.java From elasticsearch-sql with Apache License 2.0 | 5 votes |
/**
 * Creates a single combined hit from a first-table hit and its matching second-table hit.
 * The combined id and type take the form "first|second"; the source starts as a copy of
 * the first table's source, then the (field-filtered) second-table source is merged in
 * under the given table aliases.
 */
private SearchHit getMergedHit(int currentCombinedResults, String t1Alias, String t2Alias,
        SearchHit hitFromFirstTable, SearchHit matchedHit) {
    // Keep only the fields the second table's SELECT asked for.
    onlyReturnedFields(matchedHit.getSourceAsMap(),
            nestedLoopsRequest.getSecondTable().getReturnedFields(),
            nestedLoopsRequest.getSecondTable().getOriginalSelect().isSelectAll());

    String combinedId = hitFromFirstTable.getId() + "|" + matchedHit.getId();
    Text combinedType = new Text(hitFromFirstTable.getType() + "|" + matchedHit.getType());
    SearchHit merged = new SearchHit(currentCombinedResults, combinedId, combinedType,
            hitFromFirstTable.getFields(), null);
    merged.sourceRef(hitFromFirstTable.getSourceRef());

    // Start from a private copy of the first table's source, then merge the second's.
    merged.getSourceAsMap().clear();
    merged.getSourceAsMap().putAll(hitFromFirstTable.getSourceAsMap());
    mergeSourceAndAddAliases(matchedHit.getSourceAsMap(), merged, t1Alias, t2Alias);
    return merged;
}
Example 5
Source File: ESDataExchangeImpl.java From youkefu with Apache License 2.0 | 5 votes |
/**
 * Bulk delete by query; a single call removes at most 10,000 matching documents
 * from the {@code UKDataContext.CALLOUT_INDEX} index.
 *
 * @param query the query selecting the documents to delete
 * @param type  the document type to search within
 * @throws Exception if the search or the bulk delete fails
 */
public void deleteByCon(QueryBuilder query, String type) throws Exception {
    BulkRequestBuilder bulkRequest = UKDataContext.getTemplet().getClient().prepareBulk();
    // Fetch up to 10,000 matching ids in one page. Note: the previous
    // setExplain(true) was removed — score explanations are never read here
    // and only add per-hit overhead to a delete operation.
    SearchResponse response = UKDataContext.getTemplet().getClient()
            .prepareSearch(UKDataContext.CALLOUT_INDEX).setTypes(type)
            .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
            .setQuery(query)
            .setFrom(0).setSize(10000)
            .execute().actionGet();
    if (response.getHits().getTotalHits() > 0) {
        for (SearchHit hit : response.getHits()) {
            String id = hit.getId();
            bulkRequest.add(UKDataContext.getTemplet().getClient()
                    .prepareDelete(UKDataContext.CALLOUT_INDEX, type, id).request());
        }
        bulkRequest.get();
    }
}
Example 6
Source File: MinusExecutor.java From elasticsearch-sql with Apache License 2.0 | 5 votes |
/**
 * Builds the final MINUS result hits from the de-duplicated comparable results.
 * Each surviving hit is copied with a fresh sequential doc id, its flattened source
 * fields are renamed according to the first table's aliases, and the copies are
 * stored in {@code this.minusHits}.
 *
 * Fixes: removed a dead {@code ArrayList<Object> values} that was created and filled
 * but never read; presized the result list; renamed the misleading
 * {@code unionHitsArr} local.
 */
private void fillMinusHitsFromResults(Set<ComperableHitResult> comperableHitResults) {
    int currentId = 1;
    // One output hit per comparable result, so presize accordingly.
    List<SearchHit> minusHitsList = new ArrayList<>(comperableHitResults.size());
    for (ComperableHitResult result : comperableHitResults) {
        SearchHit originalHit = result.getOriginalHit();
        SearchHit searchHit = new SearchHit(currentId, originalHit.getId(),
                new Text(originalHit.getType()), originalHit.getFields(), null);
        searchHit.sourceRef(originalHit.getSourceRef());
        searchHit.getSourceAsMap().clear();
        Map<String, Object> sourceAsMap = result.getFlattenMap();
        // Apply the first table's field-to-alias renames to the flattened source.
        for (Map.Entry<String, String> entry : this.builder.getFirstTableFieldToAlias().entrySet()) {
            if (sourceAsMap.containsKey(entry.getKey())) {
                Object value = sourceAsMap.get(entry.getKey());
                sourceAsMap.remove(entry.getKey());
                sourceAsMap.put(entry.getValue(), value);
            }
        }
        searchHit.getSourceAsMap().putAll(sourceAsMap);
        currentId++;
        minusHitsList.add(searchHit);
    }
    int totalSize = currentId - 1;
    SearchHit[] minusHitsArr = minusHitsList.toArray(new SearchHit[totalSize]);
    this.minusHits = new SearchHits(minusHitsArr,
            new TotalHits(totalSize, TotalHits.Relation.EQUAL_TO), 1.0f);
}
Example 7
Source File: ElasticJoinExecutor.java From elasticsearch-sql with Apache License 2.0 | 5 votes |
/**
 * Builds a combined hit for a first-table hit that matched nothing in the second table
 * (the outer-join "unmatched" case). The id becomes "id|0" and the type "type|null";
 * the second table's returned fields are filled with nulls before aliases are applied.
 */
protected SearchHit createUnmachedResult(List<Field> secondTableReturnedFields, int docId,
        String t1Alias, String t2Alias, SearchHit hit) {
    String combinedId = hit.getId() + "|0";
    Text combinedType = new Text(hit.getType() + "|null");

    SearchHit unmatched = new SearchHit(docId, combinedId, combinedType, hit.getFields(), null);
    unmatched.sourceRef(hit.getSourceRef());
    unmatched.getSourceAsMap().clear();
    unmatched.getSourceAsMap().putAll(hit.getSourceAsMap());

    // The second table contributes only null values for its returned fields.
    Map<String, Object> nullFilledSecondTableSource = createNullsSource(secondTableReturnedFields);
    mergeSourceAndAddAliases(nullFilledSecondTableSource, unmatched, t1Alias, t2Alias);
    return unmatched;
}
Example 8
Source File: FeatureLoader.java From pyramid with Apache License 2.0 | 5 votes |
/**
 * Sets a binary value for the given ngram feature: every document matched by the
 * span-near query gets 1 for this feature (positive score magnitude is discarded).
 */
public static void loadNgramFeatureBinary(ESIndex index, DataSet dataSet, Ngram feature,
        IdTranslator idTranslator, String docFilter) {
    int featureIndex = feature.getIndex();
    SearchResponse response = index.spanNear(feature, docFilter, idTranslator.numData());
    for (SearchHit hit : response.getHits().getHits()) {
        int algorithmId = idTranslator.toIntId(hit.getId());
        // Binarize: any positive relevance score just means "feature present".
        float value = hit.getScore() > 0 ? 1 : hit.getScore();
        dataSet.setFeatureValue(algorithmId, featureIndex, value);
    }
}
Example 9
Source File: UserDao.java From usergrid with Apache License 2.0 | 5 votes |
/**
 * Creates a User object from given elastic search query
 *
 * @param hit Elasticsearch <code>SearchHit</code> that holds the user information
 * @return <code>User</code> object corresponding to the given query result
 */
private static User toUser(SearchHit hit) {
    Map<String, Object> source = hit.getSource();
    String password = Util.getString(source, "password");
    return new User(hit.getId(), password);
}
Example 10
Source File: FeatureLoader.java From pyramid with Apache License 2.0 | 5 votes |
/**
 * Loads a code-description feature: runs a minimum-should-match query built from the
 * feature's description and writes each matched document's relevance score into the
 * data set at the feature's column.
 */
private static void loadCodeDesFeature(ESIndex index, DataSet dataSet, Feature feature,
        IdTranslator idTranslator, String docFilter) {
    int featureIndex = feature.getIndex();
    CodeDescription codeDescription = (CodeDescription) feature;
    SearchResponse response = index.minimumShouldMatch(codeDescription.getDescription(),
            codeDescription.getField(), codeDescription.getPercentage(),
            idTranslator.numData(), docFilter);
    for (SearchHit hit : response.getHits().getHits()) {
        int algorithmId = idTranslator.toIntId(hit.getId());
        dataSet.setFeatureValue(algorithmId, featureIndex, hit.getScore());
    }
}
Example 11
Source File: RunResultDao.java From usergrid with Apache License 2.0 | 5 votes |
/**
 * Converts an Elasticsearch search hit into a RunResult, pulling the run statistics
 * out of the hit's source document.
 */
private static RunResult toRunResult(SearchHit hit) {
    Map<String, Object> source = hit.getSource();
    return new BasicRunResult(
            hit.getId(),
            Util.getString(source, "runId"),
            Util.getInt(source, "runCount"),
            Util.getInt(source, "runTime"),
            Util.getInt(source, "ignoreCount"),
            Util.getInt(source, "failureCount"),
            Util.getString(source, "failures"));
}
Example 12
Source File: FeatureLoader.java From pyramid with Apache License 2.0 | 5 votes |
/**
 * Loads a frequency-based ngram feature: runs the span-near frequency query and writes
 * each matched document's score into the data set at the feature's column.
 */
private static void loadNgramFeatureFrequency(ESIndex index, DataSet dataSet, Ngram feature,
        IdTranslator idTranslator, String docFilter) {
    int featureIndex = feature.getIndex();
    SearchResponse response = index.spanNearFrequency(feature, docFilter, idTranslator.numData());
    for (SearchHit hit : response.getHits().getHits()) {
        int algorithmId = idTranslator.toIntId(hit.getId());
        dataSet.setFeatureValue(algorithmId, featureIndex, hit.getScore());
    }
}
Example 13
Source File: FeatureLoader.java From pyramid with Apache License 2.0 | 5 votes |
/**
 * Loads an ngram feature using the raw span-near score: each matched document's score
 * is written unchanged into the data set at the feature's column.
 */
private static void loadNgramFeatureOriginal(ESIndex index, DataSet dataSet, Ngram feature,
        IdTranslator idTranslator, String docFilter) {
    int featureIndex = feature.getIndex();
    SearchResponse response = index.spanNear(feature, docFilter, idTranslator.numData());
    for (SearchHit hit : response.getHits().getHits()) {
        int algorithmId = idTranslator.toIntId(hit.getId());
        dataSet.setFeatureValue(algorithmId, featureIndex, hit.getScore());
    }
}
Example 14
Source File: ESQueryResponseHandler.java From search-spring-boot-starter with Apache License 2.0 | 5 votes |
/**
 * Processes the documents of a search response.
 *
 * @param esObject       the search request parameters
 * @param searchResponse the raw search response
 * @param esResponse     the ES response object to populate
 */
private void documentHandler(QueryESObject esObject, final SearchResponse searchResponse, ESResponse esResponse) {
    final SearchHits hits = searchResponse.getHits();
    if (null == hits) {
        return;
    }
    // Populate paging info on the response when this was a paged search.
    if (esObject.pageSearch()) {
        final PageCondition pageCondition = esObject.getPageCondition();
        pageCondition.setTotalDocs(hits.getTotalHits());
        esResponse.setPageCondition(pageCondition);
    }
    final SearchHit[] searchHits = hits.getHits();
    if (null == searchHits || searchHits.length == 0) {
        return;
    }
    List<ESDocument> esDocuments = new ArrayList<>();
    for (SearchHit searchHit : searchHits) {
        Map dataMap = null;
        try {
            dataMap = ESSearchConvertor.json2Object(searchHit.getSourceAsString(), Map.class);
        } catch (IOException e) {
            // NOTE(review): swallowing with printStackTrace loses the failure;
            // consider logging and/or surfacing a parse error instead.
            e.printStackTrace();
        }
        if (dataMap != null && !dataMap.isEmpty()) {
            ESDocument esDocument = new ESDocument();
            // NOTE(review): docId is fetched but never stored on esDocument —
            // this looks like a missing setter call (e.g. set the id on the
            // document); verify against ESDocument's API.
            final String docId = searchHit.getId();
            esDocument.setDataMap(dataMap);
            esDocuments.add(esDocument);
        }
    }
    esResponse.setEsDocuments(esDocuments);
}
Example 15
Source File: GenTermValuesHandler.java From elasticsearch-taste with Apache License 2.0 | 4 votes |
/**
 * Handles one page of scroll-search results: waits for the previous page's
 * multi-term-vector batch to finish, then issues a term-vector request per hit and
 * continues the scroll. An empty page ends the scan and shuts the handler down.
 */
@Override
public void onResponse(final SearchResponse response) {
    // Wait for the previous batch's listener before starting the next batch,
    // so only one multi-term-vector request is in flight at a time.
    if (mTVListener != null) {
        try {
            mTVListener.await();
        } catch (final InterruptedException e) {
            if (logger.isDebugEnabled()) {
                logger.debug("Interrupted.", e);
            }
        }
    }
    if (interrupted) {
        return;
    }
    final SearchHits searchHits = response.getHits();
    final SearchHit[] hits = searchHits.getHits();
    if (hits.length == 0) {
        // No more pages: release the gate and stop.
        scrollSearchGate.countDown();
        shutdown();
    } else {
        final Map<String, DocInfo> idMap = new HashMap<>(hits.length);
        final MultiTermVectorsRequestBuilder requestBuilder = client
                .prepareMultiTermVectors();
        for (final SearchHit hit : hits) {
            final String id = hit.getId();
            final SearchHitField searchHitField = hit.field(idField);
            if (searchHitField != null) {
                idMap.put(id, new DocInfo((String) searchHitField.getValue(), hit.getSource()));
            }
            // One term-vector request per hit, restricted to the configured fields.
            final TermVectorsRequest termVectorRequest = new TermVectorsRequest(
                    sourceIndex, sourceType, id);
            termVectorRequest.selectedFields(sourceFields);
            requestBuilder.add(termVectorRequest);
        }
        mTVListener = new MultiTermVectorsListener(numOfThreads, requestHandlers,
                eventParams, idMap, executor, logger);
        requestBuilder.execute(mTVListener);
        // Continue the scroll; this handler receives the next page too.
        client.prepareSearchScroll(response.getScrollId())
                .setScroll(new TimeValue(keepAlive.longValue()))
                .execute(this);
    }
}
Example 16
Source File: ElasticsearchDataStructure.java From rdf4j with BSD 3-Clause "New" or "Revised" License | 4 votes |
/**
 * Streams statements matching the given pattern out of Elasticsearch via a scrolling
 * iterator, converting each hit's source map back into an ExtensibleStatement.
 * Hash-collision candidates on the object value are filtered out (see inline note).
 */
@Override
public CloseableIteration<? extends ExtensibleStatement, SailException> getStatements(Resource subject,
        IRI predicate, Value object, boolean inferred, Resource... context) {

    QueryBuilder queryBuilder = getQueryBuilder(subject, predicate, object, inferred, context);

    return new LookAheadIteration<ExtensibleStatement, SailException>() {

        CloseableIteration<SearchHit, RuntimeException> iterator = ElasticsearchHelper
                .getScrollingIterator(queryBuilder, clientProvider.getClient(), index, scrollTimeout);

        @Override
        protected ExtensibleStatement getNextElement() throws SailException {
            ExtensibleStatement next = null;

            // Advance until a hit survives the hash-collision check (or the scroll ends).
            while (next == null && iterator.hasNext()) {
                SearchHit nextSearchHit = iterator.next();

                Map<String, Object> sourceAsMap = nextSearchHit.getSourceAsMap();

                String id = nextSearchHit.getId();

                ExtensibleStatement statement = sourceToStatement(sourceAsMap, id, subject, predicate, object);

                // we use hash to lookup the object value because the object can be bigger than what elasticsearch
                // allows as max for keyword (32766 bytes), so it needs to be stored in a text field that is not
                // index. The hash is stored in an integer field and is index. The code below does hash collision
                // check.
                if (object != null
                        && object.stringValue().hashCode() == statement.getObject().stringValue().hashCode()
                        && !object.equals(statement.getObject())) {
                    continue;
                }

                next = statement;
            }

            return next;
        }

        @Override
        public void remove() throws SailException {
            throw new IllegalStateException("Does not support removing from iterator");
        }

        @Override
        protected void handleClose() throws SailException {
            // Always close the scroll iterator along with this iteration.
            super.handleClose();
            iterator.close();
        }
    };
}
Example 17
Source File: HashJoinElasticExecutor.java From elasticsearch-sql with Apache License 2.0 | 4 votes |
/**
 * Probes the second table against the previously built hash of first-table hits and
 * returns the combined (joined) hits. Uses scrolling unless a small hint limit allows
 * a single fetch; stops once the total result limit is reached.
 *
 * @param secondTableRequest request builder for the second (probe) table
 * @return joined hits, each merging a matching first-table hit with a second-table hit
 */
private List<SearchHit> createCombinedResults(TableInJoinRequestBuilder secondTableRequest) {
    List<SearchHit> combinedResult = new ArrayList<>();
    int resultIds = 0;
    int totalLimit = this.requestBuilder.getTotalLimit();
    Integer hintLimit = secondTableRequest.getHintLimit();
    SearchResponse searchResponse;
    boolean finishedScrolling;
    // Small hint limit: one fetch is enough, no scroll needed.
    if (hintLimit != null && hintLimit < MAX_RESULTS_ON_ONE_FETCH) {
        searchResponse = secondTableRequest.getRequestBuilder().setSize(hintLimit).get();
        finishedScrolling = true;
    } else {
        searchResponse = secondTableRequest.getRequestBuilder()
                .setScroll(new TimeValue(60000))
                .setSize(MAX_RESULTS_ON_ONE_FETCH).get();
        //es5.0 no need to scroll again!
        // searchResponse = client.prepareSearchScroll(searchResponse.getScrollId()).setScroll(new TimeValue(600000)).get();
        finishedScrolling = false;
    }
    updateMetaSearchResults(searchResponse);

    boolean limitReached = false;
    int fetchedSoFarFromSecondTable = 0;
    while (!limitReached) {
        SearchHit[] secondTableHits = searchResponse.getHits().getHits();
        fetchedSoFarFromSecondTable += secondTableHits.length;
        for (SearchHit secondTableHit : secondTableHits) {
            if (limitReached) break;
            //todo: need to run on comparisons. for each comparison check if exists and add.
            HashMap<String, List<Map.Entry<Field, Field>>> comparisons =
                    this.hashJoinComparisonStructure.getComparisons();
            for (Map.Entry<String, List<Map.Entry<Field, Field>>> comparison : comparisons.entrySet()) {
                String comparisonID = comparison.getKey();
                List<Map.Entry<Field, Field>> t1ToT2FieldsComparison = comparison.getValue();
                // Build this probe hit's join key and look it up in the hash.
                String key = getComparisonKey(t1ToT2FieldsComparison, secondTableHit, false, null);
                SearchHitsResult searchHitsResult =
                        this.hashJoinComparisonStructure.searchForMatchingSearchHits(comparisonID, key);
                if (searchHitsResult != null && searchHitsResult.getSearchHits().size() > 0) {
                    searchHitsResult.setMatchedWithOtherTable(true);
                    List<SearchHit> searchHits = searchHitsResult.getSearchHits();
                    for (SearchHit matchingHit : searchHits) {
                        String combinedId = matchingHit.getId() + "|" + secondTableHit.getId();
                        //in order to prevent same matching when using OR on hashJoins.
                        if (this.alreadyMatched.contains(combinedId)) {
                            continue;
                        } else {
                            this.alreadyMatched.add(combinedId);
                        }

                        // Copy the probe hit's source so trimming doesn't mutate the original.
                        Map<String, Object> copiedSource = new HashMap<String, Object>();
                        copyMaps(copiedSource, secondTableHit.getSourceAsMap());
                        onlyReturnedFields(copiedSource, secondTableRequest.getReturnedFields(),
                                secondTableRequest.getOriginalSelect().isSelectAll());

                        SearchHit searchHit = new SearchHit(matchingHit.docId(), combinedId,
                                new Text(matchingHit.getType() + "|" + secondTableHit.getType()),
                                matchingHit.getFields(), null);
                        searchHit.sourceRef(matchingHit.getSourceRef());
                        searchHit.getSourceAsMap().clear();
                        searchHit.getSourceAsMap().putAll(matchingHit.getSourceAsMap());

                        String t1Alias = requestBuilder.getFirstTable().getAlias();
                        String t2Alias = requestBuilder.getSecondTable().getAlias();
                        mergeSourceAndAddAliases(copiedSource, searchHit, t1Alias, t2Alias);

                        combinedResult.add(searchHit);
                        resultIds++;
                        if (resultIds >= totalLimit) {
                            limitReached = true;
                            break;
                        }
                    }
                }
            }
        }
        if (!finishedScrolling) {
            // Keep scrolling while pages are non-empty and the hint limit is satisfied.
            if (secondTableHits.length > 0 && (hintLimit == null || fetchedSoFarFromSecondTable >= hintLimit)) {
                searchResponse = client.prepareSearchScroll(searchResponse.getScrollId())
                        .setScroll(new TimeValue(600000)).execute().actionGet();
            } else break;
        } else {
            break;
        }
    }
    return combinedResult;
}
Example 18
Source File: SearchApiMain.java From elasticsearch-pool with Apache License 2.0 | 4 votes |
/**
 * Demonstrates a basic search with the high-level REST client: builds a match query
 * against a fixed index/type, runs it, and prints each hit's source JSON.
 */
public static void searchApi() throws IOException {
    RestHighLevelClient client = HighLevelClient.getInstance();
    try {
        SearchRequest searchRequest = new SearchRequest("jingma2_test"); // restrict to this index
        searchRequest.types("testlog"); // restrict to this type
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        /* Query all records */
//            searchSourceBuilder.query(QueryBuilders.matchAllQuery());
        /* Match query */
        QueryBuilder matchQueryBuilder = QueryBuilders.matchQuery("name", "风雷");
        /* Set a Chinese analyzer */
//            ((MatchQueryBuilder) matchQueryBuilder).analyzer("ik");
//            ((MatchQueryBuilder) matchQueryBuilder).analyzer("ik_max_word");
//            ((MatchQueryBuilder) matchQueryBuilder).analyzer("ik_smart");
//            ((MatchQueryBuilder) matchQueryBuilder).analyzer("standard");
        searchSourceBuilder.query(matchQueryBuilder);
        /* Restrict the query condition and number of results */
//            searchSourceBuilder.query(QueryBuilders.termQuery("name", "风雷"));
        searchSourceBuilder.from(0);
        searchSourceBuilder.size(5);
//            searchSourceBuilder.timeout(new TimeValue(60, TimeUnit.SECONDS));
        /* Restrict the ordering of the results */
//            searchSourceBuilder.sort(new ScoreSortBuilder().order(SortOrder.DESC));
//            searchSourceBuilder.sort(new FieldSortBuilder("age").order(SortOrder.ASC));
        searchRequest.source(searchSourceBuilder);
        SearchResponse searchResponse = client.search(searchRequest);
        System.out.println(searchResponse);
        SearchHits hits = searchResponse.getHits();
        long totalHits = hits.getTotalHits();
        float maxScore = hits.getMaxScore();
        SearchHit[] searchHits = hits.getHits();
        for (SearchHit hit : searchHits) {
            String index = hit.getIndex();
            String type = hit.getType();
            String id = hit.getId();
            float score = hit.getScore();
            String sourceAsString = hit.getSourceAsString();
            System.out.println(sourceAsString);
//                Map<String, Object> sourceAsMap = hit.getSourceAsMap();
        }
    } finally {
        HighLevelClient.close();
    }
}
Example 19
Source File: ElasticSearchProvider.java From inception with Apache License 2.0 | 4 votes |
/**
 * Runs a simple-query-string search against the configured index and converts each hit
 * into an ExternalSearchResult with metadata and (optionally) highlights. When random
 * ordering is requested, a seeded random-score function replaces relevance ranking and
 * scores are not reported to the user.
 */
@Override
public List<ExternalSearchResult> executeQuery(DocumentRepository aRepository,
        ElasticSearchProviderTraits aTraits, String aQuery) throws IOException {
    List<ExternalSearchResult> results = new ArrayList<>();

    try (RestHighLevelClient client = makeClient(aTraits)) {
        HighlightBuilder highlightBuilder = new HighlightBuilder()
                .field(new HighlightBuilder.Field(aTraits.getDefaultField())
                        .highlighterType("unified"));

        // Exclude the raw document body from _source; only metadata is needed here.
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder()
                .fetchSource(null, ELASTIC_HIT_DOC_KEY)
                .highlighter(highlightBuilder)
                .size(aTraits.getResultSize());

        QueryBuilder qb = QueryBuilders.simpleQueryStringQuery(aQuery)
                .field(aTraits.getDefaultField());

        if (aTraits.isRandomOrder()) {
            // Seeded random scoring: deterministic shuffle of the matching documents.
            RandomScoreFunctionBuilder randomFunc = ScoreFunctionBuilders.randomFunction();
            randomFunc.seed(aTraits.getSeed());

            searchSourceBuilder.query(QueryBuilders.functionScoreQuery(
                    QueryBuilders.constantScoreQuery(qb).boost(1.0f), randomFunc));
        }
        else {
            searchSourceBuilder.query(qb);
        }

        SearchRequest searchRequest = new SearchRequest(aTraits.getIndexName())
                .source(searchSourceBuilder);

        SearchResponse response = client.search(searchRequest);

        for (SearchHit hit : response.getHits().getHits()) {
            if (hit.getSourceAsMap() == null
                    || hit.getSourceAsMap().get(ELASTIC_HIT_METADATA_KEY) == null) {
                log.warn("Result has no document metadata: " + hit);
                continue;
            }

            ExternalSearchResult result = new ExternalSearchResult(aRepository,
                    aTraits.getIndexName(), hit.getId());

            // If the order is random, then the score doesn't reflect the quality, so we do not
            // forward it to the user
            if (!aTraits.isRandomOrder()) {
                result.setScore((double) hit.getScore());
            }

            fillResultWithMetadata(result, hit.getSourceAsMap());

            if (hit.getHighlightFields().size() != 0) {
                // There are highlights, set them in the result
                List<ExternalSearchHighlight> highlights = new ArrayList<>();
                if (hit.getHighlightFields().get(aTraits.getDefaultField()) != null) {
                    for (Text highlight : hit.getHighlightFields()
                            .get(aTraits.getDefaultField())
                            .getFragments()) {
                        highlights.add(new ExternalSearchHighlight(highlight.toString()));
                    }
                }
                result.setHighlights(highlights);
            }
            results.add(result);
        }
    }
    return results;
}
Example 20
Source File: Select.java From code with Apache License 2.0 | 4 votes |
/**
 * Demo: executes the prepared search request, then prints the response summary
 * (timing, shard info, hit totals) and each hit's metadata and source.
 */
public void after() throws IOException {
    // 3. Execute the search and get the response.
    SearchResponse searchResponse = restHighLevelClient.search(searchRequest, RequestOptions.DEFAULT);
    // Time the query took, in milliseconds.
    TimeValue took = searchResponse.getTook();
    // Shard information.
    int total = searchResponse.getTotalShards();
    int success = searchResponse.getSuccessfulShards();
    int skipped = searchResponse.getSkippedShards();
    int failed = searchResponse.getFailedShards();
    // Overview object for the search results.
    SearchHits searchHits = searchResponse.getHits();
    // Total number of matching documents.
    long totalHits = searchHits.getTotalHits();
    // Highest document score among all results.
    float maxScore = searchHits.getMaxScore();
    System.out.println("took:" + took);
    System.out.println("_shards:");
    System.out.println("    total:" + total);
    System.out.println("    success:" + success);
    System.out.println("    skipped:" + skipped);
    System.out.println("    failed:" + failed);
    System.out.println("hits:");
    System.out.println("    total:" + totalHits);
    System.out.println("    max_score:" + maxScore);
    System.out.println("    hits:");
    // Array of hit documents; each element is one matching document.
    SearchHit[] hits = searchHits.getHits();
    for (SearchHit hit : hits) {
        // Index name.
        String index = hit.getIndex();
        // Document type.
        String type = hit.getType();
        // Document id.
        String id = hit.getId();
        // Document score.
        float score = hit.getScore();
        // Document source data.
        String source = hit.getSourceAsString();
        System.out.println("        _index:" + index);
        System.out.println("        _type:" + type);
        System.out.println("        _id:" + id);
        System.out.println("        _score:" + score);
        System.out.println("        _source:" + source);
    }
    restHighLevelClient.close();
}