org.elasticsearch.search.internal.SearchContext Java Examples
The following examples show how to use
org.elasticsearch.search.internal.SearchContext.
Each example notes its original project, source file, and license.
Example #1
Source File: ReverseNestedParser.java From Elasticsearch with Apache License 2.0
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
    String path = null;

    XContentParser.Token token;
    String currentFieldName = null;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (token == XContentParser.Token.VALUE_STRING) {
            if ("path".equals(currentFieldName)) {
                path = parser.text();
            } else {
                throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName
                        + "]: [" + currentFieldName + "].", parser.getTokenLocation());
            }
        } else {
            throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].",
                    parser.getTokenLocation());
        }
    }

    return new ReverseNestedAggregator.Factory(aggregationName, path);
}
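For reference, the only option this parser understands is path. The following stand-alone sketch shows how the token stream it consumes can be built; the JSON body, the class name, and the commented-out call are illustrative, and a real invocation would also need a live SearchContext:

import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;

public class ReverseNestedParseSketch {
    public static void main(String[] args) throws Exception {
        // Illustrative body: "path" is the only key parse() accepts.
        String body = "{\"path\": \"comments\"}";
        try (XContentParser parser = XContentFactory.xContent(body).createParser(body)) {
            parser.nextToken(); // position on START_OBJECT, as parse() expects
            // new ReverseNestedParser().parse("my_reverse_nested", parser, searchContext);
        }
    }
}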
Example #2
Source File: FieldDataFieldsFetchSubPhase.java From Elasticsearch with Apache License 2.0
@Override
public void hitExecute(SearchContext context, HitContext hitContext) {
    for (FieldDataFieldsContext.FieldDataField field : context.getFetchSubPhaseContext(CONTEXT_FACTORY).fields()) {
        if (hitContext.hit().fieldsOrNull() == null) {
            hitContext.hit().fields(new HashMap<String, SearchHitField>(2));
        }
        SearchHitField hitField = hitContext.hit().fields().get(field.name());
        if (hitField == null) {
            hitField = new InternalSearchHitField(field.name(), new ArrayList<>(2));
            hitContext.hit().fields().put(field.name(), hitField);
        }
        MappedFieldType fieldType = context.mapperService().smartNameFieldType(field.name());
        if (fieldType != null) {
            AtomicFieldData data = context.fieldData().getForField(fieldType).load(hitContext.readerContext());
            ScriptDocValues values = data.getScriptValues();
            values.setNextDocId(hitContext.docId());
            hitField.values().addAll(values.getValues());
        }
    }
}
Example #3
Source File: FetchSourceSubPhase.java From Elasticsearch with Apache License 2.0
@Override
public void hitExecute(SearchContext context, HitContext hitContext) {
    FetchSourceContext fetchSourceContext = context.fetchSourceContext();
    assert fetchSourceContext.fetchSource();
    if (fetchSourceContext.includes().length == 0 && fetchSourceContext.excludes().length == 0) {
        hitContext.hit().sourceRef(context.lookup().source().internalSourceRef());
        return;
    }

    SourceLookup source = context.lookup().source();
    Object value = source.filter(fetchSourceContext.includes(), fetchSourceContext.excludes());
    try {
        final int initialCapacity = Math.min(1024, source.internalSourceRef().length());
        BytesStreamOutput streamOutput = new BytesStreamOutput(initialCapacity);
        XContentBuilder builder = new XContentBuilder(context.lookup().source().sourceContentType().xContent(), streamOutput);
        builder.value(value);
        hitContext.hit().sourceRef(builder.bytes());
    } catch (IOException e) {
        throw new ElasticsearchException("Error filtering source", e);
    }
}
Example #4
Source File: DateHierarchyAggregator.java From elasticsearch-aggregation-pathhierarchy with MIT License
public DateHierarchyAggregator(
        String name,
        AggregatorFactories factories,
        SearchContext context,
        ValuesSource.Numeric valuesSource,
        BucketOrder order,
        long minDocCount,
        BucketCountThresholds bucketCountThresholds,
        List<DateHierarchyAggregationBuilder.RoundingInfo> roundingsInfo,
        Aggregator parent,
        List<PipelineAggregator> pipelineAggregators,
        Map<String, Object> metaData
) throws IOException {
    super(name, factories, context, parent, pipelineAggregators, metaData);
    this.valuesSource = valuesSource;
    this.roundingsInfo = roundingsInfo;
    this.minDocCount = minDocCount;
    bucketOrds = new BytesRefHash(1, context.bigArrays());
    this.order = InternalOrder.validate(order, this);
    this.bucketCountThresholds = bucketCountThresholds;
}
Example #5
Source File: HighlightUtils.java From Elasticsearch with Apache License 2.0
static List<Object> loadFieldValues(SearchContextHighlight.Field field, FieldMapper mapper, SearchContext searchContext,
                                    FetchSubPhase.HitContext hitContext) throws IOException {
    // percolator needs to always load from source, thus it sets the global force source to true
    boolean forceSource = searchContext.highlight().forceSource(field);
    List<Object> textsToHighlight;
    if (!forceSource && mapper.fieldType().stored()) {
        CustomFieldsVisitor fieldVisitor = new CustomFieldsVisitor(ImmutableSet.of(mapper.fieldType().names().indexName()), false);
        hitContext.reader().document(hitContext.docId(), fieldVisitor);
        textsToHighlight = fieldVisitor.fields().get(mapper.fieldType().names().indexName());
        if (textsToHighlight == null) {
            // Can happen if the document doesn't have the field to highlight
            textsToHighlight = Collections.emptyList();
        }
    } else {
        SourceLookup sourceLookup = searchContext.lookup().source();
        sourceLookup.setSegmentAndDocument(hitContext.readerContext(), hitContext.docId());
        textsToHighlight = sourceLookup.extractRawValues(hitContext.getSourcePath(mapper.fieldType().names().fullName()));
    }
    assert textsToHighlight != null;
    return textsToHighlight;
}
Example #6
Source File: ExplainFetchSubPhase.java From Elasticsearch with Apache License 2.0
@Override
public void hitExecute(SearchContext context, HitContext hitContext) {
    try {
        final int topLevelDocId = hitContext.hit().docId();
        Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);

        for (RescoreSearchContext rescore : context.rescore()) {
            explanation = rescore.rescorer().explain(topLevelDocId, context, rescore, explanation);
        }
        // we use the top level doc id, since we work with the top level searcher
        hitContext.hit().explanation(explanation);
    } catch (IOException e) {
        throw new FetchPhaseExecutionException(context,
                "Failed to explain doc [" + hitContext.hit().type() + "#" + hitContext.hit().id() + "]", e);
    } finally {
        context.clearReleasables(SearchContext.Lifetime.COLLECTION);
    }
}
Example #7
Source File: VersionFetchSubPhase.java From Elasticsearch with Apache License 2.0
@Override
public void hitExecute(SearchContext context, HitContext hitContext) {
    // (it might make sense to cache the TermDocs on a shared fetch context and just skip here)
    // it is going to mean we work on the high level multi reader and not the lower level reader as is
    // the case below...
    long version;
    try {
        BytesRef uid = Uid.createUidAsBytes(hitContext.hit().type(), hitContext.hit().id());
        version = Versions.loadVersion(
                hitContext.readerContext().reader(),
                new Term(UidFieldMapper.NAME, uid)
        );
    } catch (IOException e) {
        throw new ElasticsearchException("Could not query index for _version", e);
    }

    if (version < 0) {
        version = -1;
    }
    hitContext.hit().version(version);
}
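The lookup key here is the document's _uid term, which this codebase encodes as the type and the id joined by '#'. A minimal sketch of that composite key (values illustrative):

import org.apache.lucene.util.BytesRef;

public class UidSketch {
    public static void main(String[] args) {
        // Same shape as the term bytes built by Uid.createUidAsBytes: type, '#', id.
        BytesRef uid = new BytesRef("my_type#my_id");
        System.out.println(uid.utf8ToString()); // -> my_type#my_id
    }
}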
Example #8
Source File: QueryPhase.java From Elasticsearch with Apache License 2.0
@Override
public void execute(SearchContext searchContext) throws QueryPhaseExecutionException {
    // Pre-process aggregations as late as possible. In the case of a DFS_Q_T_F
    // request, preProcess is called on the DFS phase, this is why we pre-process them
    // here to make sure it happens during the QUERY phase
    aggregationPhase.preProcess(searchContext);

    boolean rescore = execute(searchContext, searchContext.searcher());

    if (rescore) { // only if we do a regular search
        rescorePhase.execute(searchContext);
    }
    suggestPhase.execute(searchContext);
    aggregationPhase.execute(searchContext);

    if (searchContext.getProfilers() != null) {
        List<ProfileShardResult> shardResults = Profiler.buildShardResults(searchContext.getProfilers().getProfilers());
        searchContext.queryResult().profileResults(shardResults);
    }
}
Example #9
Source File: InnerHitsQueryParserHelper.java From Elasticsearch with Apache License 2.0
public InnerHitsSubSearchContext parse(QueryParseContext parserContext) throws IOException, QueryParsingException {
    String fieldName = null;
    XContentParser.Token token;
    String innerHitName = null;
    SubSearchContext subSearchContext = new SubSearchContext(SearchContext.current());
    try {
        XContentParser parser = parserContext.parser();
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                fieldName = parser.currentName();
            } else if (token.isValue()) {
                if ("name".equals(fieldName)) {
                    innerHitName = parser.textOrNull();
                } else {
                    parseCommonInnerHitOptions(parser, token, fieldName, subSearchContext, sortParseElement,
                            sourceParseElement, highlighterParseElement, scriptFieldsParseElement, fieldDataFieldsParseElement);
                }
            } else {
                parseCommonInnerHitOptions(parser, token, fieldName, subSearchContext, sortParseElement,
                        sourceParseElement, highlighterParseElement, scriptFieldsParseElement, fieldDataFieldsParseElement);
            }
        }
    } catch (Exception e) {
        throw new QueryParsingException(parserContext, "Failed to parse [_inner_hits]", e);
    }
    return new InnerHitsSubSearchContext(innerHitName, subSearchContext);
}
Example #10
Source File: ShardSearchStats.java From Elasticsearch with Apache License 2.0
public void onPreQueryPhase(SearchContext searchContext) {
    totalStats.queryCurrent.inc();
    if (searchContext.groupStats() != null) {
        for (int i = 0; i < searchContext.groupStats().size(); i++) {
            groupStats(searchContext.groupStats().get(i)).queryCurrent.inc();
        }
    }
}
Example #11
Source File: SearchSlowLog.java From Elasticsearch with Apache License 2.0
void onQueryPhase(SearchContext context, long tookInNanos) {
    if (queryWarnThreshold >= 0 && tookInNanos > queryWarnThreshold) {
        queryLogger.warn("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
    } else if (queryInfoThreshold >= 0 && tookInNanos > queryInfoThreshold) {
        queryLogger.info("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
    } else if (queryDebugThreshold >= 0 && tookInNanos > queryDebugThreshold) {
        queryLogger.debug("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
    } else if (queryTraceThreshold >= 0 && tookInNanos > queryTraceThreshold) {
        queryLogger.trace("{}", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));
    }
}
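The cascade above picks the most severe enabled level whose threshold the query time exceeds; a negative threshold disables that level. A self-contained sketch of the same decision (the helper and the example thresholds are illustrative):

public class SlowLogLevelSketch {
    // Mirrors onQueryPhase: a negative threshold disables that level, and the
    // most severe matching level wins.
    static String levelFor(long tookInNanos, long warn, long info, long debug, long trace) {
        if (warn >= 0 && tookInNanos > warn) return "WARN";
        if (info >= 0 && tookInNanos > info) return "INFO";
        if (debug >= 0 && tookInNanos > debug) return "DEBUG";
        if (trace >= 0 && tookInNanos > trace) return "TRACE";
        return null; // query was not slow enough to log
    }

    public static void main(String[] args) {
        long took = 7_000_000L; // 7 ms
        // warn at 10 ms, info at 5 ms, debug at 2 ms, trace disabled
        System.out.println(levelFor(took, 10_000_000L, 5_000_000L, 2_000_000L, -1L)); // -> INFO
    }
}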
Example #12
Source File: PercentilesParser.java From Elasticsearch with Apache License 2.0
@Override
protected AggregatorFactory buildFactory(SearchContext context, String aggregationName, ValuesSourceConfig<Numeric> valuesSourceConfig,
        double[] keys, PercentilesMethod method, Double compression, Integer numberOfSignificantValueDigits, boolean keyed) {
    if (keys == null) {
        keys = DEFAULT_PERCENTS;
    }
    if (method == PercentilesMethod.TDIGEST) {
        return new TDigestPercentilesAggregator.Factory(aggregationName, valuesSourceConfig, keys, compression, keyed);
    } else if (method == PercentilesMethod.HDR) {
        return new HDRPercentilesAggregator.Factory(aggregationName, valuesSourceConfig, keys, numberOfSignificantValueDigits, keyed);
    } else {
        throw new AssertionError();
    }
}
Example #13
Source File: FetchPhase.java From Elasticsearch with Apache License 2.0
private int findRootDocumentIfNested(SearchContext context, LeafReaderContext subReaderContext, int subDocId) throws IOException {
    if (context.mapperService().hasNested()) {
        BitSet bits = context.bitsetFilterCache().getBitSetProducer(Queries.newNonNestedFilter()).getBitSet(subReaderContext);
        if (!bits.get(subDocId)) {
            return bits.nextSetBit(subDocId);
        }
    }
    return -1;
}
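Lucene stores the documents of a nested block before their root document, so a nested doc's root is the next set bit at or after its id in the "non-nested docs" bit set; if the doc is itself a root, -1 signals that no lookup was needed. A plain-Lucene sketch of that lookup (doc ids illustrative):

import org.apache.lucene.util.FixedBitSet;

public class RootDocSketch {
    public static void main(String[] args) {
        FixedBitSet rootDocs = new FixedBitSet(8);
        rootDocs.set(3); // docs 3 and 7 are root documents;
        rootDocs.set(7); // docs 0-2 and 4-6 are their nested children
        int subDocId = 1; // a nested doc in the block ending at root 3
        int root = rootDocs.get(subDocId) ? -1 : rootDocs.nextSetBit(subDocId);
        System.out.println(root); // -> 3
    }
}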
Example #14
Source File: SignificantTermsAggregatorFactory.java From Elasticsearch with Apache License 2.0
public SignificantTermsAggregatorFactory(String name, ValuesSourceConfig valueSourceConfig,
        TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude,
        String executionHint, Query filter, SignificanceHeuristic significanceHeuristic) {
    super(name, SignificantStringTerms.TYPE.name(), valueSourceConfig);
    this.bucketCountThresholds = bucketCountThresholds;
    this.includeExclude = includeExclude;
    this.executionHint = executionHint;
    this.significanceHeuristic = significanceHeuristic;
    if (!valueSourceConfig.unmapped()) {
        this.indexedFieldName = config.fieldContext().field();
        fieldType = SearchContext.current().smartNameFieldType(indexedFieldName);
    }
    this.filter = filter;
}
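Note the use of SearchContext.current(): in this codebase the active context is kept in a thread-local, so code deep in the search path can reach it without constructor plumbing. A hedged sketch of the access pattern, assuming it runs on a search thread with an active context (the field name is hypothetical):

SearchContext context = SearchContext.current();
MappedFieldType fieldType = context.smartNameFieldType("user.name"); // hypothetical field
if (fieldType == null) {
    // the field is unmapped on this shard; callers must handle that case
}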
Example #15
Source File: GeoBoundsParser.java From Elasticsearch with Apache License 2.0
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
    ValuesSourceParser<GeoPoint> vsParser = ValuesSourceParser.geoPoint(aggregationName, InternalGeoBounds.TYPE, context)
            .targetValueType(ValueType.GEOPOINT)
            .formattable(true)
            .build();
    boolean wrapLongitude = true;
    XContentParser.Token token;
    String currentFieldName = null;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (vsParser.token(currentFieldName, token, parser)) {
            continue;
        } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
            if ("wrap_longitude".equals(currentFieldName) || "wrapLongitude".equals(currentFieldName)) {
                wrapLongitude = parser.booleanValue();
            } else {
                throw new SearchParseException(context, "Unknown key for a " + token + " in aggregation [" + aggregationName
                        + "]: [" + currentFieldName + "].", parser.getTokenLocation());
            }
        } else {
            throw new SearchParseException(context, "Unknown key for a " + token + " in aggregation [" + aggregationName
                    + "]: [" + currentFieldName + "].", parser.getTokenLocation());
        }
    }
    return new GeoBoundsAggregator.Factory(aggregationName, vsParser.config(), wrapLongitude);
}
Example #16
Source File: QueryRescorer.java From Elasticsearch with Apache License 2.0
@Override
public void extractTerms(SearchContext context, RescoreSearchContext rescoreContext, Set<Term> termsSet) {
    try {
        context.searcher().createNormalizedWeight(((QueryRescoreContext) rescoreContext).query(), false).extractTerms(termsSet);
    } catch (IOException e) {
        throw new IllegalStateException("Failed to extract terms", e);
    }
}
Example #17
Source File: PercentageScore.java From Elasticsearch with Apache License 2.0
@Override
public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher, SearchContext context)
        throws IOException, QueryParsingException {
    // move to the closing bracket
    if (!parser.nextToken().equals(XContentParser.Token.END_OBJECT)) {
        throw new ElasticsearchParseException(
                "failed to parse [percentage] significance heuristic. expected an empty object, but got [{}] instead",
                parser.currentToken());
    }
    return new PercentageScore();
}
Example #18
Source File: QueryBinaryParseElement.java From Elasticsearch with Apache License 2.0
@Override
public void parse(XContentParser parser, SearchContext context) throws Exception {
    byte[] querySource = parser.binaryValue();
    try (XContentParser qSourceParser = XContentFactory.xContent(querySource).createParser(querySource)) {
        context.parsedQuery(context.queryParserService().parse(qSourceParser));
    }
}
Example #19
Source File: TrackScoresParseElement.java From Elasticsearch with Apache License 2.0
@Override
public void parse(XContentParser parser, SearchContext context) throws Exception {
    XContentParser.Token token = parser.currentToken();
    if (token.isValue()) {
        context.trackScores(parser.booleanValue());
    }
}
Example #20
Source File: BaseAggregationBuilder.java From elasticsearch-linear-regression with Apache License 2.0
@Override
protected final MultiValuesSourceAggregatorFactory<ValuesSource.Numeric, ?> innerBuild(
        final SearchContext context,
        final List<NamedValuesSourceConfigSpec<Numeric>> configs,
        final AggregatorFactory<?> parent,
        final AggregatorFactories.Builder subFactoriesBuilder) throws IOException {
    return innerInnerBuild(context, configs, this.multiValueMode, parent, subFactoriesBuilder);
}
Example #21
Source File: SortParseElement.java From Elasticsearch with Apache License 2.0
@Override
public void parse(XContentParser parser, SearchContext context) throws Exception {
    XContentParser.Token token = parser.currentToken();
    List<SortField> sortFields = new ArrayList<>(2);
    if (token == XContentParser.Token.START_ARRAY) {
        while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
            if (token == XContentParser.Token.START_OBJECT) {
                addCompoundSortField(parser, context, sortFields);
            } else if (token == XContentParser.Token.VALUE_STRING) {
                addSortField(context, sortFields, parser.text(), false, null, null, null, null);
            } else {
                throw new IllegalArgumentException("malformed sort format, within the sort array, an object, or an actual string are allowed");
            }
        }
    } else if (token == XContentParser.Token.VALUE_STRING) {
        addSortField(context, sortFields, parser.text(), false, null, null, null, null);
    } else if (token == XContentParser.Token.START_OBJECT) {
        addCompoundSortField(parser, context, sortFields);
    } else {
        throw new IllegalArgumentException("malformed sort format, either start with array, object, or an actual string");
    }
    if (!sortFields.isEmpty()) {
        // optimize if we just sort on score non reversed, we don't really need sorting
        boolean sort;
        if (sortFields.size() > 1) {
            sort = true;
        } else {
            SortField sortField = sortFields.get(0);
            if (sortField.getType() == SortField.Type.SCORE && !sortField.getReverse()) {
                sort = false;
            } else {
                sort = true;
            }
        }
        if (sort) {
            context.sort(new Sort(sortFields.toArray(new SortField[sortFields.size()])));
        }
    }
}
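The closing optimization is worth isolating: a lone, non-reversed _score sort is exactly Lucene's default order, so no explicit Sort needs to be installed. A self-contained sketch of that decision (the helper name is illustrative):

import java.util.Arrays;
import java.util.List;
import org.apache.lucene.search.SortField;

public class NeedsSortSketch {
    // Mirrors the logic above: only skip sorting for a single, non-reversed _score sort.
    static boolean needsExplicitSort(List<SortField> sortFields) {
        if (sortFields.size() > 1) {
            return true;
        }
        SortField only = sortFields.get(0);
        return only.getType() != SortField.Type.SCORE || only.getReverse();
    }

    public static void main(String[] args) {
        // A plain relevance sort is already Lucene's default order.
        System.out.println(needsExplicitSort(Arrays.asList(SortField.FIELD_SCORE))); // -> false
    }
}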
Example #22
Source File: ParentQuery.java From Elasticsearch with Apache License 2.0
@Override
public Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
    SearchContext sc = SearchContext.current();
    ChildWeight childWeight;
    boolean releaseCollectorResource = true;
    ParentOrdAndScoreCollector collector = null;
    IndexParentChildFieldData globalIfd = parentChildIndexFieldData.loadGlobal((DirectoryReader) searcher.getIndexReader());
    if (globalIfd == null) {
        // No docs of the specified type exist on this shard
        return new BooleanQuery.Builder().build().createWeight(searcher, needsScores);
    }

    try {
        collector = new ParentOrdAndScoreCollector(sc, globalIfd, parentType);
        searcher.search(parentQuery, collector);
        if (collector.parentCount() == 0) {
            return new BooleanQuery.Builder().build().createWeight(searcher, needsScores);
        }
        childWeight = new ChildWeight(this, parentQuery.createWeight(searcher, needsScores), childrenFilter, collector, globalIfd);
        releaseCollectorResource = false;
    } finally {
        if (releaseCollectorResource) {
            // either if we run into an exception or if we return early
            Releasables.close(collector);
        }
    }
    sc.addReleasable(collector, Lifetime.COLLECTION);
    return childWeight;
}
Example #23
Source File: InnerHitsParseElement.java From Elasticsearch with Apache License 2.0
private InnerHitsContext.ParentChildInnerHits parseParentChild(XContentParser parser, QueryParseContext parseContext,
        SearchContext searchContext, String type) throws Exception {
    ParseResult parseResult = parseSubSearchContext(searchContext, parseContext, parser);
    DocumentMapper documentMapper = searchContext.mapperService().documentMapper(type);
    if (documentMapper == null) {
        throw new IllegalArgumentException("type [" + type + "] doesn't exist");
    }
    return new InnerHitsContext.ParentChildInnerHits(parseResult.context(), parseResult.query(), parseResult.childInnerHits(),
            parseContext.mapperService(), documentMapper);
}
Example #24
Source File: DecayFunctionParser.java From Elasticsearch with Apache License 2.0
private AbstractDistanceScoreFunction parseDateVariable(String fieldName, XContentParser parser, QueryParseContext parseContext,
        DateFieldMapper.DateFieldType dateFieldType, MultiValueMode mode) throws IOException {
    XContentParser.Token token;
    String parameterName = null;
    String scaleString = null;
    String originString = null;
    String offsetString = "0d";
    double decay = 0.5;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            parameterName = parser.currentName();
        } else if (parameterName.equals(DecayFunctionBuilder.SCALE)) {
            scaleString = parser.text();
        } else if (parameterName.equals(DecayFunctionBuilder.ORIGIN)) {
            originString = parser.text();
        } else if (parameterName.equals(DecayFunctionBuilder.DECAY)) {
            decay = parser.doubleValue();
        } else if (parameterName.equals(DecayFunctionBuilder.OFFSET)) {
            offsetString = parser.text();
        } else {
            throw new ElasticsearchParseException("parameter [{}] not supported!", parameterName);
        }
    }
    long origin = SearchContext.current().nowInMillis();
    if (originString != null) {
        origin = dateFieldType.parseToMilliseconds(originString, false, null, null);
    }

    if (scaleString == null) {
        throw new ElasticsearchParseException("[{}] must be set for date fields.", DecayFunctionBuilder.SCALE);
    }
    TimeValue val = TimeValue.parseTimeValue(scaleString, TimeValue.timeValueHours(24), getClass().getSimpleName() + ".scale");
    double scale = val.getMillis();
    val = TimeValue.parseTimeValue(offsetString, TimeValue.timeValueHours(24), getClass().getSimpleName() + ".offset");
    double offset = val.getMillis();
    IndexNumericFieldData numericFieldData = parseContext.getForField(dateFieldType);
    return new NumericFieldDataScoreFunction(origin, scale, decay, offset, getDecayFunction(), numericFieldData, mode);
}
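Scale and offset arrive as duration strings and are converted to milliseconds, while origin defaults to the search's "now" when absent. A small sketch of the duration conversion using the same TimeValue API as above (inputs illustrative):

import org.elasticsearch.common.unit.TimeValue;

public class DecayScaleSketch {
    public static void main(String[] args) {
        TimeValue scale = TimeValue.parseTimeValue("10d", TimeValue.timeValueHours(24), "scale");
        TimeValue offset = TimeValue.parseTimeValue("0d", TimeValue.timeValueHours(24), "offset");
        System.out.println(scale.getMillis());  // -> 864000000 (10 days)
        System.out.println(offset.getMillis()); // -> 0
    }
}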
Example #25
Source File: DfsPhase.java From Elasticsearch with Apache License 2.0
@Override
public void execute(SearchContext context) {
    final ObjectHashSet<Term> termsSet = new ObjectHashSet<>();
    try {
        context.searcher().createNormalizedWeight(context.query(), true).extractTerms(new DelegateSet(termsSet));
        for (RescoreSearchContext rescoreContext : context.rescore()) {
            rescoreContext.rescorer().extractTerms(context, rescoreContext, new DelegateSet(termsSet));
        }

        Term[] terms = termsSet.toArray(Term.class);
        TermStatistics[] termStatistics = new TermStatistics[terms.length];
        IndexReaderContext indexReaderContext = context.searcher().getTopReaderContext();
        for (int i = 0; i < terms.length; i++) {
            // LUCENE 4 UPGRADE: cache TermContext?
            TermContext termContext = TermContext.build(indexReaderContext, terms[i]);
            termStatistics[i] = context.searcher().termStatistics(terms[i], termContext);
        }

        ObjectObjectHashMap<String, CollectionStatistics> fieldStatistics = HppcMaps.newNoNullKeysMap();
        for (Term term : terms) {
            assert term.field() != null : "field is null";
            if (!fieldStatistics.containsKey(term.field())) {
                final CollectionStatistics collectionStatistics = context.searcher().collectionStatistics(term.field());
                fieldStatistics.put(term.field(), collectionStatistics);
            }
        }

        context.dfsResult().termsStatistics(terms, termStatistics)
                .fieldStatistics(fieldStatistics)
                .maxDoc(context.searcher().getIndexReader().maxDoc());
    } catch (Exception e) {
        throw new DfsPhaseExecutionException(context, "Exception during dfs phase", e);
    } finally {
        termsSet.clear(); // don't hold on to terms
    }
}
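Each collected entry pairs a term with its shard-local frequencies; the coordinating node later merges these so all shards score with the same document frequencies. A plain-Lucene sketch of what one entry holds (the numbers are illustrative):

import org.apache.lucene.index.Term;
import org.apache.lucene.search.TermStatistics;
import org.apache.lucene.util.BytesRef;

public class TermStatsSketch {
    public static void main(String[] args) {
        Term term = new Term("body", "quick");
        // docFreq: docs on this shard containing the term; totalTermFreq: total occurrences.
        TermStatistics stats = new TermStatistics(new BytesRef("quick"), 42L, 128L);
        System.out.println(term.field() + " df=" + stats.docFreq() + " ttf=" + stats.totalTermFreq());
    }
}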
Example #26
Source File: ChildrenQuery.java From Elasticsearch with Apache License 2.0
protected ParentCollector(IndexParentChildFieldData globalIfd, SearchContext searchContext, String parentType) {
    this.globalIfd = globalIfd;
    this.searchContext = searchContext;
    this.bigArrays = searchContext.bigArrays();
    this.parentIdxs = new LongHash(512, bigArrays);
    this.parentType = parentType;
}
Example #27
Source File: ChildrenConstantScoreQuery.java From Elasticsearch with Apache License 2.0
@Override
public Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
    SearchContext sc = SearchContext.current();
    IndexParentChildFieldData globalIfd = parentChildIndexFieldData.loadGlobal((DirectoryReader) searcher.getIndexReader());

    final long valueCount;
    List<LeafReaderContext> leaves = searcher.getIndexReader().leaves();
    if (globalIfd == null || leaves.isEmpty()) {
        return new BooleanQuery.Builder().build().createWeight(searcher, needsScores);
    } else {
        AtomicParentChildFieldData afd = globalIfd.load(leaves.get(0));
        SortedDocValues globalValues = afd.getOrdinalsValues(parentType);
        valueCount = globalValues.getValueCount();
    }

    if (valueCount == 0) {
        return new BooleanQuery.Builder().build().createWeight(searcher, needsScores);
    }

    ParentOrdCollector collector = new ParentOrdCollector(globalIfd, valueCount, parentType);
    searcher.search(childQuery, collector);

    final long remaining = collector.foundParents();
    if (remaining == 0) {
        return new BooleanQuery.Builder().build().createWeight(searcher, needsScores);
    }

    Filter shortCircuitFilter = null;
    if (remaining <= shortCircuitParentDocSet) {
        shortCircuitFilter = ParentIdsFilter.createShortCircuitFilter(
                nonNestedDocsFilter, sc, parentType, collector.values, collector.parentOrds, remaining
        );
    }
    return new ParentWeight(this, parentFilter, globalIfd, shortCircuitFilter, collector, remaining);
}
Example #28
Source File: TerminateAfterParseElement.java From Elasticsearch with Apache License 2.0
@Override
public void parse(XContentParser parser, SearchContext context) throws Exception {
    XContentParser.Token token = parser.currentToken();
    if (token == XContentParser.Token.VALUE_NUMBER) {
        int terminateAfterCount = parser.intValue();
        if (terminateAfterCount <= 0) {
            throw new IllegalArgumentException("terminateAfter must be > 0");
        }
        context.terminateAfter(terminateAfterCount);
    }
}
Example #29
Source File: InnerHitsParseElement.java From Elasticsearch with Apache License 2.0
private InnerHitsContext.NestedInnerHits parseNested(XContentParser parser, QueryParseContext parseContext,
        SearchContext searchContext, String nestedPath) throws Exception {
    ObjectMapper objectMapper = searchContext.getObjectMapper(nestedPath);
    if (objectMapper == null) {
        throw new IllegalArgumentException("path [" + nestedPath + "] doesn't exist");
    }
    if (objectMapper.nested().isNested() == false) {
        throw new IllegalArgumentException("path [" + nestedPath + "] isn't nested");
    }
    ObjectMapper parentObjectMapper = parseContext.nestedScope().nextLevel(objectMapper);
    ParseResult parseResult = parseSubSearchContext(searchContext, parseContext, parser);
    parseContext.nestedScope().previousLevel();
    return new InnerHitsContext.NestedInnerHits(parseResult.context(), parseResult.query(), parseResult.childInnerHits(),
            parentObjectMapper, objectMapper);
}
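The method brackets the sub-context parse between nextLevel and previousLevel on the nested scope. A hedged sketch of the same bracket written defensively; the try/finally is a hardening suggestion, not how the project writes it:

// Enter the nested level, parse relative to it, then always restore the scope.
ObjectMapper parentObjectMapper = parseContext.nestedScope().nextLevel(objectMapper);
try {
    // ... parse the inner-hits sub search context here ...
} finally {
    parseContext.nestedScope().previousLevel();
}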
Example #30
Source File: QueryRescorer.java From Elasticsearch with Apache License 2.0
@Override
public TopDocs rescore(TopDocs topDocs, SearchContext context, RescoreSearchContext rescoreContext) throws IOException {
    assert rescoreContext != null;
    if (topDocs == null || topDocs.totalHits == 0 || topDocs.scoreDocs.length == 0) {
        return topDocs;
    }

    final QueryRescoreContext rescore = (QueryRescoreContext) rescoreContext;

    org.apache.lucene.search.Rescorer rescorer = new org.apache.lucene.search.QueryRescorer(rescore.query()) {

        @Override
        protected float combine(float firstPassScore, boolean secondPassMatches, float secondPassScore) {
            if (secondPassMatches) {
                return rescore.scoreMode.combine(firstPassScore * rescore.queryWeight(),
                        secondPassScore * rescore.rescoreQueryWeight());
            }
            // TODO: shouldn't this be up to the ScoreMode? I.e., we should just invoke ScoreMode.combine,
            // passing 0.0f for the secondary score?
            return firstPassScore * rescore.queryWeight();
        }
    };

    // First take top slice of incoming docs, to be rescored:
    TopDocs topNFirstPass = topN(topDocs, rescoreContext.window());

    // Rescore them:
    TopDocs rescored = rescorer.rescore(context.searcher(), topNFirstPass, rescoreContext.window());

    // Splice back to non-topN hits and resort all of them:
    return combine(topDocs, rescored, (QueryRescoreContext) rescoreContext);
}
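The combine callback is plain arithmetic; a self-contained sketch of its two cases for the "total" score mode (weights and scores illustrative):

public class RescoreCombineSketch {
    public static void main(String[] args) {
        float firstPassScore = 2.0f, secondPassScore = 3.0f;
        float queryWeight = 1.0f, rescoreQueryWeight = 2.0f;

        // Second pass matched: both weighted scores are combined ("total" adds them).
        float matched = firstPassScore * queryWeight + secondPassScore * rescoreQueryWeight;
        // Second pass did not match: only the weighted first-pass score survives.
        float unmatched = firstPassScore * queryWeight;

        System.out.println(matched);   // -> 8.0
        System.out.println(unmatched); // -> 2.0
    }
}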