Java Code Examples for org.apache.lucene.search.Weight#scorer()
The following examples show how to use org.apache.lucene.search.Weight#scorer().
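All of the examples below follow the same core pattern: rewrite the query, build a Weight with IndexSearcher#createWeight, then ask the Weight for a per-segment Scorer via Weight#scorer(LeafReaderContext) and walk the Scorer's DocIdSetIterator. Here is a minimal sketch of that pattern; the countMatches helper is illustrative and not taken from any of the projects below, and it assumes a Lucene 8.x-style API where scorer() may return null for segments with no possible matches:

import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

public final class WeightScorerSketch {

  // Hypothetical helper: counts matching docs by driving Weight#scorer() directly.
  static int countMatches(IndexSearcher searcher, Query query) throws IOException {
    // Scores are not needed, so ask for COMPLETE_NO_SCORES.
    Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f);
    int count = 0;
    for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
      Scorer scorer = weight.scorer(context); // null when no doc in this segment can match
      if (scorer == null) {
        continue;
      }
      DocIdSetIterator it = scorer.iterator();
      for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
        count++; // may include deleted docs unless liveDocs are checked (see Example 6)
      }
    }
    return count;
  }
}

Note that the raw iterator also visits deleted documents; production code such as Example 6 filters them against the segment's live docs.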
Example 1
Source File: SecureRealTimeGetComponent.java From incubator-sentry with Apache License 2.0
/**
 * @param doc SolrDocument to check
 * @param idField field where the id is stored
 * @param fieldType type of id field
 * @param filterQuery Query to filter by
 * @param searcher SolrIndexSearcher on which to apply the filter query
 * @return the internal docid, or -1 if doc is not found or doesn't match filter
 */
private static int getFilteredInternalDocId(SolrDocument doc, SchemaField idField, FieldType fieldType,
    Query filterQuery, SolrIndexSearcher searcher) throws IOException {
  int docid = -1;
  Field f = (Field) doc.getFieldValue(idField.getName());
  String idStr = f.stringValue();
  BytesRef idBytes = new BytesRef();
  fieldType.readableToIndexed(idStr, idBytes);
  // get the internal document id
  long segAndId = searcher.lookupId(idBytes);

  // if docid is valid, run it through the filter
  if (segAndId >= 0) {
    int segid = (int) segAndId;
    AtomicReaderContext ctx = searcher.getTopReaderContext().leaves().get((int) (segAndId >> 32));
    docid = segid + ctx.docBase;
    Weight weight = filterQuery.createWeight(searcher);
    Scorer scorer = weight.scorer(ctx, null);
    if (scorer == null || segid != scorer.advance(segid)) {
      // filter doesn't match.
      docid = -1;
    }
  }
  return docid;
}
Example 2
Source File: MatchedQueriesFetchSubPhase.java From Elasticsearch with Apache License 2.0
private void addMatchedQueries(HitContext hitContext, ImmutableMap<String, Query> namedQueries,
    List<String> matchedQueries) throws IOException {
  for (Map.Entry<String, Query> entry : namedQueries.entrySet()) {
    String name = entry.getKey();
    Query filter = entry.getValue();

    final Weight weight = hitContext.topLevelSearcher().createNormalizedWeight(filter, false);
    final Scorer scorer = weight.scorer(hitContext.readerContext());
    if (scorer == null) {
      continue;
    }
    final TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
    if (twoPhase == null) {
      if (scorer.iterator().advance(hitContext.docId()) == hitContext.docId()) {
        matchedQueries.add(name);
      }
    } else {
      if (twoPhase.approximation().advance(hitContext.docId()) == hitContext.docId() && twoPhase.matches()) {
        matchedQueries.add(name);
      }
    }
  }
}
Example 3
Source File: TestLTRScoringQuery.java From lucene-solr with Apache License 2.0
private LTRScoringQuery.ModelWeight performQuery(TopDocs hits, IndexSearcher searcher,
    int docid, LTRScoringQuery model) throws IOException, ModelException {
  final List<LeafReaderContext> leafContexts = searcher.getTopReaderContext().leaves();
  final int n = ReaderUtil.subIndex(hits.scoreDocs[0].doc, leafContexts);
  final LeafReaderContext context = leafContexts.get(n);
  final int deBasedDoc = hits.scoreDocs[0].doc - context.docBase;

  final Weight weight = searcher.createWeight(searcher.rewrite(model), ScoreMode.COMPLETE, 1);
  final Scorer scorer = weight.scorer(context);

  // rerank using the field final-score
  scorer.iterator().advance(deBasedDoc);
  scorer.score();

  // assertEquals(42.0f, score, 0.0001);
  // assertTrue(weight instanceof AssertingWeight);
  // (AssertingIndexSearcher)
  assertTrue(weight instanceof LTRScoringQuery.ModelWeight);
  final LTRScoringQuery.ModelWeight modelWeight = (LTRScoringQuery.ModelWeight) weight;
  return modelWeight;
}
Example 4
Source File: CompositeVerifyQuery.java From lucene-solr with Apache License 2.0
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  final Weight indexQueryWeight = indexQuery.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost); // scores aren't needed

  return new ConstantScoreWeight(this, boost) {

    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {

      final Scorer indexQueryScorer = indexQueryWeight.scorer(context);
      if (indexQueryScorer == null) {
        return null;
      }

      final TwoPhaseIterator predFuncValues = predicateValueSource.iterator(context, indexQueryScorer.iterator());
      return new ConstantScoreScorer(this, score(), scoreMode, predFuncValues);
    }

    @Override
    public boolean isCacheable(LeafReaderContext ctx) {
      return predicateValueSource.isCacheable(ctx);
    }
  };
}
Example 5
Source File: TestSelectiveWeightCreation.java From lucene-solr with Apache License 2.0
private LTRScoringQuery.ModelWeight performQuery(TopDocs hits, IndexSearcher searcher,
    int docid, LTRScoringQuery model) throws IOException, ModelException {
  final List<LeafReaderContext> leafContexts = searcher.getTopReaderContext().leaves();
  final int n = ReaderUtil.subIndex(hits.scoreDocs[0].doc, leafContexts);
  final LeafReaderContext context = leafContexts.get(n);
  final int deBasedDoc = hits.scoreDocs[0].doc - context.docBase;

  final Weight weight = searcher.createWeight(searcher.rewrite(model), ScoreMode.COMPLETE, 1);
  final Scorer scorer = weight.scorer(context);

  // rerank using the field final-score
  scorer.iterator().advance(deBasedDoc);
  scorer.score();

  assertTrue(weight instanceof LTRScoringQuery.ModelWeight);
  final LTRScoringQuery.ModelWeight modelWeight = (LTRScoringQuery.ModelWeight) weight;
  return modelWeight;
}
Example 6
Source File: Lucene.java From crate with Apache License 2.0
/**
 * Check whether one or more documents match the provided query.
 */
public static boolean exists(IndexSearcher searcher, Query query) throws IOException {
  final Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f);
  // the scorer API should be more efficient at stopping after the first
  // match than the bulk scorer API
  for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
    final Scorer scorer = weight.scorer(context);
    if (scorer == null) {
      continue;
    }
    final Bits liveDocs = context.reader().getLiveDocs();
    final DocIdSetIterator iterator = scorer.iterator();
    for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
      if (liveDocs == null || liveDocs.get(doc)) {
        return true;
      }
    }
  }
  return false;
}
Example 7
Source File: TestBlockJoinValidation.java From lucene-solr with Apache License 2.0
public void testAdvanceValidationForToChildBjq() throws Exception {
  Query parentQuery = new MatchAllDocsQuery();
  ToChildBlockJoinQuery blockJoinQuery = new ToChildBlockJoinQuery(parentQuery, parentsFilter);

  final LeafReaderContext context = indexSearcher.getIndexReader().leaves().get(0);
  Weight weight = indexSearcher.createWeight(indexSearcher.rewrite(blockJoinQuery),
      org.apache.lucene.search.ScoreMode.COMPLETE, 1);
  Scorer scorer = weight.scorer(context);
  final Bits parentDocs = parentsFilter.getBitSet(context);

  int target;
  do {
    // make the parent scorer advance to a doc ID which is not a parent
    target = TestUtil.nextInt(random(), 0, context.reader().maxDoc() - 2);
  } while (parentDocs.get(target + 1));

  final int illegalTarget = target;
  IllegalStateException expected = expectThrows(IllegalStateException.class, () -> {
    scorer.iterator().advance(illegalTarget);
  });
  assertTrue(expected.getMessage() != null && expected.getMessage().contains(ToChildBlockJoinQuery.INVALID_QUERY_MESSAGE));
}
Example 8
Source File: DocumentMapper.java From Elasticsearch with Apache License 2.0
/**
 * Returns the best nested {@link ObjectMapper} instance that is in the scope of the specified nested docId.
 */
public ObjectMapper findNestedObjectMapper(int nestedDocId, SearchContext sc, LeafReaderContext context) throws IOException {
  ObjectMapper nestedObjectMapper = null;
  for (ObjectMapper objectMapper : objectMappers().values()) {
    if (!objectMapper.nested().isNested()) {
      continue;
    }

    Query filter = objectMapper.nestedTypeFilter();
    if (filter == null) {
      continue;
    }
    // We can pass down 'null' as acceptedDocs, because nestedDocId is a doc to be fetched and
    // therefore is guaranteed to be a live doc.
    final Weight nestedWeight = filter.createWeight(sc.searcher(), false);
    Scorer scorer = nestedWeight.scorer(context);
    if (scorer == null) {
      continue;
    }

    if (scorer.iterator().advance(nestedDocId) == nestedDocId) {
      if (nestedObjectMapper == null) {
        nestedObjectMapper = objectMapper;
      } else {
        if (nestedObjectMapper.fullPath().length() < objectMapper.fullPath().length()) {
          nestedObjectMapper = objectMapper;
        }
      }
    }
  }
  return nestedObjectMapper;
}
Example 9
Source File: DocValuesAggregates.java From crate with Apache License 2.0
@SuppressWarnings({"unchecked", "rawtypes"}) private static Iterable<Row> getRow(AtomicReference<Throwable> killed, Searcher searcher, Query query, List<DocValueAggregator> aggregators) throws IOException { IndexSearcher indexSearcher = searcher.searcher(); Weight weight = indexSearcher.createWeight(indexSearcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f); List<LeafReaderContext> leaves = indexSearcher.getTopReaderContext().leaves(); Object[] cells = new Object[aggregators.size()]; for (int i = 0; i < aggregators.size(); i++) { cells[i] = aggregators.get(i).initialState(); } for (var leaf : leaves) { Scorer scorer = weight.scorer(leaf); if (scorer == null) { continue; } for (int i = 0; i < aggregators.size(); i++) { aggregators.get(i).loadDocValues(leaf.reader()); } DocIdSetIterator docs = scorer.iterator(); Bits liveDocs = leaf.reader().getLiveDocs(); for (int doc = docs.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docs.nextDoc()) { if (liveDocs != null && !liveDocs.get(doc)) { continue; } Throwable killCause = killed.get(); if (killCause != null) { Exceptions.rethrowUnchecked(killCause); } for (int i = 0; i < aggregators.size(); i++) { aggregators.get(i).apply(cells[i], doc); } } } for (int i = 0; i < aggregators.size(); i++) { cells[i] = aggregators.get(i).partialResult(cells[i]); } return List.of(new RowN(cells)); }
Example 10
Source File: TestNearSpansOrdered.java From lucene-solr with Apache License 2.0
/**
 * not a direct test of NearSpans, but a demonstration of how/when
 * this causes problems
 */
public void testSpanNearScorerSkipTo1() throws Exception {
  SpanNearQuery q = makeQuery();
  Weight w = searcher.createWeight(searcher.rewrite(q), ScoreMode.COMPLETE, 1);
  IndexReaderContext topReaderContext = searcher.getTopReaderContext();
  LeafReaderContext leave = topReaderContext.leaves().get(0);
  Scorer s = w.scorer(leave);
  assertEquals(1, s.iterator().advance(1));
}
Example 11
Source File: TestRangeFacetCounts.java From lucene-solr with Apache License 2.0
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  final Weight in = this.in.createWeight(searcher, scoreMode, boost);
  return new FilterWeight(in) {
    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      used.set(true);
      return in.scorer(context);
    }
  };
}
Example 12
Source File: DoubleRange.java From lucene-solr with Apache License 2.0
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  final Weight fastMatchWeight = fastMatchQuery == null
      ? null
      : searcher.createWeight(fastMatchQuery, ScoreMode.COMPLETE_NO_SCORES, 1f);

  return new ConstantScoreWeight(this, boost) {
    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      final int maxDoc = context.reader().maxDoc();
      final DocIdSetIterator approximation;
      if (fastMatchWeight == null) {
        approximation = DocIdSetIterator.all(maxDoc);
      } else {
        Scorer s = fastMatchWeight.scorer(context);
        if (s == null) {
          return null;
        }
        approximation = s.iterator();
      }
      final DoubleValues values = valueSource.getValues(context, null);
      final TwoPhaseIterator twoPhase = new TwoPhaseIterator(approximation) {
        @Override
        public boolean matches() throws IOException {
          return values.advanceExact(approximation.docID()) && range.accept(values.doubleValue());
        }

        @Override
        public float matchCost() {
          return 100; // TODO: use cost of range.accept()
        }
      };
      return new ConstantScoreScorer(this, score(), scoreMode, twoPhase);
    }

    @Override
    public boolean isCacheable(LeafReaderContext ctx) {
      return valueSource.isCacheable(ctx);
    }
  };
}
Example 13
Source File: LoggingFetchSubPhase.java From elasticsearch-learning-to-rank with Apache License 2.0
void doLog(Query query, List<HitLogConsumer> loggers, IndexSearcher searcher, SearchHit[] hits) throws IOException {
  // Reorder hits by id so we can scan all the docs belonging to the same
  // segment by reusing the same scorer.
  SearchHit[] reordered = new SearchHit[hits.length];
  System.arraycopy(hits, 0, reordered, 0, hits.length);
  Arrays.sort(reordered, Comparator.comparingInt(SearchHit::docId));

  int hitUpto = 0;
  int readerUpto = -1;
  int endDoc = 0;
  int docBase = 0;
  Scorer scorer = null;
  Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE, 1F);
  // Loop logic borrowed from lucene QueryRescorer
  while (hitUpto < reordered.length) {
    SearchHit hit = reordered[hitUpto];
    int docID = hit.docId();
    loggers.forEach((l) -> l.nextDoc(hit));
    LeafReaderContext readerContext = null;
    while (docID >= endDoc) {
      readerUpto++;
      readerContext = searcher.getTopReaderContext().leaves().get(readerUpto);
      endDoc = readerContext.docBase + readerContext.reader().maxDoc();
    }

    if (readerContext != null) {
      // We advanced to another segment:
      docBase = readerContext.docBase;
      scorer = weight.scorer(readerContext);
    }

    if (scorer != null) {
      int targetDoc = docID - docBase;
      int actualDoc = scorer.docID();
      if (actualDoc < targetDoc) {
        actualDoc = scorer.iterator().advance(targetDoc);
      }
      if (actualDoc == targetDoc) {
        // Scoring will trigger log collection
        scorer.score();
      }
    }
    hitUpto++;
  }
}
Example 14
Source File: SoftDeletesRetentionMergePolicy.java From lucene-solr with Apache License 2.0
private static Scorer getScorer(Query query, CodecReader reader) throws IOException {
  IndexSearcher s = new IndexSearcher(reader);
  s.setQueryCache(null); // don't pollute the query cache with this one-off query
  Weight weight = s.createWeight(s.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1.0f);
  return weight.scorer(reader.getContext()); // a CodecReader is a single leaf, so one scorer covers it
}
Example 15
Source File: LongRangeFacetCounts.java From lucene-solr with Apache License 2.0
private void count(LongValuesSource valueSource, List<MatchingDocs> matchingDocs) throws IOException {
  LongRange[] ranges = (LongRange[]) this.ranges;
  LongRangeCounter counter = new LongRangeCounter(ranges);

  int missingCount = 0;
  for (MatchingDocs hits : matchingDocs) {
    LongValues fv = valueSource.getValues(hits.context, null);
    totCount += hits.totalHits;

    final DocIdSetIterator fastMatchDocs;
    if (fastMatchQuery != null) {
      final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(hits.context);
      final IndexSearcher searcher = new IndexSearcher(topLevelContext);
      searcher.setQueryCache(null);
      final Weight fastMatchWeight = searcher.createWeight(searcher.rewrite(fastMatchQuery), ScoreMode.COMPLETE_NO_SCORES, 1);
      Scorer s = fastMatchWeight.scorer(hits.context);
      if (s == null) {
        continue;
      }
      fastMatchDocs = s.iterator();
    } else {
      fastMatchDocs = null;
    }

    DocIdSetIterator docs = hits.bits.iterator();
    for (int doc = docs.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; ) {
      if (fastMatchDocs != null) {
        int fastMatchDoc = fastMatchDocs.docID();
        if (fastMatchDoc < doc) {
          fastMatchDoc = fastMatchDocs.advance(doc);
        }
        if (doc != fastMatchDoc) {
          doc = docs.advance(fastMatchDoc);
          continue;
        }
      }
      // Skip missing docs:
      if (fv.advanceExact(doc)) {
        counter.add(fv.longValue());
      } else {
        missingCount++;
      }
      doc = docs.nextDoc();
    }
  }

  int x = counter.fillCounts(counts);
  missingCount += x;
  //System.out.println("totCount " + totCount + " x " + x + " missingCount " + missingCount);
  totCount -= missingCount;
}
Example 16
Source File: LongRange.java From lucene-solr with Apache License 2.0
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  final Weight fastMatchWeight = fastMatchQuery == null
      ? null
      : searcher.createWeight(fastMatchQuery, ScoreMode.COMPLETE_NO_SCORES, 1f);

  return new ConstantScoreWeight(this, boost) {
    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      final int maxDoc = context.reader().maxDoc();
      final DocIdSetIterator approximation;
      if (fastMatchWeight == null) {
        approximation = DocIdSetIterator.all(maxDoc);
      } else {
        Scorer s = fastMatchWeight.scorer(context);
        if (s == null) {
          return null;
        }
        approximation = s.iterator();
      }
      final LongValues values = valueSource.getValues(context, null);
      final TwoPhaseIterator twoPhase = new TwoPhaseIterator(approximation) {
        @Override
        public boolean matches() throws IOException {
          return values.advanceExact(approximation.docID()) && range.accept(values.longValue());
        }

        @Override
        public float matchCost() {
          return 100; // TODO: use cost of range.accept()
        }
      };
      return new ConstantScoreScorer(this, score(), scoreMode, twoPhase);
    }

    @Override
    public boolean isCacheable(LeafReaderContext ctx) {
      return valueSource.isCacheable(ctx);
    }
  };
}
Example 17
Source File: DoubleRangeFacetCounts.java From lucene-solr with Apache License 2.0
private void count(DoubleValuesSource valueSource, List<MatchingDocs> matchingDocs) throws IOException {
  DoubleRange[] ranges = (DoubleRange[]) this.ranges;

  LongRange[] longRanges = new LongRange[ranges.length];
  for (int i = 0; i < ranges.length; i++) {
    DoubleRange range = ranges[i];
    longRanges[i] = new LongRange(range.label,
                                  NumericUtils.doubleToSortableLong(range.min), true,
                                  NumericUtils.doubleToSortableLong(range.max), true);
  }

  LongRangeCounter counter = new LongRangeCounter(longRanges);

  int missingCount = 0;
  for (MatchingDocs hits : matchingDocs) {
    DoubleValues fv = valueSource.getValues(hits.context, null);
    totCount += hits.totalHits;

    final DocIdSetIterator fastMatchDocs;
    if (fastMatchQuery != null) {
      final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(hits.context);
      final IndexSearcher searcher = new IndexSearcher(topLevelContext);
      searcher.setQueryCache(null);
      final Weight fastMatchWeight = searcher.createWeight(searcher.rewrite(fastMatchQuery), ScoreMode.COMPLETE_NO_SCORES, 1);
      Scorer s = fastMatchWeight.scorer(hits.context);
      if (s == null) {
        continue;
      }
      fastMatchDocs = s.iterator();
    } else {
      fastMatchDocs = null;
    }

    DocIdSetIterator docs = hits.bits.iterator();
    for (int doc = docs.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; ) {
      if (fastMatchDocs != null) {
        int fastMatchDoc = fastMatchDocs.docID();
        if (fastMatchDoc < doc) {
          fastMatchDoc = fastMatchDocs.advance(doc);
        }
        if (doc != fastMatchDoc) {
          doc = docs.advance(fastMatchDoc);
          continue;
        }
      }
      // Skip missing docs:
      if (fv.advanceExact(doc)) {
        counter.add(NumericUtils.doubleToSortableLong(fv.doubleValue()));
      } else {
        missingCount++;
      }
      doc = docs.nextDoc();
    }
  }

  missingCount += counter.fillCounts(counts);
  totCount -= missingCount;
}
Example 18
Source File: FilterableTermsEnum.java From Elasticsearch with Apache License 2.0
public FilterableTermsEnum(IndexReader reader, String field, int docsEnumFlag, @Nullable Query filter) throws IOException {
  if ((docsEnumFlag != PostingsEnum.FREQS) && (docsEnumFlag != PostingsEnum.NONE)) {
    throw new IllegalArgumentException("invalid docsEnumFlag of " + docsEnumFlag);
  }
  this.docsEnumFlag = docsEnumFlag;
  if (filter == null) {
    // Important - need to use the doc count that includes deleted docs
    // or we have this issue: https://github.com/elasticsearch/elasticsearch/issues/7951
    numDocs = reader.maxDoc();
  }
  List<LeafReaderContext> leaves = reader.leaves();
  List<Holder> enums = new ArrayList<>(leaves.size());
  final Weight weight;
  if (filter == null) {
    weight = null;
  } else {
    final IndexSearcher searcher = new IndexSearcher(reader);
    searcher.setQueryCache(null);
    weight = searcher.createNormalizedWeight(filter, false);
  }
  for (LeafReaderContext context : leaves) {
    Terms terms = context.reader().terms(field);
    if (terms == null) {
      continue;
    }
    TermsEnum termsEnum = terms.iterator();
    if (termsEnum == null) {
      continue;
    }
    BitSet bits = null;
    if (weight != null) {
      Scorer scorer = weight.scorer(context);
      if (scorer == null) {
        // fully filtered, none matching, no need to iterate on this
        continue;
      }
      DocIdSetIterator docs = scorer.iterator();

      // we want to force apply deleted docs
      final Bits liveDocs = context.reader().getLiveDocs();
      if (liveDocs != null) {
        docs = new FilteredDocIdSetIterator(docs) {
          @Override
          protected boolean match(int doc) {
            return liveDocs.get(doc);
          }
        };
      }

      BitDocIdSet.Builder builder = new BitDocIdSet.Builder(context.reader().maxDoc());
      builder.or(docs);
      bits = builder.build().bits();

      // Count how many docs are in our filtered set
      // TODO make this lazy-loaded only for those that need it?
      numDocs += bits.cardinality();
    }
    enums.add(new Holder(termsEnum, bits));
  }
  this.enums = enums.toArray(new Holder[enums.size()]);
}
Example 19
Source File: GeoDistanceRangeQuery.java From Elasticsearch with Apache License 2.0
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
  final Weight boundingBoxWeight;
  if (boundingBoxFilter != null) {
    boundingBoxWeight = searcher.createNormalizedWeight(boundingBoxFilter, false);
  } else {
    boundingBoxWeight = null;
  }
  return new ConstantScoreWeight(this) {
    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      final DocIdSetIterator approximation;
      if (boundingBoxWeight != null) {
        Scorer s = boundingBoxWeight.scorer(context);
        if (s == null) {
          // if the approximation does not match anything, we're done
          return null;
        }
        approximation = s.iterator();
      } else {
        approximation = DocIdSetIterator.all(context.reader().maxDoc());
      }
      final MultiGeoPointValues values = indexFieldData.load(context).getGeoPointValues();
      final TwoPhaseIterator twoPhaseIterator = new TwoPhaseIterator(approximation) {
        @Override
        public boolean matches() throws IOException {
          final int doc = approximation.docID();
          values.setDocument(doc);
          final int length = values.count();
          for (int i = 0; i < length; i++) {
            GeoPoint point = values.valueAt(i);
            if (distanceBoundingCheck.isWithin(point.lat(), point.lon())) {
              double d = fixedSourceDistance.calculate(point.lat(), point.lon());
              if (d >= inclusiveLowerPoint && d <= inclusiveUpperPoint) {
                return true;
              }
            }
          }
          return false;
        }

        @Override
        public float matchCost() {
          if (distanceBoundingCheck == GeoDistance.ALWAYS_INSTANCE) {
            return 0.0f;
          } else {
            // TODO: is this right (up to 4 comparisons from GeoDistance.SimpleDistanceBoundingCheck)?
            return 4.0f;
          }
        }
      };
      return new ConstantScoreScorer(this, score(), twoPhaseIterator);
    }
  };
}
Example 20
Source File: FetchPhase.java From Elasticsearch with Apache License 2.0
private InternalSearchHit.InternalNestedIdentity getInternalNestedIdentity(SearchContext context, int nestedSubDocId,
    LeafReaderContext subReaderContext, DocumentMapper documentMapper, ObjectMapper nestedObjectMapper) throws IOException {
  int currentParent = nestedSubDocId;
  ObjectMapper nestedParentObjectMapper;
  ObjectMapper current = nestedObjectMapper;
  String originalName = nestedObjectMapper.name();
  InternalSearchHit.InternalNestedIdentity nestedIdentity = null;
  do {
    Query parentFilter;
    nestedParentObjectMapper = documentMapper.findParentObjectMapper(current);
    if (nestedParentObjectMapper != null) {
      if (nestedParentObjectMapper.nested().isNested() == false) {
        current = nestedParentObjectMapper;
        continue;
      }
      parentFilter = nestedParentObjectMapper.nestedTypeFilter();
    } else {
      parentFilter = Queries.newNonNestedFilter();
    }

    Query childFilter = nestedObjectMapper.nestedTypeFilter();
    if (childFilter == null) {
      current = nestedParentObjectMapper;
      continue;
    }
    final Weight childWeight = context.searcher().createNormalizedWeight(childFilter, false);
    Scorer childScorer = childWeight.scorer(subReaderContext);
    if (childScorer == null) {
      current = nestedParentObjectMapper;
      continue;
    }
    DocIdSetIterator childIter = childScorer.iterator();

    BitSet parentBits = context.bitsetFilterCache().getBitSetProducer(parentFilter).getBitSet(subReaderContext);

    int offset = 0;
    int nextParent = parentBits.nextSetBit(currentParent);
    for (int docId = childIter.advance(currentParent + 1);
        docId < nextParent && docId != DocIdSetIterator.NO_MORE_DOCS;
        docId = childIter.nextDoc()) {
      offset++;
    }
    currentParent = nextParent;
    current = nestedObjectMapper = nestedParentObjectMapper;
    int currentPrefix = current == null ? 0 : current.name().length() + 1;
    nestedIdentity = new InternalSearchHit.InternalNestedIdentity(originalName.substring(currentPrefix), offset, nestedIdentity);
    if (current != null) {
      originalName = current.name();
    }
  } while (current != null);
  return nestedIdentity;
}