org.apache.solr.search.DocSet Java Examples
The following examples show how to use org.apache.solr.search.DocSet. Each example is extracted from an open source project; the source file and license are noted above it.
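Most of the examples below share one basic pattern: materialize a DocSet from a SolrIndexSearcher, walk it with a DocIterator, and combine sets with DocSet's set operations. Here is a minimal sketch of that pattern; the field names and values are placeholders rather than anything from the examples:

import java.io.IOException;

import org.apache.lucene.index.Term;
import org.apache.lucene.search.TermQuery;
import org.apache.solr.search.DocIterator;
import org.apache.solr.search.DocSet;
import org.apache.solr.search.SolrIndexSearcher;

public class DocSetBasics {

  static void demo(SolrIndexSearcher searcher) throws IOException {
    // Materialize the set of documents matching a query (cached by the searcher).
    DocSet docs = searcher.getDocSet(new TermQuery(new Term("category", "books")));

    // Walk the matching internal Lucene doc ids in increasing order.
    for (DocIterator it = docs.iterator(); it.hasNext(); ) {
      int docId = it.nextDoc();
      // ... fetch stored fields, collect counts, etc.
    }

    // DocSets support set algebra; each call returns a new set.
    DocSet inStock = searcher.getDocSet(new TermQuery(new Term("inStock", "true")));
    DocSet both = docs.intersection(inStock);
    DocSet outOfStock = docs.andNot(inStock);
  }
}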
Example #1
Source File: SolrRangeQuery.java From lucene-solr with Apache License 2.0
private DocSet createDocSet(SolrIndexSearcher searcher, long cost) throws IOException {
  int maxDoc = searcher.maxDoc();
  BitDocSet liveDocs = searcher.getLiveDocSet();
  FixedBitSet liveBits = liveDocs.size() == maxDoc ? null : liveDocs.getBits();

  DocSetBuilder builder = new DocSetBuilder(maxDoc, cost);

  List<LeafReaderContext> leaves = searcher.getTopReaderContext().leaves();

  int maxTermsPerSegment = 0;
  for (LeafReaderContext ctx : leaves) {
    TermsEnum te = getTermsEnum(ctx);
    int termsVisited = builder.add(te, ctx.docBase);
    maxTermsPerSegment = Math.max(maxTermsPerSegment, termsVisited);
  }

  DocSet set = maxTermsPerSegment <= 1
      ? builder.buildUniqueInOrder(liveBits)
      : builder.build(liveBits);
  return DocSetUtil.getDocSet(set, searcher);
}
Example #2
Source File: AbstractSolrCachingScorer.java From SearchServices with GNU Lesser General Public License v3.0
SolrCachingScorerDoIdSetIterator(DocSet in, LeafReaderContext context, SolrIndexSearcher searcher) {
  this.context = context;

  if (in instanceof BitDocSet) {
    matches = (BitDocSet) in;
  } else {
    this.matches = new BitDocSet(new FixedBitSet(searcher.maxDoc()));
    for (DocIterator it = in.iterator(); it.hasNext(); /* */) {
      matches.addUnique(it.nextDoc());
    }
  }

  bitSet = matches.getBits();
  doc = getBase() - 1;
}
Example #3
Source File: TestXJoinQParserPlugin.java From BioSolr with Apache License 2.0
@Test
public void testMultiValued() throws Exception {
  Query q = parse(COMPONENT_NAME_4);
  DocSet docs = searcher.getDocSet(q);
  assertEquals(4, docs.size());
  DocIterator it = docs.iterator();
  assertTrue(it.hasNext());
  assertEquals(0, it.nextDoc());
  assertTrue(it.hasNext());
  assertEquals(1, it.nextDoc());
  assertTrue(it.hasNext());
  assertEquals(2, it.nextDoc());
  assertTrue(it.hasNext());
  assertEquals(3, it.nextDoc());
  assertFalse(it.hasNext());
}
Example #4
Source File: SolrPluginUtils.java From lucene-solr with Apache License 2.0
/**
 * Executes a basic query
 */
public static DocList doSimpleQuery(String sreq, SolrQueryRequest req, int start, int limit) throws IOException {
  List<String> commands = StrUtils.splitSmart(sreq, ';');

  String qs = commands.size() >= 1 ? commands.get(0) : "";
  try {
    Query query = QParser.getParser(qs, req).getQuery();

    // If the first non-query, non-filter command is a simple sort on an indexed field, then
    // we can use the Lucene sort ability.
    Sort sort = null;
    if (commands.size() >= 2) {
      sort = SortSpecParsing.parseSortSpec(commands.get(1), req).getSort();
    }

    DocList results = req.getSearcher().getDocList(query, (DocSet) null, sort, start, limit);
    return results;
  } catch (SyntaxError e) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Error parsing query: " + qs);
  }
}
Example #5
Source File: TaggerRequestHandler.java From lucene-solr with Apache License 2.0
/**
 * The set of documents matching the provided 'fq' (filter query). Don't include deleted docs
 * either. If null is returned, then all docs are available.
 */
private Bits computeDocCorpus(SolrQueryRequest req) throws SyntaxError, IOException {
  final String[] corpusFilterQueries = req.getParams().getParams("fq");
  final SolrIndexSearcher searcher = req.getSearcher();
  final Bits docBits;
  if (corpusFilterQueries != null && corpusFilterQueries.length > 0) {
    List<Query> filterQueries = new ArrayList<Query>(corpusFilterQueries.length);
    for (String corpusFilterQuery : corpusFilterQueries) {
      QParser qParser = QParser.getParser(corpusFilterQuery, null, req);
      try {
        filterQueries.add(qParser.parse());
      } catch (SyntaxError e) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
      }
    }

    final DocSet docSet = searcher.getDocSet(filterQueries); // hopefully in the cache
    docBits = docSet.getBits();
  } else {
    docBits = searcher.getSlowAtomicReader().getLiveDocs();
  }
  return docBits;
}
Example #6
Source File: SolrOwnerScorer.java From SearchServices with GNU Lesser General Public License v3.0
public static SolrOwnerScorer createOwnerScorer(Weight weight, LeafReaderContext context, SolrIndexSearcher searcher, String authority) throws IOException {
  if (AuthorityType.getAuthorityType(authority) == AuthorityType.USER) {
    DocSet ownedDocs = (DocSet) searcher.cacheLookup(CacheConstants.ALFRESCO_OWNERLOOKUP_CACHE, authority);

    if (ownedDocs == null) {
      // Cache miss: query the index for docs where the owner matches the authority.
      ownedDocs = searcher.getDocSet(new TermQuery(new Term(QueryConstants.FIELD_OWNER, authority)));
      searcher.cacheInsert(CacheConstants.ALFRESCO_OWNERLOOKUP_CACHE, authority, ownedDocs);
    }
    return new SolrOwnerScorer(weight, ownedDocs, context, searcher);
  }

  // Return an empty doc set, as the authority isn't a user.
  return new SolrOwnerScorer(weight, new BitDocSet(new FixedBitSet(0)), context, searcher);
}
Example #7
Source File: BlockJoin.java From lucene-solr with Apache License 2.0
/** childInput may also contain parents (i.e. a parent or below will all roll up to that parent) */
public static DocSet toParents(DocSet childInput, BitDocSet parentList, QueryContext qcontext) throws IOException {
  FixedBitSet parentBits = parentList.getBits();
  DocSetCollector collector = new DocSetCollector(qcontext.searcher().maxDoc());
  DocIterator iter = childInput.iterator();
  int currentParent = -1;
  while (iter.hasNext()) {
    int childDoc = iter.nextDoc(); // TODO: skipping
    if (childDoc <= currentParent) { // use <= since we also allow parents in the input
      // we already visited this parent
      continue;
    }
    currentParent = parentBits.nextSetBit(childDoc);
    if (currentParent != DocIdSetIterator.NO_MORE_DOCS) {
      // only collect the parent the first time we skip to it
      collector.collect(currentParent);
    }
  }
  return collector.getDocSet();
}
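The nextSetBit arithmetic above works because of Lucene's block-join doc-id layout: the child documents of a block immediately precede their parent, so the owning parent of any child is the next set bit in the parent bitset. A small standalone sketch of that arithmetic, using hypothetical doc ids:

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.FixedBitSet;

public class BlockJoinBitsDemo {
  public static void main(String[] args) {
    // Hypothetical 10-doc segment with two blocks:
    // children 0..2 belong to parent 3, children 4..6 to parent 7.
    FixedBitSet parentBits = new FixedBitSet(10);
    parentBits.set(3);
    parentBits.set(7);

    int childDoc = 5;
    // The owning parent is the next set bit at or after the child...
    int parent = parentBits.nextSetBit(childDoc);               // 7
    if (parent != DocIdSetIterator.NO_MORE_DOCS) {
      // ...and the block starts just after the previous parent.
      int firstChild = parentBits.prevSetBit(parent - 1) + 1;   // 4
      System.out.println("parent=" + parent + " firstChild=" + firstChild);
    }
  }
}

toChildren in Example #9 runs the same arithmetic in the other direction, using prevSetBit to enumerate the children of each parent.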
Example #8
Source File: ParentNodeFacetTreeBuilder.java From BioSolr with Apache License 2.0
private Map<String, Set<String>> findParentIdsForNodes(SolrIndexSearcher searcher, Collection<String> nodeIds) throws IOException {
  Map<String, Set<String>> parentIds = new HashMap<>();

  LOGGER.debug("Looking up parents for {} nodes", nodeIds.size());
  Query filter = buildFilterQuery(getNodeField(), nodeIds);
  LOGGER.trace("Filter query: {}", filter);

  DocSet docs = searcher.getDocSet(filter);

  for (DocIterator it = docs.iterator(); it.hasNext(); ) {
    Document doc = searcher.doc(it.nextDoc(), docFields);
    String nodeId = doc.get(getNodeField());

    Set<String> parentIdValues = new HashSet<>(Arrays.asList(doc.getValues(parentField)));
    parentIds.put(nodeId, parentIdValues);

    // Record the label, if required
    if (isLabelRequired(nodeId)) {
      recordLabel(nodeId, doc.getValues(getLabelField()));
    }
  }

  return parentIds;
}
Example #9
Source File: BlockJoin.java From lucene-solr with Apache License 2.0
/** acceptDocs will normally be used to avoid deleted documents from being generated as part of the answer DocSet (just use *:*)
 * although it can be used to further constrain the generated documents. */
public static DocSet toChildren(DocSet parentInput, BitDocSet parentList, DocSet acceptDocs, QueryContext qcontext) throws IOException {
  FixedBitSet parentBits = parentList.getBits();
  DocSetCollector collector = new DocSetCollector(qcontext.searcher().maxDoc());
  DocIterator iter = parentInput.iterator();
  while (iter.hasNext()) {
    int parentDoc = iter.nextDoc();
    if (!parentList.exists(parentDoc) || parentDoc == 0) { // test for parentDoc==0 here to avoid passing -1 to prevSetBit later on
      // not a parent, or parent has no children
      continue;
    }
    int prevParent = parentBits.prevSetBit(parentDoc - 1);
    for (int childDoc = prevParent + 1; childDoc < parentDoc; childDoc++) {
      if (acceptDocs != null && !acceptDocs.exists(childDoc)) continue; // only select live docs
      collector.collect(childDoc);
    }
  }
  return collector.getDocSet();
}
Example #10
Source File: SimpleFacets.java From lucene-solr with Apache License 2.0
/**
 * Returns a grouped facet count for the facet query
 *
 * @see FacetParams#FACET_QUERY
 */
public int getGroupedFacetQueryCount(Query facetQuery, DocSet docSet) throws IOException {
  // It is okay to retrieve group.field from global because it is never a local param
  String groupField = global.get(GroupParams.GROUP_FIELD);
  if (groupField == null) {
    throw new SolrException(
        SolrException.ErrorCode.BAD_REQUEST,
        "Specify the group.field as parameter or local parameter"
    );
  }

  @SuppressWarnings({"rawtypes"})
  AllGroupsCollector collector = new AllGroupsCollector<>(new TermGroupSelector(groupField));
  searcher.search(QueryUtils.combineQueryAndFilter(facetQuery, docSet.getTopFilter()), collector);
  return collector.getGroupCount();
}
Example #11
Source File: SimpleFacets.java From lucene-solr with Apache License 2.0
/**
 * Computes the term->count counts for the specified term values relative to the given docset.
 *
 * @param field the name of the field to compute term counts against
 * @param parsed contains the docset to compute term counts relative to
 * @param terms a list of term values (in the specified field) to compute the counts for
 */
protected NamedList<Integer> getListedTermCounts(String field, final ParsedParams parsed, List<String> terms) throws IOException {
  final String sort = parsed.params.getFieldParam(field, FacetParams.FACET_SORT, "empty");
  final SchemaField sf = searcher.getSchema().getField(field);
  final FieldType ft = sf.getType();
  final DocSet baseDocset = parsed.docs;
  final NamedList<Integer> res = new NamedList<>();
  Stream<String> inputStream = terms.stream();
  if (sort.equals(FacetParams.FACET_SORT_INDEX)) { // it might always make sense
    inputStream = inputStream.sorted();
  }
  Stream<SimpleImmutableEntry<String,Integer>> termCountEntries = inputStream
      .map((term) -> new SimpleImmutableEntry<>(term, numDocs(term, sf, ft, baseDocset)));
  if (sort.equals(FacetParams.FACET_SORT_COUNT)) {
    termCountEntries = termCountEntries.sorted(Collections.reverseOrder(Map.Entry.comparingByValue()));
  }
  termCountEntries.forEach(e -> res.add(e.getKey(), e.getValue()));
  return res;
}
Example #12
Source File: StatsField.java From lucene-solr with Apache License 2.0
/**
 * Computes the {@link StatsValues} for this {@link StatsField} relative to the
 * specified {@link DocSet}
 *
 * @see #computeBaseDocSet
 */
public StatsValues computeLocalStatsValues(DocSet base) throws IOException {
  if (statsToCalculate.isEmpty()) {
    // perf optimization for the case where we compute nothing
    // ie: stats.field={!min=$domin}myfield&domin=false
    return StatsValuesFactory.createStatsValues(this);
  }

  if (null != schemaField && !schemaField.getType().isPointField()
      && (schemaField.multiValued() || schemaField.getType().multiValuedFieldCache())) {

    // TODO: should this also be used for single-valued string fields? (should work fine)
    return DocValuesStats.getCounts(searcher, this, base, facets);
  } else {
    // either a single valued field we pull from FieldCache, or an explicit
    // function ValueSource
    return computeLocalValueSourceStats(base);
  }
}
Example #13
Source File: CommandHandler.java From lucene-solr with Apache License 2.0
private DocSet computeGroupedDocSet(Query query, ProcessedFilter filter, List<Collector> collectors) throws IOException {
  @SuppressWarnings({"rawtypes"})
  Command firstCommand = commands.get(0);
  String field = firstCommand.getKey();
  SchemaField sf = searcher.getSchema().getField(field);
  FieldType fieldType = sf.getType();

  @SuppressWarnings({"rawtypes"})
  final AllGroupHeadsCollector allGroupHeadsCollector;
  if (fieldType.getNumberType() != null) {
    ValueSource vs = fieldType.getValueSource(sf, null);
    allGroupHeadsCollector = AllGroupHeadsCollector.newCollector(
        new ValueSourceGroupSelector(vs, new HashMap<>()), firstCommand.getWithinGroupSort());
  } else {
    allGroupHeadsCollector = AllGroupHeadsCollector.newCollector(
        new TermGroupSelector(firstCommand.getKey()), firstCommand.getWithinGroupSort());
  }
  if (collectors.isEmpty()) {
    searchWithTimeLimiter(query, filter, allGroupHeadsCollector);
  } else {
    collectors.add(allGroupHeadsCollector);
    searchWithTimeLimiter(query, filter, MultiCollector.wrap(collectors.toArray(new Collector[collectors.size()])));
  }
  return new BitDocSet(allGroupHeadsCollector.retrieveGroupHeads(searcher.maxDoc()));
}
Example #14
Source File: FacetContext.java From lucene-solr with Apache License 2.0
/**
 * @param filter The filter for the bucket that resulted in this context/domain. Can be null if this is the root context.
 * @param domain The resulting set of documents for this facet.
 */
public FacetContext sub(Query filter, DocSet domain) {
  FacetContext ctx = new FacetContext();
  ctx.parent = this;
  ctx.base = domain;
  ctx.filter = filter;

  // carry over from parent
  ctx.flags = flags;
  ctx.qcontext = qcontext;
  ctx.req = req;
  ctx.searcher = searcher;
  return ctx;
}
Example #15
Source File: UnInvertedField.java From lucene-solr with Apache License 2.0
public DocToTerm() throws IOException {
  bigTermSets = new DocSet[bigTerms.size()];
  bigTermNums = new int[bigTerms.size()];
  int i = 0;
  for (TopTerm tt : bigTerms.values()) {
    bigTermSets[i] = searcher.getDocSet(tt.termQuery);
    bigTermNums[i] = tt.termNum;
    i++;
  }
}
Example #16
Source File: UnInvertedField.java From lucene-solr with Apache License 2.0
/**
 * Called for each term in the field being uninverted.
 * Collects {@link #maxTermCounts} for all bigTerms as well as storing them in {@link #bigTerms}.
 *
 * @param te positioned at the current term.
 * @param termNum the ID/pointer/ordinal of the current term. Monotonically increasing between calls.
 */
@Override
protected void visitTerm(TermsEnum te, int termNum) throws IOException {
  if (termNum >= maxTermCounts.length) {
    // resize by doubling - for very large number of unique terms, expanding
    // by 4K and resultant GC will dominate uninvert times.  Resize at end if material
    int[] newMaxTermCounts = new int[Math.min(Integer.MAX_VALUE - 16, maxTermCounts.length * 2)];
    System.arraycopy(maxTermCounts, 0, newMaxTermCounts, 0, termNum);
    maxTermCounts = newMaxTermCounts;
  }

  final BytesRef term = te.term();

  if (te.docFreq() > maxTermDocFreq) {
    Term t = new Term(field, term); // this makes a deep copy of the term bytes
    TopTerm topTerm = new TopTerm();
    topTerm.term = t.bytes();
    topTerm.termNum = termNum;
    topTerm.termQuery = new TermQuery(t);

    bigTerms.put(topTerm.termNum, topTerm);

    if (deState == null) {
      deState = new SolrIndexSearcher.DocsEnumState();
      deState.fieldName = field;
      deState.liveDocs = searcher.getLiveDocsBits();
      deState.termsEnum = te; // TODO: check for MultiTermsEnum in SolrIndexSearcher could now fail?
      deState.postingsEnum = postingsEnum;
      deState.minSetSizeCached = maxTermDocFreq;
    }

    postingsEnum = deState.postingsEnum;
    DocSet set = searcher.getDocSet(deState);
    maxTermCounts[termNum] = set.size();
  }
}
Example #17
Source File: FacetHeatmap.java From lucene-solr with Apache License 2.0
private Bits getTopAcceptDocs(DocSet docSet, SolrIndexSearcher searcher) throws IOException {
  if (docSet.size() == searcher.numDocs()) {
    return null; // means match everything (all live docs). This can speedup things a lot.
  } else if (docSet.size() == 0) {
    return new Bits.MatchNoBits(searcher.maxDoc()); // can speedup things a lot
  } else {
    return docSet.getBits();
  }
}
Example #18
Source File: FacetProcessor.java From lucene-solr with Apache License 2.0
void fillBucket(SimpleOrderedMap<Object> bucket, Query q, DocSet result, boolean skip, Map<String,Object> facetInfo) throws IOException {

  boolean needDocSet = (skip == false && freq.getFacetStats().size() > 0) || freq.getSubFacets().size() > 0;

  long count;

  if (result != null) {
    count = result.size();
  } else if (needDocSet) {
    if (q == null) {
      result = fcontext.base;
      // result.incref(); // OFF-HEAP
    } else {
      result = fcontext.searcher.getDocSet(q, fcontext.base);
    }
    count = result.size(); // don't really need this if we are skipping, but it's free.
  } else {
    if (q == null) {
      count = fcontext.base.size();
    } else {
      count = fcontext.searcher.numDocs(q, fcontext.base);
    }
  }

  try {
    if (!skip) {
      processStats(bucket, q, result, count);
    }
    processSubs(bucket, q, result, skip, facetInfo);
  } finally {
    if (result != null) {
      // result.decref(); // OFF-HEAP
      result = null;
    }
  }
}
Example #19
Source File: HighlighterTest.java From lucene-solr with Apache License 2.0
@Test
public void payloadFilteringSpanQuery() throws IOException {
  clearIndex();

  String FIELD_NAME = "payloadDelimited";
  assertU(adoc("id", "0", FIELD_NAME, "word|7 word|2"));
  assertU(commit());

  // We search at a lower level than typical Solr tests because there's no QParser for payloads

  // Create query matching this payload
  Query query = new SpanPayloadCheckQuery(new SpanTermQuery(new Term(FIELD_NAME, "word")),
      Collections.singletonList(new BytesRef(new byte[]{0, 0, 0, 7}))); // bytes for integer 7

  // invoke highlight component... the hard way
  final SearchComponent hlComp = h.getCore().getSearchComponent("highlight");
  SolrQueryRequest req = req("hl", "true", "hl.fl", FIELD_NAME, HighlightParams.USE_PHRASE_HIGHLIGHTER, "true");
  try {
    SolrQueryResponse resp = new SolrQueryResponse();
    ResponseBuilder rb = new ResponseBuilder(req, resp, Collections.singletonList(hlComp));
    rb.setHighlightQuery(query);
    rb.setResults(req.getSearcher().getDocListAndSet(query, (DocSet) null, null, 0, 1));

    // highlight:
    hlComp.prepare(rb);
    hlComp.process(rb);

    // inspect response
    final String[] snippets = (String[]) resp.getValues().findRecursive("highlighting", "0", FIELD_NAME);
    assertEquals("<em>word|7</em> word|2", snippets[0]);
  } finally {
    req.close();
  }
}
Example #20
Source File: SolrFacetService.java From chronix.server with Apache License 2.0
/**
 * @param req the solr query request
 * @param rsp the solr query response
 * @param matchingDocs the set of matching documents
 * @param solrParams the solr request params
 * @return pivot processor
 */
PivotFacetProcessor pivotFacetProcessor(SolrQueryRequest req, SolrQueryResponse rsp, DocSet matchingDocs, SolrParams solrParams) {
  ResponseBuilder rb = new ResponseBuilder(req, rsp, emptyList());
  rb.doFacets = true;
  return new PivotFacetProcessor(req, matchingDocs, solrParams, rb);
}
Example #21
Source File: QueryCommand.java From lucene-solr with Apache License 2.0
private QueryCommand(Sort sort, Query query, int docsToCollect, boolean needScores, DocSet docSet, String queryString, Query mainQuery) {
  this.sort = sort;
  this.query = query;
  this.docsToCollect = docsToCollect;
  this.needScores = needScores;
  this.docSet = docSet;
  this.queryString = queryString;
  this.mainQuery = mainQuery;
}
Example #22
Source File: CrossCollectionJoinQuery.java From lucene-solr with Apache License 2.0
@Override
public DocSet getDocSet() throws IOException {
  Query query = getResultQuery(searcher.getSchema().getField(toField), false);
  if (query == null) {
    return DocSet.empty();
  }
  return DocSetUtil.createDocSet(searcher, query, null);
}
Example #23
Source File: CommandHandler.java From lucene-solr with Apache License 2.0
private DocSet computeDocSet(Query query, ProcessedFilter filter, List<Collector> collectors) throws IOException {
  int maxDoc = searcher.maxDoc();
  final DocSetCollector docSetCollector = new DocSetCollector(maxDoc);
  List<Collector> allCollectors = new ArrayList<>(collectors);
  allCollectors.add(docSetCollector);
  searchWithTimeLimiter(query, filter, MultiCollector.wrap(allCollectors));
  return DocSetUtil.getDocSet(docSetCollector, searcher);
}
Example #24
Source File: GraphQuery.java From lucene-solr with Apache License 2.0
private DocSet resolveLeafNodes() throws IOException {
  String field = collectSchemaField.getName();
  BooleanQuery.Builder leafNodeQuery = new BooleanQuery.Builder();
  Query edgeQuery = collectSchemaField.hasDocValues()
      ? new DocValuesFieldExistsQuery(field)
      : new WildcardQuery(new Term(field, "*"));
  leafNodeQuery.add(edgeQuery, Occur.MUST_NOT);
  DocSet leafNodes = fromSearcher.getDocSet(leafNodeQuery.build());
  return leafNodes;
}
Example #25
Source File: ChildDocTransformer.java From lucene-solr with Apache License 2.0
ChildDocTransformer(String name, BitSetProducer parentsFilter, DocSet childDocSet,
                    SolrReturnFields returnFields, boolean isNestedSchema, int limit) {
  this.name = name;
  this.parentsFilter = parentsFilter;
  this.childDocSet = childDocSet;
  this.limit = limit;
  this.isNestedSchema = isNestedSchema;
  this.childReturnFields = returnFields != null ? returnFields : new SolrReturnFields();
}
Example #26
Source File: FilterQuery.java From lucene-solr with Apache License 2.0
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  // SolrRequestInfo reqInfo = SolrRequestInfo.getRequestInfo();

  if (!(searcher instanceof SolrIndexSearcher)) {
    // delete-by-query won't have SolrIndexSearcher
    return new BoostQuery(new ConstantScoreQuery(q), 0).createWeight(searcher, scoreMode, 1f);
  }

  SolrIndexSearcher solrSearcher = (SolrIndexSearcher) searcher;
  DocSet docs = solrSearcher.getDocSet(q);
  // reqInfo.addCloseHook(docs); // needed for off-heap refcounting

  return new BoostQuery(new SolrConstantScoreQuery(docs.getTopFilter()), 0).createWeight(searcher, scoreMode, 1f);
}
Example #27
Source File: TestXJoinQParserPlugin.java From BioSolr with Apache License 2.0
@Test
public void testSingleComponent() throws Exception {
  Query q = parse(COMPONENT_NAME);
  DocSet docs = searcher.getDocSet(q);
  assertEquals(2, docs.size());
  DocIterator it = docs.iterator();
  assertTrue(it.hasNext());
  assertEquals(1, it.nextDoc());
  assertTrue(it.hasNext());
  assertEquals(3, it.nextDoc());
  assertFalse(it.hasNext());
}
Example #28
Source File: NumericFacets.java From lucene-solr with Apache License 2.0
public static NamedList<Integer> getCounts(SolrIndexSearcher searcher, DocSet docs, String fieldName, int offset, int limit, int mincount, boolean missing, String sort) throws IOException {
  final SchemaField sf = searcher.getSchema().getField(fieldName);
  if (sf.multiValued()) {
    // TODO: evaluate using getCountsMultiValued for singleValued numerics with SingletonSortedNumericDocValues
    return getCountsMultiValued(searcher, docs, fieldName, offset, limit, mincount, missing, sort);
  }
  return getCountsSingleValue(searcher, docs, fieldName, offset, limit, mincount, missing, sort);
}
Example #29
Source File: FacetProcessor.java From lucene-solr with Apache License 2.0
@SuppressWarnings("unused") static DocSet getFieldMissing(SolrIndexSearcher searcher, DocSet docs, String fieldName) throws IOException { SchemaField sf = searcher.getSchema().getField(fieldName); DocSet hasVal = searcher.getDocSet(sf.getType().getRangeQuery(null, sf, null, null, false, false)); DocSet answer = docs.andNot(hasVal); // hasVal.decref(); // OFF-HEAP return answer; }
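The andNot call above is the general way to take a complement within a base set. As a hedged sketch of reusing the same pattern, the helper below counts documents in a base set that have no value in a field; the helper method itself is hypothetical, but the calls mirror the example above:

import java.io.IOException;

import org.apache.solr.schema.SchemaField;
import org.apache.solr.search.DocSet;
import org.apache.solr.search.SolrIndexSearcher;

public class MissingCount {

  /** Counts docs in {@code base} that have no value in {@code fieldName}. */
  static int countMissing(SolrIndexSearcher searcher, DocSet base, String fieldName) throws IOException {
    SchemaField sf = searcher.getSchema().getField(fieldName);
    // An open-ended range query matches every doc with *some* value in the field.
    DocSet hasVal = searcher.getDocSet(sf.getType().getRangeQuery(null, sf, null, null, false, false));
    return base.andNot(hasVal).size();
  }
}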