Java Code Examples for org.apache.lucene.index.DocsEnum#NO_MORE_DOCS
The following examples show how to use org.apache.lucene.index.DocsEnum#NO_MORE_DOCS.
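All four examples rely on the same idiom: obtain a DocsEnum (or DocsAndPositionsEnum) for a term, advance it with nextDoc(), and stop once it returns the sentinel DocsEnum.NO_MORE_DOCS (inherited from DocIdSetIterator). The minimal sketch below shows that loop in isolation; it assumes the Lucene 4.x API (AtomicReader), and the field name "contents" and the class name NoMoreDocsSketch are illustrative placeholders, not part of any project listed here.

import java.io.IOException;

import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.Term;

public class NoMoreDocsSketch {

    // Prints the id of every document in this segment that contains the given word.
    // AtomicReader and the "contents" field are assumptions made for illustration only.
    public static void printMatchingDocs(AtomicReader reader, String word) throws IOException {
        DocsEnum docs = reader.termDocsEnum(new Term("contents", word));
        if (docs == null) {
            return; // the term does not occur in this segment
        }
        // nextDoc() advances to the next matching document and returns its id,
        // or NO_MORE_DOCS once the enumeration is exhausted.
        while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
            System.out.println("match in doc " + docs.docID());
        }
    }
}

Note that termDocsEnum returns null when the term is absent from the segment, so the null check before the loop is required; every example below guards against this in the same way.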
Example 1
Source File: LuceneCorpusAdapter.java From Palmetto with GNU Affero General Public License v3.0
@Override
public void getDocumentsWithWordAsSet(String word, IntOpenHashSet documents) {
    DocsEnum docs = null;
    Term term = new Term(fieldName, word);
    try {
        int baseDocId;
        for (int i = 0; i < reader.length; i++) {
            docs = reader[i].termDocsEnum(term);
            baseDocId = contexts[i].docBase;
            if (docs != null) {
                while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
                    documents.add(baseDocId + docs.docID());
                }
            }
        }
    } catch (IOException e) {
        LOGGER.error("Error while requesting documents for word \"" + word + "\".", e);
    }
}
Example 2
Source File: LuceneCorpusAdapter.java From Palmetto with GNU Affero General Public License v3.0
@Override
public void getDocumentsWithWord(String word, IntArrayList documents) {
    DocsEnum docs = null;
    Term term = new Term(fieldName, word);
    try {
        int baseDocId;
        for (int i = 0; i < reader.length; i++) {
            docs = reader[i].termDocsEnum(term);
            baseDocId = contexts[i].docBase;
            if (docs != null) {
                while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
                    documents.add(docs.docID() + baseDocId);
                }
            }
        }
    } catch (IOException e) {
        LOGGER.error("Error while requesting documents for word \"" + word + "\".", e);
    }
}
Example 3
Source File: TermSearcher.java From SourcererCC with GNU General Public License v3.0
public synchronized void searchWithPosition(int queryTermsSeen) {
    if (null != this.reader) {
        if (null != this.reader.getContext()) {
            if (null != this.reader.getContext().leaves()) {
                Term term = new Term("tokens", this.searchTerm);
                for (AtomicReaderContext ctx : this.reader.getContext().leaves()) {
                    int base = ctx.docBase;
                    try {
                        DocsAndPositionsEnum docEnum = MultiFields.getTermPositionsEnum(
                                ctx.reader(), MultiFields.getLiveDocs(ctx.reader()),
                                "tokens", term.bytes());
                        if (null != docEnum) {
                            int doc = DocsEnum.NO_MORE_DOCS;
                            while ((doc = docEnum.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
                                long docId = doc + base;
                                CandidateSimInfo simInfo = null;
                                if (this.simMap.containsKey(docId)) {
                                    simInfo = this.simMap.get(docId);
                                    simInfo.similarity = simInfo.similarity
                                            + Math.min(freqTerm, docEnum.freq());
                                } else {
                                    if (earlierDocs.contains(docId))
                                        continue;
                                    Document d = SearchManager.searcher.get(shard).getDocument(docId);
                                    long candidateId = Long.parseLong(d.get("id"));
                                    // Get rid of these early -- we're only looking for candidates
                                    // whose ids are smaller than the query
                                    if (candidateId >= this.queryId) {
                                        earlierDocs.add(docId);
                                        continue; // we reject the candidate
                                    }
                                    simInfo = new CandidateSimInfo();
                                    simInfo.doc = d;
                                    simInfo.candidateSize = Integer.parseInt(d.get("size"));
                                    simInfo.similarity = Math.min(freqTerm, docEnum.freq());
                                    this.simMap.put(docId, simInfo);
                                }
                                simInfo.queryMatchPosition = queryTermsSeen;
                                int candidatePos = docEnum.nextPosition();
                                simInfo.candidateMatchPosition = candidatePos + docEnum.freq();
                                if (!Util.isSatisfyPosFilter(
                                        this.simMap.get(docId).similarity,
                                        this.querySize, queryTermsSeen,
                                        simInfo.candidateSize,
                                        simInfo.candidateMatchPosition,
                                        this.computedThreshold)) {
                                    this.simMap.remove(docId);
                                }
                            }
                        } else {
                            logger.trace("docEnum is null, " + base + ", term: "
                                    + this.searchTerm + Util.debug_thread());
                        }
                    } catch (Exception e) {
                        e.printStackTrace();
                        logger.error("exception caught " + e.getMessage()
                                + Util.debug_thread() + " search term:" + this.searchTerm);
                    }
                }
            } else {
                logger.debug("leaves are null, " + this.searchTerm + Util.debug_thread());
            }
        } else {
            logger.debug("getContext is null, " + this.searchTerm + Util.debug_thread());
        }
    } else {
        logger.debug("this.reader is null, " + this.searchTerm + Util.debug_thread());
    }
}
Example 4
Source File: WindowSupportingLuceneCorpusAdapter.java From Palmetto with GNU Affero General Public License v3.0
protected void requestDocumentsWithWord(String word, IntObjectOpenHashMap<IntArrayList[]> positionsInDocs,
        IntIntOpenHashMap docLengths, int wordId, int numberOfWords) {
    DocsAndPositionsEnum docPosEnum = null;
    Term term = new Term(fieldName, word);
    int localDocId, globalDocId, baseDocId;
    IntArrayList positions[];
    try {
        for (int i = 0; i < reader.length; i++) {
            docPosEnum = reader[i].termPositionsEnum(term);
            baseDocId = contexts[i].docBase;
            if (docPosEnum != null) {
                while (docPosEnum.nextDoc() != DocsEnum.NO_MORE_DOCS) {
                    localDocId = docPosEnum.docID();
                    globalDocId = localDocId + baseDocId;
                    // if this is the first word and we found a new document
                    if (!positionsInDocs.containsKey(globalDocId)) {
                        positions = new IntArrayList[numberOfWords];
                        positionsInDocs.put(globalDocId, positions);
                    } else {
                        positions = positionsInDocs.get(globalDocId);
                    }
                    if (positions[wordId] == null) {
                        positions[wordId] = new IntArrayList();
                    }
                    // Go through the positions inside this document
                    for (int p = 0; p < docPosEnum.freq(); ++p) {
                        positions[wordId].add(docPosEnum.nextPosition());
                    }
                    if (!docLengths.containsKey(globalDocId)) {
                        // Get the length of the document
                        docLengths.put(globalDocId,
                                reader[i].document(localDocId).getField(docLengthFieldName)
                                        .numericValue().intValue());
                    }
                }
            }
        }
    } catch (IOException e) {
        LOGGER.error("Error while requesting documents for word \"" + word + "\".", e);
    }
}