Java Code Examples for org.apache.lucene.index.RandomIndexWriter#close()
The following examples show how to use org.apache.lucene.index.RandomIndexWriter#close(). All are taken from the Apache lucene-solr project (Apache License 2.0).
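Every example shares one pattern: documents are added through a RandomIndexWriter, a near-real-time reader is obtained with getReader(), and the writer is then released with close(). The reader remains valid after the writer is closed, so searches run against it afterwards. Below is a minimal sketch of that pattern; the class and method names are illustrative, and random(), newDirectory(), newTextField(), and newSearcher() are inherited from Lucene's test framework class LuceneTestCase:

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;

// Illustrative test class; the name is hypothetical.
public class RandomIndexWriterCloseExample extends LuceneTestCase {

  public void testCloseAfterGetReader() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

    Document doc = new Document();
    doc.add(newTextField("field", "some content", Field.Store.NO));
    writer.addDocument(doc);

    // The near-real-time reader stays usable after the writer is closed.
    IndexReader reader = writer.getReader();
    writer.close(); // flushes pending documents and releases the underlying IndexWriter

    IndexSearcher searcher = newSearcher(reader);
    assertEquals(1, searcher.count(new MatchAllDocsQuery()));

    reader.close();
    dir.close();
  }
}

This is why nearly every example below calls close() immediately after getReader(): the reader is an independent snapshot, and closing the writer early keeps the test's resource lifecycle simple.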
Example 1
Source File: TestSimilarity2.java (from lucene-solr, Apache License 2.0)
/** similar to the above, but ORs the query with a real field */
public void testEmptyField() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(newTextField("foo", "bar", Field.Store.NO));
  iw.addDocument(doc);
  IndexReader ir = iw.getReader();
  iw.close();

  IndexSearcher is = newSearcher(ir);
  for (Similarity sim : sims) {
    is.setSimilarity(sim);
    BooleanQuery.Builder query = new BooleanQuery.Builder();
    query.add(new TermQuery(new Term("foo", "bar")), BooleanClause.Occur.SHOULD);
    query.add(new TermQuery(new Term("bar", "baz")), BooleanClause.Occur.SHOULD);
    assertEquals(1, is.search(query.build(), 10).totalHits.value);
  }
  ir.close();
  dir.close();
}
Example 2
Source File: TestBasics.java (from lucene-solr, Apache License 2.0)
@BeforeClass
public static void beforeClass() throws Exception {
  directory = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
      newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.SIMPLE, true))
          .setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000))
          .setMergePolicy(newLogMergePolicy()));
  //writer.infoStream = System.out;
  for (int i = 0; i < 2000; i++) {
    Document doc = new Document();
    doc.add(newTextField("field", English.intToEnglish(i), Field.Store.YES));
    writer.addDocument(doc);
  }
  reader = writer.getReader();
  searcher = newSearcher(reader);
  writer.close();
}
Example 3
Source File: TestDrillSideways.java (from lucene-solr, Apache License 2.0)
public void testEmptyIndex() throws Exception {
  // LUCENE-5045: make sure DrillSideways works with an empty index
  Directory dir = newDirectory();
  Directory taxoDir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  DirectoryTaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir, IndexWriterConfig.OpenMode.CREATE);
  IndexSearcher searcher = newSearcher(writer.getReader());
  TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoWriter);

  // Count "Author"
  FacetsConfig config = new FacetsConfig();
  DrillSideways ds = getNewDrillSideways(searcher, config, taxoReader);
  DrillDownQuery ddq = new DrillDownQuery(config);
  ddq.add("Author", "Lisa");
  DrillSidewaysResult r = ds.search(ddq, 10); // this used to fail on IllegalArgEx
  assertEquals(0, r.hits.totalHits.value);

  r = ds.search(ddq, null, null, 10,
      new Sort(new SortField("foo", SortField.Type.INT)), false); // this used to fail on IllegalArgEx
  assertEquals(0, r.hits.totalHits.value);

  writer.close();
  IOUtils.close(taxoWriter, searcher.getIndexReader(), taxoReader, dir, taxoDir);
}
Example 4
Source File: BaseXYPointTestCase.java (from lucene-solr, Apache License 2.0)
/** test we can search for a polygon with a hole (but still includes the doc) */
public void testPolygonHole() throws Exception {
  assumeTrue("Impl does not support polygons", supportsPolygons());
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

  // add a doc with a point
  Document document = new Document();
  addPointToDoc("field", document, 18.313694f, -65.227444f);
  writer.addDocument(document);

  // search and verify we found our doc
  IndexReader reader = writer.getReader();
  IndexSearcher searcher = newSearcher(reader);
  XYPolygon inner = new XYPolygon(new float[] { 18.5f, 18.5f, 18.7f, 18.7f, 18.5f },
                                  new float[] { -65.7f, -65.4f, -65.4f, -65.7f, -65.7f });
  XYPolygon outer = new XYPolygon(new float[] { 18, 18, 19, 19, 18 },
                                  new float[] { -66, -65, -65, -66, -66 }, inner);
  assertEquals(1, searcher.count(newPolygonQuery("field", outer)));

  reader.close();
  writer.close();
  dir.close();
}
Example 5
Source File: TestIntervals.java (from lucene-solr, Apache License 2.0)
@BeforeClass
public static void setupIndex() throws IOException {
  directory = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
      newIndexWriterConfig(analyzer).setMergePolicy(newLogMergePolicy()));
  for (int i = 0; i < field1_docs.length; i++) {
    Document doc = new Document();
    doc.add(new Field("field1", field1_docs[i], FIELD_TYPE));
    doc.add(new Field("field2", field2_docs[i], FIELD_TYPE));
    doc.add(new StringField("id", Integer.toString(i), Field.Store.NO));
    doc.add(new NumericDocValuesField("id", i));
    writer.addDocument(doc);
  }
  writer.close();
  searcher = new IndexSearcher(DirectoryReader.open(directory));
}
Example 6
Source File: TestPrefixQuery.java (from lucene-solr, Apache License 2.0)
public void testPrefixQuery() throws Exception {
  Directory directory = newDirectory();

  String[] categories = new String[] {"/Computers", "/Computers/Mac", "/Computers/Windows"};
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
  for (int i = 0; i < categories.length; i++) {
    Document doc = new Document();
    doc.add(newStringField("category", categories[i], Field.Store.YES));
    writer.addDocument(doc);
  }
  IndexReader reader = writer.getReader();

  PrefixQuery query = new PrefixQuery(new Term("category", "/Computers"));
  IndexSearcher searcher = newSearcher(reader);
  ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs;
  assertEquals("All documents in /Computers category and below", 3, hits.length);

  query = new PrefixQuery(new Term("category", "/Computers/Mac"));
  hits = searcher.search(query, 1000).scoreDocs;
  assertEquals("One in /Computers/Mac", 1, hits.length);

  query = new PrefixQuery(new Term("category", ""));
  hits = searcher.search(query, 1000).scoreDocs;
  assertEquals("everything", 3, hits.length);

  writer.close();
  reader.close();
  directory.close();
}
Example 7
Source File: TestUnifiedHighlighter.java (from lucene-solr, Apache License 2.0)
public void testMultiplePassages() throws Exception {
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, indexAnalyzer);

  Field body = new Field("body", "", fieldType);
  Document doc = new Document();
  doc.add(body);

  body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
  iw.addDocument(doc);
  body.setStringValue("This test is another test. Not a good sentence. Test test test test.");
  iw.addDocument(doc);

  IndexReader ir = iw.getReader();
  iw.close();

  IndexSearcher searcher = newSearcher(ir);
  UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer);
  Query query = new TermQuery(new Term("body", "test"));
  TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  assertEquals(2, topDocs.totalHits.value);
  String snippets[] = highlighter.highlight("body", query, topDocs, 2);
  assertEquals(2, snippets.length);
  assertEquals("This is a <b>test</b>. Just a <b>test</b> highlighting from postings. ", snippets[0]);
  assertEquals("This <b>test</b> is another <b>test</b>. ... <b>Test</b> <b>test</b> <b>test</b> <b>test</b>.", snippets[1]);

  ir.close();
}
Example 8
Source File: TestCheckJoinIndex.java (from lucene-solr, Apache License 2.0)
public void testInconsistentDeletes() throws IOException {
  final Directory dir = newDirectory();
  final IndexWriterConfig iwc = newIndexWriterConfig();
  iwc.setMergePolicy(NoMergePolicy.INSTANCE); // so that deletions don't trigger merges
  final RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);

  List<Document> block = new ArrayList<>();
  final int numChildren = TestUtil.nextInt(random(), 1, 3);
  for (int i = 0; i < numChildren; ++i) {
    Document doc = new Document();
    doc.add(new StringField("child", Integer.toString(i), Store.NO));
    block.add(doc);
  }
  Document parent = new Document();
  parent.add(new StringField("parent", "true", Store.NO));
  block.add(parent);
  w.addDocuments(block);

  if (random().nextBoolean()) {
    w.deleteDocuments(new Term("parent", "true"));
  } else {
    // delete any of the children
    w.deleteDocuments(new Term("child", Integer.toString(random().nextInt(numChildren))));
  }

  final IndexReader reader = w.getReader();
  w.close();

  BitSetProducer parentsFilter = new QueryBitSetProducer(new TermQuery(new Term("parent", "true")));
  try {
    expectThrows(IllegalStateException.class, () -> CheckJoinIndex.check(reader, parentsFilter));
  } finally {
    reader.close();
    dir.close();
  }
}
Example 9
Source File: TestFuzzyQuery.java (from lucene-solr, Apache License 2.0)
/**
 * MultiTermQuery provides (via attribute) information about which values
 * must be competitive to enter the priority queue.
 *
 * FuzzyQuery optimizes itself around this information, if the attribute
 * is not implemented correctly, there will be problems!
 */
public void testTieBreaker() throws Exception {
  Directory directory = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
  addDoc("a123456", writer);
  addDoc("c123456", writer);
  addDoc("d123456", writer);
  addDoc("e123456", writer);

  Directory directory2 = newDirectory();
  RandomIndexWriter writer2 = new RandomIndexWriter(random(), directory2);
  addDoc("a123456", writer2);
  addDoc("b123456", writer2);
  addDoc("b123456", writer2);
  addDoc("b123456", writer2);
  addDoc("c123456", writer2);
  addDoc("f123456", writer2);

  IndexReader ir1 = writer.getReader();
  IndexReader ir2 = writer2.getReader();

  MultiReader mr = new MultiReader(ir1, ir2);
  IndexSearcher searcher = newSearcher(mr);
  FuzzyQuery fq = new FuzzyQuery(new Term("field", "z123456"), 1, 0, 2, false);
  TopDocs docs = searcher.search(fq, 2);
  assertEquals(5, docs.totalHits.value); // 5 docs, from the a and b's

  mr.close();
  ir1.close();
  ir2.close();
  writer.close();
  writer2.close();
  directory.close();
  directory2.close();
}
Example 10
Source File: TestTopFieldCollector.java (from lucene-solr, Apache License 2.0)
@Override
public void setUp() throws Exception {
  super.setUp();

  dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
  int numDocs = atLeast(100);
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    iw.addDocument(doc);
  }
  ir = iw.getReader();
  iw.close();
  is = newSearcher(ir);
}
Example 11
Source File: TestFuzzyQuery.java (from lucene-solr, Apache License 2.0)
public void testGiga() throws Exception {
  Directory index = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), index);

  addDoc("Lucene in Action", w);
  addDoc("Lucene for Dummies", w);

  //addDoc("Giga", w);
  addDoc("Giga byte", w);

  addDoc("ManagingGigabytesManagingGigabyte", w);
  addDoc("ManagingGigabytesManagingGigabytes", w);

  addDoc("The Art of Computer Science", w);
  addDoc("J. K. Rowling", w);
  addDoc("JK Rowling", w);
  addDoc("Joanne K Roling", w);
  addDoc("Bruce Willis", w);
  addDoc("Willis bruce", w);
  addDoc("Brute willis", w);
  addDoc("B. willis", w);

  IndexReader r = w.getReader();
  w.close();

  Query q = new FuzzyQuery(new Term("field", "giga"), 0);

  // 3. search
  IndexSearcher searcher = newSearcher(r);
  ScoreDoc[] hits = searcher.search(q, 10).scoreDocs;
  assertEquals(1, hits.length);
  assertEquals("Giga byte", searcher.doc(hits[0].doc).get("field"));

  r.close();
  w.close();
  index.close();
}
Example 12
Source File: TestPrefixCompletionQuery.java (from lucene-solr, Apache License 2.0)
public void testAnalyzerDefaults() throws Exception {
  Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET);
  CompletionAnalyzer completionAnalyzer = new CompletionAnalyzer(analyzer);
  final String field = getTestName();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(completionAnalyzer, field));
  Document document = new Document();
  document.add(new SuggestField(field, "foobar", 7));
  document.add(new SuggestField(field, "foo bar", 8));
  document.add(new SuggestField(field, "the fo", 9));
  document.add(new SuggestField(field, "the foo bar", 10));
  document.add(new SuggestField(field, "foo the bar", 11)); // middle stopword
  document.add(new SuggestField(field, "baz the", 12)); // trailing stopword
  iw.addDocument(document);

  DirectoryReader reader = iw.getReader();
  SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader);
  CompletionQuery query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "fo"));
  TopSuggestDocs suggest = indexSearcher.suggest(query, 9, false); // matches all with "fo*"
  assertSuggestions(suggest, new Entry("foo the bar", 11), new Entry("foo bar", 8), new Entry("foobar", 7));

  // with leading stopword
  query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "the fo")); // becomes "_ fo*"
  suggest = indexSearcher.suggest(query, 9, false);
  assertSuggestions(suggest, new Entry("the foo bar", 10), new Entry("the fo", 9));

  // with middle stopword
  query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "foo the bar")); // becomes "foo _ bar*"
  suggest = indexSearcher.suggest(query, 9, false);
  assertSuggestions(suggest, new Entry("foo the bar", 11));

  // no space
  query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "foob"));
  suggest = indexSearcher.suggest(query, 9, false);
  assertSuggestions(suggest, new Entry("foobar", 7));

  // surrounding stopwords
  query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "the baz the")); // becomes "_ baz _"
  suggest = indexSearcher.suggest(query, 4, false);
  assertSuggestions(suggest);

  reader.close();
  iw.close();
}
Example 13
Source File: TestRangeFacetCounts.java (from lucene-solr, Apache License 2.0)
public void testBasicDouble() throws Exception {
  Directory d = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), d);
  Document doc = new Document();
  DoubleDocValuesField field = new DoubleDocValuesField("field", 0.0);
  doc.add(field);
  for (long l = 0; l < 100; l++) {
    field.setDoubleValue(l);
    w.addDocument(doc);
  }

  IndexReader r = w.getReader();

  FacetsCollector fc = new FacetsCollector();

  IndexSearcher s = newSearcher(r);
  s.search(new MatchAllDocsQuery(), fc);
  Facets facets = new DoubleRangeFacetCounts("field", fc,
      new DoubleRange("less than 10", 0.0, true, 10.0, false),
      new DoubleRange("less than or equal to 10", 0.0, true, 10.0, true),
      new DoubleRange("over 90", 90.0, false, 100.0, false),
      new DoubleRange("90 or above", 90.0, true, 100.0, false),
      new DoubleRange("over 1000", 1000.0, false, Double.POSITIVE_INFINITY, false));

  assertEquals("dim=field path=[] value=21 childCount=5\n  less than 10 (10)\n  less than or equal to 10 (11)\n  over 90 (9)\n  90 or above (10)\n  over 1000 (0)\n",
      facets.getTopChildren(10, "field").toString());

  w.close();
  IOUtils.close(r, d);
}
Example 14
Source File: TestUnifiedHighlighterTermVec.java (from lucene-solr, Apache License 2.0)
@Test(expected = IllegalArgumentException.class)
public void testUserFailedToIndexOffsets() throws IOException {
  FieldType fieldType = new FieldType(UHTestHelper.tvType); // note: it's indexed too
  fieldType.setStoreTermVectorPositions(random().nextBoolean());
  fieldType.setStoreTermVectorOffsets(false);

  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, indexAnalyzer);
  Document doc = new Document();
  doc.add(new Field("body", "term vectors", fieldType));
  iw.addDocument(doc);
  IndexReader ir = iw.getReader();
  iw.close();

  IndexSearcher searcher = newSearcher(ir);
  UnifiedHighlighter highlighter = new UnifiedHighlighter(searcher, indexAnalyzer) {
    @Override
    protected Set<HighlightFlag> getFlags(String field) {
      return Collections.emptySet(); // no WEIGHT_MATCHES
    }
  };
  TermQuery query = new TermQuery(new Term("body", "vectors"));
  TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  try {
    highlighter.highlight("body", query, topDocs, 1); // should throw
  } finally {
    ir.close();
  }
}
Example 15
Source File: TestFieldCache.java (from lucene-solr, Apache License 2.0)
public void testLongFieldCache() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig cfg = newIndexWriterConfig(new MockAnalyzer(random()));
  cfg.setMergePolicy(newLogMergePolicy());
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, cfg);
  Document doc = new Document();
  LongPoint field = new LongPoint("f", 0L);
  StoredField field2 = new StoredField("f", 0L);
  doc.add(field);
  doc.add(field2);
  final long[] values = new long[TestUtil.nextInt(random(), 1, 10)];
  Set<Integer> missing = new HashSet<>();
  for (int i = 0; i < values.length; ++i) {
    final long v;
    switch (random().nextInt(10)) {
      case 0:
        v = Long.MIN_VALUE;
        break;
      case 1:
        v = 0;
        break;
      case 2:
        v = Long.MAX_VALUE;
        break;
      default:
        v = TestUtil.nextLong(random(), -10, 10);
        break;
    }
    values[i] = v;
    if (v == 0 && random().nextBoolean()) {
      // missing
      iw.addDocument(new Document());
      missing.add(i);
    } else {
      field.setLongValue(v);
      field2.setLongValue(v);
      iw.addDocument(doc);
    }
  }
  iw.forceMerge(1);
  final DirectoryReader reader = iw.getReader();
  final NumericDocValues longs = FieldCache.DEFAULT.getNumerics(getOnlyLeafReader(reader), "f", FieldCache.LONG_POINT_PARSER);
  for (int i = 0; i < values.length; ++i) {
    if (missing.contains(i) == false) {
      assertEquals(i, longs.nextDoc());
      assertEquals(values[i], longs.longValue());
    }
  }
  assertEquals(NO_MORE_DOCS, longs.nextDoc());
  reader.close();
  iw.close();
  dir.close();
}
Example 16
Source File: TestLatLonShape.java (from lucene-solr, Apache License 2.0)
/** test we can search for a point with a standard number of vertices */
public void testBasicIntersects() throws Exception {
  int numVertices = TestUtil.nextInt(random(), 50, 100);
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

  // add a random polygon document
  Polygon p = GeoTestUtil.createRegularPolygon(0, 90, atLeast(1000000), numVertices);
  Document document = new Document();
  addPolygonsToDoc(FIELDNAME, document, p);
  writer.addDocument(document);

  // add a line document
  document = new Document();
  // add a line string
  double lats[] = new double[p.numPoints() - 1];
  double lons[] = new double[p.numPoints() - 1];
  for (int i = 0; i < lats.length; ++i) {
    lats[i] = p.getPolyLat(i);
    lons[i] = p.getPolyLon(i);
  }
  Line l = new Line(lats, lons);
  addLineToDoc(FIELDNAME, document, l);
  writer.addDocument(document);

  ////// search /////
  // search an intersecting bbox
  IndexReader reader = writer.getReader();
  writer.close();
  IndexSearcher searcher = newSearcher(reader);
  double minLat = Math.min(lats[0], lats[1]);
  double minLon = Math.min(lons[0], lons[1]);
  double maxLat = Math.max(lats[0], lats[1]);
  double maxLon = Math.max(lons[0], lons[1]);
  Query q = newRectQuery(FIELDNAME, minLat, maxLat, minLon, maxLon);
  assertEquals(2, searcher.count(q));

  // search a disjoint bbox
  q = newRectQuery(FIELDNAME, p.minLat - 1d, p.minLat + 1, p.minLon - 1d, p.minLon + 1d);
  assertEquals(0, searcher.count(q));

  IOUtils.close(reader, dir);
}
Example 17
Source File: TestLongDistanceFeatureQuery.java (from lucene-solr, Apache License 2.0)
public void testOverUnderFlow() throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir,
      newIndexWriterConfig().setMergePolicy(newLogMergePolicy(random().nextBoolean())));
  Document doc = new Document();
  LongPoint point = new LongPoint("foo", 0L);
  doc.add(point);
  NumericDocValuesField docValue = new NumericDocValuesField("foo", 0L);
  doc.add(docValue);

  point.setLongValue(3);
  docValue.setLongValue(3);
  w.addDocument(doc);

  point.setLongValue(12);
  docValue.setLongValue(12);
  w.addDocument(doc);

  point.setLongValue(-10);
  docValue.setLongValue(-10);
  w.addDocument(doc);

  point.setLongValue(Long.MAX_VALUE);
  docValue.setLongValue(Long.MAX_VALUE);
  w.addDocument(doc);

  point.setLongValue(Long.MIN_VALUE);
  docValue.setLongValue(Long.MIN_VALUE);
  w.addDocument(doc);

  DirectoryReader reader = w.getReader();
  IndexSearcher searcher = newSearcher(reader);

  Query q = LongPoint.newDistanceFeatureQuery("foo", 3, Long.MAX_VALUE - 1, 100);
  TopScoreDocCollector collector = TopScoreDocCollector.create(2, null, 1);
  searcher.search(q, collector);
  TopDocs topHits = collector.topDocs();
  assertEquals(2, topHits.scoreDocs.length);
  CheckHits.checkEqual(q,
      new ScoreDoc[] {
          new ScoreDoc(3, (float) (3f * (100. / (100. + 1.)))),
          new ScoreDoc(0, (float) (3f * (100. / (100. + Long.MAX_VALUE)))) // rounding makes the distance treated as if it was MAX_VALUE
      },
      topHits.scoreDocs);

  q = LongPoint.newDistanceFeatureQuery("foo", 3, Long.MIN_VALUE + 1, 100);
  collector = TopScoreDocCollector.create(2, null, 1);
  searcher.search(q, collector);
  topHits = collector.topDocs();
  assertEquals(2, topHits.scoreDocs.length);
  CheckHits.checkExplanations(q, "", searcher);
  CheckHits.checkEqual(q,
      new ScoreDoc[] {
          new ScoreDoc(4, (float) (3f * (100. / (100. + 1.)))),
          new ScoreDoc(0, (float) (3f * (100. / (100. + Long.MAX_VALUE)))) // rounding makes the distance treated as if it was MAX_VALUE
      },
      topHits.scoreDocs);

  reader.close();
  w.close();
  dir.close();
}
Example 18
Source File: TestPositionIncrement.java (from lucene-solr, Apache License 2.0)
public void testPayloadsPos0() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, new MockPayloadAnalyzer());
  Document doc = new Document();
  doc.add(new TextField("content", new StringReader("a a b c d e a f g h i j a b k k")));
  writer.addDocument(doc);

  final IndexReader readerFromWriter = writer.getReader();
  LeafReader r = getOnlyLeafReader(readerFromWriter);

  PostingsEnum tp = r.postings(new Term("content", "a"), PostingsEnum.ALL);

  int count = 0;
  assertTrue(tp.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  // "a" occurs 4 times
  assertEquals(4, tp.freq());
  assertEquals(0, tp.nextPosition());
  assertEquals(1, tp.nextPosition());
  assertEquals(3, tp.nextPosition());
  assertEquals(6, tp.nextPosition());

  // only one doc has "a"
  assertEquals(DocIdSetIterator.NO_MORE_DOCS, tp.nextDoc());

  IndexSearcher is = newSearcher(getOnlyLeafReader(readerFromWriter));

  SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a"));
  SpanTermQuery stq2 = new SpanTermQuery(new Term("content", "k"));
  SpanQuery[] sqs = { stq1, stq2 };
  SpanNearQuery snq = new SpanNearQuery(sqs, 30, false);

  count = 0;
  boolean sawZero = false;
  if (VERBOSE) {
    System.out.println("\ngetPayloadSpans test");
  }
  PayloadSpanCollector collector = new PayloadSpanCollector();
  Spans pspans = snq.createWeight(is, ScoreMode.COMPLETE_NO_SCORES, 1f)
      .getSpans(is.getIndexReader().leaves().get(0), SpanWeight.Postings.PAYLOADS);
  while (pspans.nextDoc() != Spans.NO_MORE_DOCS) {
    while (pspans.nextStartPosition() != Spans.NO_MORE_POSITIONS) {
      if (VERBOSE) {
        System.out.println("doc " + pspans.docID() + ": span " + pspans.startPosition() + " to " + pspans.endPosition());
      }
      collector.reset();
      pspans.collect(collector);
      sawZero |= pspans.startPosition() == 0;
      for (BytesRef payload : collector.payloads) {
        count++;
        if (VERBOSE) {
          System.out.println(" payload: " + Term.toString(payload));
        }
      }
    }
  }
  assertTrue(sawZero);
  assertEquals(8, count);

  // System.out.println("\ngetSpans test");
  Spans spans = snq.createWeight(is, ScoreMode.COMPLETE_NO_SCORES, 1f)
      .getSpans(is.getIndexReader().leaves().get(0), SpanWeight.Postings.POSITIONS);
  count = 0;
  sawZero = false;
  while (spans.nextDoc() != Spans.NO_MORE_DOCS) {
    while (spans.nextStartPosition() != Spans.NO_MORE_POSITIONS) {
      count++;
      sawZero |= spans.startPosition() == 0;
      // System.out.println(spans.doc() + " - " + spans.start() + " - " +
      // spans.end());
    }
  }
  assertEquals(4, count);
  assertTrue(sawZero);

  writer.close();
  is.getIndexReader().close();
  dir.close();
}
Example 19
Source File: TestTimeLimitingCollector.java (from lucene-solr, Apache License 2.0)
/**
 * initializes searcher with a document set
 */
@Override
public void setUp() throws Exception {
  super.setUp();
  counter = Counter.newCounter(true);
  counterThread = new TimerThread(counter);
  counterThread.start();

  final String docText[] = {
      "docThatNeverMatchesSoWeCanRequireLastDocCollectedToBeGreaterThanZero",
      "one blah three",
      "one foo three multiOne",
      "one foobar three multiThree",
      "blueberry pancakes",
      "blueberry pie",
      "blueberry strudel",
      "blueberry pizza",
  };
  directory = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), directory,
      newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));

  for (int i = 0; i < N_DOCS; i++) {
    add(docText[i % docText.length], iw);
  }
  reader = iw.getReader();
  iw.close();
  searcher = newSearcher(reader);

  BooleanQuery.Builder booleanQuery = new BooleanQuery.Builder();
  booleanQuery.add(new TermQuery(new Term(FIELD_NAME, "one")), BooleanClause.Occur.SHOULD);
  // start from 1, so that the 0th doc never matches
  for (int i = 1; i < docText.length; i++) {
    String[] docTextParts = docText[i].split("\\s+");
    for (String docTextPart : docTextParts) {
      // large query so that search will be longer
      booleanQuery.add(new TermQuery(new Term(FIELD_NAME, docTextPart)), BooleanClause.Occur.SHOULD);
    }
  }
  query = booleanQuery.build();

  // warm the searcher
  searcher.search(query, 1000);
}
Example 20
Source File: TestCachingTokenFilter.java (from lucene-solr, Apache License 2.0)
public void testCaching() throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  AtomicInteger resetCount = new AtomicInteger(0);
  TokenStream stream = new TokenStream() {
    private int index = 0;
    private CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
    private OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);

    @Override
    public void reset() throws IOException {
      super.reset();
      resetCount.incrementAndGet();
    }

    @Override
    public boolean incrementToken() {
      if (index == tokens.length) {
        return false;
      } else {
        clearAttributes();
        termAtt.append(tokens[index++]);
        offsetAtt.setOffset(0, 0);
        return true;
      }
    }
  };

  stream = new CachingTokenFilter(stream);

  doc.add(new TextField("preanalyzed", stream));

  // 1) we consume all tokens twice before we add the doc to the index
  assertFalse(((CachingTokenFilter) stream).isCached());
  stream.reset();
  assertFalse(((CachingTokenFilter) stream).isCached());
  checkTokens(stream);
  stream.reset();
  checkTokens(stream);
  assertTrue(((CachingTokenFilter) stream).isCached());

  // 2) now add the document to the index and verify if all tokens are indexed
  //    don't reset the stream here, the DocumentWriter should do that implicitly
  writer.addDocument(doc);

  IndexReader reader = writer.getReader();
  PostingsEnum termPositions = MultiTerms.getTermPostingsEnum(reader, "preanalyzed", new BytesRef("term1"));
  assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  assertEquals(1, termPositions.freq());
  assertEquals(0, termPositions.nextPosition());

  termPositions = MultiTerms.getTermPostingsEnum(reader, "preanalyzed", new BytesRef("term2"));
  assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  assertEquals(2, termPositions.freq());
  assertEquals(1, termPositions.nextPosition());
  assertEquals(3, termPositions.nextPosition());

  termPositions = MultiTerms.getTermPostingsEnum(reader, "preanalyzed", new BytesRef("term3"));
  assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  assertEquals(1, termPositions.freq());
  assertEquals(2, termPositions.nextPosition());
  reader.close();
  writer.close();

  // 3) reset stream and consume tokens again
  stream.reset();
  checkTokens(stream);

  assertEquals(1, resetCount.get());

  dir.close();
}