Java Code Examples for org.apache.lucene.index.DirectoryReader#openIfChanged()
The following examples show how to use org.apache.lucene.index.DirectoryReader#openIfChanged().
You can go to the original project or source file by following the links above each example.
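DirectoryReader.openIfChanged() returns a new reader only when the index has changed since the given reader was opened; it returns null otherwise, and it never closes the old reader, so the caller must close the stale instance once the new one is in use. The sketch below illustrates that refresh-and-swap pattern before the real-world examples. It is a minimal sketch, not code from any of the projects listed here: the class name, index path, and synchronization policy are assumptions made for illustration.

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class ReaderRefreshSketch {
  private final Directory directory;
  private DirectoryReader reader;

  public ReaderRefreshSketch(String indexPath) throws IOException {
    // hypothetical index location; any Directory implementation works the same way
    this.directory = FSDirectory.open(Paths.get(indexPath));
    this.reader = DirectoryReader.open(directory);
  }

  /** Returns a searcher over the latest commit, reopening the reader only if the index changed. */
  public synchronized IndexSearcher acquireSearcher() throws IOException {
    // openIfChanged returns null when nothing changed, so the existing reader stays valid
    DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
    if (newReader != null) {
      reader.close(); // release the stale reader; the new one shares unchanged segments with it
      reader = newReader;
    }
    return new IndexSearcher(reader);
  }

  public synchronized void close() throws IOException {
    reader.close();
    directory.close();
  }
}

In production code, Lucene's SearcherManager (or the SearcherTaxonomyManager shown in Example 9) wraps this reopen logic with reference counting so that in-flight searches keep using the old reader until they finish.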
Example 1
Source File: TransienceManager.java From jstarcraft-core with Apache License 2.0

@Override
public IndexReader getReader() {
  try {
    if (changed.compareAndSet(true, false)) {
      this.writer.flush();
      DirectoryReader reader = DirectoryReader.openIfChanged(this.reader);
      if (reader != null) {
        this.reader.close();
        this.reader = reader;
      }
    }
    return this.reader;
  } catch (Exception exception) {
    throw new StorageException(exception);
  }
}
Example 2
Source File: LumongoSegment.java From lumongo with Apache License 2.0

private void openReaderIfChanges() throws IOException {
  DirectoryReader newDirectoryReader = DirectoryReader
      .openIfChanged(directoryReader, indexWriter, indexConfig.getIndexSettings().getApplyUncommittedDeletes());
  if (newDirectoryReader != null) {
    directoryReader = newDirectoryReader;
    QueryResultCache qrc = queryResultCache;
    if (qrc != null) {
      qrc.clear();
    }
  }

  DirectoryTaxonomyReader newone = TaxonomyReader.openIfChanged(taxoReader);
  if (newone != null) {
    taxoReader = newone;
  }
}
Example 3
Source File: IndexReplicationClientTest.java From lucene-solr with Apache License 2.0

@Override
public Boolean call() throws Exception {
  if (reader == null) {
    reader = DirectoryReader.open(indexDir);
    lastGeneration = reader.getIndexCommit().getGeneration();
  } else {
    DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
    assertNotNull("should not have reached here if no changes were made to the index", newReader);
    long newGeneration = newReader.getIndexCommit().getGeneration();
    assertTrue("expected newer generation; current=" + lastGeneration + " new=" + newGeneration,
        newGeneration > lastGeneration);
    reader.close();
    reader = newReader;
    lastGeneration = newGeneration;
    TestUtil.checkIndex(indexDir);
  }
  return null;
}
Example 4
Source File: LuceneTranslationMemory.java From modernmt with Apache License 2.0

protected synchronized IndexReader getIndexReader() throws IOException {
  if (this._indexReader == null) {
    this._indexReader = DirectoryReader.open(this.indexDirectory);
    this._indexReader.incRef();
    this._indexSearcher = new IndexSearcher(this._indexReader);
  } else {
    DirectoryReader reader = DirectoryReader.openIfChanged(this._indexReader);
    if (reader != null) {
      this._indexReader.close();
      this._indexReader = reader;
      this._indexReader.incRef();
      this._indexSearcher = new IndexSearcher(this._indexReader);
      this._indexSearcher.setSimilarity(analyzerFactory.createSimilarity());
    }
  }
  return this._indexReader;
}
Example 5
Source File: LuceneService.java From ml-blog with MIT License

private IndexSearcher getIndexSearcher() throws IOException {
  if (reader == null) {
    reader = DirectoryReader.open(directory);
  } else {
    DirectoryReader changeReader = DirectoryReader.openIfChanged(reader);
    if (changeReader != null) {
      reader.close();
      reader = changeReader;
    }
  }
  return new IndexSearcher(reader);
}
Example 6
Source File: MutatableActionTest.java From incubator-retired-blur with Apache License 2.0

private DirectoryReader commitAndReopen(DirectoryReader reader, IndexWriter writer) throws IOException {
  writer.commit();
  DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
  if (newReader == null) {
    throw new IOException("Should have new data.");
  }
  reader.close();
  return newReader;
}
Example 7
Source File: BlurIndexSimpleWriter.java From incubator-retired-blur with Apache License 2.0

private void commit() throws IOException {
  Tracer trace1 = Trace.trace("prepareCommit");
  BlurIndexWriter writer = getBlurIndexWriter();
  writer.prepareCommit();
  trace1.done();

  Tracer trace2 = Trace.trace("commit");
  writer.commit();
  trace2.done();

  Tracer trace3 = Trace.trace("index refresh");
  DirectoryReader currentReader = _indexReader.get();
  DirectoryReader newReader = DirectoryReader.openIfChanged(currentReader);
  if (newReader == null) {
    LOG.debug("Reader should be new after commit for table [{0}] shard [{1}].",
        _tableContext.getTable(), _shardContext.getShard());
  } else {
    DirectoryReader reader = wrap(newReader);
    checkForMemoryLeaks(reader, "BlurIndexSimpleWriter - reopen table [{0}] shard [{1}]");
    _indexRefreshWriteLock.lock();
    try {
      _indexReader.set(reader);
    } finally {
      _indexRefreshWriteLock.unlock();
    }
    _indexCloser.close(currentReader);
  }
  trace3.done();
}
Example 8
Source File: TestFieldCacheReopen.java From lucene-solr with Apache License 2.0

public void testFieldCacheReuseAfterReopen() throws Exception {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(new MockAnalyzer(random())).
          setMergePolicy(newLogMergePolicy(10))
  );
  Document doc = new Document();
  doc.add(new IntPoint("number", 17));
  writer.addDocument(doc);
  writer.commit();

  // Open reader1
  DirectoryReader r = DirectoryReader.open(dir);
  LeafReader r1 = getOnlyLeafReader(r);
  final NumericDocValues ints = FieldCache.DEFAULT.getNumerics(r1, "number", FieldCache.INT_POINT_PARSER);
  assertEquals(0, ints.nextDoc());
  assertEquals(17, ints.longValue());

  // Add new segment
  writer.addDocument(doc);
  writer.commit();

  // Reopen reader1 --> reader2
  DirectoryReader r2 = DirectoryReader.openIfChanged(r);
  assertNotNull(r2);
  r.close();
  LeafReader sub0 = r2.leaves().get(0).reader();
  final NumericDocValues ints2 = FieldCache.DEFAULT.getNumerics(sub0, "number", FieldCache.INT_POINT_PARSER);
  r2.close();
  assertEquals(0, ints2.nextDoc());
  assertEquals(17, ints2.longValue());
  writer.close();
  dir.close();
}
Example 9
Source File: SearcherTaxonomyManager.java From lucene-solr with Apache License 2.0

@Override
protected SearcherAndTaxonomy refreshIfNeeded(SearcherAndTaxonomy ref) throws IOException {
  // Must re-open searcher first, otherwise we may get a
  // new reader that references ords not yet known to the
  // taxonomy reader:
  final IndexReader r = ref.searcher.getIndexReader();
  final IndexReader newReader = DirectoryReader.openIfChanged((DirectoryReader) r);
  if (newReader == null) {
    return null;
  } else {
    DirectoryTaxonomyReader tr;
    try {
      tr = TaxonomyReader.openIfChanged(ref.taxonomyReader);
    } catch (Throwable t1) {
      try {
        IOUtils.close(newReader);
      } catch (Throwable t2) {
        t1.addSuppressed(t2);
      }
      throw t1;
    }
    if (tr == null) {
      ref.taxonomyReader.incRef();
      tr = ref.taxonomyReader;
    } else if (taxoWriter != null && taxoWriter.getTaxonomyEpoch() != taxoEpoch) {
      IOUtils.close(newReader, tr);
      throw new IllegalStateException(
          "DirectoryTaxonomyWriter.replaceTaxonomy was called, which is not allowed when using SearcherTaxonomyManager");
    }

    return new SearcherAndTaxonomy(SearcherManager.getSearcher(searcherFactory, newReader, r), tr);
  }
}
Example 10
Source File: IndexAndTaxonomyReplicationClientTest.java From lucene-solr with Apache License 2.0

@Override
public Boolean call() throws Exception {
  if (indexReader == null) {
    indexReader = DirectoryReader.open(indexDir);
    lastIndexGeneration = indexReader.getIndexCommit().getGeneration();
    taxoReader = new DirectoryTaxonomyReader(taxoDir);
  } else {
    // verify search index
    DirectoryReader newReader = DirectoryReader.openIfChanged(indexReader);
    assertNotNull("should not have reached here if no changes were made to the index", newReader);
    long newGeneration = newReader.getIndexCommit().getGeneration();
    assertTrue("expected newer generation; current=" + lastIndexGeneration + " new=" + newGeneration,
        newGeneration > lastIndexGeneration);
    indexReader.close();
    indexReader = newReader;
    lastIndexGeneration = newGeneration;
    TestUtil.checkIndex(indexDir);

    // verify taxonomy index
    DirectoryTaxonomyReader newTaxoReader = TaxonomyReader.openIfChanged(taxoReader);
    if (newTaxoReader != null) {
      taxoReader.close();
      taxoReader = newTaxoReader;
    }
    TestUtil.checkIndex(taxoDir);

    // verify faceted search
    int id = Integer.parseInt(indexReader.getIndexCommit().getUserData().get(VERSION_ID), 16);
    IndexSearcher searcher = new IndexSearcher(indexReader);
    FacetsCollector fc = new FacetsCollector();
    searcher.search(new MatchAllDocsQuery(), fc);
    Facets facets = new FastTaxonomyFacetCounts(taxoReader, config, fc);
    assertEquals(1, facets.getSpecificValue("A", Integer.toString(id, 16)).intValue());

    DrillDownQuery drillDown = new DrillDownQuery(config);
    drillDown.add("A", Integer.toString(id, 16));
    TopDocs docs = searcher.search(drillDown, 10);
    assertEquals(1, docs.totalHits.value);
  }
  return null;
}
Example 11
Source File: SpatialTestCase.java From lucene-solr with Apache License 2.0

protected void commit() throws IOException {
  indexWriter.commit();
  DirectoryReader newReader = DirectoryReader.openIfChanged(indexReader);
  if (newReader != null) {
    IOUtils.close(indexReader);
    indexReader = newReader;
  }
  indexSearcher = newSearcher(indexReader);
}
Example 12
Source File: ReopenReaderTask.java From lucene-solr with Apache License 2.0

@Override
public int doLogic() throws IOException {
  DirectoryReader r = getRunData().getIndexReader();
  DirectoryReader nr = DirectoryReader.openIfChanged(r);
  if (nr != null) {
    getRunData().setIndexReader(nr);
    nr.decRef();
  }
  r.decRef();
  return 1;
}
Example 13
Source File: DirectoryTaxonomyReader.java From lucene-solr with Apache License 2.0

/**
 * Implements the opening of a new {@link DirectoryTaxonomyReader} instance if
 * the taxonomy has changed.
 *
 * <p>
 * <b>NOTE:</b> the returned {@link DirectoryTaxonomyReader} shares the
 * ordinal and category caches with this reader. This is not expected to cause
 * any issues, unless the two instances continue to live. The reader
 * guarantees that the two instances cannot affect each other in terms of
 * correctness of the caches, however if the size of the cache is changed
 * through {@link #setCacheSize(int)}, it will affect both reader instances.
 */
@Override
protected DirectoryTaxonomyReader doOpenIfChanged() throws IOException {
  ensureOpen();

  // This works for both NRT and non-NRT readers (i.e. an NRT reader remains NRT).
  final DirectoryReader r2 = DirectoryReader.openIfChanged(indexReader);
  if (r2 == null) {
    return null; // no changes, nothing to do
  }

  // check if the taxonomy was recreated
  boolean success = false;
  try {
    boolean recreated = false;
    if (taxoWriter == null) {
      // not NRT, check epoch from commit data
      String t1 = indexReader.getIndexCommit().getUserData().get(DirectoryTaxonomyWriter.INDEX_EPOCH);
      String t2 = r2.getIndexCommit().getUserData().get(DirectoryTaxonomyWriter.INDEX_EPOCH);
      if (t1 == null) {
        if (t2 != null) {
          recreated = true;
        }
      } else if (!t1.equals(t2)) {
        // t1 != null and t2 must not be null b/c DirTaxoWriter always puts the commit data.
        // it's ok to use String.equals because we require the two epoch values to be the same.
        recreated = true;
      }
    } else {
      // NRT, compare current taxoWriter.epoch() vs the one that was given at construction
      if (taxoEpoch != taxoWriter.getTaxonomyEpoch()) {
        recreated = true;
      }
    }

    final DirectoryTaxonomyReader newtr;
    if (recreated) {
      // if recreated, do not reuse anything from this instance. the information
      // will be lazily computed by the new instance when needed.
      newtr = new DirectoryTaxonomyReader(r2, taxoWriter, null, null, null);
    } else {
      newtr = new DirectoryTaxonomyReader(r2, taxoWriter, ordinalCache, categoryCache, taxoArrays);
    }

    success = true;
    return newtr;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(r2);
    }
  }
}
Example 14
Source File: TestNRTCachingDirectory.java From lucene-solr with Apache License 2.0

public void testNRTAndCommit() throws Exception {
  Directory dir = newDirectory();
  NRTCachingDirectory cachedDir = new NRTCachingDirectory(dir, 2.0, 25.0);
  MockAnalyzer analyzer = new MockAnalyzer(random());
  analyzer.setMaxTokenLength(TestUtil.nextInt(random(), 1, IndexWriter.MAX_TERM_LENGTH));
  IndexWriterConfig conf = newIndexWriterConfig(analyzer);
  RandomIndexWriter w = new RandomIndexWriter(random(), cachedDir, conf);
  final LineFileDocs docs = new LineFileDocs(random());
  final int numDocs = TestUtil.nextInt(random(), 100, 400);

  if (VERBOSE) {
    System.out.println("TEST: numDocs=" + numDocs);
  }

  final List<BytesRef> ids = new ArrayList<>();
  DirectoryReader r = null;
  for (int docCount = 0; docCount < numDocs; docCount++) {
    final Document doc = docs.nextDoc();
    ids.add(new BytesRef(doc.get("docid")));
    w.addDocument(doc);
    if (random().nextInt(20) == 17) {
      if (r == null) {
        r = DirectoryReader.open(w.w);
      } else {
        final DirectoryReader r2 = DirectoryReader.openIfChanged(r);
        if (r2 != null) {
          r.close();
          r = r2;
        }
      }
      assertEquals(1 + docCount, r.numDocs());
      final IndexSearcher s = newSearcher(r);
      // Just make sure search can run; we can't assert
      // totHits since it could be 0
      TopDocs hits = s.search(new TermQuery(new Term("body", "the")), 10);
      // System.out.println("tot hits " + hits.totalHits);
    }
  }
  if (r != null) {
    r.close();
  }

  // Close should force cache to clear since all files are sync'd
  w.close();

  final String[] cachedFiles = cachedDir.listCachedFiles();
  for (String file : cachedFiles) {
    System.out.println("FAIL: cached file " + file + " remains after sync");
  }
  assertEquals(0, cachedFiles.length);

  r = DirectoryReader.open(dir);
  for (BytesRef id : ids) {
    assertEquals(1, r.docFreq(new Term("docid", id)));
  }
  r.close();

  cachedDir.close();
  docs.close();
}
Example 15
Source File: HttpReplicatorTest.java From lucene-solr with Apache License 2.0

private void reopenReader() throws IOException {
  DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
  assertNotNull(newReader);
  reader.close();
  reader = newReader;
}
Example 16
Source File: NearRealtimeReaderTask.java From lucene-solr with Apache License 2.0

@Override
public int doLogic() throws Exception {
  final PerfRunData runData = getRunData();

  // Get initial reader
  IndexWriter w = runData.getIndexWriter();
  if (w == null) {
    throw new RuntimeException("please open the writer before invoking NearRealtimeReader");
  }
  if (runData.getIndexReader() != null) {
    throw new RuntimeException("please close the existing reader before invoking NearRealtimeReader");
  }

  long t = System.currentTimeMillis();
  DirectoryReader r = DirectoryReader.open(w);
  runData.setIndexReader(r);
  // Transfer our reference to runData
  r.decRef();

  // TODO: gather basic metrics for reporting -- eg mean,
  // stddev, min/max reopen latencies

  // Parent sequence sets stopNow
  reopenCount = 0;
  while (!stopNow) {
    long waitForMsec = (pauseMSec - (System.currentTimeMillis() - t));
    if (waitForMsec > 0) {
      Thread.sleep(waitForMsec);
      //System.out.println("NRT wait: " + waitForMsec + " msec");
    }

    t = System.currentTimeMillis();
    final DirectoryReader newReader = DirectoryReader.openIfChanged(r);
    if (newReader != null) {
      final int delay = (int) (System.currentTimeMillis() - t);
      if (reopenTimes.length == reopenCount) {
        reopenTimes = ArrayUtil.grow(reopenTimes, 1 + reopenCount);
      }
      reopenTimes[reopenCount++] = delay;
      // TODO: somehow we need to enable warming, here
      runData.setIndexReader(newReader);
      // Transfer our reference to runData
      newReader.decRef();
      r = newReader;
    }
  }
  stopNow = false;

  return reopenCount;
}
Example 17
Source File: BaseDirectoryTestSuite.java From incubator-retired-blur with Apache License 2.0

@Test
public void testCreateIndex() throws IOException {
  long s = System.nanoTime();
  IndexWriterConfig conf = new IndexWriterConfig(LuceneVersionConstant.LUCENE_VERSION, new KeywordAnalyzer());
  IndexDeletionPolicyReader indexDeletionPolicy = new IndexDeletionPolicyReader(
      new KeepOnlyLastCommitDeletionPolicy());
  conf.setIndexDeletionPolicy(indexDeletionPolicy);
  FSDirectory control = FSDirectory.open(fileControl);
  Directory dir = getControlDir(control, directory);
  // The serial merge scheduler can be useful for debugging.
  // conf.setMergeScheduler(new SerialMergeScheduler());
  IndexWriter writer = new IndexWriter(dir, conf);
  int numDocs = 1000;
  DirectoryReader reader = null;
  long gen = 0;
  for (int i = 0; i < 100; i++) {
    if (reader == null) {
      reader = DirectoryReader.open(writer, true);
      gen = reader.getIndexCommit().getGeneration();
      indexDeletionPolicy.register(gen);
    } else {
      DirectoryReader old = reader;
      reader = DirectoryReader.openIfChanged(old, writer, true);
      if (reader == null) {
        reader = old;
      } else {
        long newGen = reader.getIndexCommit().getGeneration();
        indexDeletionPolicy.register(newGen);
        indexDeletionPolicy.unregister(gen);
        old.close();
        gen = newGen;
      }
    }
    assertEquals(i * numDocs, reader.numDocs());
    IndexSearcher searcher = new IndexSearcher(reader);
    NumericRangeQuery<Integer> query = NumericRangeQuery.newIntRange("id", 42, 42, true, true);
    TopDocs topDocs = searcher.search(query, 10);
    assertEquals(i, topDocs.totalHits);
    addDocuments(writer, numDocs);
  }
  writer.close(false);
  reader.close();
  long e = System.nanoTime();
  System.out.println("Total time [" + (e - s) / 1000000.0 + " ms]");
}