Java Code Examples for org.apache.lucene.index.DirectoryReader#open()
The following examples show how to use org.apache.lucene.index.DirectoryReader#open().
Each example is drawn from an open-source project; the source file, project, and license are noted above the code.
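Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: open a DirectoryReader over a Directory, wrap it in an IndexSearcher, and close the reader when done. The index path, field name, and search term below are placeholders, not values taken from any example on this page.

import java.nio.file.Paths;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class DirectoryReaderOpenSketch {
  public static void main(String[] args) throws Exception {
    // "/path/to/index" is a placeholder for an existing Lucene index.
    Directory directory = FSDirectory.open(Paths.get("/path/to/index"));
    DirectoryReader reader = DirectoryReader.open(directory);
    try {
      IndexSearcher searcher = new IndexSearcher(reader);
      // "fieldname" and "text" are hypothetical field and term values.
      ScoreDoc[] hits = searcher.search(new TermQuery(new Term("fieldname", "text")), 10).scoreDocs;
      for (ScoreDoc hit : hits) {
        Document doc = searcher.doc(hit.doc);
        System.out.println(doc);
      }
    } finally {
      reader.close(); // a DirectoryReader holds index files open until closed
      directory.close();
    }
  }
}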
Example 1
Source File: AssociationsFacetsExample.java From lucene-solr with Apache License 2.0
/** User runs a query and aggregates facets by summing their association values. */
private List<FacetResult> sumAssociations() throws IOException {
  DirectoryReader indexReader = DirectoryReader.open(indexDir);
  IndexSearcher searcher = new IndexSearcher(indexReader);
  TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);

  FacetsCollector fc = new FacetsCollector();

  // MatchAllDocsQuery is for "browsing" (counts facets
  // for all non-deleted docs in the index); normally
  // you'd use a "normal" query:
  FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, fc);

  Facets tags = new TaxonomyFacetSumIntAssociations("$tags", taxoReader, config, fc);
  Facets genre = new TaxonomyFacetSumFloatAssociations("$genre", taxoReader, config, fc);

  // Retrieve results
  List<FacetResult> results = new ArrayList<>();
  results.add(tags.getTopChildren(10, "tags"));
  results.add(genre.getTopChildren(10, "genre"));

  indexReader.close();
  taxoReader.close();

  return results;
}
Example 2
Source File: HdfsDirectorySymlinkTest.java From incubator-retired-blur with Apache License 2.0
@Test
public void testSymlinkWithIndexes() throws IOException {
  HdfsDirectory dir1 = new HdfsDirectory(_configuration, new Path(_base, "dir1"));
  IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new KeywordAnalyzer());
  IndexWriter writer1 = new IndexWriter(dir1, conf.clone());
  writer1.addDocument(getDoc());
  writer1.close();

  HdfsDirectory dir2 = new HdfsDirectory(_configuration, new Path(_base, "dir2"));
  IndexWriter writer2 = new IndexWriter(dir2, conf.clone());
  writer2.addIndexes(dir1);
  writer2.close();

  DirectoryReader reader1 = DirectoryReader.open(dir1);
  DirectoryReader reader2 = DirectoryReader.open(dir2);

  assertEquals(1, reader1.maxDoc());
  assertEquals(1, reader2.maxDoc());
  assertEquals(1, reader1.numDocs());
  assertEquals(1, reader2.numDocs());

  Document document1 = reader1.document(0);
  Document document2 = reader2.document(0);

  assertEquals(document1.get("id"), document2.get("id"));
}
Example 3
Source File: AddIndexesTask.java From lucene-solr with Apache License 2.0
@Override
public int doLogic() throws Exception {
  IndexWriter writer = getRunData().getIndexWriter();
  if (useAddIndexesDir) {
    writer.addIndexes(inputDir);
  } else {
    try (IndexReader r = DirectoryReader.open(inputDir)) {
      CodecReader[] leaves = new CodecReader[r.leaves().size()];
      int i = 0;
      for (LeafReaderContext leaf : r.leaves()) {
        leaves[i++] = SlowCodecReaderWrapper.wrap(leaf.reader());
      }
      writer.addIndexes(leaves);
    }
  }
  return 1;
}
Example 4
Source File: RangeFacetsExample.java From lucene-solr with Apache License 2.0
/** Build the example index. */
public void index() throws IOException {
  IndexWriter indexWriter = new IndexWriter(indexDir,
      new IndexWriterConfig(new WhitespaceAnalyzer()).setOpenMode(OpenMode.CREATE));

  // Add documents with a fake timestamp, 1000 sec before
  // "now", 2000 sec before "now", ...:
  for (int i = 0; i < 100; i++) {
    Document doc = new Document();
    long then = nowSec - i * 1000;
    // Add as doc values field, so we can compute range facets:
    doc.add(new NumericDocValuesField("timestamp", then));
    // Add as numeric field so we can drill-down:
    doc.add(new LongPoint("timestamp", then));
    indexWriter.addDocument(doc);
  }

  // Open near-real-time searcher
  searcher = new IndexSearcher(DirectoryReader.open(indexWriter));
  indexWriter.close();
}
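Note that Example 4 passes the IndexWriter itself, rather than a Directory, to DirectoryReader.open(). That overload returns a near-real-time reader, which also sees documents that have been added but not yet committed. A minimal sketch of the difference, using an in-memory directory (class name is ours, not from the example):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class NearRealTimeSketch {
  public static void main(String[] args) throws Exception {
    Directory dir = new ByteBuffersDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));

    writer.addDocument(new Document()); // added, but not committed

    // Opening from the writer yields a near-real-time view:
    DirectoryReader nrtReader = DirectoryReader.open(writer);
    System.out.println(nrtReader.numDocs()); // prints 1, despite no commit

    nrtReader.close();
    writer.close();
    dir.close();
  }
}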
Example 5
Source File: TestControlledRealTimeReopenThread.java From lucene-solr with Apache License 2.0
public void testEvilSearcherFactory() throws Exception {
  final Directory dir = newDirectory();
  final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  w.commit();

  final IndexReader other = DirectoryReader.open(dir);

  final SearcherFactory theEvilOne = new SearcherFactory() {
    @Override
    public IndexSearcher newSearcher(IndexReader ignored, IndexReader previous) {
      return LuceneTestCase.newSearcher(other);
    }
  };

  expectThrows(IllegalStateException.class, () -> {
    new SearcherManager(w.w, false, false, theEvilOne);
  });

  w.close();
  other.close();
  dir.close();
}
Example 6
Source File: TestMultiTermQueryRewrites.java From lucene-solr with Apache License 2.0
@BeforeClass
public static void beforeClass() throws Exception {
  dir = newDirectory();
  sdir1 = newDirectory();
  sdir2 = newDirectory();
  final RandomIndexWriter writer = new RandomIndexWriter(random(), dir, new MockAnalyzer(random()));
  final RandomIndexWriter swriter1 = new RandomIndexWriter(random(), sdir1, new MockAnalyzer(random()));
  final RandomIndexWriter swriter2 = new RandomIndexWriter(random(), sdir2, new MockAnalyzer(random()));

  for (int i = 0; i < 10; i++) {
    Document doc = new Document();
    doc.add(newStringField("data", Integer.toString(i), Field.Store.NO));
    writer.addDocument(doc);
    ((i % 2 == 0) ? swriter1 : swriter2).addDocument(doc);
  }
  writer.forceMerge(1);
  swriter1.forceMerge(1);
  swriter2.forceMerge(1);
  writer.close();
  swriter1.close();
  swriter2.close();

  reader = DirectoryReader.open(dir);
  searcher = newSearcher(reader);

  multiReader = new MultiReader(new IndexReader[] {
      DirectoryReader.open(sdir1), DirectoryReader.open(sdir2)
  }, true);
  multiSearcher = newSearcher(multiReader);

  multiReaderDupls = new MultiReader(new IndexReader[] {
      DirectoryReader.open(sdir1), DirectoryReader.open(dir)
  }, true);
  multiSearcherDupls = newSearcher(multiReaderDupls);
}
Example 7
Source File: FilterCacheTest.java From incubator-retired-blur with Apache License 2.0
@Test
public void test1() throws IOException {
  Filter filter = new QueryWrapperFilter(new TermQuery(new Term("f1", "t1")));
  FilterCache filterCache = new FilterCache("filter1", filter);
  RAMDirectory directory = new RAMDirectory();
  writeDocs(filterCache, directory);

  DirectoryReader reader = DirectoryReader.open(directory);
  IndexSearcher searcher = new IndexSearcher(reader);
  Query query = new TermQuery(new Term("f2", "t2"));

  TopDocs topDocs1 = searcher.search(query, filterCache, 10);
  assertEquals(1, filterCache.getMisses());
  assertEquals(0, filterCache.getHits());
  assertEquals(1, topDocs1.totalHits);

  TopDocs topDocs2 = searcher.search(query, filterCache, 10);
  assertEquals(1, filterCache.getMisses());
  assertEquals(1, filterCache.getHits());
  assertEquals(1, topDocs2.totalHits);

  TopDocs topDocs3 = searcher.search(query, filterCache, 10);
  assertEquals(1, filterCache.getMisses());
  assertEquals(2, filterCache.getHits());
  assertEquals(1, topDocs3.totalHits);
}
Example 8
Source File: ExpressionAggregationFacetsExample.java From lucene-solr with Apache License 2.0
/** User runs a query and aggregates facets. */
private FacetResult search() throws IOException, ParseException {
  DirectoryReader indexReader = DirectoryReader.open(indexDir);
  IndexSearcher searcher = new IndexSearcher(indexReader);
  TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);

  // Aggregate categories by an expression that combines the document's score
  // and its popularity field
  Expression expr = JavascriptCompiler.compile("_score * sqrt(popularity)");
  SimpleBindings bindings = new SimpleBindings();
  bindings.add("_score", DoubleValuesSource.SCORES); // the score of the document
  bindings.add("popularity", DoubleValuesSource.fromLongField("popularity")); // the value of the 'popularity' field

  // Aggregates the facet values
  FacetsCollector fc = new FacetsCollector(true);

  // MatchAllDocsQuery is for "browsing" (counts facets
  // for all non-deleted docs in the index); normally
  // you'd use a "normal" query:
  FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, fc);

  // Retrieve results
  Facets facets = new TaxonomyFacetSumValueSource(taxoReader, config, fc, expr.getDoubleValuesSource(bindings));
  FacetResult result = facets.getTopChildren(10, "A");

  indexReader.close();
  taxoReader.close();

  return result;
}
Example 9
Source File: IndexBasedSpellChecker.java From lucene-solr with Apache License 2.0
private void initSourceReader() {
  if (sourceLocation != null) {
    try {
      FSDirectory luceneIndexDir = FSDirectory.open(new File(sourceLocation).toPath());
      this.reader = DirectoryReader.open(luceneIndexDir);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
}
Example 10
Source File: IndexReplicationClientTest.java From lucene-solr with Apache License 2.0
public IndexReadyCallback(Directory indexDir) throws IOException {
  this.indexDir = indexDir;
  if (DirectoryReader.indexExists(indexDir)) {
    reader = DirectoryReader.open(indexDir);
    lastGeneration = reader.getIndexCommit().getGeneration();
  }
}
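Example 10 guards open() with DirectoryReader.indexExists(), which avoids the IndexNotFoundException that open() throws when the directory holds no index yet. Once a reader is open, the usual way to pick up later changes is DirectoryReader.openIfChanged(); a small sketch of that refresh pattern (the helper class is ours, not from the example):

import java.io.IOException;

import org.apache.lucene.index.DirectoryReader;

final class ReaderRefresher {
  /** Returns a fresh reader if the index changed, else the reader passed in. */
  static DirectoryReader refresh(DirectoryReader reader) throws IOException {
    // openIfChanged() returns null when nothing changed; otherwise it returns
    // a new reader that shares unchanged segment readers with the old one.
    DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
    if (newReader == null) {
      return reader;
    }
    reader.close(); // the superseded reader still must be closed
    return newReader;
  }
}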
Example 11
Source File: OrderedLuceneBatchIteratorFactoryTest.java From crate with Apache License 2.0
@Before
public void prepareSearchers() throws Exception {
  IndexWriter iw1 = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig(new StandardAnalyzer()));
  IndexWriter iw2 = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig(new StandardAnalyzer()));

  expectedResult = LongStream.range(0, 20)
      .mapToObj(i -> new Object[]{i})
      .collect(Collectors.toList());
  // expect descending order to differentiate between insert order
  expectedResult.sort(Comparator.comparingLong((Object[] o) -> ((long) o[0])).reversed());

  for (int i = 0; i < 20; i++) {
    Document doc = new Document();
    doc.add(new NumericDocValuesField(columnName, i));
    if (i % 2 == 0) {
      iw1.addDocument(doc);
    } else {
      iw2.addDocument(doc);
    }
  }
  iw1.commit();
  iw2.commit();

  searcher1 = new IndexSearcher(DirectoryReader.open(iw1));
  searcher2 = new IndexSearcher(DirectoryReader.open(iw2));
  orderBy = new OrderBy(
      Collections.singletonList(reference),
      reverseFlags,
      nullsFirst
  );
}
Example 12
Source File: MtasSearchTestConsistency.java From mtas with Apache License 2.0
/**
 * Collect stats positions 1.
 *
 * @throws IOException Signals that an I/O exception has occurred.
 */
@org.junit.Test
public void collectStatsPositions1() throws IOException {
  // get total number of words
  IndexReader indexReader = DirectoryReader.open(directory);
  QueryResult queryResult = doQuery(indexReader, FIELD_CONTENT, "[]", null, null, null, false);
  indexReader.close();
  int averageNumberOfPositions = queryResult.hits / queryResult.docs;

  // do position query
  try {
    ArrayList<Integer> fullDocSet = docs;
    ComponentField fieldStats = new ComponentField(FIELD_ID);
    fieldStats.statsPositionList.add(new ComponentPosition("total", null, null, "all"));
    fieldStats.statsPositionList.add(new ComponentPosition("minimum",
        (double) (averageNumberOfPositions - 1), null, "n,sum,mean,min,max"));
    fieldStats.statsPositionList.add(new ComponentPosition("maximum", null,
        (double) averageNumberOfPositions, "sum"));
    Map<String, HashMap<String, Object>> response = doAdvancedSearch(fullDocSet, fieldStats);
    Map<String, Object> responseTotal = (Map<String, Object>) response.get("statsPositions").get("total");
    Map<String, Object> responseMinimum = (Map<String, Object>) response.get("statsPositions").get("minimum");
    Map<String, Object> responseMaximum = (Map<String, Object>) response.get("statsPositions").get("maximum");
    // each result is guarded by a null check on its own map
    Double total = responseTotal != null ? (Double) responseTotal.get("sum") : 0.0;
    Long totalMinimum = responseMinimum != null ? (Long) responseMinimum.get("sum") : 0L;
    Long totalMaximum = responseMaximum != null ? (Long) responseMaximum.get("sum") : 0L;
    assertEquals("Number of positions", total.longValue(), queryResult.hits);
    assertEquals("Minimum and maximum on number of positions",
        total.longValue(), totalMinimum + totalMaximum);
  } catch (mtas.parser.function.ParseException e) {
    log.error(e);
  }
}
Example 13
Source File: MtasSearchTestConsistency.java From mtas with Apache License 2.0
/**
 * Basic search preceded by 1.
 *
 * @throws IOException Signals that an I/O exception has occurred.
 */
@org.junit.Test
public void basicSearchPrecededBy1() throws IOException {
  String cql1 = "[pos=\"ADJ\"] precededby [pos=\"LID\"][]?";
  String cql2 = "[pos=\"LID\"][]?[pos=\"ADJ\"]";
  String cql3 = "[pos=\"LID\"][pos=\"LID\"][pos=\"ADJ\"]";
  // get total number
  IndexReader indexReader = DirectoryReader.open(directory);
  QueryResult queryResult1 = doQuery(indexReader, FIELD_CONTENT, cql1, null, null, null, false);
  QueryResult queryResult1disabled = doQuery(indexReader, FIELD_CONTENT, cql1, null, null, null, true);
  QueryResult queryResult2 = doQuery(indexReader, FIELD_CONTENT, cql2, null, null, null, false);
  QueryResult queryResult2disabled = doQuery(indexReader, FIELD_CONTENT, cql2, null, null, null, true);
  QueryResult queryResult3 = doQuery(indexReader, FIELD_CONTENT, cql3, null, null, null, false);
  QueryResult queryResult3disabled = doQuery(indexReader, FIELD_CONTENT, cql3, null, null, null, true);
  assertEquals("Adjective preceded by Article",
      queryResult1.hits, (long) queryResult2.hits - queryResult3.hits);
  assertEquals("Adjective preceded by Article - disabled twoPhaseIterator",
      queryResult1disabled.hits, (long) queryResult2disabled.hits - queryResult3disabled.hits);
  assertEquals("PrecededBy: twoPhaseIterator 1", queryResult1.hits, queryResult1disabled.hits);
  assertEquals("PrecededBy: twoPhaseIterator 2", queryResult2.hits, queryResult2disabled.hits);
  assertEquals("PrecededBy: twoPhaseIterator 3", queryResult3.hits, queryResult3disabled.hits);
  indexReader.close();
}
Example 14
Source File: SampleSearchDemo.java From elasticsearch-full with Apache License 2.0
public static void main(String[] args) throws IOException, ParseException {
  Analyzer analyzer = new StandardAnalyzer();
  Directory directory = FSDirectory.open(Paths.get("/Users/admin/lucene"));
  DirectoryReader ireader = DirectoryReader.open(directory);
  IndexSearcher indexSearcher = new IndexSearcher(ireader);

  QueryParser parser = new QueryParser("fieldname", analyzer);
  Query query = parser.parse("text");
  ScoreDoc[] hits = indexSearcher.search(query, 10, Sort.INDEXORDER).scoreDocs;
  for (int i = 0; i < hits.length; i++) {
    Document hitDoc = indexSearcher.doc(hits[i].doc);
    System.out.println(hitDoc.toString());
  }
  ireader.close();
  directory.close();
}
Example 15
Source File: Catalog.java From cxf with Apache License 2.0
@GET
@Produces(MediaType.APPLICATION_JSON)
@CrossOriginResourceSharing(allowAllOrigins = true)
@Path("/search")
public Response findBook(@Context SearchContext searchContext, @Context final UriInfo uri) throws IOException {
  final IndexReader reader = DirectoryReader.open(directory);
  final IndexSearcher searcher = new IndexSearcher(reader);
  final JsonArrayBuilder builder = Json.createArrayBuilder();

  try {
    visitor.reset();
    visitor.visit(searchContext.getCondition(SearchBean.class));

    final Query query = visitor.getQuery();
    if (query != null) {
      final TopDocs topDocs = searcher.search(query, 1000);
      for (final ScoreDoc scoreDoc : topDocs.scoreDocs) {
        final Document document = reader.document(scoreDoc.doc);
        final String source = document
            .getField(LuceneDocumentMetadata.SOURCE_FIELD)
            .stringValue();

        builder.add(
            Json.createObjectBuilder()
                .add("source", source)
                .add("score", scoreDoc.score)
                .add("url", uri.getBaseUriBuilder()
                    .path(Catalog.class)
                    .path(source)
                    .build().toString())
        );
      }
    }

    return Response.ok(builder.build()).build();
  } finally {
    reader.close();
  }
}
Example 16
Source File: FuzzyQueryDemo.java From elasticsearch-full with Apache License 2.0
public static void main(String[] args) throws IOException {
  Analyzer analyzer = new StandardAnalyzer();
  Directory directory = FSDirectory.open(Paths.get("/Users/admin/lucene"));
  DirectoryReader ireader = DirectoryReader.open(directory);
  IndexSearcher indexSearcher = new IndexSearcher(ireader);

  Term term = new Term("fieldname", "国");
  FuzzyQuery query = new FuzzyQuery(term);
  ScoreDoc[] hits = indexSearcher.search(query, 10, Sort.INDEXORDER).scoreDocs;
  for (int i = 0; i < hits.length; i++) {
    Document hitDoc = indexSearcher.doc(hits[i].doc);
    System.out.println(hitDoc.toString() + "," + hits[i].score);
  }
  ireader.close();
  directory.close();
}
Example 17
Source File: TestLRUQueryCache.java From lucene-solr with Apache License 2.0
public void testBulkScorerLocking() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig()
      .setMergePolicy(NoMergePolicy.INSTANCE)
      // the test framework sometimes sets crazy low values, prevent this since we are indexing many docs
      .setMaxBufferedDocs(-1);
  IndexWriter w = new IndexWriter(dir, iwc);
  final int numDocs = atLeast(10);
  Document emptyDoc = new Document();
  for (int d = 0; d < numDocs; ++d) {
    for (int i = random().nextInt(5000); i >= 0; --i) {
      w.addDocument(emptyDoc);
    }
    Document doc = new Document();
    for (String value : Arrays.asList("foo", "bar", "baz")) {
      if (random().nextBoolean()) {
        doc.add(new StringField("field", value, Store.NO));
      }
    }
  }
  for (int i = TestUtil.nextInt(random(), 3000, 5000); i >= 0; --i) {
    w.addDocument(emptyDoc);
  }
  if (random().nextBoolean()) {
    w.forceMerge(1);
  }

  DirectoryReader reader = DirectoryReader.open(w);
  DirectoryReader noCacheReader = new DummyDirectoryReader(reader);

  LRUQueryCache cache = new LRUQueryCache(1, 100000, context -> true, Float.POSITIVE_INFINITY);
  IndexSearcher searcher = new AssertingIndexSearcher(random(), reader);
  searcher.setQueryCache(cache);
  searcher.setQueryCachingPolicy(ALWAYS_CACHE);

  Query query = new ConstantScoreQuery(new BooleanQuery.Builder()
      .add(new BoostQuery(new TermQuery(new Term("field", "foo")), 3), Occur.SHOULD)
      .add(new BoostQuery(new TermQuery(new Term("field", "bar")), 3), Occur.SHOULD)
      .add(new BoostQuery(new TermQuery(new Term("field", "baz")), 3), Occur.SHOULD)
      .build());

  searcher.search(query, 1);

  IndexSearcher noCacheHelperSearcher = new AssertingIndexSearcher(random(), noCacheReader);
  noCacheHelperSearcher.setQueryCache(cache);
  noCacheHelperSearcher.setQueryCachingPolicy(ALWAYS_CACHE);
  noCacheHelperSearcher.search(query, 1);

  Thread t = new Thread(() -> {
    try {
      noCacheReader.close();
      w.close();
      dir.close();
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  });
  t.start();
  t.join();
}
Example 18
Source File: TestPerfTasksLogic.java From lucene-solr with Apache License 2.0
/**
 * Test that IndexWriter settings stick.
 */
public void testIndexWriterSettings() throws Exception {
  // 1. alg definition (required in every "logic" test)
  String[] algLines = {
      "# ----- properties ",
      "content.source=org.apache.lucene.benchmark.byTask.feeds.LineDocSource",
      "docs.file=" + getReuters20LinesFile(),
      "content.source.log.step=3",
      "ram.flush.mb=-1",
      "max.buffered=2",
      "compound=cmpnd:true:false",
      "doc.term.vector=vector:false:true",
      "content.source.forever=false",
      "directory=ByteBuffersDirectory",
      "doc.stored=false",
      "merge.factor=3",
      "doc.tokenized=false",
      "debug.level=1",
      "# ----- alg ",
      "{ \"Rounds\"",
      " ResetSystemErase",
      " CreateIndex",
      " { \"AddDocs\" AddDoc > : * ",
      " NewRound",
      "} : 2",
  };

  // 2. execute the algorithm (required in every "logic" test)
  Benchmark benchmark = execBenchmark(algLines);
  final IndexWriter writer = benchmark.getRunData().getIndexWriter();
  assertEquals(2, writer.getConfig().getMaxBufferedDocs());
  assertEquals(IndexWriterConfig.DISABLE_AUTO_FLUSH, (int) writer.getConfig().getRAMBufferSizeMB());
  assertEquals(3, ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor());
  assertEquals(0.0d, writer.getConfig().getMergePolicy().getNoCFSRatio(), 0.0);
  writer.close();
  Directory dir = benchmark.getRunData().getDirectory();
  IndexReader reader = DirectoryReader.open(dir);
  Fields tfv = reader.getTermVectors(0);
  assertNotNull(tfv);
  assertTrue(tfv.size() > 0);
  reader.close();
}
Example 19
Source File: Blur022CodecTest.java From incubator-retired-blur with Apache License 2.0
@Test
public void testLargeDocs() throws IOException {
  Random random = new Random();
  Iterable<? extends IndexableField> doc = getLargeDoc(random);
  RAMDirectory directory = new RAMDirectory();
  IndexWriterConfig conf1 = new IndexWriterConfig(Version.LUCENE_43, new WhitespaceAnalyzer(Version.LUCENE_43));
  conf1.setCodec(new Blur022Codec());
  IndexWriter writer1 = new IndexWriter(directory, conf1);
  writer1.addDocument(doc);
  writer1.close();

  DirectoryReader reader1 = DirectoryReader.open(directory);
  int numDocs1 = reader1.numDocs();
  assertEquals(1, numDocs1);
  // for (int i = 0; i < numDocs1; i++) {
  //   System.out.println(reader1.document(i));
  // }

  IndexWriterConfig conf2 = new IndexWriterConfig(Version.LUCENE_43, new WhitespaceAnalyzer(Version.LUCENE_43));
  conf2.setCodec(new Blur022Codec(1 << 16, CompressionMode.HIGH_COMPRESSION));
  IndexWriter writer2 = new IndexWriter(directory, conf2);
  writer2.addDocument(doc);
  writer2.close();

  DirectoryReader reader2 = DirectoryReader.open(directory);
  int numDocs2 = reader2.numDocs();
  assertEquals(2, numDocs2);

  for (int i = 0; i < 2; i++) {
    long t1 = System.nanoTime();
    Document document1 = reader1.document(0);
    long t2 = System.nanoTime();
    Document document2 = reader2.document(1);
    long t3 = System.nanoTime();
    System.out.println((t3 - t2) / 1000000.0);
    System.out.println((t2 - t1) / 1000000.0);
    System.out.println("doc1 " + document1.hashCode());
    System.out.println("doc2 " + document2.hashCode());
  }
  // for (int i = 0; i < numDocs2; i++) {
  //   System.out.println(reader2.document(i));
  // }
  // long fileLength = directory.fileLength("_0.fdt");

  for (String name : directory.listAll()) {
    if (name.endsWith(".fdt")) {
      System.out.println(name);
      System.out.println(directory.fileLength(name));
    }
  }
}
Example 20
Source File: TermBoostQueryTest.java From querqy with Apache License 2.0
@Test
public void testThatExternalBoostFactorIsApplied() throws Exception {
  final float fieldBoostFactor = 2f;
  final float externalBoostFactor = 3f;
  ConstantFieldBoost fieldBoost = new ConstantFieldBoost(fieldBoostFactor);

  Analyzer analyzer = new MockAnalyzer(random());
  Directory directory = newDirectory();
  RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, analyzer);
  indexWriter.close();

  IndexReader indexReader = DirectoryReader.open(directory);
  IndexSearcher indexSearcher = newSearcher(indexReader);

  final FieldBoostTermQueryBuilder.FieldBoostTermQuery tbq =
      new FieldBoostTermQueryBuilder.FieldBoostTermQuery(new Term("f1", "v1"), fieldBoost);

  final Weight weight = tbq.createWeight(indexSearcher, ScoreMode.COMPLETE, externalBoostFactor);
  assertTrue(weight instanceof FieldBoostTermQueryBuilder.FieldBoostTermQuery.FieldBoostWeight);
  final FieldBoostTermQueryBuilder.FieldBoostTermQuery.FieldBoostWeight tbw =
      (FieldBoostTermQueryBuilder.FieldBoostTermQuery.FieldBoostWeight) weight;

  assertEquals(fieldBoostFactor * externalBoostFactor, tbw.getScore(), 0.0001f);

  indexReader.close();
  directory.close();
  analyzer.close();
}