Java Code Examples for org.apache.lucene.index.IndexWriter#commit()
The following examples show how to use org.apache.lucene.index.IndexWriter#commit().
Follow the link above each example to go to the original project or source file.
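Before diving into the examples, here is a minimal, self-contained sketch of the pattern nearly all of them share: open an IndexWriter, buffer documents with addDocument(), make the changes durable with commit(), then close the writer. This sketch assumes a recent Lucene (8.x or later) on the classpath; the ByteBuffersDirectory, field name, and class name are illustrative choices, not taken from any project below.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class CommitSketch {
    public static void main(String[] args) throws Exception {
        Directory dir = new ByteBuffersDirectory(); // in-memory directory, for illustration only
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));

        Document doc = new Document();
        doc.add(new TextField("title", "hello lucene", Field.Store.YES));
        writer.addDocument(doc); // buffered in memory; not yet durable

        writer.commit();         // flush and fsync: changes are now persisted and visible to new readers
        writer.close();          // close() also commits any remaining pending changes
        dir.close();
    }
}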
Example 1
Source File: LuceneIndexer.java From MtgDesktopCompanion with GNU General Public License v3.0
public void initIndex() throws IOException {
    if (dir == null)
        open();

    IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
    iwc.setOpenMode(OpenMode.CREATE);
    IndexWriter indexWriter = new IndexWriter(dir, iwc);
    for (MagicCard mc : MTGControler.getInstance().getEnabled(MTGCardsProvider.class).listAllCards()) {
        try {
            indexWriter.addDocument(toDocuments(mc));
        } catch (IllegalArgumentException e) {
            logger.error("Error indexing " + mc + " " + mc.getCurrentSet(), e);
        }
    }
    indexWriter.commit();
    indexWriter.close();
}
Example 2
Source File: IndexTrimCommand.java From clue with Apache License 2.0
@Override
public void execute(Namespace args, PrintStream out) throws Exception {
    int trimPercent = args.getInt("percent");
    if (trimPercent < 0 || trimPercent > 100) {
        throw new IllegalArgumentException("invalid percent: " + trimPercent);
    }
    IndexWriter writer = ctx.getIndexWriter();
    if (writer != null) {
        IndexReader reader = ctx.getIndexReader();
        writer.deleteDocuments(buildDeleteQuery(trimPercent, reader.maxDoc()));
        writer.commit();
        ctx.refreshReader();
        reader = ctx.getIndexReader();
        out.println("trim successful, index now contains: " + reader.numDocs() + " docs.");
    } else {
        out.println("unable to open writer, index is in readonly mode");
    }
}
Example 3
Source File: TestPerFieldPostingsFormat2.java From lucene-solr with Apache License 2.0
@Test
public void testMergeUnusedPerFieldCodec() throws IOException {
    Directory dir = newDirectory();
    IndexWriterConfig iwconf = newIndexWriterConfig(new MockAnalyzer(random()))
            .setOpenMode(OpenMode.CREATE).setCodec(new MockCodec());
    IndexWriter writer = newWriter(dir, iwconf);

    addDocs(writer, 10);
    writer.commit();
    addDocs3(writer, 10);
    writer.commit();
    addDocs2(writer, 10);
    writer.commit();
    assertEquals(30, writer.getDocStats().maxDoc);
    TestUtil.checkIndex(dir);
    writer.forceMerge(1);
    assertEquals(30, writer.getDocStats().maxDoc);
    writer.close();
    dir.close();
}
Example 4
Source File: InternalEngine.java From Elasticsearch with Apache License 2.0
private void commitIndexWriter(IndexWriter writer, Translog translog, String syncId) throws IOException {
    try {
        Translog.TranslogGeneration translogGeneration = translog.getGeneration();
        logger.trace("committing writer with translog id [{}] and sync id [{}]",
                translogGeneration.translogFileGeneration, syncId);
        Map<String, String> commitData = new HashMap<>(2);
        commitData.put(Translog.TRANSLOG_GENERATION_KEY, Long.toString(translogGeneration.translogFileGeneration));
        commitData.put(Translog.TRANSLOG_UUID_KEY, translogGeneration.translogUUID);
        if (syncId != null) {
            commitData.put(Engine.SYNC_COMMIT_ID, syncId);
        }
        // attach the translog metadata to the commit point, then commit
        writer.setCommitData(commitData);
        writer.commit();
    } catch (Throwable ex) {
        failEngine("lucene commit failed", ex);
        throw ex;
    }
}
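Note on API versions: the Map-based setCommitData(Map) used here belongs to older Lucene releases; newer versions replaced it with setLiveCommitData(Iterable<Map.Entry<String, String>>), which the Store.java example from crate further down this page demonstrates.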
Example 5
Source File: BasicStorageTest.java From lumongo with Apache License 2.0
@BeforeClass
public static void cleanDatabaseAndInit() throws Exception {
    MongoClient mongo = TestHelper.getMongo();
    mongo.dropDatabase(TestHelper.TEST_DATABASE_NAME);
    directory = new DistributedDirectory(new MongoDirectory(mongo, TestHelper.TEST_DATABASE_NAME, STORAGE_TEST_INDEX, false));
    StandardAnalyzer analyzer = new StandardAnalyzer();
    IndexWriterConfig config = new IndexWriterConfig(analyzer);
    IndexWriter w = new IndexWriter(directory, config);
    addDoc(w, "Random perl Title that is long", "id-1");
    addDoc(w, "Random java Title that is long", "id-1");
    addDoc(w, "MongoDB is awesome", "id-2");
    addDoc(w, "This is a long title with nothing interesting", "id-3");
    addDoc(w, "Java is awesome", "id-4");
    addDoc(w, "Really big fish", "id-5");
    w.commit();
    w.close();
}
Example 6
Source File: TestControlledRealTimeReopenThread.java From lucene-solr with Apache License 2.0
public void testListenerCalled() throws Exception {
    Directory dir = newDirectory();
    IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));
    final AtomicBoolean afterRefreshCalled = new AtomicBoolean(false);
    SearcherManager sm = new SearcherManager(iw, new SearcherFactory());
    sm.addListener(new ReferenceManager.RefreshListener() {
        @Override
        public void beforeRefresh() {
        }

        @Override
        public void afterRefresh(boolean didRefresh) {
            if (didRefresh) {
                afterRefreshCalled.set(true);
            }
        }
    });
    iw.addDocument(new Document());
    iw.commit();
    assertFalse(afterRefreshCalled.get());
    sm.maybeRefreshBlocking();
    assertTrue(afterRefreshCalled.get());
    sm.close();
    iw.close();
    dir.close();
}
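The assertions here pin down a distinction worth remembering: commit() makes the new document durable on disk, but a SearcherManager does not see it (and its refresh listener does not fire) until maybeRefreshBlocking() triggers a refresh.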
Example 7
Source File: UseLucene.java From mmseg4j-solr with Apache License 2.0
@Test
public void test() throws IOException {
    IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer());
    IndexWriter iw = new IndexWriter(dir, iwc);
    iw.addDocument(createDoc(1));
    iw.addDocument(createDoc(2));
    iw.commit();
    iw.close();
}
Example 8
Source File: TikaLuceneContentExtractorTest.java From cxf with Apache License 2.0
@Before
public void setUp() throws Exception {
    final Analyzer analyzer = new StandardAnalyzer();
    tempDirectory = Files.createTempDirectory("lucene");
    directory = new MMapDirectory(tempDirectory);
    IndexWriterConfig config = new IndexWriterConfig(analyzer);
    writer = new IndexWriter(directory, config);
    writer.commit();
    parser = new FiqlParser<>(SearchBean.class);
    extractor = new TikaLuceneContentExtractor(new PDFParser());
}
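Committing immediately after constructing the writer, before any document is added, writes an initial empty commit point; without one, opening a DirectoryReader on the fresh directory would fail, so this presumably lets the test search the index before anything has been extracted into it.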
Example 9
Source File: TestLucene.java From RedisDirectory with Apache License 2.0
public void testRedisDirectoryWithJedis() throws IOException {
    long start = System.currentTimeMillis();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(new WhitespaceAnalyzer())
            .setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    //indexWriterConfig.setInfoStream(System.out);
    //indexWriterConfig.setRAMBufferSizeMB(2048);
    //LogByteSizeMergePolicy logByteSizeMergePolicy = new LogByteSizeMergePolicy();
    //logByteSizeMergePolicy.setMinMergeMB(1);
    //logByteSizeMergePolicy.setMaxMergeMB(64);
    //logByteSizeMergePolicy.setMaxCFSSegmentSizeMB(64);
    //indexWriterConfig.setRAMBufferSizeMB(1024).setMergePolicy(logByteSizeMergePolicy).setUseCompoundFile(false);
    //GenericObjectPoolConfig genericObjectPoolConfig = new GenericObjectPoolConfig();
    // max wait time for obtaining a connection
    //genericObjectPoolConfig.setMaxWaitMillis(3000);
    // 10s timeout
    RedisDirectory redisDirectory = new RedisDirectory(new JedisStream("localhost", 6379));
    IndexWriter indexWriter = new IndexWriter(redisDirectory, indexWriterConfig);
    for (int i = 0; i < 10000000; i++) {
        indexWriter.addDocument(addDocument(i));
    }
    indexWriter.commit();
    indexWriter.close();
    redisDirectory.close();
    long end = System.currentTimeMillis();
    log.error("RedisDirectoryWithJedis consumes {}s!", (end - start) / 1000);

    start = System.currentTimeMillis();
    IndexSearcher indexSearcher = new IndexSearcher(DirectoryReader.open(new RedisDirectory(new JedisStream("localhost", 6379))));
    int total = 0;
    for (int i = 0; i < 10000000; i++) {
        TermQuery key1 = new TermQuery(new Term("key1", "key" + i));
        TopDocs search = indexSearcher.search(key1, 10);
        total += search.totalHits;
    }
    System.out.println(total);
    end = System.currentTimeMillis();
    log.error("RedisDirectoryWithJedis search consumes {}ms!", (end - start));
}
Example 10
Source File: TripleIndexCreatorContext.java From AGDISTIS with GNU Affero General Public License v3.0
public void createIndex(List<File> files, String idxDirectory, String baseURI) {
    try {
        urlAnalyzer = new SimpleAnalyzer(LUCENE_VERSION);
        literalAnalyzer = new LiteralAnalyzer(LUCENE_VERSION);
        Map<String, Analyzer> mapping = new HashMap<String, Analyzer>();
        mapping.put(FIELD_NAME_URI, urlAnalyzer);
        mapping.put(FIELD_NAME_SURFACE_FORM, literalAnalyzer);
        mapping.put(FIELD_NAME_URI_COUNT, literalAnalyzer);
        mapping.put(FIELD_NAME_CONTEXT, literalAnalyzer);
        PerFieldAnalyzerWrapper perFieldAnalyzer = new PerFieldAnalyzerWrapper(urlAnalyzer, mapping);

        File indexDirectory = new File(idxDirectory);
        indexDirectory.mkdir();
        directory = new MMapDirectory(indexDirectory);
        IndexWriterConfig config = new IndexWriterConfig(LUCENE_VERSION, perFieldAnalyzer);
        iwriter = new IndexWriter(directory, config);
        iwriter.commit();
        for (File file : files) {
            String type = FileUtil.getFileExtension(file.getName());
            if (type.equals(TTL))
                indexTTLFile(file, baseURI);
            iwriter.commit();
        }
    } catch (Exception e) {
        log.error("Error while creating TripleIndex.", e);
    }
}
Example 11
Source File: MergeCommand.java From clue with Apache License 2.0
@Override
public void execute(Namespace args, PrintStream out) throws Exception {
    int count = args.getInt("num");
    IndexWriter writer = ctx.getIndexWriter();
    if (writer != null) {
        writer.forceMerge(count, true);
        writer.commit();
        ctx.refreshReader();
    } else {
        out.println("unable to open index writer, index is in readonly mode");
    }
}
Example 12
Source File: TestLucene.java From RedisDirectory with Apache License 2.0
public void testRedisDirectoryWithJedisPool() throws IOException {
    long start = System.currentTimeMillis();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(new WhitespaceAnalyzer())
            .setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    //indexWriterConfig.setInfoStream(System.out);
    //indexWriterConfig.setRAMBufferSizeMB(2048);
    //LogByteSizeMergePolicy logByteSizeMergePolicy = new LogByteSizeMergePolicy();
    //logByteSizeMergePolicy.setMinMergeMB(1);
    //logByteSizeMergePolicy.setMaxMergeMB(64);
    //logByteSizeMergePolicy.setMaxCFSSegmentSizeMB(64);
    //indexWriterConfig.setRAMBufferSizeMB(1024).setMergePolicy(logByteSizeMergePolicy).setUseCompoundFile(false);
    //GenericObjectPoolConfig genericObjectPoolConfig = new GenericObjectPoolConfig();
    // max wait time for obtaining a connection
    //genericObjectPoolConfig.setMaxWaitMillis(3000);
    // 10s timeout
    JedisPool jedisPool = new JedisPool(new JedisPoolConfig(), "localhost", 6379, Constants.TIME_OUT);
    RedisDirectory redisDirectory = new RedisDirectory(new JedisPoolStream(jedisPool));
    IndexWriter indexWriter = new IndexWriter(redisDirectory, indexWriterConfig);
    for (int i = 0; i < 10000000; i++) {
        indexWriter.addDocument(addDocument(i));
    }
    indexWriter.commit();
    indexWriter.close();
    redisDirectory.close();
    long end = System.currentTimeMillis();
    log.error("RedisDirectoryWithJedisPool consumes {}s!", (end - start) / 1000);

    start = System.currentTimeMillis();
    IndexSearcher indexSearcher = new IndexSearcher(DirectoryReader.open(new RedisDirectory(new JedisStream("localhost", 6379))));
    int total = 0;
    for (int i = 0; i < 10000000; i++) {
        TermQuery key1 = new TermQuery(new Term("key1", "key" + i));
        TopDocs search = indexSearcher.search(key1, 10);
        total += search.totalHits;
    }
    System.out.println(total);
    end = System.currentTimeMillis();
    log.error("RedisDirectoryWithJedisPool search consumes {}ms!", (end - start));
}
Example 13
Source File: Catalog.java From cxf with Apache License 2.0
@DELETE
public Response delete() throws IOException {
    final IndexWriter writer = getIndexWriter();
    try {
        storage.deleteAll();
        writer.deleteAll();
        writer.commit();
    } finally {
        writer.close();
    }
    return Response.ok().build();
}
Example 14
Source File: SessionTokenTest.java From lucene-solr with Apache License 2.0
@Test
public void testSerialization() throws IOException {
    Directory dir = newDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(null);
    conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
    IndexWriter writer = new IndexWriter(dir, conf);
    writer.addDocument(new Document());
    writer.commit();
    Revision rev = new IndexRevision(writer);

    SessionToken session1 = new SessionToken("17", rev);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    session1.serialize(new DataOutputStream(baos));
    byte[] b = baos.toByteArray();
    SessionToken session2 = new SessionToken(new DataInputStream(new ByteArrayInputStream(b)));
    assertEquals(session1.id, session2.id);
    assertEquals(session1.version, session2.version);
    assertEquals(1, session2.sourceFiles.size());
    assertEquals(session1.sourceFiles.size(), session2.sourceFiles.size());
    assertEquals(session1.sourceFiles.keySet(), session2.sourceFiles.keySet());
    List<RevisionFile> files1 = session1.sourceFiles.values().iterator().next();
    List<RevisionFile> files2 = session2.sourceFiles.values().iterator().next();
    assertEquals(files1, files2);
    writer.close();
    IOUtils.close(dir);
}
Example 15
Source File: LuceneIndexer.java From taoshop with Apache License 2.0
public boolean createIndex(String indexDir) throws IOException {
    // static test data; the titles and contents are Chinese, matching the SmartChineseAnalyzer below
    Integer ids[] = {1, 2, 3};
    String titles[] = {"标题1", "标题2", "标题3"};
    String tcontents[] = {
            "内容1内容啊哈哈哈",
            "内容2内容啊哈哈哈",
            "内容3内容啊哈哈哈"
    };

    long startTime = System.currentTimeMillis(); // record when indexing starts
    Analyzer analyzer = new SmartChineseAnalyzer();
    Directory directory = FSDirectory.open(Paths.get(indexDir));
    IndexWriterConfig config = new IndexWriterConfig(analyzer);
    IndexWriter indexWriter = new IndexWriter(directory, config);

    for (int i = 0; i < ids.length; i++) {
        Document doc = new Document();
        doc.add(new TextField("id", ids[i].toString(), Field.Store.YES));   // add the id field
        doc.add(new TextField("title", titles[i], Field.Store.YES));        // add the title, stored in the index
        doc.add(new TextField("tcontent", tcontents[i], Field.Store.YES));  // add the content field
        indexWriter.addDocument(doc);
    }
    indexWriter.commit();
    System.out.println("Indexed " + indexWriter.numDocs() + " documents");
    indexWriter.close();
    System.out.println("Index creation took " + (System.currentTimeMillis() - startTime) + " ms");
    return true;
}
Example 16
Source File: Store.java From crate with Apache License 2.0
private void updateCommitData(IndexWriter writer, Map<String, String> keysToUpdate) throws IOException {
    final Map<String, String> userData = getUserData(writer);
    userData.putAll(keysToUpdate);
    writer.setLiveCommitData(userData.entrySet());
    writer.commit();
}
Example 17
Source File: TestExternalCodecs.java From lucene-solr with Apache License 2.0
public void testPerFieldCodec() throws Exception {
    final int NUM_DOCS = atLeast(173);
    if (VERBOSE) {
        System.out.println("TEST: NUM_DOCS=" + NUM_DOCS);
    }

    BaseDirectoryWrapper dir = newDirectory();
    dir.setCheckIndexOnClose(false); // we use a custom codec provider
    IndexWriter w = new IndexWriter(
        dir,
        newIndexWriterConfig(new MockAnalyzer(random())).
            setCodec(new CustomPerFieldCodec()).
            setMergePolicy(newLogMergePolicy(3))
    );
    Document doc = new Document();
    // uses default codec:
    doc.add(newTextField("field1", "this field uses the standard codec as the test", Field.Store.NO));
    // uses memory codec:
    Field field2 = newTextField("field2", "this field uses the memory codec as the test", Field.Store.NO);
    doc.add(field2);

    Field idField = newStringField("id", "", Field.Store.NO);
    doc.add(idField);

    for (int i = 0; i < NUM_DOCS; i++) {
        idField.setStringValue("" + i);
        w.addDocument(doc);
        if ((i + 1) % 10 == 0) {
            w.commit();
        }
    }
    if (VERBOSE) {
        System.out.println("TEST: now delete id=77");
    }
    w.deleteDocuments(new Term("id", "77"));

    IndexReader r = DirectoryReader.open(w);
    assertEquals(NUM_DOCS - 1, r.numDocs());
    IndexSearcher s = newSearcher(r);
    assertEquals(NUM_DOCS - 1, s.count(new TermQuery(new Term("field1", "standard"))));
    assertEquals(NUM_DOCS - 1, s.count(new TermQuery(new Term("field2", "memory"))));
    r.close();

    if (VERBOSE) {
        System.out.println("\nTEST: now delete 2nd doc");
    }
    w.deleteDocuments(new Term("id", "44"));

    if (VERBOSE) {
        System.out.println("\nTEST: now force merge");
    }
    w.forceMerge(1);

    if (VERBOSE) {
        System.out.println("\nTEST: now open reader");
    }
    r = DirectoryReader.open(w);
    assertEquals(NUM_DOCS - 2, r.maxDoc());
    assertEquals(NUM_DOCS - 2, r.numDocs());
    s = newSearcher(r);
    assertEquals(NUM_DOCS - 2, s.count(new TermQuery(new Term("field1", "standard"))));
    assertEquals(NUM_DOCS - 2, s.count(new TermQuery(new Term("field2", "memory"))));
    assertEquals(1, s.count(new TermQuery(new Term("id", "76"))));
    assertEquals(0, s.count(new TermQuery(new Term("id", "77"))));
    assertEquals(0, s.count(new TermQuery(new Term("id", "44"))));

    if (VERBOSE) {
        System.out.println("\nTEST: now close NRT reader");
    }
    r.close();

    w.close();
    dir.close();
}
Example 18
Source File: TestCompressingStoredFieldsFormat.java From lucene-solr with Apache License 2.0
public void testDeletePartiallyWrittenFilesIfAbort() throws IOException {
    Directory dir = newDirectory();
    IndexWriterConfig iwConf = newIndexWriterConfig(new MockAnalyzer(random()));
    iwConf.setMaxBufferedDocs(RandomNumbers.randomIntBetween(random(), 2, 30));
    iwConf.setCodec(getCodec());
    // disable CFS because this test checks file names
    iwConf.setMergePolicy(newLogMergePolicy(false));
    iwConf.setUseCompoundFile(false);

    // Cannot use RIW because this test wants CFS to stay off:
    IndexWriter iw = new IndexWriter(dir, iwConf);

    final Document validDoc = new Document();
    validDoc.add(new IntPoint("id", 0));
    validDoc.add(new StoredField("id", 0));
    iw.addDocument(validDoc);
    iw.commit();

    // make sure that #writeField will fail to trigger an abort
    final Document invalidDoc = new Document();
    FieldType fieldType = new FieldType();
    fieldType.setStored(true);
    invalidDoc.add(new Field("invalid", fieldType) {
        @Override
        public String stringValue() {
            // TODO: really bad & scary that this causes IW to
            // abort the segment!! We should fix this.
            return null;
        }
    });

    try {
        iw.addDocument(invalidDoc);
        iw.commit();
    } catch (IllegalArgumentException iae) {
        // expected
        assertEquals(iae, iw.getTragicException());
    }
    // Writer should be closed by tragedy
    assertFalse(iw.isOpen());
    dir.close();
}
Example 19
Source File: LuceneIndexWriterExtensionTest.java From cava with Apache License 2.0
@Test
void shouldHaveAccessToLuceneIndexWriter(@LuceneIndexWriter IndexWriter indexWriter) throws Exception {
    assertTrue(indexWriter.isOpen());
    indexWriter.addDocument(new Document());
    indexWriter.commit();
}