Java Code Examples for org.apache.lucene.util.TestUtil#getDefaultCodec()
The following examples show how to use
org.apache.lucene.util.TestUtil#getDefaultCodec().
You can vote up the ones you like or vote down the ones you don't like,
and navigate to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example 1
Source File: BaseMergePolicyTestCase.java From lucene-solr with Apache License 2.0 | 5 votes |
/**
 * Creates a fake {@link SegmentCommitInfo} with the given {@code maxDoc},
 * {@code numDeletedDocs} and {@code sizeMB} — the statistics that merge
 * policies usually care about.
 *
 * @param name segment name; must start with an underscore
 * @param maxDoc number of documents in the segment
 * @param numDeletedDocs number of deleted documents
 * @param sizeMB simulated on-disk size of the segment, in megabytes
 * @param source value stored under the {@code IndexWriter.SOURCE} diagnostics key
 * @throws IllegalArgumentException if {@code name} does not start with {@code _}
 */
protected static SegmentCommitInfo makeSegmentCommitInfo(String name, int maxDoc, int numDeletedDocs, double sizeMB, String source) {
  if (!name.startsWith("_")) {
    throw new IllegalArgumentException("name must start with an _, got " + name);
  }
  // Give the segment a random id, as a real IndexWriter would.
  byte[] segmentId = new byte[StringHelper.ID_LENGTH];
  random().nextBytes(segmentId);
  SegmentInfo segmentInfo = new SegmentInfo(FAKE_DIRECTORY, Version.LATEST, Version.LATEST, name, maxDoc,
      false, TestUtil.getDefaultCodec(), Collections.emptyMap(), segmentId,
      Collections.singletonMap(IndexWriter.SOURCE, source), null);
  // Encode the requested size into a fake file name so size queries "see" it.
  long sizeInBytes = (long) (sizeMB * 1024 * 1024);
  segmentInfo.setFiles(Collections.singleton(name + "_size=" + sizeInBytes + ".fake"));
  return new SegmentCommitInfo(segmentInfo, numDeletedDocs, 0, 0, 0, 0, StringHelper.randomId());
}
Example 2
Source File: AssertingCodec.java From lucene-solr with Apache License 2.0 | 4 votes |
/** Sole constructor: registers under the name "Asserting" and delegates to the codec returned by {@link TestUtil#getDefaultCodec()}. */
public AssertingCodec() { super("Asserting", TestUtil.getDefaultCodec()); }
Example 3
Source File: CheapBastardCodec.java From lucene-solr with Apache License 2.0 | 4 votes |
/** Sole constructor: registers under the name "CheapBastard" and delegates to the codec returned by {@link TestUtil#getDefaultCodec()}. */
public CheapBastardCodec() { super("CheapBastard", TestUtil.getDefaultCodec()); }
Example 4
Source File: CompressingCodec.java From lucene-solr with Apache License 2.0 | 4 votes |
/**
 * Creates a compressing codec with a given segment suffix, wrapping the default
 * codec and overriding its stored-fields and term-vectors formats with
 * compressing variants configured from the given parameters.
 */
public CompressingCodec(String name, String segmentSuffix, CompressionMode compressionMode, int chunkSize, int maxDocsPerChunk, int blockShift) {
  super(name, TestUtil.getDefaultCodec());
  this.termVectorsFormat = new CompressingTermVectorsFormat(name, segmentSuffix, compressionMode, chunkSize, blockShift);
  this.storedFieldsFormat = new CompressingStoredFieldsFormat(name, segmentSuffix, compressionMode, chunkSize, maxDocsPerChunk, blockShift);
}
Example 5
Source File: TestLucene60FieldInfoFormat.java From lucene-solr with Apache License 2.0 | 4 votes |
/** Returns the codec exercised by this test case: the current default codec. */
@Override protected Codec getCodec() { return TestUtil.getDefaultCodec(); }
Example 6
Source File: TestLucene50StoredFieldsFormat.java From lucene-solr with Apache License 2.0 | 4 votes |
/** Returns the codec exercised by this test case: the current default codec. */
@Override protected Codec getCodec() { return TestUtil.getDefaultCodec(); }
Example 7
Source File: TestLucene50LiveDocsFormat.java From lucene-solr with Apache License 2.0 | 4 votes |
/** Returns the codec exercised by this test case: the current default codec. */
@Override protected Codec getCodec() { return TestUtil.getDefaultCodec(); }
Example 8
Source File: TestLucene50TermVectorsFormat.java From lucene-solr with Apache License 2.0 | 4 votes |
/** Returns the codec exercised by this test case: the current default codec. */
@Override protected Codec getCodec() { return TestUtil.getDefaultCodec(); }
Example 9
Source File: TestLucene86SegmentInfoFormat.java From lucene-solr with Apache License 2.0 | 4 votes |
/** Returns the codec exercised by this test case: the current default codec. */
@Override protected Codec getCodec() { return TestUtil.getDefaultCodec(); }
Example 10
Source File: TestIndexWriterThreadsToSegments.java From lucene-solr with Apache License 2.0 | 4 votes |
/**
 * Regression test: documents indexed from two concurrent threads must eventually
 * be flushed; if the writer incorrectly held previously indexed docs in RAM
 * forever, the polling loop below would never terminate.
 */
public void testDocsStuckInRAMForever() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
  // Tiny RAM buffer so flushes happen frequently.
  iwc.setRAMBufferSizeMB(.2);
  Codec codec = TestUtil.getDefaultCodec();
  iwc.setCodec(codec);
  // Disable merging so each flushed segment stays visible on disk.
  iwc.setMergePolicy(NoMergePolicy.INSTANCE);
  final IndexWriter w = new IndexWriter(dir, iwc);
  final CountDownLatch startingGun = new CountDownLatch(1);
  Thread[] threads = new Thread[2];
  for(int i=0;i<threads.length;i++) {
    final int threadID = i;
    threads[i] = new Thread() {
        @Override
        public void run() {
          try {
            // Wait so both threads start indexing at the same moment.
            startingGun.await();
            for(int j=0;j<1000;j++) {
              Document doc = new Document();
              doc.add(newStringField("field", "threadID" + threadID, Field.Store.NO));
              w.addDocument(doc);
            }
          } catch (Exception e) {
            throw new RuntimeException(e);
          }
        }
      };
    threads[i].start();
  }
  startingGun.countDown();
  for(Thread t : threads) {
    t.join();
  }
  // Segments already inspected, so each is read only once.
  Set<String> segSeen = new HashSet<>();
  int thread0Count = 0;
  int thread1Count = 0;

  // At this point the writer should have 2 thread states w/ docs; now we index
  // with only 1 thread until we see all 1000 thread0 & thread1 docs flushed.
  // If the writer incorrectly holds onto previously indexed docs forever then
  // this will run forever:
  long counter = 0;
  long checkAt = 100;
  while (thread0Count < 1000 || thread1Count < 1000) {
    Document doc = new Document();
    doc.add(newStringField("field", "threadIDmain", Field.Store.NO));
    w.addDocument(doc);
    // Only scan the directory periodically (with exponential backoff below),
    // since listing + opening readers is expensive.
    if (counter++ == checkAt) {
      for(String fileName : dir.listAll()) {
        if (fileName.endsWith(".si")) {
          String segName = IndexFileNames.parseSegmentName(fileName);
          if (segSeen.contains(segName) == false) {
            segSeen.add(segName);
            // Open the flushed segment directly and count how many docs from
            // each indexing thread it contains.
            byte id[] = readSegmentInfoID(dir, fileName);
            SegmentInfo si = TestUtil.getDefaultCodec().segmentInfoFormat().read(dir, segName, id, IOContext.DEFAULT);
            si.setCodec(codec);
            SegmentCommitInfo sci = new SegmentCommitInfo(si, 0, 0, -1, -1, -1, StringHelper.randomId());
            SegmentReader sr = new SegmentReader(sci, Version.LATEST.major, IOContext.DEFAULT);
            try {
              thread0Count += sr.docFreq(new Term("field", "threadID0"));
              thread1Count += sr.docFreq(new Term("field", "threadID1"));
            } finally {
              sr.close();
            }
          }
        }
      }
      // Back off: check less and less often as more docs are indexed.
      checkAt = (long) (checkAt * 1.25);
      counter = 0;
    }
  }
  w.close();
  dir.close();
}
Example 11
Source File: TestAddIndexes.java From lucene-solr with Apache License 2.0 | 4 votes |
/** Sole constructor: uses the name "NotRegistered" (intentionally absent from the codec registry) and delegates to the codec returned by {@link TestUtil#getDefaultCodec()}. */
public UnRegisteredCodec() { super("NotRegistered", TestUtil.getDefaultCodec()); }
Example 12
Source File: TestIndexSorting.java From lucene-solr with Apache License 2.0 | 4 votes |
/** Delegates to the default codec, reusing its registered name. (getDefaultCodec() is called twice because super(...) must be the first statement, so the result cannot be hoisted into a local.) */
AssertingNeedsIndexSortCodec() { super(TestUtil.getDefaultCodec().getName(), TestUtil.getDefaultCodec()); }