Java Code Examples for org.apache.lucene.document.Field#setLongValue()
The following examples show how to use
org.apache.lucene.document.Field#setLongValue().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: TestLegacyField.java From lucene-solr with Apache License 2.0 | 6 votes |
public void testLegacyLongField() throws Exception { Field fields[] = new Field[] { new LegacyLongField("foo", 5L, Field.Store.NO), new LegacyLongField("foo", 5L, Field.Store.YES) }; for (Field field : fields) { trySetByteValue(field); trySetBytesValue(field); trySetBytesRefValue(field); trySetDoubleValue(field); trySetIntValue(field); trySetFloatValue(field); field.setLongValue(6); // ok trySetReaderValue(field); trySetShortValue(field); trySetStringValue(field); trySetTokenStreamValue(field); assertEquals(6L, field.numericValue().longValue()); } }
Example 2
Source File: TestLucene80DocValuesFormat.java From lucene-solr with Apache License 2.0 | 5 votes |
@Nightly public void testNumericFieldJumpTables() throws Exception { // IndexedDISI block skipping only activated if target >= current+2, so we need at least 5 blocks to // trigger consecutive block skips final int maxDoc = atLeast(5*65536); Directory dir = newDirectory(); IndexWriter iw = createFastIndexWriter(dir, maxDoc); Field idField = newStringField("id", "", Field.Store.NO); Field storedField = newStringField("stored", "", Field.Store.YES); Field dvField = new NumericDocValuesField("dv", 0); for (int i = 0 ; i < maxDoc ; i++) { Document doc = new Document(); idField.setStringValue(Integer.toBinaryString(i)); doc.add(idField); if (random().nextInt(100) > 10) { // Skip 10% to make DENSE blocks int value = random().nextInt(100000); storedField.setStringValue(Integer.toString(value)); doc.add(storedField); dvField.setLongValue(value); doc.add(dvField); } iw.addDocument(doc); } iw.flush(); iw.forceMerge(1, true); // Single segment to force large enough structures iw.commit(); iw.close(); assertDVIterate(dir); assertDVAdvance(dir, rarely() ? 1 : 7); // 1 is heavy (~20 s), so we do it rarely. 7 is a lot faster (8 s) dir.close(); }
Example 3
Source File: TestLucene80DocValuesFormat.java From lucene-solr with Apache License 2.0 | 5 votes |
private void doTestSparseNumericBlocksOfVariousBitsPerValue(double density) throws Exception { Directory dir = newDirectory(); IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random())); conf.setMaxBufferedDocs(atLeast(Lucene80DocValuesFormat.NUMERIC_BLOCK_SIZE)); conf.setRAMBufferSizeMB(-1); conf.setMergePolicy(newLogMergePolicy(random().nextBoolean())); IndexWriter writer = new IndexWriter(dir, conf); Document doc = new Document(); Field storedField = newStringField("stored", "", Field.Store.YES); Field dvField = new NumericDocValuesField("dv", 0); doc.add(storedField); doc.add(dvField); final int numDocs = atLeast(Lucene80DocValuesFormat.NUMERIC_BLOCK_SIZE*3); final LongSupplier longs = blocksOfVariousBPV(); for (int i = 0; i < numDocs; i++) { if (random().nextDouble() > density) { writer.addDocument(new Document()); continue; } long value = longs.getAsLong(); storedField.setStringValue(Long.toString(value)); dvField.setLongValue(value); writer.addDocument(doc); } writer.forceMerge(1); writer.close(); // compare assertDVIterate(dir); assertDVAdvance(dir, 1); // Tests all jump-lengths from 1 to maxDoc (quite slow ~= 1 minute for 200K docs) dir.close(); }
Example 4
Source File: TestMultiDocValues.java From lucene-solr with Apache License 2.0 | 5 votes |
public void testNumerics() throws Exception { Directory dir = newDirectory(); Document doc = new Document(); Field field = new NumericDocValuesField("numbers", 0); doc.add(field); IndexWriterConfig iwc = newIndexWriterConfig(random(), null); iwc.setMergePolicy(newLogMergePolicy()); RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc); int numDocs = TEST_NIGHTLY ? atLeast(500) : atLeast(50); for (int i = 0; i < numDocs; i++) { field.setLongValue(random().nextLong()); iw.addDocument(doc); if (random().nextInt(17) == 0) { iw.commit(); } } DirectoryReader ir = iw.getReader(); iw.forceMerge(1); DirectoryReader ir2 = iw.getReader(); LeafReader merged = getOnlyLeafReader(ir2); iw.close(); NumericDocValues multi = MultiDocValues.getNumericValues(ir, "numbers"); NumericDocValues single = merged.getNumericDocValues("numbers"); for (int i = 0; i < numDocs; i++) { assertEquals(i, multi.nextDoc()); assertEquals(i, single.nextDoc()); assertEquals(single.longValue(), multi.longValue()); } testRandomAdvance(merged.getNumericDocValues("numbers"), MultiDocValues.getNumericValues(ir, "numbers")); testRandomAdvanceExact(merged.getNumericDocValues("numbers"), MultiDocValues.getNumericValues(ir, "numbers"), merged.maxDoc()); ir.close(); ir2.close(); dir.close(); }
Example 5
Source File: BaseDocValuesFormatTestCase.java From lucene-solr with Apache License 2.0 | 4 votes |
/** Tests dv against stored fields with threads (binary/numeric/sorted, no missing) */
public void testThreads() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);

  // One reusable document: an id field plus stored/doc-value pairs for
  // binary, sorted, and numeric types. Each iteration mutates the field
  // values in place before re-adding the document.
  Document doc = new Document();
  Field idField = new StringField("id", "", Field.Store.NO);
  Field storedBinField = new StoredField("storedBin", new byte[0]);
  Field dvBinField = new BinaryDocValuesField("dvBin", new BytesRef());
  Field dvSortedField = new SortedDocValuesField("dvSorted", new BytesRef());
  Field storedNumericField = new StoredField("storedNum", "");
  Field dvNumericField = new NumericDocValuesField("dvNum", 0);
  doc.add(idField);
  doc.add(storedBinField);
  doc.add(dvBinField);
  doc.add(dvSortedField);
  doc.add(storedNumericField);
  doc.add(dvNumericField);

  // index some docs
  int numDocs = atLeast(300);
  for (int i = 0; i < numDocs; i++) {
    idField.setStringValue(Integer.toString(i));
    // Random binary payload of 0-8 bytes, shared by stored/binary/sorted fields
    // so the doc values can later be checked against the stored copy.
    int length = TestUtil.nextInt(random(), 0, 8);
    byte buffer[] = new byte[length];
    random().nextBytes(buffer);
    storedBinField.setBytesValue(buffer);
    dvBinField.setBytesValue(buffer);
    dvSortedField.setBytesValue(buffer);
    long numericValue = random().nextLong();
    storedNumericField.setStringValue(Long.toString(numericValue));
    dvNumericField.setLongValue(numericValue);
    writer.addDocument(doc);
    // Occasional commits create multiple segments.
    if (random().nextInt(31) == 0) {
      writer.commit();
    }
  }

  // delete some docs
  int numDeletions = random().nextInt(numDocs/10);
  for (int i = 0; i < numDeletions; i++) {
    int id = random().nextInt(numDocs);
    writer.deleteDocuments(new Term("id", Integer.toString(id)));
  }
  writer.close();

  // compare: several threads read the same (thread-safe) DirectoryReader
  // concurrently, each verifying doc values against the stored fields.
  final DirectoryReader ir = DirectoryReader.open(dir);
  int numThreads = TestUtil.nextInt(random(), 2, 7);
  Thread threads[] = new Thread[numThreads];
  // Latch releases all threads at once to maximize concurrent access.
  final CountDownLatch startingGun = new CountDownLatch(1);
  for (int i = 0; i < threads.length; i++) {
    threads[i] = new Thread() {
      @Override
      public void run() {
        try {
          startingGun.await();
          for (LeafReaderContext context : ir.leaves()) {
            LeafReader r = context.reader();
            BinaryDocValues binaries = r.getBinaryDocValues("dvBin");
            SortedDocValues sorted = r.getSortedDocValues("dvSorted");
            NumericDocValues numerics = r.getNumericDocValues("dvNum");
            for (int j = 0; j < r.maxDoc(); j++) {
              // Each doc-value iterator must land exactly on doc j and
              // match the value kept in the parallel stored field.
              BytesRef binaryValue = r.document(j).getBinaryValue("storedBin");
              assertEquals(j, binaries.nextDoc());
              BytesRef scratch = binaries.binaryValue();
              assertEquals(binaryValue, scratch);
              assertEquals(j, sorted.nextDoc());
              scratch = sorted.binaryValue();
              assertEquals(binaryValue, scratch);
              String expected = r.document(j).get("storedNum");
              assertEquals(j, numerics.nextDoc());
              assertEquals(Long.parseLong(expected), numerics.longValue());
            }
          }
          TestUtil.checkReader(ir);
        } catch (Exception e) {
          // Propagate any assertion/IO failure out of the thread.
          throw new RuntimeException(e);
        }
      }
    };
    threads[i].start();
  }
  startingGun.countDown();
  for (Thread t : threads) {
    t.join();
  }
  ir.close();
  dir.close();
}
Example 6
Source File: BaseTestRangeFilter.java From lucene-solr with Apache License 2.0 | 4 votes |
/**
 * Builds a test index of documents for ids minId..maxId, each carrying
 * point/doc-value fields for the id in several numeric types plus a random
 * "rand" value, and returns a reader over it. Retries from scratch until
 * exactly one document holds the minimum and one the maximum random value,
 * updating {@code index.minR}/{@code index.maxR} as a side effect.
 */
private static IndexReader build(Random random, TestIndex index) throws IOException {
  /* build an index */

  // One reusable document; field values are mutated in place per id.
  Document doc = new Document();
  Field idField = newStringField(random, "id", "", Field.Store.YES);
  Field idDVField = new SortedDocValuesField("id", new BytesRef());
  Field intIdField = new IntPoint("id_int", 0);
  Field intDVField = new NumericDocValuesField("id_int", 0);
  Field floatIdField = new FloatPoint("id_float", 0);
  Field floatDVField = new NumericDocValuesField("id_float", 0);
  Field longIdField = new LongPoint("id_long", 0);
  Field longDVField = new NumericDocValuesField("id_long", 0);
  Field doubleIdField = new DoublePoint("id_double", 0);
  Field doubleDVField = new NumericDocValuesField("id_double", 0);
  Field randField = newStringField(random, "rand", "", Field.Store.YES);
  Field randDVField = new SortedDocValuesField("rand", new BytesRef());
  Field bodyField = newStringField(random, "body", "", Field.Store.NO);
  Field bodyDVField = new SortedDocValuesField("body", new BytesRef());
  doc.add(idField);
  doc.add(idDVField);
  doc.add(intIdField);
  doc.add(intDVField);
  doc.add(floatIdField);
  doc.add(floatDVField);
  doc.add(longIdField);
  doc.add(longDVField);
  doc.add(doubleIdField);
  doc.add(doubleDVField);
  doc.add(randField);
  doc.add(randDVField);
  doc.add(bodyField);
  doc.add(bodyDVField);

  RandomIndexWriter writer = new RandomIndexWriter(random, index.index, 
                                                   newIndexWriterConfig(random, new MockAnalyzer(random))
                                                   .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(TestUtil.nextInt(random, 50, 1000)).setMergePolicy(newLogMergePolicy()));
  TestUtil.reduceOpenFiles(writer.w);

  while(true) {
    // Track how many docs hold the current min/max random value.
    int minCount = 0;
    int maxCount = 0;

    for (int d = minId; d <= maxId; d++) {
      idField.setStringValue(pad(d));
      idDVField.setBytesValue(new BytesRef(pad(d)));
      intIdField.setIntValue(d);
      intDVField.setLongValue(d);
      floatIdField.setFloatValue(d);
      // Doc values store longs, so float/double bits are stored raw.
      floatDVField.setLongValue(Float.floatToRawIntBits(d));
      longIdField.setLongValue(d);
      longDVField.setLongValue(d);
      doubleIdField.setDoubleValue(d);
      doubleDVField.setLongValue(Double.doubleToRawLongBits(d));
      int r = index.allowNegativeRandomInts ? random.nextInt() : random
          .nextInt(Integer.MAX_VALUE);
      // Maintain the running max and how many docs share it.
      if (index.maxR < r) {
        index.maxR = r;
        maxCount = 1;
      } else if (index.maxR == r) {
        maxCount++;
      }

      // Same bookkeeping for the running min.
      if (r < index.minR) {
        index.minR = r;
        minCount = 1;
      } else if (r == index.minR) {
        minCount++;
      }
      randField.setStringValue(pad(r));
      randDVField.setBytesValue(new BytesRef(pad(r)));
      bodyField.setStringValue("body");
      bodyDVField.setBytesValue(new BytesRef("body"));
      writer.addDocument(doc);
    }

    if (minCount == 1 && maxCount == 1) {
      // our subclasses rely on only 1 doc having the min or
      // max, so, we loop until we satisfy that. it should be
      // exceedingly rare (Yonik calculates 1 in ~429,000)
      // times) that this loop requires more than one try:
      IndexReader ir = writer.getReader();
      writer.close();
      return ir;
    }

    // try again
    writer.deleteAll();
  }
}