org.apache.lucene.index.SegmentReadState Java Examples
The following examples show how to use org.apache.lucene.index.SegmentReadState. They are drawn from open-source projects; the source file and license are noted above each example.
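As an orientation to the pattern these examples share, here is a minimal sketch of a reader that consumes a SegmentReadState: it derives a per-segment file name, opens the input through the state's directory and IOContext, validates the index header, and closes the input safely. The codec name "Demo" and the "dmo" file extension are made up for illustration; the SegmentReadState fields and the CodecUtil, IndexFileNames, and IOUtils calls are the same ones used throughout the examples below.

import java.io.IOException;

import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.util.IOUtils;

final class DemoReader {
  // Hypothetical example: opens a per-segment "dmo" file, checks its header
  // against this segment's id and suffix, and reads a single vInt from it.
  static int readDemoValue(SegmentReadState state) throws IOException {
    String fileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, "dmo");
    IndexInput in = state.directory.openInput(fileName, state.context);
    boolean success = false;
    try {
      // "Demo" and the version range are placeholders for this sketch
      CodecUtil.checkIndexHeader(in, "Demo", 0, 0, state.segmentInfo.getId(), state.segmentSuffix);
      int value = in.readVInt();
      success = true;
      return value;
    } finally {
      // Close normally on success; suppress secondary exceptions on failure
      if (success) {
        IOUtils.close(in);
      } else {
        IOUtils.closeWhileHandlingException(in);
      }
    }
  }
}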
Example #1
Source File: RAMOnlyPostingsFormat.java From lucene-solr with Apache License 2.0
@Override
public FieldsProducer fieldsProducer(SegmentReadState readState) throws IOException {

  // Load our ID:
  final String idFileName = IndexFileNames.segmentFileName(readState.segmentInfo.name, readState.segmentSuffix, ID_EXTENSION);
  IndexInput in = readState.directory.openInput(idFileName, readState.context);
  boolean success = false;
  final int id;
  try {
    CodecUtil.checkHeader(in, RAM_ONLY_NAME, VERSION_START, VERSION_LATEST);
    id = in.readVInt();
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(in);
    } else {
      IOUtils.close(in);
    }
  }

  synchronized(state) {
    return state.get(id);
  }
}
Example #2
Source File: DirectPostingsFormat.java From lucene-solr with Apache License 2.0
@Override
public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
  FieldsProducer postings = PostingsFormat.forName("Lucene84").fieldsProducer(state);
  if (state.context.context != IOContext.Context.MERGE) {
    FieldsProducer loadedPostings;
    try {
      postings.checkIntegrity();
      loadedPostings = new DirectFields(state, postings, minSkipCount, lowFreqCutoff);
    } finally {
      postings.close();
    }
    return loadedPostings;
  } else {
    // Don't load postings for merge:
    return postings;
  }
}
Example #3
Source File: PointsFormat.java From lucene-solr with Apache License 2.0
@Override
public PointsReader fieldsReader(SegmentReadState state) {
  return new PointsReader() {
    @Override
    public void close() {}

    @Override
    public long ramBytesUsed() {
      return 0L;
    }

    @Override
    public void checkIntegrity() {}

    @Override
    public PointValues getValues(String field) {
      throw new IllegalArgumentException("field=\"" + field + "\" was not indexed with points");
    }
  };
}
Example #4
Source File: BlockTreeOrdsPostingsFormat.java From lucene-solr with Apache License 2.0
@Override
public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
  PostingsReaderBase postingsReader = new Lucene84PostingsReader(state);
  boolean success = false;
  try {
    FieldsProducer ret = new OrdsBlockTreeTermsReader(postingsReader, state);
    success = true;
    return ret;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(postingsReader);
    }
  }
}
Example #5
Source File: IDVersionPostingsReader.java From lucene-solr with Apache License 2.0
@Override
public void init(IndexInput termsIn, SegmentReadState state) throws IOException {
  // Make sure we are talking to the matching postings writer
  CodecUtil.checkIndexHeader(termsIn,
                             IDVersionPostingsWriter.TERMS_CODEC,
                             IDVersionPostingsWriter.VERSION_START,
                             IDVersionPostingsWriter.VERSION_CURRENT,
                             state.segmentInfo.getId(), state.segmentSuffix);
}
Example #6
Source File: IDVersionPostingsFormat.java From lucene-solr with Apache License 2.0
@Override
public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
  PostingsReaderBase postingsReader = new IDVersionPostingsReader();
  boolean success = false;
  try {
    FieldsProducer ret = new VersionBlockTreeTermsReader(postingsReader, state);
    success = true;
    return ret;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(postingsReader);
    }
  }
}
Example #7
Source File: CompletionFieldsProducer.java From lucene-solr with Apache License 2.0
CompletionFieldsProducer(String codecName, SegmentReadState state, FSTLoadMode fstLoadMode) throws IOException {
  String indexFile = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, INDEX_EXTENSION);
  delegateFieldsProducer = null;
  boolean success = false;

  try (ChecksumIndexInput index = state.directory.openChecksumInput(indexFile, state.context)) {
    // open up dict file containing all fsts
    String dictFile = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, DICT_EXTENSION);
    dictIn = state.directory.openInput(dictFile, state.context);
    CodecUtil.checkIndexHeader(dictIn, codecName, COMPLETION_CODEC_VERSION, COMPLETION_VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    // just validate the footer for the dictIn
    CodecUtil.retrieveChecksum(dictIn);

    // open up index file (fieldNumber, offset)
    CodecUtil.checkIndexHeader(index, codecName, COMPLETION_CODEC_VERSION, COMPLETION_VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    // load delegate PF
    PostingsFormat delegatePostingsFormat = PostingsFormat.forName(index.readString());
    delegateFieldsProducer = delegatePostingsFormat.fieldsProducer(state);

    // read suggest field numbers and their offsets in the terms file from index
    int numFields = index.readVInt();
    readers = new HashMap<>(numFields);
    for (int i = 0; i < numFields; i++) {
      int fieldNumber = index.readVInt();
      long offset = index.readVLong();
      long minWeight = index.readVLong();
      long maxWeight = index.readVLong();
      byte type = index.readByte();
      FieldInfo fieldInfo = state.fieldInfos.fieldInfo(fieldNumber);
      // we don't load the FST yet
      readers.put(fieldInfo.name, new CompletionsTermsReader(dictIn, offset, minWeight, maxWeight, type, fstLoadMode));
    }
    CodecUtil.checkFooter(index);
    success = true;
  } finally {
    if (success == false) {
      IOUtils.closeWhileHandlingException(delegateFieldsProducer, dictIn);
    }
  }
}
Example #8
Source File: STUniformSplitTermsReader.java From lucene-solr with Apache License 2.0
@Override
protected void fillFieldMap(PostingsReaderBase postingsReader, SegmentReadState state, BlockDecoder blockDecoder,
                            boolean dictionaryOnHeap, IndexInput dictionaryInput, IndexInput blockInput,
                            Collection<FieldMetadata> fieldMetadataCollection, FieldInfos fieldInfos) throws IOException {
  if (!fieldMetadataCollection.isEmpty()) {
    FieldMetadata unionFieldMetadata = createUnionFieldMetadata(fieldMetadataCollection);
    // Share the same immutable dictionary between all fields.
    IndexDictionary.BrowserSupplier dictionaryBrowserSupplier = createDictionaryBrowserSupplier(state, dictionaryInput, unionFieldMetadata, blockDecoder, dictionaryOnHeap);
    for (FieldMetadata fieldMetadata : fieldMetadataCollection) {
      fieldToTermsMap.put(fieldMetadata.getFieldInfo().name,
          new STUniformSplitTerms(blockInput, fieldMetadata, unionFieldMetadata, postingsReader, blockDecoder, fieldInfos, dictionaryBrowserSupplier));
    }
  }
}
Example #9
Source File: STUniformSplitTermsReader.java From lucene-solr with Apache License 2.0
/**
 * @see UniformSplitTermsReader#UniformSplitTermsReader(PostingsReaderBase, SegmentReadState, BlockDecoder, boolean)
 */
protected STUniformSplitTermsReader(PostingsReaderBase postingsReader, SegmentReadState state, BlockDecoder blockDecoder,
                                    boolean dictionaryOnHeap, FieldMetadata.Serializer fieldMetadataReader,
                                    String codecName, int versionStart, int versionCurrent,
                                    String termsBlocksExtension, String dictionaryExtension) throws IOException {
  super(postingsReader, state, blockDecoder, dictionaryOnHeap, fieldMetadataReader, codecName, versionStart, versionCurrent, termsBlocksExtension, dictionaryExtension);
}
Example #10
Source File: UniformSplitPostingsFormat.java From lucene-solr with Apache License 2.0
@Override
public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
  PostingsReaderBase postingsReader = new Lucene84PostingsReader(state);
  boolean success = false;
  try {
    FieldsProducer termsReader = createUniformSplitTermsReader(postingsReader, state, blockDecoder);
    success = true;
    return termsReader;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(postingsReader);
    }
  }
}
Example #11
Source File: UniformSplitTermsReader.java From lucene-solr with Apache License 2.0
protected void fillFieldMap(PostingsReaderBase postingsReader, SegmentReadState state, BlockDecoder blockDecoder,
                            boolean dictionaryOnHeap, IndexInput dictionaryInput, IndexInput blockInput,
                            Collection<FieldMetadata> fieldMetadataCollection, FieldInfos fieldInfos) throws IOException {
  for (FieldMetadata fieldMetadata : fieldMetadataCollection) {
    IndexDictionary.BrowserSupplier dictionaryBrowserSupplier = createDictionaryBrowserSupplier(state, dictionaryInput, fieldMetadata, blockDecoder, dictionaryOnHeap);
    fieldToTermsMap.put(fieldMetadata.getFieldInfo().name,
        new UniformSplitTerms(blockInput, fieldMetadata, postingsReader, blockDecoder, dictionaryBrowserSupplier));
  }
}
Example #12
Source File: AssertingDocValuesFormat.java From lucene-solr with Apache License 2.0
@Override
public DocValuesProducer fieldsProducer(SegmentReadState state) throws IOException {
  assert state.fieldInfos.hasDocValues();
  DocValuesProducer producer = in.fieldsProducer(state);
  assert producer != null;
  return new AssertingDocValuesProducer(producer, state.segmentInfo.maxDoc(), false);
}
Example #13
Source File: TestGeo3DPoint.java From lucene-solr with Apache License 2.0
private static Codec getCodec() {
  if (Codec.getDefault().getName().equals("Lucene84")) {
    int maxPointsInLeafNode = TestUtil.nextInt(random(), 16, 2048);
    double maxMBSortInHeap = 3.0 + (3*random().nextDouble());
    if (VERBOSE) {
      System.out.println("TEST: using Lucene86PointsFormat with maxPointsInLeafNode=" + maxPointsInLeafNode + " and maxMBSortInHeap=" + maxMBSortInHeap);
    }

    return new FilterCodec("Lucene84", Codec.getDefault()) {
      @Override
      public PointsFormat pointsFormat() {
        return new PointsFormat() {
          @Override
          public PointsWriter fieldsWriter(SegmentWriteState writeState) throws IOException {
            return new Lucene86PointsWriter(writeState, maxPointsInLeafNode, maxMBSortInHeap);
          }

          @Override
          public PointsReader fieldsReader(SegmentReadState readState) throws IOException {
            return new Lucene86PointsReader(readState);
          }
        };
      }
    };
  } else {
    return Codec.getDefault();
  }
}
Example #14
Source File: FSTTermsReader.java From lucene-solr with Apache License 2.0
public FSTTermsReader(SegmentReadState state, PostingsReaderBase postingsReader) throws IOException {
  final String termsFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, FSTTermsWriter.TERMS_EXTENSION);

  this.postingsReader = postingsReader;
  final IndexInput in = state.directory.openInput(termsFileName, state.context);

  boolean success = false;
  try {
    CodecUtil.checkIndexHeader(in, FSTTermsWriter.TERMS_CODEC_NAME,
                                   FSTTermsWriter.TERMS_VERSION_START,
                                   FSTTermsWriter.TERMS_VERSION_CURRENT,
                                   state.segmentInfo.getId(), state.segmentSuffix);
    CodecUtil.checksumEntireFile(in);
    this.postingsReader.init(in, state);
    seekDir(in);

    final FieldInfos fieldInfos = state.fieldInfos;
    final int numFields = in.readVInt();
    for (int i = 0; i < numFields; i++) {
      int fieldNumber = in.readVInt();
      FieldInfo fieldInfo = fieldInfos.fieldInfo(fieldNumber);
      long numTerms = in.readVLong();
      long sumTotalTermFreq = in.readVLong();
      // if frequencies are omitted, sumTotalTermFreq=sumDocFreq and we only write one value
      long sumDocFreq = fieldInfo.getIndexOptions() == IndexOptions.DOCS ? sumTotalTermFreq : in.readVLong();
      int docCount = in.readVInt();
      TermsReader current = new TermsReader(fieldInfo, in, numTerms, sumTotalTermFreq, sumDocFreq, docCount);
      TermsReader previous = fields.put(fieldInfo.name, current);
      checkFieldSummary(state.segmentInfo, in, current, previous);
    }
    success = true;
  } finally {
    if (success) {
      IOUtils.close(in);
    } else {
      IOUtils.closeWhileHandlingException(in);
    }
  }
}
Example #15
Source File: SimpleTextFieldsReader.java From lucene-solr with Apache License 2.0
public SimpleTextFieldsReader(SegmentReadState state) throws IOException {
  this.maxDoc = state.segmentInfo.maxDoc();
  fieldInfos = state.fieldInfos;
  in = state.directory.openInput(SimpleTextPostingsFormat.getPostingsFileName(state.segmentInfo.name, state.segmentSuffix), state.context);
  boolean success = false;
  try {
    fields = readFields(in.clone());
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(this);
    }
  }
}
Example #16
Source File: FSTPostingsFormat.java From lucene-solr with Apache License 2.0
@Override
public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
  PostingsReaderBase postingsReader = new Lucene84PostingsReader(state);
  boolean success = false;
  try {
    FieldsProducer ret = new FSTTermsReader(state, postingsReader);
    success = true;
    return ret;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(postingsReader);
    }
  }
}
Example #17
Source File: SimpleTextPointsReader.java From lucene-solr with Apache License 2.0
public SimpleTextPointsReader(SegmentReadState readState) throws IOException {
  // Initialize readers now:

  // Read index:
  Map<String,Long> fieldToFileOffset = new HashMap<>();
  String indexFileName = IndexFileNames.segmentFileName(readState.segmentInfo.name, readState.segmentSuffix, SimpleTextPointsFormat.POINT_INDEX_EXTENSION);
  try (ChecksumIndexInput in = readState.directory.openChecksumInput(indexFileName, IOContext.DEFAULT)) {
    readLine(in);
    int count = parseInt(FIELD_COUNT);
    for (int i = 0; i < count; i++) {
      readLine(in);
      String fieldName = stripPrefix(FIELD_FP_NAME);
      readLine(in);
      long fp = parseLong(FIELD_FP);
      fieldToFileOffset.put(fieldName, fp);
    }
    SimpleTextUtil.checkFooter(in);
  }

  boolean success = false;
  String fileName = IndexFileNames.segmentFileName(readState.segmentInfo.name, readState.segmentSuffix, SimpleTextPointsFormat.POINT_EXTENSION);
  dataIn = readState.directory.openInput(fileName, IOContext.DEFAULT);
  try {
    for (Map.Entry<String,Long> ent : fieldToFileOffset.entrySet()) {
      readers.put(ent.getKey(), initReader(ent.getValue()));
    }
    success = true;
  } finally {
    if (success == false) {
      IOUtils.closeWhileHandlingException(this);
    }
  }

  this.readState = readState;
}
Example #18
Source File: AssertingNormsFormat.java From lucene-solr with Apache License 2.0
@Override
public NormsProducer normsProducer(SegmentReadState state) throws IOException {
  assert state.fieldInfos.hasNorms();
  NormsProducer producer = in.normsProducer(state);
  assert producer != null;
  return new AssertingNormsProducer(producer, state.segmentInfo.maxDoc(), false);
}
Example #19
Source File: Completion090PostingsFormat.java From Elasticsearch with Apache License 2.0
public CompletionFieldsProducer(SegmentReadState state) throws IOException {
  String suggestFSTFile = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, EXTENSION);
  IndexInput input = state.directory.openInput(suggestFSTFile, state.context);
  version = CodecUtil.checkHeader(input, CODEC_NAME, SUGGEST_CODEC_VERSION, SUGGEST_VERSION_CURRENT);
  FieldsProducer delegateProducer = null;
  boolean success = false;
  try {
    PostingsFormat delegatePostingsFormat = PostingsFormat.forName(input.readString());
    String providerName = input.readString();
    CompletionLookupProvider completionLookupProvider = providers.get(providerName);
    if (completionLookupProvider == null) {
      throw new IllegalStateException("no provider with name [" + providerName + "] registered");
    }
    // TODO: we could clone the ReadState and make it always forward IOContext.MERGE to prevent unnecessary heap usage?
    delegateProducer = delegatePostingsFormat.fieldsProducer(state);
    /*
     * If we are merging we don't load the FSTs at all such that we
     * don't consume so much memory during merge
     */
    if (state.context.context != Context.MERGE) {
      // TODO: maybe we can do this in a fully lazy fashion based on some configuration
      // eventually we should have some kind of circuit breaker that prevents us from going OOM here
      // with some configuration
      this.lookupFactory = completionLookupProvider.load(input);
    } else {
      this.lookupFactory = null;
    }
    this.delegateProducer = delegateProducer;
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(delegateProducer, input);
    } else {
      IOUtils.close(input);
    }
  }
}
Example #20
Source File: LuceneVarGapDocFreqInterval.java From lucene-solr with Apache License 2.0
@Override
public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
  PostingsReaderBase postings = new Lucene84PostingsReader(state);
  TermsIndexReaderBase indexReader;

  boolean success = false;
  try {
    indexReader = new VariableGapTermsIndexReader(state);
    success = true;
  } finally {
    if (!success) {
      postings.close();
    }
  }

  success = false;
  try {
    FieldsProducer ret = new BlockTermsReader(indexReader, postings, state);
    success = true;
    return ret;
  } finally {
    if (!success) {
      try {
        postings.close();
      } finally {
        indexReader.close();
      }
    }
  }
}
Example #21
Source File: LuceneFixedGap.java From lucene-solr with Apache License 2.0
@Override
public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
  PostingsReaderBase postings = new Lucene84PostingsReader(state);
  TermsIndexReaderBase indexReader;

  boolean success = false;
  try {
    indexReader = new FixedGapTermsIndexReader(state);
    success = true;
  } finally {
    if (!success) {
      postings.close();
    }
  }

  success = false;
  try {
    FieldsProducer ret = new BlockTermsReader(indexReader, postings, state);
    success = true;
    return ret;
  } finally {
    if (!success) {
      try {
        postings.close();
      } finally {
        indexReader.close();
      }
    }
  }
}
Example #22
Source File: LuceneVarGapFixedInterval.java From lucene-solr with Apache License 2.0
@Override
public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
  PostingsReaderBase postings = new Lucene84PostingsReader(state);
  TermsIndexReaderBase indexReader;

  boolean success = false;
  try {
    indexReader = new VariableGapTermsIndexReader(state);
    success = true;
  } finally {
    if (!success) {
      postings.close();
    }
  }

  success = false;
  try {
    FieldsProducer ret = new BlockTermsReader(indexReader, postings, state);
    success = true;
    return ret;
  } finally {
    if (!success) {
      try {
        postings.close();
      } finally {
        indexReader.close();
      }
    }
  }
}
Example #23
Source File: Lucene80NormsProducer.java From lucene-solr with Apache License 2.0
Lucene80NormsProducer(SegmentReadState state, String dataCodec, String dataExtension, String metaCodec, String metaExtension) throws IOException {
  maxDoc = state.segmentInfo.maxDoc();
  String metaName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, metaExtension);
  int version = -1;

  // read in the entries from the metadata file.
  try (ChecksumIndexInput in = state.directory.openChecksumInput(metaName, state.context)) {
    Throwable priorE = null;
    try {
      version = CodecUtil.checkIndexHeader(in, metaCodec, VERSION_START, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
      readFields(in, state.fieldInfos);
    } catch (Throwable exception) {
      priorE = exception;
    } finally {
      CodecUtil.checkFooter(in, priorE);
    }
  }

  String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, dataExtension);
  data = state.directory.openInput(dataName, state.context);
  boolean success = false;
  try {
    final int version2 = CodecUtil.checkIndexHeader(data, dataCodec, VERSION_START, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    if (version != version2) {
      throw new CorruptIndexException("Format versions mismatch: meta=" + version + ",data=" + version2, data);
    }

    // NOTE: data file is too costly to verify checksum against all the bytes on open,
    // but for now we at least verify proper structure of the checksum footer: which looks
    // for FOOTER_MAGIC + algorithmID. This is cheap and can detect some forms of corruption
    // such as file truncation.
    CodecUtil.retrieveChecksum(data);

    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(this.data);
    }
  }
}
Example #24
Source File: PerFieldPostingsFormat.java From lucene-solr with Apache License 2.0
public FieldsReader(final SegmentReadState readState) throws IOException {

  // Read _X.per and init each format:
  boolean success = false;
  try {
    // Read field name -> format name
    for (FieldInfo fi : readState.fieldInfos) {
      if (fi.getIndexOptions() != IndexOptions.NONE) {
        final String fieldName = fi.name;
        final String formatName = fi.getAttribute(PER_FIELD_FORMAT_KEY);
        if (formatName != null) {
          // null formatName means the field is in fieldInfos, but has no postings!
          final String suffix = fi.getAttribute(PER_FIELD_SUFFIX_KEY);
          if (suffix == null) {
            throw new IllegalStateException("missing attribute: " + PER_FIELD_SUFFIX_KEY + " for field: " + fieldName);
          }
          PostingsFormat format = PostingsFormat.forName(formatName);
          String segmentSuffix = getSuffix(formatName, suffix);
          if (!formats.containsKey(segmentSuffix)) {
            formats.put(segmentSuffix, format.fieldsProducer(new SegmentReadState(readState, segmentSuffix)));
          }
          fields.put(fieldName, formats.get(segmentSuffix));
        }
      }
    }
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(formats.values());
    }
  }

  this.segment = readState.segmentInfo.name;
}
Example #25
Source File: PerFieldDocValuesFormat.java From lucene-solr with Apache License 2.0
public FieldsReader(final SegmentReadState readState) throws IOException {

  // Init each unique format:
  boolean success = false;
  try {
    // Read field name -> format name
    for (FieldInfo fi : readState.fieldInfos) {
      if (fi.getDocValuesType() != DocValuesType.NONE) {
        final String fieldName = fi.name;
        final String formatName = fi.getAttribute(PER_FIELD_FORMAT_KEY);
        if (formatName != null) {
          // null formatName means the field is in fieldInfos, but has no docvalues!
          final String suffix = fi.getAttribute(PER_FIELD_SUFFIX_KEY);
          if (suffix == null) {
            throw new IllegalStateException("missing attribute: " + PER_FIELD_SUFFIX_KEY + " for field: " + fieldName);
          }
          DocValuesFormat format = DocValuesFormat.forName(formatName);
          String segmentSuffix = getFullSegmentSuffix(readState.segmentSuffix, getSuffix(formatName, suffix));
          if (!formats.containsKey(segmentSuffix)) {
            formats.put(segmentSuffix, format.fieldsProducer(new SegmentReadState(readState, segmentSuffix)));
          }
          fields.put(fieldName, formats.get(segmentSuffix));
        }
      }
    }
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(formats.values());
    }
  }
}
Example #26
Source File: Lucene84PostingsReader.java From lucene-solr with Apache License 2.0
@Override
public void init(IndexInput termsIn, SegmentReadState state) throws IOException {
  // Make sure we are talking to the matching postings writer
  CodecUtil.checkIndexHeader(termsIn, TERMS_CODEC, VERSION_START, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
  final int indexBlockSize = termsIn.readVInt();
  if (indexBlockSize != BLOCK_SIZE) {
    throw new IllegalStateException("index-time BLOCK_SIZE (" + indexBlockSize + ") != read-time BLOCK_SIZE (" + BLOCK_SIZE + ")");
  }
}
Example #27
Source File: TestLucene86PointsFormat.java From lucene-solr with Apache License 2.0
public TestLucene86PointsFormat() {
  // standard issue
  Codec defaultCodec = new Lucene86Codec();
  if (random().nextBoolean()) {
    // randomize parameters
    maxPointsInLeafNode = TestUtil.nextInt(random(), 50, 500);
    double maxMBSortInHeap = 3.0 + (3*random().nextDouble());
    if (VERBOSE) {
      System.out.println("TEST: using Lucene86PointsFormat with maxPointsInLeafNode=" + maxPointsInLeafNode + " and maxMBSortInHeap=" + maxMBSortInHeap);
    }

    // sneaky impersonation!
    codec = new FilterCodec(defaultCodec.getName(), defaultCodec) {
      @Override
      public PointsFormat pointsFormat() {
        return new PointsFormat() {
          @Override
          public PointsWriter fieldsWriter(SegmentWriteState writeState) throws IOException {
            return new Lucene86PointsWriter(writeState, maxPointsInLeafNode, maxMBSortInHeap);
          }

          @Override
          public PointsReader fieldsReader(SegmentReadState readState) throws IOException {
            return new Lucene86PointsReader(readState);
          }
        };
      }
    };
  } else {
    // standard issue
    codec = defaultCodec;
    maxPointsInLeafNode = BKDWriter.DEFAULT_MAX_POINTS_IN_LEAF_NODE;
  }
}
Example #28
Source File: TestPointQueries.java From lucene-solr with Apache License 2.0
private static Codec getCodec() {
  if (Codec.getDefault().getName().equals("Lucene84")) {
    int maxPointsInLeafNode = TestUtil.nextInt(random(), 16, 2048);
    double maxMBSortInHeap = 5.0 + (3*random().nextDouble());
    if (VERBOSE) {
      System.out.println("TEST: using Lucene86PointsFormat with maxPointsInLeafNode=" + maxPointsInLeafNode + " and maxMBSortInHeap=" + maxMBSortInHeap);
    }

    return new FilterCodec("Lucene84", Codec.getDefault()) {
      @Override
      public PointsFormat pointsFormat() {
        return new PointsFormat() {
          @Override
          public PointsWriter fieldsWriter(SegmentWriteState writeState) throws IOException {
            return new Lucene86PointsWriter(writeState, maxPointsInLeafNode, maxMBSortInHeap);
          }

          @Override
          public PointsReader fieldsReader(SegmentReadState readState) throws IOException {
            return new Lucene86PointsReader(readState);
          }
        };
      }
    };
  } else {
    return Codec.getDefault();
  }
}
Example #29
Source File: Lucene50PostingsFormat.java From lucene-solr with Apache License 2.0
@Override
public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
  PostingsReaderBase postingsReader = new Lucene50PostingsReader(state);
  boolean success = false;
  try {
    FieldsProducer ret = new BlockTreeTermsReader(postingsReader, state);
    success = true;
    return ret;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(postingsReader);
    }
  }
}
Example #30
Source File: Lucene50PostingsReader.java From lucene-solr with Apache License 2.0
@Override
public void init(IndexInput termsIn, SegmentReadState state) throws IOException {
  // Make sure we are talking to the matching postings writer
  CodecUtil.checkIndexHeader(termsIn, TERMS_CODEC, VERSION_START, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
  final int indexBlockSize = termsIn.readVInt();
  if (indexBlockSize != BLOCK_SIZE) {
    throw new IllegalStateException("index-time BLOCK_SIZE (" + indexBlockSize + ") != read-time BLOCK_SIZE (" + BLOCK_SIZE + ")");
  }
}