org.apache.lucene.util.Version Java Examples
The following examples show how to use org.apache.lucene.util.Version.
All examples are taken from open-source projects; each one notes its source file, originating project, and license.
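Before the examples, a brief orientation may help. The sketch below shows the core of the Version API that recurs throughout this page: strict and lenient parsing, comparison with onOrAfter, and the Version.LATEST constant. It is a minimal illustration assuming a recent Lucene release (8.x); the version constants referenced in older examples (LUCENE_43, LUCENE_CURRENT, etc.) follow the same pattern on their respective releases.

import java.text.ParseException;
import org.apache.lucene.util.Version;

public class VersionBasics {
  public static void main(String[] args) throws ParseException {
    // Strict parsing: expects "major.minor.bugfix" (optionally ".prerelease")
    Version v = Version.parse("8.3.0");

    // Lenient parsing also accepts shorthand forms such as "8.3"
    Version lenient = Version.parseLeniently("8.3");

    // Version comparison: the usual way to gate behavior on an index or config version
    System.out.println(v.onOrAfter(Version.LUCENE_8_0_0)); // true, assuming an 8.x+ jar

    // LATEST is the version of the Lucene jar on the classpath
    System.out.println(Version.LATEST + " (major=" + Version.LATEST.major + ")");
    System.out.println(lenient);
  }
}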
Example #1
Source File: SearchInputController.java From olat with Apache License 2.0
protected Set<String> getHighlightWords(final String searchString) {
  try {
    final Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
    final TokenStream stream = analyzer.tokenStream("content", new StringReader(searchString));
    final TermAttribute termAtt = stream.addAttribute(TermAttribute.class);
    for (boolean next = stream.incrementToken(); next; next = stream.incrementToken()) {
      final String term = termAtt.term();
      if (log.isDebugEnabled()) {
        log.debug(term);
      }
    }
  } catch (final IOException e) {
    log.error("", e);
  }
  // NOTE: the tokens are only logged; this method always returns null.
  return null;
}
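The example above targets a pre-4.0 Lucene: TermAttribute was removed in Lucene 4 in favor of CharTermAttribute, and newer releases require reset()/end() around the consume loop (StandardAnalyzer also no longer takes a Version argument). A rough modern equivalent, sketched under the assumption of Lucene 5+ and actually collecting the terms rather than only logging them, might look like this:

import java.io.IOException;
import java.io.StringReader;
import java.util.HashSet;
import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

static Set<String> getHighlightWords(String searchString) throws IOException {
  Set<String> terms = new HashSet<>();
  Analyzer analyzer = new StandardAnalyzer(); // no Version argument since Lucene 5
  try (TokenStream stream = analyzer.tokenStream("content", new StringReader(searchString))) {
    CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
    stream.reset();                 // mandatory before incrementToken() in modern Lucene
    while (stream.incrementToken()) {
      terms.add(termAtt.toString());
    }
    stream.end();                   // perform end-of-stream operations
  }
  return terms;
}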
Example #2
Source File: BaseSegmentInfoFormatTestCase.java From lucene-solr with Apache License 2.0
/** Test versions */
public void testVersions() throws Exception {
  Codec codec = getCodec();
  for (Version v : getVersions()) {
    for (Version minV : new Version[] { v, null }) {
      Directory dir = newDirectory();
      byte id[] = StringHelper.randomId();
      SegmentInfo info = new SegmentInfo(dir, v, minV, "_123", 1, false, codec,
          Collections.<String,String>emptyMap(), id, Collections.emptyMap(), null);
      info.setFiles(Collections.<String>emptySet());
      codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT);
      SegmentInfo info2 = codec.segmentInfoFormat().read(dir, "_123", id, IOContext.DEFAULT);
      assertEquals(info2.getVersion(), v);
      if (supportsMinVersion()) {
        assertEquals(info2.getMinVersion(), minV);
      } else {
        assertEquals(info2.getMinVersion(), null);
      }
      dir.close();
    }
  }
}
Example #3
Source File: TestSegmentTermDocs.java From lucene-solr with Apache License 2.0
public void testTermDocs() throws IOException {
  // After adding the document, we should be able to read it back in
  SegmentReader reader = new SegmentReader(info, Version.LATEST.major, newIOContext(random()));
  assertTrue(reader != null);
  TermsEnum terms = reader.terms(DocHelper.TEXT_FIELD_2_KEY).iterator();
  terms.seekCeil(new BytesRef("field"));
  PostingsEnum termDocs = TestUtil.docs(random(), terms, null, PostingsEnum.FREQS);
  if (termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
    int docId = termDocs.docID();
    assertTrue(docId == 0);
    int freq = termDocs.freq();
    assertTrue(freq == 3);
  }
  reader.close();
}
Example #4
Source File: SegmentInfo.java From lucene-solr with Apache License 2.0
/**
 * Construct a new complete SegmentInfo instance from input.
 * <p>Note: this is public only to allow access from the codecs package.</p>
 */
public SegmentInfo(Directory dir, Version version, Version minVersion, String name, int maxDoc,
                   boolean isCompoundFile, Codec codec, Map<String,String> diagnostics,
                   byte[] id, Map<String,String> attributes, Sort indexSort) {
  assert !(dir instanceof TrackingDirectoryWrapper);
  this.dir = Objects.requireNonNull(dir);
  this.version = Objects.requireNonNull(version);
  this.minVersion = minVersion;
  this.name = Objects.requireNonNull(name);
  this.maxDoc = maxDoc;
  this.isCompoundFile = isCompoundFile;
  this.codec = codec;
  this.diagnostics = Map.copyOf(Objects.requireNonNull(diagnostics));
  this.id = id;
  if (id.length != StringHelper.ID_LENGTH) {
    throw new IllegalArgumentException("invalid id: " + Arrays.toString(id));
  }
  this.attributes = Map.copyOf(Objects.requireNonNull(attributes));
  this.indexSort = indexSort;
}
Example #5
Source File: AutoCompleter.java From webdsl with Apache License 2.0
/**
 * Use a different index as the auto completer index, or re-open
 * the existing index if <code>autocompleteIndex</code> is the same value
 * as given in the constructor.
 * @param autocompleteIndexDir the autocomplete directory to use
 * @throws AlreadyClosedException if the Autocompleter is already closed
 * @throws IOException if the autocompleter cannot open the directory
 */
// TODO: we should make this final as it is called in the constructor
public void setAutoCompleteIndex(Directory autocompleteIndexDir) throws IOException {
  // this could be the same directory as the current autocompleteIndex;
  // modifications to the directory should be synchronized
  synchronized (modifyCurrentIndexLock) {
    ensureOpen();
    if (!IndexReader.indexExists(autocompleteIndexDir)) {
      IndexWriter writer = new IndexWriter(autocompleteIndexDir,
          new IndexWriterConfig(Version.LUCENE_CURRENT, new WhitespaceAnalyzer(Version.LUCENE_CURRENT)));
      writer.close();
    }
    swapSearcher(autocompleteIndexDir);
  }
}
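Both IndexReader.indexExists and the Version-taking IndexWriterConfig constructor used above were removed in later releases; since Lucene 5 the existence check lives on DirectoryReader and analyzers/configs are created without a Version. A minimal sketch of the same "create the index if it does not exist yet" step on a recent Lucene might look like this:

import java.io.IOException;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;

static void ensureIndexExists(Directory dir) throws IOException {
  if (!DirectoryReader.indexExists(dir)) {
    // Opening and closing an IndexWriter once is enough to write an empty commit point
    try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new WhitespaceAnalyzer()))) {
      writer.commit();
    }
  }
}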
Example #6
Source File: LuceneHelper.java From dexter with Apache License 2.0
/**
 * Opens or creates a Lucene index in the given directory.
 *
 * @param wikiIdtToLuceneIdSerialization
 *            the file containing the serialized mapping between wiki-ids and Lucene document ids
 * @param indexPath
 *            the path of the directory holding the Lucene index
 */
protected LuceneHelper(File wikiIdtToLuceneIdSerialization, File indexPath) {
  logger.info("opening lucene index in folder {}", indexPath);
  config = new IndexWriterConfig(Version.LUCENE_41, ANALYZER);
  this.wikiIdtToLuceneIdSerialization = wikiIdtToLuceneIdSerialization;
  BooleanQuery.setMaxClauseCount(1000);
  try {
    index = FSDirectory.open(indexPath);
    // writer.commit();
  } catch (Exception e) {
    logger.error("opening the index: {}", e.toString());
    System.exit(1);
  }
  summarizer = new ArticleSummarizer();
  writer = getWriter();
  collectionSize = writer.numDocs();
  wikiIdToLuceneId = Collections.emptyMap();
}
Example #7
Source File: TestIndexWriterOnOldIndex.java From lucene-solr with Apache License 2.0
public void testOpenModeAndCreatedVersion() throws IOException {
  assumeTrue("Reenable when 8.0 is released", false);
  InputStream resource = getClass().getResourceAsStream("index.single-empty-doc.8.0.0.zip");
  assertNotNull(resource);
  Path path = createTempDir();
  TestUtil.unzip(resource, path);
  Directory dir = newFSDirectory(path);
  for (OpenMode openMode : OpenMode.values()) {
    Directory tmpDir = newDirectory(dir);
    assertEquals(7 /* 7.0.0 */, SegmentInfos.readLatestCommit(tmpDir).getIndexCreatedVersionMajor());
    IndexWriter w = new IndexWriter(tmpDir, newIndexWriterConfig().setOpenMode(openMode));
    w.commit();
    w.close();
    switch (openMode) {
      case CREATE:
        assertEquals(Version.LATEST.major, SegmentInfos.readLatestCommit(tmpDir).getIndexCreatedVersionMajor());
        break;
      default:
        assertEquals(7 /* 7.0.0 */, SegmentInfos.readLatestCommit(tmpDir).getIndexCreatedVersionMajor());
    }
    tmpDir.close();
  }
  dir.close();
}
Example #8
Source File: RecoveryFileChunkRequest.java From Elasticsearch with Apache License 2.0
@Override
public void readFrom(StreamInput in) throws IOException {
  super.readFrom(in);
  recoveryId = in.readLong();
  shardId = ShardId.readShardId(in);
  String name = in.readString();
  position = in.readVLong();
  long length = in.readVLong();
  String checksum = in.readOptionalString();
  content = in.readBytesReference();
  String versionString = in.readOptionalString();
  Version writtenBy = Lucene.parseVersionLenient(versionString, null);
  metaData = new StoreFileMetaData(name, length, checksum, writtenBy);
  lastChunk = in.readBoolean();
  totalTranslogOps = in.readVInt();
  sourceThrottleTimeInNanos = in.readLong();
}
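Elasticsearch's Lucene.parseVersionLenient delegates to Lucene's own parsing and falls back to the supplied default instead of throwing. The contrast between Lucene's strict and lenient parsers can be sketched directly against the Version API; the fallback helper below is a hypothetical stand-in for illustration, not the Elasticsearch method:

import java.text.ParseException;
import org.apache.lucene.util.Version;

// Hypothetical helper mirroring the "lenient parse with default" pattern above
static Version parseOrDefault(String s, Version fallback) {
  if (s == null) {
    return fallback;
  }
  try {
    return Version.parseLeniently(s); // accepts shorthand such as "8.3" as well as "8.3.0"
  } catch (ParseException e) {
    return fallback;                  // strict Version.parse(s) would propagate this instead
  }
}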
Example #9
Source File: AlfrescoSolrDataModel.java From SearchServices with GNU Lesser General Public License v3.0
public Solr4QueryParser getLuceneQueryParser(SearchParameters searchParameters, SolrQueryRequest req,
    FTSQueryParser.RerankPhase rerankPhase) {
  Analyzer analyzer = req.getSchema().getQueryAnalyzer();
  Solr4QueryParser parser = new Solr4QueryParser(req, Version.LATEST, searchParameters.getDefaultFieldName(),
      analyzer, rerankPhase);
  parser.setNamespacePrefixResolver(namespaceDAO);
  parser.setDictionaryService(getDictionaryService(CMISStrictDictionaryService.DEFAULT));
  parser.setTenantService(tenantService);
  parser.setSearchParameters(searchParameters);
  parser.setAllowLeadingWildcard(true);
  Properties props = new CoreDescriptorDecorator(req.getCore().getCoreDescriptor()).getProperties();
  int topTermSpanRewriteLimit = Integer.parseInt(props.getProperty("alfresco.topTermSpanRewriteLimit", "1000"));
  parser.setTopTermSpanRewriteLimit(topTermSpanRewriteLimit);
  return parser;
}
Example #10
Source File: FastHdfsKeyValueDirectoryTest.java From incubator-retired-blur with Apache License 2.0
@Test
public void testMultipleWritersOpenOnSameDirectory() throws IOException {
  IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_43, new KeywordAnalyzer());
  FastHdfsKeyValueDirectory directory = new FastHdfsKeyValueDirectory(false, _timer, _configuration,
      new Path(_path, "test_multiple"));
  IndexWriter writer1 = new IndexWriter(directory, config.clone());
  addDoc(writer1, getDoc(1));
  IndexWriter writer2 = new IndexWriter(directory, config.clone());
  addDoc(writer2, getDoc(2));
  writer1.close();
  writer2.close();
  DirectoryReader reader = DirectoryReader.open(directory);
  int maxDoc = reader.maxDoc();
  assertEquals(1, maxDoc);
  Document document = reader.document(0);
  assertEquals("2", document.get("id"));
  reader.close();
}
Example #11
Source File: ContextAnalyzerIndex.java From modernmt with Apache License 2.0
public ContextAnalyzerIndex(Directory directory, Rescorer rescorer) throws IOException {
  this.indexDirectory = directory;
  this.analyzer = new CorpusAnalyzer();
  this.rescorer = rescorer;

  // Index writer setup
  IndexWriterConfig indexConfig = new IndexWriterConfig(Version.LUCENE_4_10_4, this.analyzer);
  indexConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
  indexConfig.setSimilarity(new DefaultSimilarity() {
    @Override
    public float lengthNorm(FieldInvertState state) {
      return 1.f;
    }
  });

  this.indexWriter = new IndexWriter(this.indexDirectory, indexConfig);

  // Ensure index exists
  if (!DirectoryReader.indexExists(directory))
    this.indexWriter.commit();
}
Example #12
Source File: Blur024CodecTest.java From incubator-retired-blur with Apache License 2.0
@Test
public void testDocValuesFormat() throws IOException {
  RAMDirectory directory = new RAMDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new WhitespaceAnalyzer(Version.LUCENE_43));
  conf.setCodec(new Blur024Codec());
  IndexWriter writer = new IndexWriter(directory, conf);
  Document doc = new Document();
  doc.add(new StringField("f", "v", Store.YES));
  doc.add(new SortedDocValuesField("f", new BytesRef("v")));
  writer.addDocument(doc);
  writer.close();
  DirectoryReader reader = DirectoryReader.open(directory);
  AtomicReaderContext context = reader.leaves().get(0);
  AtomicReader atomicReader = context.reader();
  SortedDocValues sortedDocValues = atomicReader.getSortedDocValues("f");
  assertTrue(sortedDocValues.getClass().getName().startsWith(DiskDocValuesProducer.class.getName()));
  reader.close();
}
Example #13
Source File: IndexWriterWorker.java From olat with Apache License 2.0
/**
 * @param id
 *            Unique index ID. Used to generate a unique directory name.
 * @param tempIndexDir
 *            Absolute directory path where the temporary index can be generated.
 * @param fullIndexer
 *            Reference to the full indexer.
 */
public IndexWriterWorker(final int id, final File tempIndexDir, final OlatFullIndexer fullIndexer) {
  this.id = id;
  this.indexPartDir = new File(tempIndexDir, "part" + id);
  this.fullIndexer = fullIndexer;
  try {
    final Directory luceneIndexPartDir = FSDirectory.open(indexPartDir);
    indexWriter = new IndexWriter(luceneIndexPartDir, new StandardAnalyzer(Version.LUCENE_30), true,
        IndexWriter.MaxFieldLength.UNLIMITED);
    indexWriter.setMergeFactor(fullIndexer.getSearchModuleConfig().getIndexerWriterMergeFactor());
    log.info("IndexWriter config MergeFactor=" + indexWriter.getMergeFactor());
    indexWriter.setRAMBufferSizeMB(fullIndexer.getSearchModuleConfig().getIndexerWriterRamBuffer());
    log.info("IndexWriter config RAMBufferSizeMB=" + indexWriter.getRAMBufferSizeMB());
    indexWriter.setUseCompoundFile(false);
  } catch (final IOException e) {
    log.warn("Can not create IndexWriter");
  }
}
Example #14
Source File: Indexer.java From sql-layer with GNU Affero General Public License v3.0
public Indexer(FullTextIndexShared index, Analyzer analyzer) throws IOException {
  IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_40, analyzer);
  // The deletion needs to be reflected immediately (on disk)
  iwc.setMaxBufferedDeleteTerms(1);
  this.index = index;
  this.writer = new IndexWriter(index.open(), iwc);
}
Example #15
Source File: IndexSchema.java From lucene-solr with Apache License 2.0
/**
 * Constructs a schema using the specified resource name and stream.
 * By default, this follows the normal config path directory searching rules.
 * @see SolrResourceLoader#openResource
 */
public IndexSchema(String name, InputSource is, Version luceneVersion, SolrResourceLoader resourceLoader,
                   Properties substitutableProperties) {
  this(luceneVersion, resourceLoader, substitutableProperties);
  this.resourceName = Objects.requireNonNull(name);
  try {
    readSchema(is);
    loader.inform(loader);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
Example #16
Source File: TestSegmentMerger.java From lucene-solr with Apache License 2.0
@Override
public void setUp() throws Exception {
  super.setUp();
  mergedDir = newDirectory();
  merge1Dir = newDirectory();
  merge2Dir = newDirectory();
  DocHelper.setupDoc(doc1);
  SegmentCommitInfo info1 = DocHelper.writeDoc(random(), merge1Dir, doc1);
  DocHelper.setupDoc(doc2);
  SegmentCommitInfo info2 = DocHelper.writeDoc(random(), merge2Dir, doc2);
  reader1 = new SegmentReader(info1, Version.LATEST.major, newIOContext(random()));
  reader2 = new SegmentReader(info2, Version.LATEST.major, newIOContext(random()));
}
Example #17
Source File: CoreTestContext.java From incubator-retired-blur with Apache License 2.0
/**
 * Index will contain 26 documents with the following column/values:
 * alpha = double-letter a-z (lowercase characters); num = 0-25;
 * val = "val" (constant across all docs).
 *
 * New columns may be added, so don't rely on the column count in tests.
 */
public static IndexContext newSimpleAlpaNumContext() {
  CoreTestContext ctx = new CoreTestContext();
  IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new StandardAnalyzer(Version.LUCENE_43));
  try {
    IndexWriter writer = new IndexWriter(ctx.directory, conf);
    for (int i = 0; i < 26; i++) {
      String alpha = new Character((char) (97 + i)).toString();
      Document doc = new Document();
      doc.add(new Field("id", Integer.toString(i), TextField.TYPE_STORED));
      doc.add(new Field("alpha", alpha + alpha, TextField.TYPE_STORED));
      doc.add(new Field("num", Integer.toString(i), TextField.TYPE_STORED));
      doc.add(new Field("val", "val", TextField.TYPE_STORED));
      writer.addDocument(doc);
      writer.commit();
    }
    writer.commit();
    writer.close();
  } catch (IOException e) {
    throw new RuntimeException("Unable to create test context.", e);
  }
  return ctx;
}
Example #18
Source File: FieldTypePluginLoader.java From lucene-solr with Apache License 2.0
private Version parseConfiguredVersion(String configuredVersion, String pluginClassName) {
  Version version = (configuredVersion != null)
      ? SolrConfig.parseLuceneVersionString(configuredVersion)
      : schema.getDefaultLuceneMatchVersion();

  if (!version.onOrAfter(Version.LUCENE_8_0_0)) {
    log.warn("{} is using deprecated {}"
        + " emulation. You should at some point declare and reindex to at least 8.0, because "
        + "7.x emulation is deprecated and will be removed in 9.0", pluginClassName, version);
  }
  return version;
}
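The warning above is a typical luceneMatchVersion gate: behavior, or here a deprecation notice, is keyed off Version.onOrAfter against a threshold release. A stripped-down sketch of the pattern, with a hypothetical threshold and SLF4J-style logger rather than Solr's own plumbing:

import org.apache.lucene.util.Version;
import org.slf4j.Logger;

// Hypothetical gate: warn when the configured match version predates a threshold
static void warnIfBehind(Version configured, Version threshold, Logger log, String plugin) {
  if (!configured.onOrAfter(threshold)) {
    log.warn("{} is configured with {} emulation; consider upgrading and reindexing", plugin, configured);
  }
}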
Example #19
Source File: TestTieredMergePolicy.java From lucene-solr with Apache License 2.0
public void testForcedMergeWithPending() throws Exception {
  final TieredMergePolicy tmp = new TieredMergePolicy();
  final double maxSegmentSize = 10.0D;
  tmp.setMaxMergedSegmentMB(maxSegmentSize);

  SegmentInfos infos = new SegmentInfos(Version.LATEST.major);
  for (int j = 0; j < 30; ++j) {
    infos.add(makeSegmentCommitInfo("_" + j, 1000, 0, 1.0D, IndexWriter.SOURCE_MERGE));
  }
  final MockMergeContext mergeContext = new MockMergeContext(SegmentCommitInfo::getDelCount);
  mergeContext.setMergingSegments(Collections.singleton(infos.asList().get(0)));
  final int expectedCount = random().nextInt(10) + 3;
  final MergeSpecification specification =
      tmp.findForcedMerges(infos, expectedCount, segmentsToMerge(infos), mergeContext);
  // Since we have fewer than 30 (the max merge count) segments more than the final size,
  // this would have been the final merge, so we check that it was prevented.
  assertNull(specification);

  SegmentInfos manySegmentsInfos = new SegmentInfos(Version.LATEST.major);
  final int manySegmentsCount = atLeast(500);
  for (int j = 0; j < manySegmentsCount; ++j) {
    manySegmentsInfos.add(makeSegmentCommitInfo("_" + j, 1000, 0, 0.1D, IndexWriter.SOURCE_MERGE));
  }
  // We set one merge to be ongoing. Since we have more than 30 (the max merge count) times
  // the number of segments we want to merge down to, this is not the final merge, and hence
  // the returned specification must not be null.
  mergeContext.setMergingSegments(Collections.singleton(manySegmentsInfos.asList().get(0)));
  final MergeSpecification specificationManySegments =
      tmp.findForcedMerges(manySegmentsInfos, expectedCount, segmentsToMerge(manySegmentsInfos), mergeContext);
  assertMaxSize(specificationManySegments, maxSegmentSize);
  for (OneMerge merge : specificationManySegments.merges) {
    assertEquals("No merges of less than the max merge count are permitted while another merge is in progress",
        merge.segments.size(), tmp.getMaxMergeAtOnceExplicit());
  }
  final int resultingCountManySegments = manySegmentsInfos.size() + specificationManySegments.merges.size()
      - specificationManySegments.merges.stream().mapToInt(spec -> spec.segments.size()).sum();
  assertTrue(resultingCountManySegments >= expectedCount);
}
Example #20
Source File: AclDiscoverFieldTypeDefinitionTest.java From incubator-retired-blur with Apache License 2.0
private void test(int expected, boolean rowQuery, Collection<String> discoverAuthorizations)
    throws IOException, ParseException {
  DirectoryReader reader = DirectoryReader.open(_dir);
  SuperParser parser = new SuperParser(Version.LUCENE_43, _fieldManager, rowQuery, null, ScoreType.SUPER,
      new Term(BlurConstants.PRIME_DOC, BlurConstants.PRIME_DOC_VALUE));
  Query query = parser.parse("fam.string:value");
  Collection<String> readAuthorizations = null;
  Set<String> discoverableFields = new HashSet<String>();
  discoverableFields.add("rowid");
  discoverableFields.add("recordid");
  discoverableFields.add("family");
  IndexSearcher searcher = new SecureIndexSearcher(reader, getAccessControlFactory(), readAuthorizations,
      discoverAuthorizations, discoverableFields, null);
  TopDocs topDocs = searcher.search(query, 10);
  assertEquals(expected, topDocs.totalHits);
  for (int i = 0; i < expected; i++) {
    int doc = topDocs.scoreDocs[i].doc;
    Document document = searcher.doc(doc);
    List<IndexableField> fields = document.getFields();
    for (IndexableField field : fields) {
      assertTrue(discoverableFields.contains(field.name()));
    }
  }
  reader.close();
}
Example #21
Source File: TestSegmentInfos.java From lucene-solr with Apache License 2.0
public void testVersionsTwoSegments() throws IOException {
  BaseDirectoryWrapper dir = newDirectory();
  dir.setCheckIndexOnClose(false);
  byte id[] = StringHelper.randomId();
  Codec codec = Codec.getDefault();

  SegmentInfos sis = new SegmentInfos(Version.LATEST.major);
  SegmentInfo info = new SegmentInfo(dir, Version.LUCENE_9_0_0, Version.LUCENE_9_0_0, "_0", 1, false,
      Codec.getDefault(), Collections.<String,String>emptyMap(), id, Collections.<String,String>emptyMap(), null);
  info.setFiles(Collections.<String>emptySet());
  codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT);
  SegmentCommitInfo commitInfo = new SegmentCommitInfo(info, 0, 0, -1, -1, -1, StringHelper.randomId());
  sis.add(commitInfo);

  info = new SegmentInfo(dir, Version.LUCENE_9_0_0, Version.LUCENE_9_0_0, "_1", 1, false,
      Codec.getDefault(), Collections.<String,String>emptyMap(), id, Collections.<String,String>emptyMap(), null);
  info.setFiles(Collections.<String>emptySet());
  codec.segmentInfoFormat().write(dir, info, IOContext.DEFAULT);
  commitInfo = new SegmentCommitInfo(info, 0, 0, -1, -1, -1, StringHelper.randomId());
  sis.add(commitInfo);

  sis.commit(dir);
  byte[] commitInfoId0 = sis.info(0).getId();
  byte[] commitInfoId1 = sis.info(1).getId();
  sis = SegmentInfos.readLatestCommit(dir);
  assertEquals(Version.LUCENE_9_0_0, sis.getMinSegmentLuceneVersion());
  assertEquals(Version.LATEST, sis.getCommitLuceneVersion());
  assertEquals(StringHelper.idToString(commitInfoId0), StringHelper.idToString(sis.info(0).getId()));
  assertEquals(StringHelper.idToString(commitInfoId1), StringHelper.idToString(sis.info(1).getId()));
  dir.close();
}
Example #22
Source File: SolrDispatchFilter.java From lucene-solr with Apache License 2.0
private String solrVersion() {
  String specVer = Version.LATEST.toString();
  try {
    String implVer = SolrCore.class.getPackage().getImplementationVersion();
    return (specVer.equals(implVer.split(" ")[0])) ? specVer : implVer;
  } catch (Exception e) {
    return specVer;
  }
}
Example #23
Source File: Analysis.java From crate with Apache License 2.0
public static Version parseAnalysisVersion(Settings indexSettings, Settings settings, Logger logger) {
  // check for explicit version on the specific analyzer component
  String sVersion = settings.get("version");
  if (sVersion != null) {
    return Lucene.parseVersion(sVersion, Version.LATEST, logger);
  }
  // check for explicit version on the index itself as default for all analysis components
  sVersion = indexSettings.get("index.analysis.version");
  if (sVersion != null) {
    return Lucene.parseVersion(sVersion, Version.LATEST, logger);
  }
  // resolve the analysis version based on the version the index was created with
  return org.elasticsearch.Version.indexCreated(indexSettings).luceneVersion;
}
Example #24
Source File: NewAnalyzerTask.java From lucene-solr with Apache License 2.0
public static final Analyzer createAnalyzer(String className) throws Exception {
  final Class<? extends Analyzer> clazz = Class.forName(className).asSubclass(Analyzer.class);
  try {
    // first try a ctor with a Version parameter (needed for many newer Analyzers
    // that no longer have a default constructor)
    Constructor<? extends Analyzer> cnstr = clazz.getConstructor(Version.class);
    return cnstr.newInstance(Version.LATEST);
  } catch (NoSuchMethodException nsme) {
    // otherwise use the default ctor
    return clazz.getConstructor().newInstance();
  }
}
Example #25
Source File: Lucene.java From Elasticsearch with Apache License 2.0
@SuppressWarnings("deprecation") public static Version parseVersion(@Nullable String version, Version defaultVersion, ESLogger logger) { if (version == null) { return defaultVersion; } try { return Version.parse(version); } catch (ParseException e) { logger.warn("no version match {}, default to {}", e, version, defaultVersion); return defaultVersion; } }
Example #26
Source File: QuadPrefixTree.java From lucene-solr with Apache License 2.0
@Override
protected SpatialPrefixTree newSPT() {
  QuadPrefixTree tree = new QuadPrefixTree(ctx, maxLevels != null ? maxLevels : MAX_LEVELS_POSSIBLE);
  @SuppressWarnings("deprecation")
  Version LUCENE_8_3_0 = Version.LUCENE_8_3_0;
  tree.robust = getVersion().onOrAfter(LUCENE_8_3_0);
  return tree;
}
Example #27
Source File: OLuceneSpatialIndexManager.java From orientdb-lucene with Apache License 2.0
@Override
public IndexWriter openIndexWriter(Directory directory, ODocument metadata) throws IOException {
  Analyzer analyzer = getAnalyzer(metadata);
  Version version = getLuceneVersion(metadata);
  IndexWriterConfig iwc = new IndexWriterConfig(version, analyzer);
  iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
  return new IndexWriter(directory, iwc);
}
Example #28
Source File: PackedQuadPrefixTree.java From lucene-solr with Apache License 2.0
@Override
protected SpatialPrefixTree newSPT() {
  PackedQuadPrefixTree tree = new PackedQuadPrefixTree(ctx, maxLevels != null ? maxLevels : MAX_LEVELS_POSSIBLE);
  @SuppressWarnings("deprecation")
  Version lucene830 = Version.LUCENE_8_3_0;
  tree.robust = getVersion().onOrAfter(lucene830);
  return tree;
}
Example #29
Source File: Store.java From crate with Apache License 2.0
private static void checksumFromLuceneFile(Directory directory, String file, Map<String, StoreFileMetaData> builder,
    Logger logger, Version version, boolean readFileAsHash) throws IOException {
  final String checksum;
  final BytesRefBuilder fileHash = new BytesRefBuilder();
  try (IndexInput in = directory.openInput(file, IOContext.READONCE)) {
    final long length;
    try {
      length = in.length();
      if (length < CodecUtil.footerLength()) {
        // truncated files trigger IAE if we seek negative... these files are really corrupted though
        throw new CorruptIndexException("Can't retrieve checksum from file: " + file
            + " file length must be >= " + CodecUtil.footerLength() + " but was: " + in.length(), in);
      }
      if (readFileAsHash) {
        // additional safety: we checksum the entire file we read the hash for...
        final VerifyingIndexInput verifyingIndexInput = new VerifyingIndexInput(in);
        hashFile(fileHash, new InputStreamIndexInput(verifyingIndexInput, length), length);
        checksum = digestToString(verifyingIndexInput.verify());
      } else {
        checksum = digestToString(CodecUtil.retrieveChecksum(in));
      }
    } catch (Exception ex) {
      logger.debug(() -> new ParameterizedMessage("Can not retrieve checksum from file [{}]", file), ex);
      throw ex;
    }
    builder.put(file, new StoreFileMetaData(file, length, checksum, version, fileHash.get()));
  }
}
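The core Lucene call in the non-hashing branch is CodecUtil.retrieveChecksum, which validates the codec footer and returns the checksum stored there. Isolated from the Elasticsearch plumbing, a minimal sketch looks like this (the directory and file name are whatever index file the caller supplies):

import java.io.IOException;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;

static String footerChecksum(Directory dir, String fileName) throws IOException {
  try (IndexInput in = dir.openInput(fileName, IOContext.READONCE)) {
    // reads and validates the footer at the end of the file, returning the stored checksum
    return Long.toString(CodecUtil.retrieveChecksum(in));
  }
}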
Example #30
Source File: SchemaSimilarityFactory.java From lucene-solr with Apache License 2.0
@Override
public Similarity getSimilarity() {
  if (null == core) {
    throw new IllegalStateException("SchemaSimilarityFactory can not be used until SolrCoreAware.inform has been called");
  }
  if (null == similarity) {
    // Need to instantiate lazily; can't do this in inform(SolrCore) because of chicken/egg
    // circular initialization hell with core.getLatestSchema() to look up defaultSimFromFieldType
    Similarity defaultSim = null;
    if (null == defaultSimFromFieldType) {
      // nothing configured, choose a sensible implicit default...
      defaultSim = coreVersion.onOrAfter(Version.LUCENE_8_0_0)
          ? new BM25Similarity()
          : new LegacyBM25Similarity();
    } else {
      FieldType defSimFT = core.getLatestSchema().getFieldTypeByName(defaultSimFromFieldType);
      if (null == defSimFT) {
        throw new SolrException(ErrorCode.SERVER_ERROR,
            "SchemaSimilarityFactory configured with " + INIT_OPT + "='" + defaultSimFromFieldType
                + "' but that <fieldType> does not exist");
      }
      defaultSim = defSimFT.getSimilarity();
      if (null == defaultSim) {
        throw new SolrException(ErrorCode.SERVER_ERROR,
            "SchemaSimilarityFactory configured with " + INIT_OPT + "='" + defaultSimFromFieldType
                + "' but that <fieldType> does not define a <similarity>");
      }
    }
    similarity = new SchemaSimilarity(defaultSim);
  }
  return similarity;
}