org.elasticsearch.common.lucene.Lucene Java Examples
The following examples show how to use org.elasticsearch.common.lucene.Lucene, a utility class of static Lucene helpers used throughout Elasticsearch, CrateDB, and related plugins.
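Most of the snippets below revolve around a handful of static helpers on this class. As a quick orientation, here is a minimal, self-contained sketch (not taken from any of the projects below) that probes an index directory the way several examples do. It only uses helpers that appear verbatim in the examples; the class name, method, and path are invented for illustration:

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.elasticsearch.common.lucene.Lucene;

public class LuceneHelperSketch {
    public static void main(String[] args) throws IOException {
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/example-index"))) {
            // Lucene.indexExists: true once the directory contains a commit point
            if (Lucene.indexExists(dir) == false) {
                System.out.println("no index yet");
                return;
            }
            try {
                // Lucene.readSegmentInfos: reads the last committed segment infos
                SegmentInfos infos = Lucene.readSegmentInfos(dir);
                System.out.println("last commit generation: " + infos.getGeneration());
            } catch (IOException e) {
                // Lucene.isCorruptionException: classifies corruption-type failures
                if (Lucene.isCorruptionException(e)) {
                    System.err.println("index is corrupted: " + e);
                }
                throw e;
            }
        }
    }
}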
Example #1
Source File: ParentQuery.java from Elasticsearch (Apache License 2.0)
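A Weight#scorer implementation that bails out early via Lucene.isEmpty when the (live-docs-filtered) children doc set cannot produce any matches.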
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    DocIdSet childrenDocSet = childrenFilter.getDocIdSet(context, null);
    // we forcefully apply live docs here so that deleted children don't give matching parents
    childrenDocSet = BitsFilteredDocIdSet.wrap(childrenDocSet, context.reader().getLiveDocs());
    if (Lucene.isEmpty(childrenDocSet)) {
        return null;
    }
    final DocIdSetIterator childIterator = childrenDocSet.iterator();
    if (childIterator == null) {
        return null;
    }
    SortedDocValues bytesValues = globalIfd.load(context).getOrdinalsValues(parentType);
    if (bytesValues == null) {
        return null;
    }
    return new ChildScorer(this, parentIdxs, scores, childIterator, bytesValues);
}
Example #2
Source File: test.java from vscode-extension (MIT License)
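Classifies the failure with Lucene.isCorruptionException: only corruption escalates to a FlushFailedEngineException; other read failures are merely logged.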
private void refreshLastCommittedSegmentInfos() {
    /*
     * we have to inc-ref the store here since if the engine is closed by a tragic event
     * we don't acquire the write lock and wait until we have exclusive access. This might also
     * dec the store reference which can essentially close the store and unless we can inc the reference
     * we can't use it.
     */
    store.incRef();
    try {
        // reread the last committed segment infos
        lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
    } catch (Exception e) {
        if (isClosed.get() == false) {
            try {
                logger.warn("failed to read latest segment infos on flush", e);
            } catch (Exception inner) {
                e.addSuppressed(inner);
            }
            if (Lucene.isCorruptionException(e)) {
                throw new FlushFailedEngineException(shardId, e);
            }
        }
    } finally {
        store.decRef();
    }
}
Example #3
Source File: EngineTestCase.java from crate (Apache License 2.0)
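Uses Lucene.indexExists to detect a brand-new directory and bootstrap an empty index and translog before wiring up the engine.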
protected InternalEngine createEngine(@Nullable IndexWriterFactory indexWriterFactory,
                                      @Nullable BiFunction<Long, Long, LocalCheckpointTracker> localCheckpointTrackerSupplier,
                                      @Nullable ToLongBiFunction<Engine, Engine.Operation> seqNoForOperation,
                                      EngineConfig config) throws IOException {
    final Store store = config.getStore();
    final Directory directory = store.directory();
    if (Lucene.indexExists(directory) == false) {
        store.createEmpty();
        final String translogUuid = Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(),
            SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
        store.associateIndexWithNewTranslog(translogUuid);
    }
    InternalEngine internalEngine = createInternalEngine(indexWriterFactory, localCheckpointTrackerSupplier, seqNoForOperation, config);
    internalEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
    return internalEngine;
}
Example #4
Source File: RestoreService.java from crate (Apache License 2.0)
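Uses Lucene.isCorruptionException on the unassignment failure to mark a snapshot restore as permanently failed only when the snapshot files themselves are corrupt.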
@Override
public void shardFailed(ShardRouting failedShard, UnassignedInfo unassignedInfo) {
    if (failedShard.primary() && failedShard.initializing()) {
        RecoverySource recoverySource = failedShard.recoverySource();
        if (recoverySource.getType() == RecoverySource.Type.SNAPSHOT) {
            Snapshot snapshot = ((SnapshotRecoverySource) recoverySource).snapshot();
            // mark restore entry for this shard as failed when it's due to a file corruption. There is no need to wait on retries
            // to restore this shard on another node if the snapshot files are corrupt. In case where a node just left or crashed,
            // however, we only want to acknowledge the restore operation once it has been successfully restored on another node.
            if (unassignedInfo.getFailure() != null && Lucene.isCorruptionException(unassignedInfo.getFailure().getCause())) {
                changes(snapshot).shards.put(failedShard.shardId(),
                    new ShardRestoreStatus(failedShard.currentNodeId(), RestoreInProgress.State.FAILURE,
                        unassignedInfo.getFailure().getCause().getMessage()));
            }
        }
    }
}
Example #5
Source File: KeywordFieldMapper.java from crate (Apache License 2.0)
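Relies on identity comparison with the Lucene.KEYWORD_ANALYZER singleton to detect that no normalizer is configured and skip the normalization path.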
@Override
protected BytesRef indexedValueForSearch(Object value) {
    if (searchAnalyzer() == Lucene.KEYWORD_ANALYZER) {
        // keyword analyzer with the default attribute source which encodes terms using UTF8
        // in that case we skip normalization, which may be slow if many terms need to be
        // parsed (eg. large terms query) since Analyzer.normalize involves things like creating
        // attributes through reflection
        // This if statement will be used whenever a normalizer is NOT configured
        return super.indexedValueForSearch(value);
    }
    if (value == null) {
        return null;
    }
    if (value instanceof BytesRef) {
        value = ((BytesRef) value).utf8ToString();
    }
    return searchAnalyzer().normalize(name(), value.toString());
}
Example #6
Source File: InternalEngine.java from crate (Apache License 2.0)
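The same method as Example #2, here in InternalEngine; the two snippets are verbatim duplicates.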
private void refreshLastCommittedSegmentInfos() {
    /*
     * we have to inc-ref the store here since if the engine is closed by a tragic event
     * we don't acquire the write lock and wait until we have exclusive access. This might also
     * dec the store reference which can essentially close the store and unless we can inc the reference
     * we can't use it.
     */
    store.incRef();
    try {
        // reread the last committed segment infos
        lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
    } catch (Exception e) {
        if (isClosed.get() == false) {
            try {
                logger.warn("failed to read latest segment infos on flush", e);
            } catch (Exception inner) {
                e.addSuppressed(inner);
            }
            if (Lucene.isCorruptionException(e)) {
                throw new FlushFailedEngineException(shardId, e);
            }
        }
    } finally {
        store.decRef();
    }
}
Example #7
Source File: Engine.java from crate (Apache License 2.0)
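Computes doc stats at the segment level; Lucene.segmentReader unwraps a leaf reader (possibly a filtered wrapper) down to its underlying SegmentReader so per-segment sizes can be read.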
protected final DocsStats docsStats(IndexReader indexReader) {
    long numDocs = 0;
    long numDeletedDocs = 0;
    long sizeInBytes = 0;
    // we don't wait for pending refreshes here since it's a stats call; instead we mark it as accessed only, which will cause
    // the next scheduled refresh to go through and refresh the stats as well
    for (LeafReaderContext readerContext : indexReader.leaves()) {
        // we go down to the segment level here to get accurate numbers
        final SegmentReader segmentReader = Lucene.segmentReader(readerContext.reader());
        SegmentCommitInfo info = segmentReader.getSegmentInfo();
        numDocs += readerContext.reader().numDocs();
        numDeletedDocs += readerContext.reader().numDeletedDocs();
        try {
            sizeInBytes += info.sizeInBytes();
        } catch (IOException e) {
            logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
        }
    }
    return new DocsStats(numDocs, numDeletedDocs, sizeInBytes);
}
Example #8
Source File: AnnotationIndicesAnalysis.java from elasticsearch-analysis-annotation (Apache License 2.0)
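Registers a pre-built analyzer and token filter, constructing the analyzer with the Lucene.ANALYZER_VERSION constant; presumably this pins the analyzer to the Lucene version the Elasticsearch build targets.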
@Inject
public AnnotationIndicesAnalysis(Settings settings, IndicesAnalysisService indicesAnalysisService) {
    super(settings);
    indicesAnalysisService.analyzerProviderFactories().put(
        "default",
        new PreBuiltAnalyzerProviderFactory("default", AnalyzerScope.INDICES,
            new AnnotationAnalyzer(Lucene.ANALYZER_VERSION)));
    indicesAnalysisService.tokenFilterFactories().put("annotation_filter",
        new PreBuiltTokenFilterFactoryFactory(new TokenFilterFactory() {
            @Override
            public String name() {
                return "annotation_filter";
            }

            @Override
            public TokenStream create(TokenStream tokenStream) {
                return new InlineAnnotationFilter(tokenStream);
            }
        }));
}
Example #9
Source File: ExplorerQueryTests.java from elasticsearch-learning-to-rank (Apache License 2.0)
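A test fixture that indexes a handful of documents with an IndexWriterConfig built on Lucene.STANDARD_ANALYZER, the shared standard-analyzer instance.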
@Before
public void setupIndex() throws Exception {
    dir = new ByteBuffersDirectory();
    try (IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER))) {
        for (int i = 0; i < docs.length; i++) {
            Document doc = new Document();
            doc.add(new Field("_id", Integer.toString(i + 1), StoredField.TYPE));
            doc.add(newTextField("text", docs[i], Field.Store.YES));
            indexWriter.addDocument(doc);
        }
    }
    reader = DirectoryReader.open(dir);
    searcher = new IndexSearcher(reader);
}
Example #10
Source File: FieldMapper.java from Elasticsearch (Apache License 2.0)
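Defaults untokenized indexed fields to Lucene.KEYWORD_ANALYZER for both indexing and search.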
protected void setupFieldType(BuilderContext context) {
    fieldType.setNames(buildNames(context));
    if (fieldType.indexAnalyzer() == null && fieldType.tokenized() == false && fieldType.indexOptions() != IndexOptions.NONE) {
        fieldType.setIndexAnalyzer(Lucene.KEYWORD_ANALYZER);
        fieldType.setSearchAnalyzer(Lucene.KEYWORD_ANALYZER);
    }
    if (fieldDataSettings != null) {
        Settings settings = Settings.builder().put(fieldType.fieldDataType().getSettings()).put(fieldDataSettings).build();
        fieldType.setFieldDataType(new FieldDataType(fieldType.fieldDataType().getType(), settings));
    }
    boolean defaultDocValues = false; // pre 2.0
    if (context.indexCreatedVersion().onOrAfter(Version.V_2_0_0_beta1)) {
        defaultDocValues = fieldType.tokenized() == false && fieldType.indexOptions() != IndexOptions.NONE;
    }
    // backcompat for "fielddata: format: docvalues" for now...
    boolean fieldDataDocValues = fieldType.fieldDataType() != null
        && FieldDataType.DOC_VALUES_FORMAT_VALUE.equals(fieldType.fieldDataType().getFormat(context.indexSettings()));
    if (fieldDataDocValues && docValuesSet && fieldType.hasDocValues() == false) {
        // this forces the doc_values setting to be written, so fielddata does not mask the original setting
        defaultDocValues = true;
    }
    defaultFieldType.setHasDocValues(defaultDocValues);
    if (docValuesSet == false) {
        fieldType.setHasDocValues(defaultDocValues || fieldDataDocValues);
    }
}
Example #11
Source File: ParentConstantScoreQuery.java from Elasticsearch (Apache License 2.0)
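Another early exit on Lucene.isEmpty, this time in a constant-score parent/child query.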
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    DocIdSet childrenDocIdSet = childrenFilter.getDocIdSet(context, null);
    if (Lucene.isEmpty(childrenDocIdSet)) {
        return null;
    }
    SortedDocValues globalValues = globalIfd.load(context).getOrdinalsValues(parentType);
    if (globalValues != null) {
        // we forcefully apply live docs here so that deleted children don't give matching parents
        childrenDocIdSet = BitsFilteredDocIdSet.wrap(childrenDocIdSet, context.reader().getLiveDocs());
        DocIdSetIterator innerIterator = childrenDocIdSet.iterator();
        if (innerIterator != null) {
            ChildrenDocIdIterator childrenDocIdIterator = new ChildrenDocIdIterator(
                innerIterator, parentOrds, globalValues);
            return ConstantScorer.create(childrenDocIdIterator, this, queryWeight);
        }
    }
    return null;
}
Example #12
Source File: Engine.java from Elasticsearch (Apache License 2.0)
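Reads the SegmentInfos of the commit currently referenced by the SearcherManager via Lucene.readSegmentInfos, falling back to the store when the commit itself cannot be read.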
/**
 * Read the last segments info from the commit pointed to by the searcher manager
 */
protected static SegmentInfos readLastCommittedSegmentInfos(final SearcherManager sm, final Store store) throws IOException {
    IndexSearcher searcher = sm.acquire();
    try {
        IndexCommit latestCommit = ((DirectoryReader) searcher.getIndexReader()).getIndexCommit();
        return Lucene.readSegmentInfos(latestCommit);
    } catch (IOException e) {
        // Fall back to reading from the store if reading from the commit fails
        try {
            return store.readLastCommittedSegmentsInfo();
        } catch (IOException e2) {
            e2.addSuppressed(e);
            throw e2;
        }
    } finally {
        sm.release(searcher);
    }
}
Example #13
Source File: Segment.java from Elasticsearch (Apache License 2.0)
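Deserializes a segment description; Lucene.parseVersionLenient tolerates missing or unrecognized version strings by falling back to the supplied default (null here).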
@Override
public void readFrom(StreamInput in) throws IOException {
    name = in.readString();
    generation = Long.parseLong(name.substring(1), Character.MAX_RADIX);
    committed = in.readBoolean();
    search = in.readBoolean();
    docCount = in.readInt();
    delDocCount = in.readInt();
    sizeInBytes = in.readLong();
    version = Lucene.parseVersionLenient(in.readOptionalString(), null);
    compound = in.readOptionalBoolean();
    mergeId = in.readOptionalString();
    memoryInBytes = in.readLong();
    if (in.readBoolean()) {
        // verbose mode
        ramTree = readRamTree(in);
    }
}
Example #14
Source File: test.java from vscode-extension (MIT License)
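Restores a LocalCheckpointTracker from the last commit and then uses Lucene.scanSeqNosInReader to mark the sequence numbers still present in the index, keeping tracker and index consistent.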
private static LocalCheckpointTracker createLocalCheckpointTracker(EngineConfig engineConfig, SegmentInfos lastCommittedSegmentInfos,
                                                                   Logger logger, Supplier<Searcher> searcherSupplier,
                                                                   BiFunction<Long, Long, LocalCheckpointTracker> localCheckpointTrackerSupplier) {
    try {
        final SequenceNumbers.CommitInfo seqNoStats =
            SequenceNumbers.loadSeqNoInfoFromLuceneCommit(lastCommittedSegmentInfos.userData.entrySet());
        final long maxSeqNo = seqNoStats.maxSeqNo;
        final long localCheckpoint = seqNoStats.localCheckpoint;
        logger.trace("recovered maximum sequence number [{}] and local checkpoint [{}]", maxSeqNo, localCheckpoint);
        final LocalCheckpointTracker tracker = localCheckpointTrackerSupplier.apply(maxSeqNo, localCheckpoint);
        // Operations that are optimized using max_seq_no_of_updates optimization must not be processed twice; otherwise, they will
        // create duplicates in Lucene. To avoid this we check the LocalCheckpointTracker to see if an operation was already processed.
        // Thus, we need to restore the LocalCheckpointTracker bit by bit to ensure the consistency between LocalCheckpointTracker and
        // Lucene index. This is not the only solution since we can bootstrap max_seq_no_of_updates with max_seq_no of the commit to
        // disable the MSU optimization during recovery. Here we prefer to maintain the consistency of LocalCheckpointTracker.
        if (localCheckpoint < maxSeqNo && engineConfig.getIndexSettings().isSoftDeleteEnabled()) {
            try (Searcher searcher = searcherSupplier.get()) {
                Lucene.scanSeqNosInReader(searcher.getDirectoryReader(), localCheckpoint + 1, maxSeqNo,
                    tracker::markSeqNoAsCompleted);
            }
        }
        return tracker;
    } catch (IOException ex) {
        throw new EngineCreationFailureException(engineConfig.getShardId(), "failed to create local checkpoint tracker", ex);
    }
}
Example #15
Source File: RecoveryFileChunkRequest.java from Elasticsearch (Apache License 2.0)
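Another lenient-version deserialization, here recovering the Lucene version that wrote a store file during peer recovery.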
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    recoveryId = in.readLong();
    shardId = ShardId.readShardId(in);
    String name = in.readString();
    position = in.readVLong();
    long length = in.readVLong();
    String checksum = in.readOptionalString();
    content = in.readBytesReference();
    Version writtenBy = Lucene.parseVersionLenient(in.readOptionalString(), null);
    metaData = new StoreFileMetaData(name, length, checksum, writtenBy);
    lastChunk = in.readBoolean();
    totalTranslogOps = in.readVInt();
    sourceThrottleTimeInNanos = in.readLong();
}
Example #16
Source File: QueryCollector.java from Elasticsearch (Apache License 2.0)
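A percolator-style collector that wraps the candidate query for nested documents and then asks Lucene.exists whether anything matches, collecting into a top-docs collector.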
@Override
public void collect(int doc) throws IOException {
    final Query query = getQuery(doc);
    if (query == null) {
        // log???
        return;
    }
    Query existsQuery = query;
    if (isNestedDoc) {
        existsQuery = new BooleanQuery.Builder()
            .add(existsQuery, Occur.MUST)
            .add(Queries.newNonNestedFilter(), Occur.FILTER)
            .build();
    }
    // run the query
    try {
        if (Lucene.exists(searcher, existsQuery)) {
            topDocsLeafCollector.collect(doc);
            postMatch(doc);
        }
    } catch (IOException e) {
        logger.warn("[" + current.utf8ToString() + "] failed to execute query", e);
    }
}
Example #17
Source File: QueryCollector.java from Elasticsearch (Apache License 2.0)
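The counting twin of Example #16: the same Lucene.exists check, but it only increments a counter.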
@Override
public void collect(int doc) throws IOException {
    final Query query = getQuery(doc);
    if (query == null) {
        // log???
        return;
    }
    Query existsQuery = query;
    if (isNestedDoc) {
        existsQuery = new BooleanQuery.Builder()
            .add(existsQuery, Occur.MUST)
            .add(Queries.newNonNestedFilter(), Occur.FILTER)
            .build();
    }
    // run the query
    try {
        if (Lucene.exists(searcher, existsQuery)) {
            counter++;
            postMatch(doc);
        }
    } catch (IOException e) {
        logger.warn("[" + current.utf8ToString() + "] failed to execute query", e);
    }
}
Example #18
Source File: FiltersAggregator.java from Elasticsearch (Apache License 2.0)
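Converts each filter's Scorer into Bits via Lucene.asSequentialAccessBits; as the name suggests, the returned bits are designed to be checked in ascending doc id order, which is exactly how the collector visits documents.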
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException {
    // no need to provide deleted docs to the filter
    final Bits[] bits = new Bits[filters.length];
    for (int i = 0; i < filters.length; ++i) {
        bits[i] = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), filters[i].scorer(ctx));
    }
    return new LeafBucketCollectorBase(sub, null) {
        @Override
        public void collect(int doc, long bucket) throws IOException {
            boolean matched = false;
            for (int i = 0; i < bits.length; i++) {
                if (bits[i].get(doc)) {
                    collectBucket(sub, doc, bucketOrd(bucket, i));
                    matched = true;
                }
            }
            if (showOtherBucket && !matched) {
                collectBucket(sub, doc, bucketOrd(bucket, bits.length));
            }
        }
    };
}
Example #19
Source File: ShardFetchRequest.java from Elasticsearch (Apache License 2.0)
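Serializes the last emitted doc behind a one-byte tag: 0 for null, 1 followed by Lucene.writeFieldDoc for a FieldDoc, 2 followed by Lucene.writeScoreDoc for a plain ScoreDoc.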
@Override
public void writeTo(StreamOutput out) throws IOException {
    super.writeTo(out);
    out.writeLong(id);
    out.writeVInt(size);
    for (int i = 0; i < size; i++) {
        out.writeVInt(docIds[i]);
    }
    if (lastEmittedDoc == null) {
        out.writeByte((byte) 0);
    } else if (lastEmittedDoc instanceof FieldDoc) {
        out.writeByte((byte) 1);
        Lucene.writeFieldDoc(out, (FieldDoc) lastEmittedDoc);
    } else {
        out.writeByte((byte) 2);
        Lucene.writeScoreDoc(out, lastEmittedDoc);
    }
}
Example #20
Source File: ShardFetchRequest.java from Elasticsearch (Apache License 2.0)
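The read side mirroring Example #19: the flag byte dispatches to Lucene.readFieldDoc or Lucene.readScoreDoc, and anything else is an error.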
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    id = in.readLong();
    size = in.readVInt();
    docIds = new int[size];
    for (int i = 0; i < size; i++) {
        docIds[i] = in.readVInt();
    }
    byte flag = in.readByte();
    if (flag == 1) {
        lastEmittedDoc = Lucene.readFieldDoc(in);
    } else if (flag == 2) {
        lastEmittedDoc = Lucene.readScoreDoc(in);
    } else if (flag != 0) {
        throw new IOException("Unknown flag: " + flag);
    }
}
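Taken together, Examples #19 and #20 define a small symmetric wire format for ScoreDoc instances. Here is a minimal sketch of the round trip under the assumption that Elasticsearch's BytesStreamOutput and its streamInput() accessor are available in the version at hand (the doc id and sort value are made up):

import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.lucene.Lucene;

// hedged sketch: round-trip a FieldDoc through the Lucene stream helpers
public class ScoreDocRoundTrip {
    public static void main(String[] args) throws Exception {
        BytesStreamOutput out = new BytesStreamOutput();
        out.writeByte((byte) 1); // 1 = FieldDoc, mirroring ShardFetchRequest.writeTo
        Lucene.writeFieldDoc(out, new FieldDoc(42, 1.0f, new Object[] { 7L }));

        StreamInput in = out.bytes().streamInput();
        byte flag = in.readByte();
        ScoreDoc doc = (flag == 1) ? Lucene.readFieldDoc(in) : Lucene.readScoreDoc(in);
        System.out.println(doc);
    }
}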
Example #21
Source File: Store.java from crate (Apache License 2.0)
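Checks that an index can be opened: take the shard lock, fail on corruption markers, then read the segment infos with Lucene.readSegmentInfos.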
/**
 * Tries to open an index for the given location. This includes reading the
 * segment infos and possible corruption markers. If the index can not
 * be opened, an exception is thrown.
 */
public static void tryOpenIndex(Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker, Logger logger)
        throws IOException, ShardLockObtainFailedException {
    try (ShardLock lock = shardLocker.lock(shardId, "open index", TimeUnit.SECONDS.toMillis(5));
         Directory dir = new SimpleFSDirectory(indexLocation)) {
        failIfCorrupted(dir, shardId);
        SegmentInfos segInfo = Lucene.readSegmentInfos(dir);
        logger.trace("{} loaded segment info [{}]", shardId, segInfo);
    }
}
Example #22
Source File: InnerHitsContext.java from Elasticsearch (Apache License 2.0)
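Returns a count-only TopDocs built from Lucene.EMPTY_SCORE_DOCS when the inner hits request a size of zero.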
@Override
public TopDocs topDocs(SearchContext context, FetchSubPhase.HitContext hitContext) throws IOException {
    Query rawParentFilter;
    if (parentObjectMapper == null) {
        rawParentFilter = Queries.newNonNestedFilter();
    } else {
        rawParentFilter = parentObjectMapper.nestedTypeFilter();
    }
    BitSetProducer parentFilter = context.bitsetFilterCache().getBitSetProducer(rawParentFilter);
    Query childFilter = childObjectMapper.nestedTypeFilter();
    Query q = Queries.filtered(query.query(), new NestedChildrenQuery(parentFilter, childFilter, hitContext));
    if (size() == 0) {
        return new TopDocs(context.searcher().count(q), Lucene.EMPTY_SCORE_DOCS, 0);
    } else {
        int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc());
        TopDocsCollector topDocsCollector;
        if (sort() != null) {
            try {
                topDocsCollector = TopFieldCollector.create(sort(), topN, true, trackScores(), trackScores());
            } catch (IOException e) {
                throw ExceptionsHelper.convertToElastic(e);
            }
        } else {
            topDocsCollector = TopScoreDocCollector.create(topN);
        }
        try {
            context.searcher().search(q, topDocsCollector);
        } finally {
            clearReleasables(Lifetime.COLLECTION);
        }
        return topDocsCollector.topDocs(from(), size());
    }
}
Example #23
Source File: test.java from vscode-extension (MIT License)
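Builds the engine's IndexWriterConfig, always setting Lucene.SOFT_DELETES_FIELD as the soft-deletes field and layering retention merge policies on top when soft deletes are enabled.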
private IndexWriterConfig getIndexWriterConfig() {
    final IndexWriterConfig iwc = new IndexWriterConfig(engineConfig.getAnalyzer());
    iwc.setCommitOnClose(false); // we by default don't commit on close
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    iwc.setIndexDeletionPolicy(combinedDeletionPolicy);
    // with tests.verbose, lucene sets this up: plumb to align with filesystem stream
    boolean verbose = false;
    try {
        verbose = Boolean.parseBoolean(System.getProperty("tests.verbose"));
    } catch (Exception ignore) {
    }
    iwc.setInfoStream(verbose ? InfoStream.getDefault() : new LoggerInfoStream(logger));
    iwc.setMergeScheduler(mergeScheduler);
    // Give us the opportunity to upgrade old segments while performing
    // background merges
    MergePolicy mergePolicy = config().getMergePolicy();
    // always configure soft-deletes field so an engine with soft-deletes disabled can open a Lucene index with soft-deletes.
    iwc.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD);
    if (softDeleteEnabled) {
        mergePolicy = new RecoverySourcePruneMergePolicy(SourceFieldMapper.RECOVERY_SOURCE_NAME,
            softDeletesPolicy::getRetentionQuery,
            new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD,
                softDeletesPolicy::getRetentionQuery, mergePolicy));
    }
    iwc.setMergePolicy(new ElasticsearchMergePolicy(mergePolicy));
    iwc.setSimilarity(engineConfig.getSimilarity());
    iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().getMbFrac());
    iwc.setCodec(engineConfig.getCodec());
    iwc.setUseCompoundFile(true); // always use compound on flush - reduces # of file-handles on refresh
    if (config().getIndexSort() != null) {
        iwc.setIndexSort(config().getIndexSort());
    }
    return iwc;
}
Example #24
Source File: QueryCollector.java from Elasticsearch (Apache License 2.0)
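A highlighting variant of the percolator collector from Examples #16 and #17; Lucene.exists again decides whether the candidate query matches.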
@Override
public void collect(int doc) throws IOException {
    final Query query = getQuery(doc);
    if (query == null) {
        // log???
        return;
    }
    Query existsQuery = query;
    if (isNestedDoc) {
        existsQuery = new BooleanQuery.Builder()
            .add(existsQuery, Occur.MUST)
            .add(Queries.newNonNestedFilter(), Occur.FILTER)
            .build();
    }
    // run the query
    try {
        if (context.highlight() != null) {
            context.parsedQuery(new ParsedQuery(query));
            context.hitContext().cache().clear();
        }
        if (Lucene.exists(searcher, existsQuery)) {
            if (!limit || counter < size) {
                matches.add(BytesRef.deepCopyOf(current));
                scores.add(scorer.score());
                if (context.highlight() != null) {
                    highlightPhase.hitExecute(context, context.hitContext());
                    hls.add(context.hitContext().hit().getHighlightFields());
                }
            }
            counter++;
            postMatch(doc);
        }
    } catch (IOException e) {
        logger.warn("[" + current.utf8ToString() + "] failed to execute query", e);
    }
}
Example #25
Source File: BlobStoreRepository.java from crate (Apache License 2.0)
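Marks the store corrupted only when Lucene.isCorruptionException says the failure is corruption, so unrelated I/O errors don't poison the store.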
private static void failStoreIfCorrupted(Store store, Exception e) {
    if (Lucene.isCorruptionException(e)) {
        try {
            store.markStoreCorrupted((IOException) e);
        } catch (IOException inner) {
            inner.addSuppressed(e);
            LOGGER.warn("store cannot be marked as corrupted", inner);
        }
    }
}
Example #26
Source File: LuceneChangesSnapshot.java from crate (Apache License 2.0)
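Wraps the engine's reader in Lucene.wrapAllDocsLive so soft-deleted documents stay visible to this "translog" snapshot over Lucene.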
/**
 * Creates a new "translog" snapshot from Lucene for reading operations whose seq# is in the specified range.
 *
 * @param engineSearcher    the internal engine searcher which will be taken over if the snapshot is opened successfully
 * @param mapperService     the mapper service which will be mainly used to resolve the document's type and uid
 * @param searchBatchSize   the number of documents that should be returned by each search
 * @param fromSeqNo         the min requesting seq# - inclusive
 * @param toSeqNo           the maximum requesting seq# - inclusive
 * @param requiredFullRange if true, the snapshot will strictly check for the existence of operations between fromSeqNo and toSeqNo
 */
LuceneChangesSnapshot(Engine.Searcher engineSearcher, MapperService mapperService, int searchBatchSize,
                      long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException {
    if (fromSeqNo < 0 || toSeqNo < 0 || fromSeqNo > toSeqNo) {
        throw new IllegalArgumentException("Invalid range; from_seqno [" + fromSeqNo + "], to_seqno [" + toSeqNo + "]");
    }
    if (searchBatchSize <= 0) {
        throw new IllegalArgumentException("Search_batch_size must be positive [" + searchBatchSize + "]");
    }
    final AtomicBoolean closed = new AtomicBoolean();
    this.onClose = () -> {
        if (closed.compareAndSet(false, true)) {
            IOUtils.close(engineSearcher);
        }
    };
    this.mapperService = mapperService;
    final long requestingSize = (toSeqNo - fromSeqNo) == Long.MAX_VALUE ? Long.MAX_VALUE : (toSeqNo - fromSeqNo + 1L);
    this.searchBatchSize = requestingSize < searchBatchSize ? Math.toIntExact(requestingSize) : searchBatchSize;
    this.fromSeqNo = fromSeqNo;
    this.toSeqNo = toSeqNo;
    this.lastSeenSeqNo = fromSeqNo - 1;
    this.requiredFullRange = requiredFullRange;
    this.indexSearcher = new IndexSearcher(Lucene.wrapAllDocsLive(engineSearcher.getDirectoryReader()));
    this.indexSearcher.setQueryCache(null);
    this.parallelArray = new ParallelArray(this.searchBatchSize);
    final TopDocs topDocs = searchOperations(null);
    this.totalHits = Math.toIntExact(topDocs.totalHits.value);
    this.scoreDocs = topDocs.scoreDocs;
    fillParallelArray(scoreDocs, parallelArray);
}
Example #27
Source File: LuceneChangesSnapshot.java from crate (Apache License 2.0)
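An assertion helper that reads the numeric doc values under Lucene.SOFT_DELETES_FIELD and treats the value 1 as "soft-deleted".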
private boolean assertDocSoftDeleted(LeafReader leafReader, int segmentDocId) throws IOException {
    final NumericDocValues ndv = leafReader.getNumericDocValues(Lucene.SOFT_DELETES_FIELD);
    if (ndv == null || ndv.advanceExact(segmentDocId) == false) {
        throw new IllegalStateException("DocValues for field [" + Lucene.SOFT_DELETES_FIELD + "] is not found");
    }
    return ndv.longValue() == 1;
}
Example #28
Source File: SearchService.java from Elasticsearch (Apache License 2.0)
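On a search failure classified by Lucene.isCorruptionException, fails the shard so the corruption is surfaced rather than retried.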
private void processFailure(SearchContext context, Throwable t) {
    freeContext(context.id());
    try {
        if (Lucene.isCorruptionException(t)) {
            context.indexShard().failShard("search execution corruption failure", t);
        }
    } catch (Throwable e) {
        logger.warn("failed to process shard failure to (potentially) send back shard failure on corruption", e);
    }
}
Example #29
Source File: Engine.java from Elasticsearch (Apache License 2.0)
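The engine failure policy: corruption (Lucene.isCorruptionException), out-of-memory errors, and distributed-log recovery failures each fail the engine.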
/** Check whether the engine should be failed */
protected boolean maybeFailEngine(String source, Throwable t) {
    if (Lucene.isCorruptionException(t)) {
        failEngine("corrupt file (source: [" + source + "])", t);
        return true;
    } else if (ExceptionsHelper.isOOM(t)) {
        failEngine("out of memory (source: [" + source + "])", t);
        return true;
    } else if (t instanceof RecoveryFromDistributedLogFailedException) {
        failEngine("recovery from distributed log service failed", t);
        return true;
    }
    return false;
}
Example #30
Source File: Engine.java from crate (Apache License 2.0)
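The CrateDB variant of Example #29, trimmed down to the corruption check alone.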
/** Check whether the engine should be failed */
protected boolean maybeFailEngine(String source, Exception e) {
    if (Lucene.isCorruptionException(e)) {
        failEngine("corrupt file (source: [" + source + "])", e);
        return true;
    }
    return false;
}