org.elasticsearch.index.engine.Engine Java Examples
The following examples show how to use org.elasticsearch.index.engine.Engine. They are drawn from open-source projects; the source file, project, and license are noted above each example.
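Most of the examples revolve around two recurring patterns: acquiring an Engine.Searcher for reads and building Engine operations (Engine.Index, Engine.Delete) for writes. As a minimal sketch of the read pattern, assuming you already hold a started IndexShard (the source label "example" is arbitrary): Engine.Searcher is reference-counted and must be released, which try-with-resources guarantees.

    // Minimal sketch: acquire a point-in-time searcher from a shard and release it.
    try (Engine.Searcher searcher = indexShard.acquireSearcher("example")) {
        IndexReader reader = searcher.reader(); // Lucene view of the shard at acquisition time
        int docCount = reader.numDocs();        // e.g. count live documents
        // run queries via searcher.searcher(); the view stays valid until close()
    }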
Example #1
Source File: TransportFieldStatsTransportAction.java From Elasticsearch with Apache License 2.0

    @Override
    protected FieldStatsShardResponse shardOperation(FieldStatsShardRequest request) {
        ShardId shardId = request.shardId();
        Map<String, FieldStats> fieldStats = new HashMap<>();
        IndexService indexServices = indicesService.indexServiceSafe(shardId.getIndex());
        MapperService mapperService = indexServices.mapperService();
        IndexShard shard = indexServices.shardSafe(shardId.id());
        try (Engine.Searcher searcher = shard.acquireSearcher("fieldstats")) {
            for (String field : request.getFields()) {
                MappedFieldType fieldType = mapperService.fullName(field);
                if (fieldType != null) {
                    IndexReader reader = searcher.reader();
                    Terms terms = MultiFields.getTerms(reader, field);
                    if (terms != null) {
                        fieldStats.put(field, fieldType.stats(terms, reader.maxDoc()));
                    }
                } else {
                    throw new IllegalArgumentException("field [" + field + "] doesn't exist");
                }
            }
        } catch (IOException e) {
            throw ExceptionsHelper.convertToElastic(e);
        }
        return new FieldStatsShardResponse(shardId, fieldStats);
    }
Example #2
Source File: TransportIndexAction.java From Elasticsearch with Apache License 2.0

    /**
     * Execute the given {@link IndexRequest} on a replica shard, throwing a
     * {@link RetryOnReplicaException} if the operation needs to be re-tried.
     */
    public static Engine.IndexingOperation executeIndexRequestOnReplica(IndexRequest request, IndexShard indexShard) {
        final ShardId shardId = indexShard.shardId();
        SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, request.source())
            .index(shardId.getIndex()).type(request.type()).id(request.id())
            .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());

        final Engine.IndexingOperation operation;
        if (request.opType() == IndexRequest.OpType.INDEX) {
            operation = indexShard.prepareIndexOnReplica(sourceToParse, request.version(), request.versionType(),
                request.canHaveDuplicates());
        } else {
            assert request.opType() == IndexRequest.OpType.CREATE : request.opType();
            operation = indexShard.prepareCreateOnReplica(sourceToParse, request.version(), request.versionType(),
                request.canHaveDuplicates(), request.autoGeneratedId());
        }
        Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
        if (update != null) {
            throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update);
        }
        operation.execute(indexShard);
        return operation;
    }
Example #3
Source File: IndexShard.java From Elasticsearch with Apache License 2.0

    private void reindex(QueryFetchSearchResult hits, String index, String type) {
        logger.debug("Reindex: [index:{}, type:{}]", index, type);
        if (state == IndexShardState.STARTED) {
            for (InternalSearchHit hit : hits.fetchResult().hits().internalHits()) {
                // no difference between PRIMARY and REPLICA
                SourceToParse source = SourceToParse.source(SourceToParse.Origin.REPLICA, hit.sourceRef())
                    .index(index).type(type).id(hit.id());
                if (hit.field(ParentFieldMapper.NAME).getValue() != null) {
                    source.parent((String) hit.field(ParentFieldMapper.NAME).getValue());
                }
                if (hit.field(TimestampFieldMapper.NAME).getValue() != null) {
                    source.timestamp((long) hit.field(TimestampFieldMapper.NAME).getValue());
                }
                long version = 0;
                if (hit.field(VersionFieldMapper.NAME).getValue() != null) {
                    version = (long) hit.field(VersionFieldMapper.NAME).getValue();
                }
                Engine.Index indexOp = prepareIndex(docMapper(source.type()), source, version,
                    VersionType.EXTERNAL_GTE, Engine.Operation.Origin.RECOVERY, state != IndexShardState.STARTED);
                indexOp.setReindex(true);
                index(indexOp);
            }
        }
    }
Example #4
Source File: IndexShard.java From crate with Apache License 2.0

    public static Engine.Index prepareIndex(DocumentMapper docMapper,
                                            SourceToParse source,
                                            long seqNo,
                                            long primaryTerm,
                                            long version,
                                            VersionType versionType,
                                            Engine.Operation.Origin origin,
                                            long autoGeneratedIdTimestamp,
                                            boolean isRetry,
                                            long ifSeqNo,
                                            long ifPrimaryTerm) {
        long startTime = System.nanoTime();
        ParsedDocument doc = docMapper.parse(source);
        Term uid = new Term(IdFieldMapper.NAME, Uid.encodeId(doc.id()));
        return new Engine.Index(uid, doc, seqNo, primaryTerm, version, versionType, origin, startTime,
            autoGeneratedIdTimestamp, isRetry, ifSeqNo, ifPrimaryTerm);
    }
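Nothing is written when prepareIndex returns; it only assembles an Engine.Index operation that the shard later hands to the engine. A rough caller-side sketch, assuming the ES 6.x-style SourceToParse.source(index, type, id, source, xContentType) factory and the SequenceNumbers/Versions constants; the index name, id, JSON body, and the docMapper/primaryTerm variables are illustrative only:

    // Hypothetical illustration: assemble an index operation for a new document on a primary.
    SourceToParse source = SourceToParse.source(
        "my_index", "default", "1",
        new BytesArray("{\"field\":\"value\"}"), XContentType.JSON);

    Engine.Index op = IndexShard.prepareIndex(
        docMapper,                               // DocumentMapper for the target type
        source,
        SequenceNumbers.UNASSIGNED_SEQ_NO,       // primary assigns the seqNo at execution time
        primaryTerm,                             // current primary term of the shard
        Versions.MATCH_ANY,                      // no explicit version check
        VersionType.INTERNAL,
        Engine.Operation.Origin.PRIMARY,
        -1L,                                     // autoGeneratedIdTimestamp: unset, id was provided
        false,                                   // isRetry
        SequenceNumbers.UNASSIGNED_SEQ_NO,       // ifSeqNo: no compare-and-set condition
        SequenceNumbers.UNASSIGNED_PRIMARY_TERM  // ifPrimaryTerm: no condition
    );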
Example #5
Source File: IndexShard.java From crate with Apache License 2.0

    /**
     * gets a {@link Store.MetadataSnapshot} for the current directory. This method is safe to call in all lifecycle of the
     * index shard, without having to worry about the current state of the engine and concurrent flushes.
     *
     * @throws org.apache.lucene.index.IndexNotFoundException     if no index is found in the current directory
     * @throws org.apache.lucene.index.CorruptIndexException      if the lucene index is corrupted. This can be caused by a
     *                                                            checksum mismatch or an unexpected exception when opening
     *                                                            the index reading the segments file.
     * @throws org.apache.lucene.index.IndexFormatTooOldException if the lucene index is too old to be opened.
     * @throws org.apache.lucene.index.IndexFormatTooNewException if the lucene index is too new to be opened.
     * @throws java.io.FileNotFoundException                      if one or more files referenced by a commit are not present.
     * @throws java.nio.file.NoSuchFileException                  if one or more files referenced by a commit are not present.
     */
    public Store.MetadataSnapshot snapshotStoreMetadata() throws IOException {
        Engine.IndexCommitRef indexCommit = null;
        store.incRef();
        try {
            Engine engine;
            synchronized (mutex) {
                // if the engine is not running, we can access the store directly, but we need to make sure no one starts
                // the engine on us. If the engine is running, we can get a snapshot via the deletion policy which is initialized.
                // That can be done out of mutex, since the engine can be closed half way.
                engine = getEngineOrNull();
                if (engine == null) {
                    return store.getMetadata(null, true);
                }
            }
            indexCommit = engine.acquireLastIndexCommit(false);
            return store.getMetadata(indexCommit.getIndexCommit());
        } finally {
            store.decRef();
            IOUtils.close(indexCommit);
        }
    }
Example #6
Source File: IndexShard.java From crate with Apache License 2.0

    private Engine.Searcher acquireSearcher(String source, Engine.SearcherScope scope) {
        readAllowed();
        final Engine engine = getEngine();
        final Engine.Searcher searcher = engine.acquireSearcher(source, scope);
        boolean success = false;
        try {
            final Engine.Searcher wrappedSearcher = searcherWrapper == null ? searcher : searcherWrapper.wrap(searcher);
            assert wrappedSearcher != null;
            success = true;
            return wrappedSearcher;
        } catch (IOException ex) {
            throw new ElasticsearchException("failed to wrap searcher", ex);
        } finally {
            if (success == false) {
                Releasables.close(success, searcher);
            }
        }
    }
Example #7
Source File: DefaultSearchContext.java From Elasticsearch with Apache License 2.0

    public DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget,
                                Engine.Searcher engineSearcher, IndexService indexService, IndexShard indexShard,
                                ScriptService scriptService, PageCacheRecycler pageCacheRecycler,
                                BigArrays bigArrays, Counter timeEstimateCounter, ParseFieldMatcher parseFieldMatcher,
                                TimeValue timeout) {
        super(parseFieldMatcher, request);
        this.id = id;
        this.request = request;
        this.searchType = request.searchType();
        this.shardTarget = shardTarget;
        this.engineSearcher = engineSearcher;
        this.scriptService = scriptService;
        this.pageCacheRecycler = pageCacheRecycler;
        // SearchContexts use a BigArrays that can circuit break
        this.bigArrays = bigArrays.withCircuitBreaking();
        this.dfsResult = new DfsSearchResult(id, shardTarget);
        this.queryResult = new QuerySearchResult(id, shardTarget);
        this.fetchResult = new FetchSearchResult(id, shardTarget);
        this.indexShard = indexShard;
        this.indexService = indexService;
        this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy());
        this.timeEstimateCounter = timeEstimateCounter;
        this.timeoutInMillis = timeout.millis();
    }
Example #8
Source File: IndexShard.java From crate with Apache License 2.0

    public void close(String reason, boolean flushEngine) throws IOException {
        synchronized (mutex) {
            try {
                changeState(IndexShardState.CLOSED, reason);
            } finally {
                final Engine engine = this.currentEngineReference.getAndSet(null);
                try {
                    if (engine != null && flushEngine) {
                        engine.flushAndClose();
                    }
                } finally {
                    // playing safe here and close the engine even if the above succeeds - close can be called multiple times
                    // Also closing refreshListeners to prevent us from accumulating any more listeners
                    IOUtils.close(engine, globalCheckpointListeners, refreshListeners);
                    indexShardOperationPermits.close();
                }
            }
        }
    }
Example #9
Source File: CollectTask.java From crate with Apache License 2.0

    public void addSearcher(int searcherId, Engine.Searcher searcher) {
        if (isClosed()) {
            // if this is closed and addContext is called this means the context got killed.
            searcher.close();
            return;
        }
        synchronized (subContextLock) {
            Engine.Searcher replacedSearcher = searchers.put(searcherId, searcher);
            if (replacedSearcher != null) {
                replacedSearcher.close();
                searcher.close();
                throw new IllegalArgumentException(String.format(Locale.ENGLISH,
                    "ShardCollectContext for %d already added", searcherId));
            }
        }
    }
Example #10
Source File: IndexShard.java From crate with Apache License 2.0

    /**
     * Executes the given flush request against the engine.
     *
     * @param request the flush request
     * @return the commit ID
     */
    public Engine.CommitId flush(FlushRequest request) {
        final boolean waitIfOngoing = request.waitIfOngoing();
        final boolean force = request.force();
        logger.trace("flush with {}", request);
        /*
         * We allow flushes while recovery since we allow operations to happen while recovering and we want to keep the
         * translog under control (up to deletes, which we do not GC). Yet, we do not use flush internally to clear deletes
         * and flush the index writer since we use Engine#writeIndexingBuffer for this now.
         */
        verifyNotClosed();
        final Engine engine = getEngine();
        if (engine.isRecovering()) {
            throw new IllegalIndexShardStateException(
                shardId(),
                state,
                "flush is only allowed if the engine is not recovery from translog");
        }
        final long time = System.nanoTime();
        final Engine.CommitId commitId = engine.flush(force, waitIfOngoing);
        engine.refresh("flush"); // TODO this is technically wrong we should remove this in 7.0
        flushMetric.inc(System.nanoTime() - time);
        return commitId;
    }
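A minimal caller-side sketch of this API, assuming a started IndexShard in scope; FlushRequest's force and waitIfOngoing setters are chainable:

    // Minimal sketch: force a flush and wait if another flush is already running.
    FlushRequest flushRequest = new FlushRequest()
        .force(true)          // flush even when there are no uncommitted changes
        .waitIfOngoing(true); // block instead of failing while a flush is in flight
    Engine.CommitId commitId = indexShard.flush(flushRequest);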
Example #11
Source File: SyncedFlushService.java From Elasticsearch with Apache License 2.0

    private ShardSyncedFlushResponse performSyncedFlush(ShardSyncedFlushRequest request) {
        IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
        IndexShard indexShard = indexService.shardSafe(request.shardId().id());
        logger.trace("{} performing sync flush. sync id [{}], expected commit id {}",
            request.shardId(), request.syncId(), request.expectedCommitId());
        Engine.SyncedFlushResult result = indexShard.syncFlush(request.syncId(), request.expectedCommitId());
        logger.trace("{} sync flush done. sync id [{}], result [{}]", request.shardId(), request.syncId(), result);
        switch (result) {
            case SUCCESS:
                return new ShardSyncedFlushResponse();
            case COMMIT_MISMATCH:
                return new ShardSyncedFlushResponse("commit has changed");
            case PENDING_OPERATIONS:
                return new ShardSyncedFlushResponse("pending operations");
            default:
                throw new ElasticsearchException("unknown synced flush result [" + result + "]");
        }
    }
Example #12
Source File: GroupByOptimizedIterator.java From crate with Apache License 2.0

    static boolean hasHighCardinalityRatio(Supplier<Engine.Searcher> acquireSearcher, String fieldName) {
        // acquire separate searcher:
        // Can't use sharedShardContexts() yet, if we bail out the "getOrCreateContext" causes issues later on in the fallback logic
        try (Engine.Searcher searcher = acquireSearcher.get()) {
            for (LeafReaderContext leaf : searcher.reader().leaves()) {
                Terms terms = leaf.reader().terms(fieldName);
                if (terms == null) {
                    return true;
                }
                double cardinalityRatio = terms.size() / (double) leaf.reader().numDocs();
                if (cardinalityRatio > CARDINALITY_RATIO_THRESHOLD) {
                    return true;
                }
            }
        } catch (IOException e) {
            return true;
        }
        return false;
    }
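The supplier defers acquiring the searcher until the check actually runs, and the try-with-resources inside the method releases it immediately afterwards. A hedged call-site sketch (the source label "cardinality-check" and the field name are made up, and the real method is package-private, so this only illustrates the shape of the call):

    // Hypothetical call site: decide whether the optimized group-by is worth attempting.
    boolean tooManyDistinctTerms = GroupByOptimizedIterator.hasHighCardinalityRatio(
        () -> indexShard.acquireSearcher("cardinality-check"), // deferred; closed by the callee
        "category"                                             // Lucene field backing the grouping column
    );
    if (tooManyDistinctTerms) {
        // fall back to the generic group-by implementation
    }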
Example #13
Source File: IndexShard.java From Elasticsearch with Apache License 2.0

    public MergeStats mergeStats() {
        final Engine engine = engineUnsafe();
        if (engine == null) {
            return new MergeStats();
        }
        return engine.getMergeStats();
    }
Example #14
Source File: ShardTermVectorsService.java From Elasticsearch with Apache License 2.0

    private Fields addGeneratedTermVectors(Engine.GetResult get, Fields termVectorsByField,
                                           TermVectorsRequest request, Set<String> selectedFields) throws IOException {
        /* only keep valid fields */
        Set<String> validFields = new HashSet<>();
        for (String field : selectedFields) {
            MappedFieldType fieldType = indexShard.mapperService().smartNameFieldType(field);
            if (!isValidField(fieldType)) {
                continue;
            }
            // already retrieved, only if the analyzer hasn't been overridden at the field
            if (fieldType.storeTermVectors() &&
                    (request.perFieldAnalyzer() == null || !request.perFieldAnalyzer().containsKey(field))) {
                continue;
            }
            validFields.add(field);
        }

        if (validFields.isEmpty()) {
            return termVectorsByField;
        }

        /* generate term vectors from fetched document fields */
        GetResult getResult = indexShard.getService().get(
            get, request.id(), request.type(), validFields.toArray(Strings.EMPTY_ARRAY), null, false);
        Fields generatedTermVectors = generateTermVectors(getResult.getFields().values(),
            request.offsets(), request.perFieldAnalyzer(), validFields);

        /* merge with existing Fields */
        if (termVectorsByField == null) {
            return generatedTermVectors;
        } else {
            return mergeFields(termVectorsByField, generatedTermVectors);
        }
    }
Example #15
Source File: CollectTask.java From crate with Apache License 2.0

    private void closeSearchContexts() {
        synchronized (subContextLock) {
            for (ObjectCursor<Engine.Searcher> cursor : searchers.values()) {
                cursor.value.close();
            }
            searchers.clear();
        }
    }
Example #16
Source File: MockEngineSupport.java From crate with Apache License 2.0

    SearcherCloseable(final Engine.Searcher wrappedSearcher, Logger logger, InFlightSearchers inFlightSearchers) {
        // we only use the given index searcher here instead of the IS of the wrapped searcher. the IS might be a wrapped
        // searcher with a wrapped reader.
        this.wrappedSearcher = wrappedSearcher;
        this.logger = logger;
        initialRefCount = wrappedSearcher.reader().getRefCount();
        this.inFlightSearchers = inFlightSearchers;
        assert initialRefCount > 0 :
            "IndexReader#getRefCount() was [" + initialRefCount + "] expected a value > [0] - reader is already closed";
        inFlightSearchers.add(this, wrappedSearcher.source());
    }
Example #17
Source File: IndexShard.java From Elasticsearch with Apache License 2.0

    public Engine.Index prepareIndexOnReplica(SourceToParse source, long version, VersionType versionType, boolean canHaveDuplicates) {
        try {
            return prepareIndex(docMapper(source.type()), source, version, versionType,
                Engine.Operation.Origin.REPLICA, state != IndexShardState.STARTED || canHaveDuplicates);
        } catch (Throwable t) {
            verifyNotClosed(t);
            throw t;
        }
    }
Example #18
Source File: IndexShard.java From Elasticsearch with Apache License 2.0

    public Engine.Index prepareIndexOnPrimary(SourceToParse source, long version, VersionType versionType, boolean canHaveDuplicates) {
        try {
            if (shardRouting.primary() == false) {
                throw new IllegalIndexShardStateException(shardId, state, "shard is not a primary");
            }
            return prepareIndex(docMapper(source.type()), source, version, versionType,
                Engine.Operation.Origin.PRIMARY, state != IndexShardState.STARTED || canHaveDuplicates);
        } catch (Throwable t) {
            verifyNotClosed(t);
            throw t;
        }
    }
Example #19
Source File: IndexShard.java From Elasticsearch with Apache License 2.0

    @Override
    public void onFailedEngine(ShardId shardId, String reason, @Nullable Throwable failure) {
        for (Engine.FailedEngineListener listener : delegates) {
            try {
                listener.onFailedEngine(shardId, reason, failure);
            } catch (Exception e) {
                logger.warn("exception while notifying engine failure", e);
            }
        }
    }
Example #20
Source File: ReservoirSampler.java From crate with Apache License 2.0

    public Samples getSamples(RelationName relationName, List<Reference> columns, int maxSamples) {
        TableInfo table;
        try {
            table = schemas.getTableInfo(relationName);
        } catch (RelationUnknown e) {
            return Samples.EMPTY;
        }
        if (!(table instanceof DocTableInfo)) {
            return Samples.EMPTY;
        }
        DocTableInfo docTable = (DocTableInfo) table;
        Random random = Randomness.get();
        MetaData metaData = clusterService.state().metaData();
        CoordinatorTxnCtx coordinatorTxnCtx = CoordinatorTxnCtx.systemTransactionContext();
        List<Streamer> streamers = Arrays.asList(Symbols.streamerArray(columns));
        List<Engine.Searcher> searchersToRelease = new ArrayList<>();
        CircuitBreaker breaker = circuitBreakerService.getBreaker(HierarchyCircuitBreakerService.QUERY);
        RamAccounting ramAccounting = new BlockBasedRamAccounting(
            b -> breaker.addEstimateBytesAndMaybeBreak(b, "Reservoir-sampling"),
            MAX_BLOCK_SIZE_IN_BYTES);
        try {
            return getSamples(
                columns,
                maxSamples,
                docTable,
                random,
                metaData,
                coordinatorTxnCtx,
                streamers,
                searchersToRelease,
                ramAccounting
            );
        } finally {
            ramAccounting.close();
            for (Engine.Searcher searcher : searchersToRelease) {
                searcher.close();
            }
        }
    }
Example #21
Source File: IndexShard.java From crate with Apache License 2.0

    public Engine.SyncedFlushResult syncFlush(String syncId, Engine.CommitId expectedCommitId) {
        verifyNotClosed();
        logger.trace("trying to sync flush. sync id [{}]. expected commit id [{}]", syncId, expectedCommitId);
        Engine engine = getEngine();
        if (engine.isRecovering()) {
            throw new IllegalIndexShardStateException(shardId(), state, "syncFlush is only allowed if the engine is not recovery" +
                " from translog");
        }
        return engine.syncFlush(syncId, expectedCommitId);
    }
Example #22
Source File: IndexingOperationListener.java From crate with Apache License 2.0

    @Override
    public Engine.Delete preDelete(ShardId shardId, Engine.Delete delete) {
        assert delete != null;
        for (IndexingOperationListener listener : listeners) {
            try {
                listener.preDelete(shardId, delete);
            } catch (Exception e) {
                logger.warn(() -> new ParameterizedMessage("preDelete listener [{}] failed", listener), e);
            }
        }
        return delete;
    }
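This composite fans the preDelete callback out to all registered listeners and deliberately swallows their failures, so one misbehaving listener cannot fail the delete itself. A minimal sketch of a listener it might delegate to, assuming the ES 6.x-era IndexingOperationListener interface; the class name and counter are made up for illustration:

    import java.util.concurrent.atomic.LongAdder;

    import org.elasticsearch.index.engine.Engine;
    import org.elasticsearch.index.shard.IndexingOperationListener;
    import org.elasticsearch.index.shard.ShardId;

    // Hypothetical listener: counts delete operations observed on this node.
    public class DeleteCountingListener implements IndexingOperationListener {

        private final LongAdder deletes = new LongAdder();

        @Override
        public Engine.Delete preDelete(ShardId shardId, Engine.Delete delete) {
            deletes.increment(); // invoked before the engine executes the delete
            return delete;       // a listener may also return a replacement operation
        }

        public long deleteCount() {
            return deletes.sum();
        }
    }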
Example #23
Source File: IndexShard.java From crate with Apache License 2.0

    private Engine.DeleteResult applyDeleteOperation(Engine engine,
                                                     long seqNo,
                                                     long opPrimaryTerm,
                                                     long version,
                                                     String type,
                                                     String id,
                                                     @Nullable VersionType versionType,
                                                     long ifSeqNo,
                                                     long ifPrimaryTerm,
                                                     Engine.Operation.Origin origin) throws IOException {
        assert opPrimaryTerm <= this.operationPrimaryTerm
            : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.operationPrimaryTerm + "]";
        ensureWriteAllowed(origin);
        // When there is a single type, the unique identifier is only composed of the _id,
        // so there is no way to differentiate foo#1 from bar#1. This is especially an issue
        // if a user first deletes foo#1 and then indexes bar#1: since we do not encode the
        // _type in the uid it might look like we are reindexing the same document, which
        // would fail if bar#1 is indexed with a lower version than foo#1 was deleted with.
        // In order to work around this issue, we make deletions create types. This way, we
        // fail if index and delete operations do not use the same type.
        final Term uid = extractUidForDelete(type, id);
        final Engine.Delete delete = prepareDelete(
            type,
            id,
            uid,
            seqNo,
            opPrimaryTerm,
            version,
            versionType,
            origin,
            ifSeqNo,
            ifPrimaryTerm
        );
        return delete(engine, delete);
    }
Example #24
Source File: IndexShard.java From crate with Apache License 2.0

    /**
     * opens the engine on top of the existing lucene engine and translog.
     * Operations from the translog will be replayed to bring lucene up to date.
     **/
    public void openEngineAndRecoverFromTranslog() throws IOException {
        final RecoveryState.Translog translogRecoveryStats = recoveryState.getTranslog();
        final Engine.TranslogRecoveryRunner translogRecoveryRunner = (engine, snapshot) -> {
            translogRecoveryStats.totalOperations(snapshot.totalOperations());
            translogRecoveryStats.totalOperationsOnStart(snapshot.totalOperations());
            return runTranslogRecovery(engine, snapshot, Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY,
                translogRecoveryStats::incrementRecoveredOperations);
        };
        innerOpenEngineAndTranslog();
        getEngine().recoverFromTranslog(translogRecoveryRunner, Long.MAX_VALUE);
    }
Example #25
Source File: IndexShard.java From crate with Apache License 2.0

    /**
     * Tests whether or not the engine should be flushed periodically.
     * This test is based on the current size of the translog compared to the configured flush threshold size.
     *
     * @return {@code true} if the engine should be flushed
     */
    boolean shouldPeriodicallyFlush() {
        final Engine engine = getEngineOrNull();
        if (engine != null) {
            try {
                return engine.shouldPeriodicallyFlush();
            } catch (final AlreadyClosedException e) {
                // we are already closed, no need to flush or roll
            }
        }
        return false;
    }
Example #26
Source File: TransportShardDeleteAction.java From crate with Apache License 2.0

    @Override
    protected WriteReplicaResult<ShardDeleteRequest> processRequestItemsOnReplica(IndexShard indexShard,
                                                                                  ShardDeleteRequest request) throws IOException {
        Translog.Location translogLocation = null;
        for (ShardDeleteRequest.Item item : request.items()) {
            int location = item.location();
            if (request.skipFromLocation() == location) {
                // skipping this and all next items, the primary did not processed them (mostly due to a kill request)
                break;
            }

            // Only execute delete operation on replica if the sequence number was applied from primary.
            // If that's not the case, the delete on primary didn't succeed. Note that we still need to
            // process the other items in case of a bulk request.
            if (item.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) {
                Engine.DeleteResult deleteResult = indexShard.applyDeleteOperationOnReplica(
                    item.seqNo(), item.version(), Constants.DEFAULT_MAPPING_TYPE, item.id());
                translogLocation = deleteResult.getTranslogLocation();
                if (logger.isTraceEnabled()) {
                    logger.trace("shardId={} REPLICA: successfully deleted id={}", request.shardId(), item.id());
                }
            }
        }
        return new WriteReplicaResult<>(request, translogLocation, null, indexShard, logger);
    }
Example #27
Source File: IndexShard.java From crate with Apache License 2.0

    /**
     * perform the last stages of recovery once all translog operations are done.
     * note that you should still call {@link #postRecovery(String)}.
     */
    public void finalizeRecovery() {
        recoveryState().setStage(RecoveryState.Stage.FINALIZE);
        Engine engine = getEngine();
        engine.refresh("recovery_finalization");
        engine.config().setEnableGcDeletes(true);
    }
Example #28
Source File: MockEngineSupport.java From crate with Apache License 2.0

    public Engine.Searcher wrapSearcher(Engine.Searcher engineSearcher) {
        final AssertingIndexSearcher assertingIndexSearcher = newSearcher(engineSearcher);
        assertingIndexSearcher.setSimilarity(engineSearcher.searcher().getSimilarity());
        /*
         * pass the original searcher to the super.newSearcher() method to
         * make sure this is the searcher that will be released later on.
         * If we wrap an index reader here must not pass the wrapped version
         * to the manager on release otherwise the reader will be closed too
         * early. - good news, stuff will fail all over the place if we don't
         * get this right here
         */
        SearcherCloseable closeable = new SearcherCloseable(engineSearcher, logger, inFlightSearchers);
        return new Engine.Searcher(engineSearcher.source(), assertingIndexSearcher, closeable);
    }
Example #29
Source File: ShardIndexingService.java From Elasticsearch with Apache License 2.0

    public Engine.Create preCreate(Engine.Create create) {
        totalStats.indexCurrent.inc();
        typeStats(create.type()).indexCurrent.inc();
        for (IndexingOperationListener listener : listeners) {
            create = listener.preCreate(create);
        }
        return create;
    }
Example #30
Source File: WebSocketIndexListener.java From es-change-feed-plugin with Apache License 2.0

    @Override
    public void postIndex(ShardId shardId, Engine.Index index, Engine.IndexResult result) {
        ChangeEvent change = new ChangeEvent(
            shardId.getIndex().getName(),
            index.type(),
            index.id(),
            new DateTime(),
            result.isCreated() ? ChangeEvent.Operation.CREATE : ChangeEvent.Operation.INDEX,
            result.getVersion(),
            index.source()
        );

        addChange(change);
    }
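For a listener like this to receive postIndex callbacks, it has to be registered with each index. A minimal sketch of the plugin wiring, assuming the Elasticsearch plugin API's onIndexModule hook; the plugin class name is made up, and any constructor arguments WebSocketIndexListener might require are omitted:

    import org.elasticsearch.index.IndexModule;
    import org.elasticsearch.plugins.Plugin;

    // Hypothetical plugin class: registers the listener for every index created on this node.
    public class ChangeFeedPlugin extends Plugin {

        @Override
        public void onIndexModule(IndexModule indexModule) {
            // From here on, indexing operations on this index's shards reach the listener.
            indexModule.addIndexOperationListener(new WebSocketIndexListener());
        }
    }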