org.elasticsearch.search.internal.ShardSearchRequest Java Examples

The following examples show how to use org.elasticsearch.search.internal.ShardSearchRequest. They are taken from open source projects; the source file and originating project are noted above each example.
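Depending on the Elasticsearch version involved, the examples below either construct a ShardSearchRequest directly (the older elasticsearch-inout-plugin code) or go through ShardSearchLocalRequest (the Elasticsearch 2.x-era code). As a rough orientation, here is a minimal sketch of the two ShardSearchLocalRequest constructor shapes used in Examples #13 and #15; the wrapper method names and their parameters are illustrative placeholders, not part of any real API.

// Sketch only: obtaining a shard-level request via ShardSearchLocalRequest.
// Mirrors the constructor calls in Examples #13 and #15 below; the helper
// methods and their parameters are illustrative placeholders.
ShardSearchRequest requestForWarmer(IndexShard indexShard, IndexMetaData indexMetaData,
                                    IndexWarmersMetaData.Entry entry) {
    return new ShardSearchLocalRequest(indexShard.shardId(), indexMetaData.getNumberOfShards(),
            SearchType.QUERY_THEN_FETCH, entry.source(), entry.types(), entry.requestCache());
}

ShardSearchRequest requestForLocalOperation(TermsByQueryRequest request, String[] filteringAliases) {
    return new ShardSearchLocalRequest(request.types(), request.nowInMillis(), filteringAliases);
}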
Example #1
Source File: IndicesRequestCache.java    From Elasticsearch with Apache License 2.0
/**
 * Loads the cache result, computing it if needed by executing the query phase and otherwise deserializing the cached
 * value into the {@link SearchContext#queryResult() context's query result}. The combination of load + compute allows
 * to have a single load operation that will cause other requests with the same key to wait until it is loaded and reuse
 * the same cached value.
 */
public void loadIntoContext(final ShardSearchRequest request, final SearchContext context, final QueryPhase queryPhase) throws Exception {
    assert canCache(request, context);
    Key key = buildKey(request, context);
    Loader loader = new Loader(queryPhase, context, key);
    Value value = cache.get(key, loader);
    if (loader.isLoaded()) {
        key.shard.requestCache().onMiss();
        // see if it's the first time we see this reader, and make sure to register a cleanup key
        CleanupKey cleanupKey = new CleanupKey(context.indexShard(), ((DirectoryReader) context.searcher().getIndexReader()).getVersion());
        if (!registeredClosedListeners.containsKey(cleanupKey)) {
            Boolean previous = registeredClosedListeners.putIfAbsent(cleanupKey, Boolean.TRUE);
            if (previous == null) {
                ElasticsearchDirectoryReader.addReaderCloseListener(context.searcher().getDirectoryReader(), cleanupKey);
            }
        }
    } else {
        key.shard.requestCache().onHit();
        // restore the cached query result into the context
        final QuerySearchResult result = context.queryResult();
        result.readFromWithId(context.id(), value.reference.streamInput());
        result.shardTarget(context.shardTarget());
    }
}
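The Loader referenced above is not shown in this example. In essence it is the Callable handed to cache.get(key, loader): on a miss it executes the query phase and serializes the query result, and isLoaded() tells the caller which branch was taken; on a hit the cached bytes are replayed through readFromWithId instead. A simplified sketch along those lines, assuming the same Key/Value types as above (the exact fields and the Value construction differ in the real IndicesRequestCache):

// Simplified, illustrative sketch of the cache loader used by loadIntoContext.
static class Loader implements Callable<Value> {
    private final QueryPhase queryPhase;
    private final SearchContext context;
    private final Key key;          // retained so shard request-cache stats can be updated (omitted here)
    private boolean loaded;

    Loader(QueryPhase queryPhase, SearchContext context, Key key) {
        this.queryPhase = queryPhase;
        this.context = context;
        this.key = key;
    }

    boolean isLoaded() {
        return loaded;
    }

    @Override
    public Value call() throws Exception {
        // Runs at most once per key: execute the query phase, then serialize the
        // result so concurrent requests with the same key can reuse the bytes.
        queryPhase.execute(context);
        BytesStreamOutput out = new BytesStreamOutput();
        context.queryResult().writeToNoId(out);
        loaded = true;
        return new Value(out.bytes(), out.size()); // the real code also accounts for RAM usage
    }
}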
 
Example #2
Source File: SearchService.java    From Elasticsearch with Apache License 2.0
final SearchContext createAndPutContext(ShardSearchRequest request) {
    SearchContext context = createContext(request, null);
    boolean success = false;
    try {
        putContext(context);
        if (request.scroll() != null) {
            context.indexShard().searchService().onNewScrollContext(context);
        }
        context.indexShard().searchService().onNewContext(context);
        success = true;
        return context;
    } finally {
        if (!success) {
            freeContext(context.id());
        }
    }
}
 
Example #3
Source File: IndicesRequestCache.java    From Elasticsearch with Apache License 2.0
private static Key buildKey(ShardSearchRequest request, SearchContext context) throws Exception {
    // TODO: for now, this will create different keys for different JSON order
    // TODO: tricky to get around this, need to parse and order all, which can be expensive
    return new Key(context.indexShard(),
            ((DirectoryReader) context.searcher().getIndexReader()).getVersion(),
            request.cacheKey());
}
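The resulting Key is a plain composite of the shard, the Lucene reader version, and the raw request bytes from request.cacheKey(), which is why two requests whose JSON differs only in field order hash to different entries. A hedged sketch of such a key (field set and equality are illustrative; the real class also participates in cache memory accounting):

// Illustrative sketch of the composite cache key built by buildKey().
static class Key {
    final IndexShard shard;       // shard the cached result belongs to
    final long readerVersion;     // version of the DirectoryReader the result was computed against
    final BytesReference value;   // request.cacheKey(): the raw request bytes

    Key(IndexShard shard, long readerVersion, BytesReference value) {
        this.shard = shard;
        this.readerVersion = readerVersion;
        this.value = value;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (!(o instanceof Key)) return false;
        Key other = (Key) o;
        return readerVersion == other.readerVersion
                && shard.equals(other.shard)
                && value.equals(other.value);
    }

    @Override
    public int hashCode() {
        int result = shard.hashCode();
        result = 31 * result + Long.hashCode(readerVersion);
        result = 31 * result + value.hashCode();
        return result;
    }
}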
 
Example #4
Source File: SearchService.java    From Elasticsearch with Apache License 2.0
public DfsSearchResult executeDfsPhase(ShardSearchRequest request) {
    final SearchContext context = createAndPutContext(request);
    try {
        contextProcessing(context);
        dfsPhase.execute(context);
        contextProcessedSuccessfully(context);
        return context.dfsResult();
    } catch (Throwable e) {
        logger.trace("Dfs phase failed", e);
        processFailure(context, e);
        throw ExceptionsHelper.convertToRuntime(e);
    } finally {
        cleanContext(context);
    }
}
 
Example #5
Source File: SearchService.java    From Elasticsearch with Apache License 2.0
@Deprecated // remove in 3.0
public QuerySearchResult executeScan(ShardSearchRequest request) {
    final SearchContext context = createAndPutContext(request);
    final int originalSize = context.size();
    deprecationLogger.deprecated("[search_type=scan] is deprecated, please use a regular scroll that sorts on [_doc] instead");
    try {
        if (context.aggregations() != null) {
            throw new IllegalArgumentException("aggregations are not supported with search_type=scan");
        }

        if (context.scrollContext() == null || context.scrollContext().scroll == null) {
            throw new ElasticsearchException("Scroll must be provided when scanning...");
        }

        assert context.searchType() == SearchType.SCAN;
        context.searchType(SearchType.QUERY_THEN_FETCH); // move to QUERY_THEN_FETCH, and then, when scrolling, move to SCAN
        context.size(0); // set size to 0 so that we only count matches
        assert context.searchType() == SearchType.QUERY_THEN_FETCH;

        contextProcessing(context);
        queryPhase.execute(context);
        contextProcessedSuccessfully(context);
        return context.queryResult();
    } catch (Throwable e) {
        logger.trace("Scan phase failed", e);
        processFailure(context, e);
        throw ExceptionsHelper.convertToRuntime(e);
    } finally {
        context.size(originalSize);
        cleanContext(context);
    }
}
 
Example #6
Source File: SearchService.java    From Elasticsearch with Apache License 2.0
/**
 * Try to load the query results from the cache or execute the query phase directly if the cache cannot be used.
 */
private void loadOrExecuteQueryPhase(final ShardSearchRequest request, final SearchContext context,
        final QueryPhase queryPhase) throws Exception {
    final boolean canCache = indicesQueryCache.canCache(request, context);
    if (canCache) {
        indicesQueryCache.loadIntoContext(request, context, queryPhase);
    } else {
        queryPhase.execute(context);
    }
}
 
Example #7
Source File: SearchService.java    From Elasticsearch with Apache License 2.0
public QuerySearchResultProvider executeQueryPhase(ShardSearchRequest request) {
    final SearchContext context = createAndPutContext(request);
    final ShardSearchStats shardSearchStats = context.indexShard().searchService();
    try {
        shardSearchStats.onPreQueryPhase(context);
        long time = System.nanoTime();
        contextProcessing(context);

        loadOrExecuteQueryPhase(request, context, queryPhase);

        if (context.queryResult().topDocs().scoreDocs.length == 0 && context.scrollContext() == null) {
            freeContext(context.id());
        } else {
            contextProcessedSuccessfully(context);
        }
        shardSearchStats.onQueryPhase(context, System.nanoTime() - time);

        return context.queryResult();
    } catch (Throwable e) {
        // execution exception can happen while loading the cache, strip it
        if (e instanceof ExecutionException) {
            e = e.getCause();
        }
        shardSearchStats.onFailedQueryPhase(context);
        logger.trace("Query phase failed", e);
        processFailure(context, e);
        throw ExceptionsHelper.convertToRuntime(e);
    } finally {
        cleanContext(context);
    }
}
 
Example #8
Source File: SearchIntoContext.java    From elasticsearch-inout-plugin with Apache License 2.0
public SearchIntoContext(long id, ShardSearchRequest request,
        SearchShardTarget shardTarget, Engine.Searcher engineSearcher,
        IndexService indexService, IndexShard indexShard,
        ScriptService scriptService, CacheRecycler cacheRecycler) {
    super(id, request, shardTarget, engineSearcher, indexService,
            indexShard, scriptService, cacheRecycler);
}
 
Example #9
Source File: AbstractTransportExportAction.java    From elasticsearch-inout-plugin with Apache License 2.0
@Override
protected ShardExportResponse shardOperation(ShardExportRequest request) throws ElasticSearchException {


    IndexService indexService = indicesService.indexServiceSafe(request.index());
    IndexShard indexShard = indexService.shardSafe(request.shardId());

    SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), request.index(), request.shardId());
    ExportContext context = new ExportContext(0,
        new ShardSearchRequest().types(request.types()).filteringAliases(request.filteringAliases()),
        shardTarget, indexShard.searcher(), indexService, indexShard, scriptService, cacheRecycler, nodePath);
    ExportContext.setCurrent(context);

    try {
        BytesReference source = request.source();
        exportParser.parseSource(context, source);
        context.preProcess();
        exporter.check(context);
        try {
            if (context.explain()) {
                return new ShardExportResponse(shardTarget.nodeIdText(), request.index(), request.shardId(), context.outputCmd(), context.outputCmdArray(), context.outputFile());
            } else {
                Exporter.Result res = exporter.execute(context);
                return new ShardExportResponse(shardTarget.nodeIdText(), request.index(), request.shardId(), context.outputCmd(), context.outputCmdArray(), context.outputFile(), res.outputResult.stdErr, res.outputResult.stdOut, res.outputResult.exit, res.numExported);
            }

        } catch (Exception e) {
            throw new QueryPhaseExecutionException(context, "failed to execute export", e);
        }
    } finally {
        // this will also release the index searcher
        context.release();
        SearchContext.removeCurrent();
    }
}
 
Example #10
Source File: PercolateContext.java    From Elasticsearch with Apache License 2.0
@Override
public ShardSearchRequest request() {
    throw new UnsupportedOperationException();
}
 
Example #11
Source File: IndicesRequestCache.java    From Elasticsearch with Apache License 2.0
/**
 * Can the shard request be cached at all?
 */
public boolean canCache(ShardSearchRequest request, SearchContext context) {
    // TODO: for now, template is not supported, though we could use the generated bytes as the key
    if (hasLength(request.templateSource())) {
        return false;
    }

    // for now, only enable it for requests with no hits
    if (context.size() != 0) {
        return false;
    }

    // We cannot cache with DFS because results depend not only on the content of the index but also
    // on the overridden statistics. So if you ran two queries on the same index with different stats
    // (because another shard was updated) you would get wrong results because of the scores
    // (think about top_hits aggs or scripts using the score)
    if (!CACHEABLE_SEARCH_TYPES.contains(context.searchType())) {
        return false;
    }

    IndexMetaData index = clusterService.state().getMetaData().index(request.index());
    if (index == null) { // in case we didn't yet have the cluster state, or it just got deleted
        return false;
    }
    // if not explicitly set on the request, fall back to the index setting; otherwise honor the request flag
    if (request.requestCache() == null) {
        if (!isCacheEnabled(index.getSettings(), Boolean.FALSE)) {
            return false;
        }
    } else if (!request.requestCache()) {
        return false;
    }
    // if the reader is not a directory reader, we can't get the version from it
    if (!(context.searcher().getIndexReader() instanceof DirectoryReader)) {
        return false;
    }
    // if "now in millis" is used (or, in the future, a more generic "isDeterministic" flag),
    // then we can't cache based on the "now" key within the search request, as it is not deterministic
    if (context.nowInMillisUsed()) {
        return false;
    }
    return true;
}
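CACHEABLE_SEARCH_TYPES is defined elsewhere in IndicesRequestCache and is not shown here. Given the DFS comment above, it presumably contains only the non-DFS search types, along these lines (an assumption, not a quote from the source):

// Assumption based on the DFS comment above: only non-DFS search types are cacheable.
private static final Set<SearchType> CACHEABLE_SEARCH_TYPES =
        EnumSet.of(SearchType.QUERY_THEN_FETCH, SearchType.QUERY_AND_FETCH);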
 
Example #12
Source File: SearchService.java    From Elasticsearch with Apache License 2.0
final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) {
    IndexService indexService = indicesService.indexServiceSafe(request.index());
    IndexShard indexShard = indexService.shardSafe(request.shardId());

    SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), request.index(), request.shardId());
    String searchSource = "search";
    if (request.hasHeader("search_source")) {
        searchSource = request.getHeader("search_source");
    }
    Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher(searchSource) : searcher;

    DefaultSearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, indexService, indexShard, scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, defaultSearchTimeout);
    SearchContext.setCurrent(context);
    try {
        if (request.scroll() != null) {
            context.scrollContext(new ScrollContext());
            context.scrollContext().scroll = request.scroll();
        }

        parseTemplate(request, context);
        parseSource(context, request.source());
        parseSource(context, request.extraSource());

        // if the from and size are still not set, default them
        if (context.from() == -1) {
            context.from(0);
        }
        if (context.searchType() == SearchType.COUNT) {
            // so that the optimizations we apply to size=0 also apply to search_type=COUNT
            // and that we close contexts when done with the query phase
            context.searchType(SearchType.QUERY_THEN_FETCH);
            context.size(0);
        } else if (context.size() == -1) {
            context.size(10);
        }

        if (context.request().isProfile()) {
            context.setProfilers(new Profilers(context.searcher()));
        }

        // pre process
        dfsPhase.preProcess(context);
        queryPhase.preProcess(context);
        fetchPhase.preProcess(context);

        // compute the context keep alive
        long keepAlive = defaultKeepAlive;
        if (request.scroll() != null && request.scroll().keepAlive() != null) {
            keepAlive = request.scroll().keepAlive().millis();
        }
        context.keepAlive(keepAlive);
    } catch (Throwable e) {
        context.close();
        throw ExceptionsHelper.convertToRuntime(e);
    }

    return context;
}
 
Example #13
Source File: SearchService.java    From Elasticsearch with Apache License 2.0
public TerminationHandle internalWarm(final IndexShard indexShard, final IndexMetaData indexMetaData, final IndicesWarmer.WarmerContext warmerContext, ThreadPool threadPool, final boolean top) {
    IndexWarmersMetaData custom = indexMetaData.custom(IndexWarmersMetaData.TYPE);
    if (custom == null) {
        return TerminationHandle.NO_WAIT;
    }
    final Executor executor = threadPool.executor(executor());
    final CountDownLatch latch = new CountDownLatch(custom.entries().size());
    for (final IndexWarmersMetaData.Entry entry : custom.entries()) {
        executor.execute(new Runnable() {

            @Override
            public void run() {
                SearchContext context = null;
                try {
                    long now = System.nanoTime();
                    ShardSearchRequest request = new ShardSearchLocalRequest(indexShard.shardId(), indexMetaData.getNumberOfShards(),
                            SearchType.QUERY_THEN_FETCH, entry.source(), entry.types(), entry.requestCache());
                    context = createContext(request, warmerContext.searcher());
                    // if we use sort, we need to do query to sort on it and load relevant field data
                    // if not, we might as well set size=0 (and cache if needed)
                    if (context.sort() == null) {
                        context.size(0);
                    }
                    boolean canCache = indicesQueryCache.canCache(request, context);
                    // early terminate when we can cache, since we can only do proper caching on top level searcher
                    // also, if we can't cache, and it's top, we don't need to execute it, since we already did when it's not top
                    if (canCache != top) {
                        return;
                    }
                    loadOrExecuteQueryPhase(request, context, queryPhase);
                    long took = System.nanoTime() - now;
                    if (indexShard.warmerService().logger().isTraceEnabled()) {
                        indexShard.warmerService().logger().trace("warmed [{}], took [{}]", entry.name(), TimeValue.timeValueNanos(took));
                    }
                } catch (Throwable t) {
                    indexShard.warmerService().logger().warn("warmer [{}] failed", t, entry.name());
                } finally {
                    try {
                        if (context != null) {
                            freeContext(context.id());
                            cleanContext(context);
                        }
                    } finally {
                        latch.countDown();
                    }
                }
            }

        });
    }
    return new TerminationHandle() {
        @Override
        public void awaitTermination() throws InterruptedException {
            latch.await();
        }
    };
}
 
Example #14
Source File: ShardDfsOnlyRequest.java    From Elasticsearch with Apache License 2.0
public ShardSearchRequest getShardSearchRequest() {
    return shardSearchRequest;
}
 
Example #15
Source File: TransportTermsByQueryAction.java    From siren-join with GNU Affero General Public License v3.0
/**
 * The operation that executes the query and generates a {@link TermsByQueryShardResponse} for each shard.
 */
@Override
protected TermsByQueryShardResponse shardOperation(TermsByQueryShardRequest shardRequest) throws ElasticsearchException {
  IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId().getIndex());
  IndexShard indexShard = indexService.shardSafe(shardRequest.shardId().id());
  TermsByQueryRequest request = shardRequest.request();
  OrderByShardOperation orderByOperation = OrderByShardOperation.get(request.getOrderBy(), request.maxTermsPerShard());

  SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(),
                                                        shardRequest.shardId().getIndex(),
                                                        shardRequest.shardId().id());

  ShardSearchRequest shardSearchRequest = new ShardSearchLocalRequest(request.types(), request.nowInMillis(),
                                                                      shardRequest.filteringAliases());

  SearchContext context = new DefaultSearchContext(0, shardSearchRequest, shardTarget,
    indexShard.acquireSearcher("termsByQuery"), indexService, indexShard, scriptService,
    pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher,
    SearchService.NO_TIMEOUT);
  SearchContext.setCurrent(context);

  try {
    MappedFieldType fieldType = context.smartNameFieldType(request.field());
    if (fieldType == null) {
      throw new SearchContextException(context, "[termsByQuery] field '" + request.field() +
              "' not found for types " + Arrays.toString(request.types()));
    }

    IndexFieldData indexFieldData = context.fieldData().getForField(fieldType);

    BytesReference querySource = request.querySource();
    if (querySource != null && querySource.length() > 0) {
      XContentParser queryParser = null;
      try {
        queryParser = XContentFactory.xContent(querySource).createParser(querySource);
        QueryParseContext.setTypes(request.types());
        ParsedQuery parsedQuery = orderByOperation.getParsedQuery(queryParser, indexService);
        if (parsedQuery != null) {
          context.parsedQuery(parsedQuery);
        }
      }
      finally {
        QueryParseContext.removeTypes();
        if (queryParser != null) {
          queryParser.close();
        }
      }
    }

    context.preProcess();

    // execute the search only gathering the hit count and bitset for each segment
    logger.debug("{}: Executes search for collecting terms {}", Thread.currentThread().getName(),
      shardRequest.shardId());

    TermsCollector termsCollector = this.getTermsCollector(request.termsEncoding(), indexFieldData, context);
    if (request.expectedTerms() != null) termsCollector.setExpectedTerms(request.expectedTerms());
    if (request.maxTermsPerShard() != null) termsCollector.setMaxTerms(request.maxTermsPerShard());
    HitStream hitStream = orderByOperation.getHitStream(context);
    TermsSet terms = termsCollector.collect(hitStream);

    logger.debug("{}: Returns terms response with {} terms for shard {}", Thread.currentThread().getName(),
      terms.size(), shardRequest.shardId());

    return new TermsByQueryShardResponse(shardRequest.shardId(), terms);
  }
  catch (Throwable e) {
    logger.error("[termsByQuery] Error executing shard operation", e);
    throw new QueryPhaseExecutionException(context, "[termsByQuery] Failed to execute query", e);
  }
  finally {
    // this will also release the index searcher
    context.close();
    SearchContext.removeCurrent();
  }
}
 
Example #16
Source File: AbstractTransportSearchIntoAction.java    From elasticsearch-inout-plugin with Apache License 2.0
@Override
protected ShardSearchIntoResponse shardOperation(ShardSearchIntoRequest
        request) throws ElasticSearchException {

    IndexService indexService = indicesService.indexServiceSafe(
            request.index());
    IndexShard indexShard = indexService.shardSafe(request.shardId());

    SearchShardTarget shardTarget = new SearchShardTarget(
            clusterService.localNode().id(), request.index(),
            request.shardId());
    SearchIntoContext context = new SearchIntoContext(0,
        new ShardSearchRequest().types(request.types()).filteringAliases(request.filteringAliases()),
        shardTarget, indexShard.searcher(), indexService, indexShard, scriptService, cacheRecycler
    );
    SearchIntoContext.setCurrent(context);

    try {
        BytesReference source = request.source();
        parser.parseSource(context, source);
        context.preProcess();
        try {
            if (context.explain()) {
                return new ShardSearchIntoResponse(
                        shardTarget.nodeIdText(), request.index(),
                        request.shardId());
            } else {
                WriterResult res = writer.execute(context);
                return new ShardSearchIntoResponse(
                        shardTarget.nodeIdText(), request.index(),
                        request.shardId(), res);
            }

        } catch (Exception e) {
            throw new QueryPhaseExecutionException(context,
                    "failed to execute inout", e);
        }
    } finally {
        // this will also release the index searcher
        context.release();
        SearchContext.removeCurrent();
    }
}
 
Example #17
Source File: ExportContext.java    From elasticsearch-inout-plugin with Apache License 2.0
public ExportContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget,
                     Engine.Searcher engineSearcher, IndexService indexService, IndexShard indexShard,
                     ScriptService scriptService, CacheRecycler cacheRecycler, String nodePath) {
    super(id, request, shardTarget, engineSearcher, indexService, indexShard, scriptService, cacheRecycler);
    this.nodePath = nodePath;
}