org.elasticsearch.ElasticsearchException Java Examples
The following examples show how to use
org.elasticsearch.ElasticsearchException.
Each example is taken from an open-source project; the source file, project, and license are listed above the snippet.
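Before the examples, here is a minimal, self-contained sketch of the two patterns that recur throughout this page: wrapping a lower-level checked exception in an ElasticsearchException with a message and a cause (as Example #1 does with an IOException), and catching an ElasticsearchException and unwrapping its cause (as Example #6 does). The class name, the readSettings helper, and the file path below are hypothetical illustrations, not part of any project shown here.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.elasticsearch.ElasticsearchException;

public class ElasticsearchExceptionSketch {

    // Pattern 1: wrap a checked exception with a message and cause.
    // The helper name and behavior are hypothetical.
    static byte[] readSettings(String path) {
        try {
            return Files.readAllBytes(Paths.get(path));
        } catch (IOException e) {
            throw new ElasticsearchException("failed to read settings from [" + path + "]", e);
        }
    }

    // Pattern 2: catch the exception and inspect its unwrapped cause.
    public static void main(String[] args) {
        try {
            readSettings("/nonexistent/settings.yml"); // hypothetical path, expected to fail
        } catch (ElasticsearchException e) {
            Throwable root = e.unwrapCause();
            System.out.println("root cause: " + root);
        }
    }
}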
Example #1
Source File: IndexFieldTerm.java From Elasticsearch with Apache License 2.0
public void setDocument(int docId) {
    assert (postings != null);
    try {
        // we try to advance to the current document.
        int currentDocPos = postings.docID();
        if (currentDocPos < docId) {
            currentDocPos = postings.advance(docId);
        }
        if (currentDocPos == docId) {
            freq = postings.freq();
        } else {
            freq = 0;
        }
        iterator.nextDoc();
    } catch (IOException e) {
        throw new ElasticsearchException("While trying to initialize term positions in IndexFieldTerm.setNextDoc() ", e);
    }
}
Example #2
Source File: GathererService.java From elasticsearch-gatherer with Apache License 2.0
@Override
protected void doStart() throws ElasticSearchException {
    // announce default gatherers
    announceGatherers();
    // the load updater task
    ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor(
            daemonThreadFactory(settings, "gatherer_load_watcher"));
    executorService.scheduleAtFixedRate(new Thread() {
        public void run() {
            double load = operatingSystemMXBean.getSystemLoadAverage();
            nodeService.putAttribute(GathererConstants.GATHERER_LOAD, Double.toString(load));
        }
    }, 1L, 1L, TimeUnit.MINUTES);
    // the queue length updater task
    executorService.scheduleAtFixedRate(new Thread() {
        public void run() {
            int length = 0; // TODO getPendingJobs();
            nodeService.putAttribute(GathererConstants.GATHERER_LENGTH, Integer.toString(length));
        }
    }, 1L, 5L, TimeUnit.SECONDS);
    logger.info("started");
}
Example #3
Source File: AbstractRetryableEsRequestTest.java From io with Apache License 2.0
/**
 * When the initial request throws NoNodeAvailableException and the first retry succeeds,
 * the expected return value is returned.
 */
@Test
public void 初回リクエストでNoNodeAvailableException_リトライ1回目で成功した場合_適切な復帰値が返ること() {
    TestRequest requestMock = Mockito.spy(new TestRequest());
    NoNodeAvailableException toBeThrown = Mockito.mock(NoNodeAvailableException.class);
    Mockito.doThrow(toBeThrown)
            .doReturn(SUCCESS_RESPONSE)
            .when(requestMock)
            .doProcess();
    String result = requestMock.doRequest();
    assertEquals(SUCCESS_RESPONSE, result);
    Mockito.verify(requestMock, Mockito.times(2)).doProcess();
    Mockito.verify(requestMock, Mockito.times(0)).onParticularError(Mockito.any(ElasticsearchException.class));
}
Example #4
Source File: AbstractRetryableEsRequestTest.java From io with Apache License 2.0
/**
 * After ContinueRetry is thrown from the particular-exception handler during retry processing,
 * processing moves back into the retry loop.
 */
@Test
public void リトライ処理中の特定例外処理からContinueRetryが投げられた後_リトライ処理に移行すること() {
    TestRequest requestMock = Mockito.spy(new TestRequest());
    NodeDisconnectedException toBeThrown = Mockito.mock(NodeDisconnectedException.class);
    Mockito.doThrow(toBeThrown)                    // initial request
            .doThrow(toBeThrown)                   // 1st retry
            .doThrow(new SettingsException("foo")) // 2nd retry: here #onParticularError() throws ContinueRetry to keep retrying
            .doThrow(toBeThrown)                   // 3rd retry
            .doReturn(SUCCESS_RESPONSE)
            .when(requestMock)
            .doProcess();
    String result = requestMock.doRequest();
    assertEquals(SUCCESS_RESPONSE, result);
    // initial request + 3 retries + the successful call: 5 invocations expected.
    Mockito.verify(requestMock, Mockito.times(5)).doProcess();
    Mockito.verify(requestMock, Mockito.times(1)).onParticularError(Mockito.any(ElasticsearchException.class));
}
Example #5
Source File: FieldWriter.java From elasticsearch-inout-plugin with Apache License 2.0
/**
 * Method to recursively create a nested object
 */
private void writeMap(Map<String, Object> root, Object value, String part) {
    if (part.contains(".")) {
        String[] parts = part.split("\\.", 2);
        Object o = root.get(parts[0]);
        if (o == null) {
            o = new HashMap<String, Object>();
        } else if (!(o instanceof Map)) {
            throw new ElasticSearchException("Error on rewriting objects: Mixed objects and values");
        }
        Map<String, Object> sub = (Map<String, Object>) o;
        writeMap(sub, value, parts[1]);
        root.put(parts[0], sub);
    } else {
        if (((Map<String, Object>) root).get(part) instanceof Map) {
            throw new ElasticSearchException("Error on rewriting objects: Mixed objects and values");
        }
        root.put(part, value);
    }
}
Example #6
Source File: AdapterActionFuture.java From Elasticsearch with Apache License 2.0
static RuntimeException rethrowExecutionException(ExecutionException e) {
    if (e.getCause() instanceof ElasticsearchException) {
        ElasticsearchException esEx = (ElasticsearchException) e.getCause();
        Throwable root = esEx.unwrapCause();
        if (root instanceof ElasticsearchException) {
            return (ElasticsearchException) root;
        } else if (root instanceof RuntimeException) {
            return (RuntimeException) root;
        }
        return new UncategorizedExecutionException("Failed execution", root);
    } else if (e.getCause() instanceof RuntimeException) {
        return (RuntimeException) e.getCause();
    } else {
        return new UncategorizedExecutionException("Failed execution", e);
    }
}
Example #7
Source File: TransportShardMultiTermsVectorAction.java From Elasticsearch with Apache License 2.0
@Override
protected MultiTermVectorsShardResponse shardOperation(MultiTermVectorsShardRequest request, ShardId shardId) {
    MultiTermVectorsShardResponse response = new MultiTermVectorsShardResponse();
    for (int i = 0; i < request.locations.size(); i++) {
        TermVectorsRequest termVectorsRequest = request.requests.get(i);
        try {
            IndexService indexService = indicesService.indexServiceSafe(request.index());
            IndexShard indexShard = indexService.shardSafe(shardId.id());
            TermVectorsResponse termVectorsResponse = indexShard.termVectorsService().getTermVectors(termVectorsRequest, shardId.getIndex());
            termVectorsResponse.updateTookInMillis(termVectorsRequest.startTime());
            response.add(request.locations.get(i), termVectorsResponse);
        } catch (Throwable t) {
            if (TransportActions.isShardNotAvailableException(t)) {
                throw (ElasticsearchException) t;
            } else {
                logger.debug("{} failed to execute multi term vectors for [{}]/[{}]", t, shardId, termVectorsRequest.type(), termVectorsRequest.id());
                response.add(request.locations.get(i),
                        new MultiTermVectorsResponse.Failure(request.index(), termVectorsRequest.type(), termVectorsRequest.id(), t));
            }
        }
    }
    return response;
}
Example #8
Source File: SyncedFlushService.java From Elasticsearch with Apache License 2.0
private ShardSyncedFlushResponse performSyncedFlush(ShardSyncedFlushRequest request) {
    IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
    IndexShard indexShard = indexService.shardSafe(request.shardId().id());
    logger.trace("{} performing sync flush. sync id [{}], expected commit id {}", request.shardId(), request.syncId(), request.expectedCommitId());
    Engine.SyncedFlushResult result = indexShard.syncFlush(request.syncId(), request.expectedCommitId());
    logger.trace("{} sync flush done. sync id [{}], result [{}]", request.shardId(), request.syncId(), result);
    switch (result) {
        case SUCCESS:
            return new ShardSyncedFlushResponse();
        case COMMIT_MISMATCH:
            return new ShardSyncedFlushResponse("commit has changed");
        case PENDING_OPERATIONS:
            return new ShardSyncedFlushResponse("pending operations");
        default:
            throw new ElasticsearchException("unknown synced flush result [" + result + "]");
    }
}
Example #9
Source File: AbstractRetryableEsRequestTest.java From io with Apache License 2.0
/**
 * When NoNodeAvailableException keeps occurring and the third retry succeeds,
 * the expected return value is returned.
 */
@Test
public void NoNodeAvailableExceptionが続き_リトライ3回目で成功した場合_適切な復帰値が返ること() {
    TestRequest requestMock = Mockito.spy(new TestRequest());
    NoNodeAvailableException toBeThrown = Mockito.mock(NoNodeAvailableException.class);
    Mockito.doThrow(toBeThrown)         // initial request
            .doThrow(toBeThrown)        // 1st retry
            .doThrow(toBeThrown)        // 2nd retry
            .doReturn(SUCCESS_RESPONSE) // 3rd retry succeeds
            .when(requestMock)
            .doProcess();
    String result = requestMock.doRequest();
    assertEquals(SUCCESS_RESPONSE, result);
    // doProcess should be called 4 times.
    Mockito.verify(requestMock, Mockito.times(4)).doProcess();
    Mockito.verify(requestMock, Mockito.times(0)).onParticularError(Mockito.any(ElasticsearchException.class));
}
Example #10
Source File: AbstractRetryableEsRequestTest.java From io with Apache License 2.0
/**
 * When the particular exception occurs on the initial request, the particular-error
 * handler is invoked and its response is returned.
 */
@Test
public void 初回リクエスト時に特定例外が発生した場合_特定例外用処理が呼び出されてレスポンスが返ること() {
    TestRequest requestMock = Mockito.spy(new TestRequest());
    EsExceptionForTest toBeThrown = Mockito.mock(EsExceptionForTest.class);
    Mockito.doThrow(toBeThrown) // initial request
            .when(requestMock)
            .doProcess();
    String result = requestMock.doRequest();
    assertEquals(ON_ERROR_RESPONSE, result);
    Mockito.verify(requestMock, Mockito.times(1)).doProcess();
    Mockito.verify(requestMock, Mockito.times(1)).onParticularError(Mockito.any(ElasticsearchException.class));
}
Example #11
Source File: FactSearchManager.java From act-platform with ISC License
private ScrollingSearchResult.ScrollingBatch<FactDocument> fetchNextFactsBatch(String scrollId) {
    SearchResponse response;
    try {
        SearchScrollRequest request = new SearchScrollRequest()
                .scrollId(scrollId)
                .scroll(searchScrollExpiration);
        response = clientFactory.getClient().scroll(request, RequestOptions.DEFAULT);
    } catch (ElasticsearchException | IOException ex) {
        LOGGER.warning(ex, "Could not perform request to retrieve next batch of search results. Stop scrolling.");
        return ScrollingSearchResult.emptyBatch();
    }

    if (response.status() != RestStatus.OK) {
        LOGGER.warning("Could not retrieve next batch of search results (response code %s). Stop scrolling.", response.status());
        return ScrollingSearchResult.emptyBatch();
    }

    return createFactsBatch(response);
}
Example #12
Source File: IndexState.java From trident-elasticsearch with Apache License 2.0
private void runBulk(BulkRequestBuilder bulkRequest) {
    if (bulkRequest.numberOfActions() > 0) {
        int tryCount = 0;
        boolean shouldTryAgain;
        do {
            shouldTryAgain = false;
            try {
                BulkResponse bulkResponse = bulkRequest.execute().actionGet();
                if (bulkResponse.hasFailures()) {
                    shouldTryAgain = this.exceptionHandler.onBulkRequestFailure(bulkResponse, tryCount);
                    tryCount++;
                }
            } catch (ElasticsearchException e) {
                shouldTryAgain = this.exceptionHandler.onElasticSearchException(e, tryCount);
                tryCount++;
            }
        } while (shouldTryAgain);
    } else {
        LOG.debug("Empty batch being submitted");
    }
}
Example #13
Source File: AbstractRetryableEsRequestTest.java From io with Apache License 2.0
/**
 * When a non-retryable exception occurs on the initial request, the original exception
 * is thrown without retrying.
 */
@Test(expected = EsClientException.class)
public void 初回リクエストでリトライ対象外の例外が発生した場合_リトライせずに初回例外を投げること() {
    TestRequest requestMock = Mockito.spy(new TestRequest());
    Mockito.doThrow(new IndexMissingException(new Index("abc"))) // for some reason a mocked exception did not work here.
            .when(requestMock)
            .doProcess();
    try {
        requestMock.doRequest();
        fail("Should not return");
    } finally {
        Mockito.verify(requestMock, Mockito.times(1)).doProcess();
        Mockito.verify(requestMock, Mockito.times(0)).onParticularError(Mockito.any(ElasticsearchException.class));
    }
}
Example #14
Source File: AbstractRetryableEsRequestTest.java From io with Apache License 2.0
/**
 * When a non-retryable exception occurs on the initial request, the original exception
 * is thrown without retrying.
 */
@Test(expected = EsClientException.class)
public void 初回リクエストでリトライ対象外の例外が発生した場合_リトライせずに初回例外を投げること() {
    TestRequest requestMock = Mockito.spy(new TestRequest());
    Mockito.doThrow(new IndexNotFoundException("abc")) // for some reason a mocked exception did not work here.
            .when(requestMock)
            .doProcess();
    try {
        requestMock.doRequest();
        fail("Should not return");
    } finally {
        Mockito.verify(requestMock, Mockito.times(1)).doProcess();
        Mockito.verify(requestMock, Mockito.times(0)).onParticularError(Mockito.any(ElasticsearchException.class));
    }
}
Example #15
Source File: ESIndexState.java From storm-trident-elasticsearch with Apache License 2.0
public void bulkUpdateIndices(List<TridentTuple> inputs, TridentTupleMapper<Document<T>> mapper, BulkResponseHandler handler) {
    BulkRequestBuilder bulkRequest = client.prepareBulk();
    for (TridentTuple input : inputs) {
        Document<T> doc = mapper.map(input);
        byte[] source = serializeSourceOrFail(doc);
        IndexRequestBuilder request = client.prepareIndex(doc.getName(), doc.getType(), doc.getId()).setSource(source);
        if (doc.getParentId() != null) {
            request.setParent(doc.getParentId());
        }
        bulkRequest.add(request);
    }

    if (bulkRequest.numberOfActions() > 0) {
        try {
            handler.handle(bulkRequest.execute().actionGet());
        } catch (ElasticsearchException e) {
            LOGGER.error("error while executing bulk request to elasticsearch");
            throw new FailedException("Failed to store data into elasticsearch", e);
        }
    }
}
Example #16
Source File: IngestTransportClient.java From elasticsearch-helper with Apache License 2.0
@Override
public IngestTransportClient delete(String index, String type, String id) {
    if (closed) {
        if (throwable != null) {
            throw new ElasticsearchException("client is closed, possible reason: ", throwable);
        } else {
            throw new ElasticsearchException("client is closed");
        }
    }
    try {
        metric.getCurrentIngest().inc(index, type, id);
        ingestProcessor.add(new DeleteRequest(index).type(type).id(id));
    } catch (Exception e) {
        logger.error("add of delete request failed: " + e.getMessage(), e);
        throwable = e;
        closed = true;
    }
    return this;
}
Example #17
Source File: TransportShardMultiPercolateAction.java From Elasticsearch with Apache License 2.0
@Override
protected Response shardOperation(Request request, ShardId shardId) {
    // TODO: Look into combining the shard req's docs into one in memory index.
    Response response = new Response();
    response.items = new ArrayList<>(request.items.size());
    for (Request.Item item : request.items) {
        Response.Item responseItem;
        int slot = item.slot;
        try {
            responseItem = new Response.Item(slot, percolatorService.percolate(item.request));
        } catch (Throwable t) {
            if (TransportActions.isShardNotAvailableException(t)) {
                throw (ElasticsearchException) t;
            } else {
                logger.debug("{} failed to multi percolate", t, request.shardId());
                responseItem = new Response.Item(slot, t);
            }
        }
        response.items.add(responseItem);
    }
    return response;
}
Example #18
Source File: AbstractRetryableEsRequestTest.java From io with Apache License 2.0
/**
 * When ClusterBlockException keeps occurring and the fifth retry succeeds,
 * the expected return value is returned.
 */
@Test
public void ClusterBlockExceptionが続き_リトライ5回目で成功した場合_適切な復帰値が返ること() {
    TestRequest requestMock = Mockito.spy(new TestRequest());
    ClusterBlockException toBeThrown = Mockito.mock(ClusterBlockException.class);
    Mockito.doThrow(toBeThrown)         // initial request
            .doThrow(toBeThrown)        // 1st retry
            .doThrow(toBeThrown)        // 2nd retry
            .doThrow(toBeThrown)        // 3rd retry
            .doThrow(toBeThrown)        // 4th retry
            .doReturn(SUCCESS_RESPONSE) // 5th retry succeeds
            .when(requestMock)
            .doProcess();
    String result = requestMock.doRequest();
    assertEquals(SUCCESS_RESPONSE, result);
    // doProcess should be called 6 times.
    Mockito.verify(requestMock, Mockito.times(6)).doProcess();
    Mockito.verify(requestMock, Mockito.times(0)).onParticularError(Mockito.any(ElasticsearchException.class));
}
Example #19
Source File: ElasticsearchDataModel.java From elasticsearch-taste with Apache License 2.0
@Override
public void setPreference(final long userID, final long itemID, final float value) {
    createUserID(userID);
    createItemID(itemID);
    final Map<String, Object> source = new HashMap<>();
    source.put(userIdField, userID);
    source.put(itemIdField, itemID);
    source.put(valueField, value);
    source.put(timestampField, new Date());
    try {
        client.prepareIndex(preferenceIndex, preferenceType)
                .setSource(source).setRefresh(true).execute().actionGet();
    } catch (final ElasticsearchException e) {
        throw new TasteException("Failed to set (" + userID + "," + itemID + "," + value + ")", e);
    }
}
Example #20
Source File: IngestTransportClient.java From elasticsearch-helper with Apache License 2.0
@Override
public IngestTransportClient bulkDelete(org.elasticsearch.action.delete.DeleteRequest deleteRequest) {
    if (closed) {
        if (throwable != null) {
            throw new ElasticsearchException("client is closed, possible reason: ", throwable);
        } else {
            throw new ElasticsearchException("client is closed");
        }
    }
    try {
        metric.getCurrentIngest().inc(deleteRequest.index(), deleteRequest.type(), deleteRequest.id());
        ingestProcessor.add(new DeleteRequest(deleteRequest));
    } catch (Exception e) {
        logger.error("add of delete request failed: " + e.getMessage(), e);
        throwable = e;
        closed = true;
    }
    return this;
}
Example #21
Source File: ElasticSearchServer.java From vind with Apache License 2.0
private IndexResult indexMultipleDocuments(List<Document> docs, int withinMs) {
    log.warn("Parameter 'within' not in use in elastic search backend");
    final StopWatch elapsedTime = StopWatch.createStarted();
    final List<Map<String, Object>> jsonDocs = docs.parallelStream()
            .map(DocumentUtil::createInputDocument)
            .collect(Collectors.toList());
    try {
        if (elasticClientLogger.isTraceEnabled()) {
            elasticClientLogger.debug(">>> add({})", jsonDocs);
        } else {
            elasticClientLogger.debug(">>> add({})", jsonDocs);
        }

        final BulkResponse response = this.elasticSearchClient.add(jsonDocs);
        elapsedTime.stop();
        return new IndexResult(elapsedTime.getTime()).setElapsedTime(elapsedTime.getTime());
    } catch (ElasticsearchException | IOException e) {
        log.error("Cannot index documents {}", jsonDocs, e);
        throw new SearchServerException("Cannot index documents", e);
    }
}
Example #22
Source File: IngestReplicaShardRequest.java From elasticsearch-helper with Apache License 2.0
@Override
public void writeTo(StreamOutput out) throws IOException {
    super.writeTo(out);
    out.writeString(index);
    timeout.writeTo(out);
    out.writeLong(ingestId);
    shardId.writeTo(out);
    out.writeVInt(actionRequests.size());
    for (ActionRequest<?> actionRequest : actionRequests) {
        if (actionRequest == null) {
            out.writeBoolean(false);
            continue;
        }
        out.writeBoolean(true);
        if (actionRequest instanceof IndexRequest) {
            out.writeBoolean(true);
        } else if (actionRequest instanceof DeleteRequest) {
            out.writeBoolean(false);
        } else {
            throw new ElasticsearchException("action request not supported: " + actionRequest.getClass().getName());
        }
        actionRequest.writeTo(out);
    }
}
Example #23
Source File: KerberosAuthenticationFailureHandler.java From elasticsearch-shield-kerberos-realm with Apache License 2.0
@Override
public ElasticsearchSecurityException exceptionProcessingRequest(final TransportMessage message, final Exception e) {
    final ElasticsearchSecurityException se = super.exceptionProcessingRequest(message, e);
    String outToken = "";
    if (e instanceof ElasticsearchException) {
        final ElasticsearchException kae = (ElasticsearchException) e;
        if (kae.getHeader("kerberos_out_token") != null) {
            outToken = " " + kae.getHeader("kerberos_out_token").get(0);
        }
    }
    se.addHeader(KrbConstants.WWW_AUTHENTICATE, KrbConstants.NEGOTIATE + outToken);
    if (logger.isDebugEnabled()) {
        logger.debug("exception for transport message: {}", e.toString());
    }
    return se;
}
Example #24
Source File: ElasticsearchContainer.java From logstash with Apache License 2.0
public Client createClient() {
    final AtomicReference<Client> elasticsearchClient = new AtomicReference<>();
    await().atMost(30, TimeUnit.SECONDS).pollDelay(1, TimeUnit.SECONDS).until(() -> {
        Client c = new TransportClient(ImmutableSettings.settingsBuilder().put("cluster.name", elasticsearchClusterName).build())
                .addTransportAddress(new InetSocketTransportAddress(getIpAddress(), 9300));
        try {
            c.admin().cluster().health(Requests.clusterHealthRequest("_all")).actionGet();
        } catch (ElasticsearchException e) {
            c.close();
            return false;
        }
        elasticsearchClient.set(c);
        return true;
    });
    assertEquals(elasticsearchClusterName,
            elasticsearchClient.get().admin().cluster().health(Requests.clusterHealthRequest("_all")).actionGet().getClusterName());
    return elasticsearchClient.get();
}
Example #25
Source File: TermVectorsRequest.java From Elasticsearch with Apache License 2.0
public static Map<String, String> readPerFieldAnalyzer(Map<String, Object> map) {
    Map<String, String> mapStrStr = new HashMap<>();
    for (Map.Entry<String, Object> e : map.entrySet()) {
        if (e.getValue() instanceof String) {
            mapStrStr.put(e.getKey(), (String) e.getValue());
        } else {
            throw new ElasticsearchException("expecting the analyzer at [{}] to be a String, but found [{}] instead", e.getKey(), e.getValue().getClass());
        }
    }
    return mapStrStr;
}
Example #26
Source File: TrendAction.java From foxtrot with Apache License 2.0
@Override
public ActionResponse execute(TrendRequest parameter) {
    SearchRequestBuilder searchRequestBuilder = getRequestBuilder(parameter);
    try {
        SearchResponse searchResponse = searchRequestBuilder.execute()
                .actionGet(getGetQueryTimeout());
        return getResponse(searchResponse, parameter);
    } catch (ElasticsearchException e) {
        throw FoxtrotExceptions.createQueryExecutionException(parameter, e);
    }
}
Example #27
Source File: AbstractGatherer.java From elasticsearch-gatherer with Apache License 2.0
public Gatherer start() throws ElasticSearchException {
    //this.logger = Loggers.getLogger(getClass(), settings.globalSettings(), riverName);
    this.bulkActions = settings.getAsInt("bulk_actions", 1000);
    this.bulkSize = settings.getAsBytesSize("bulk_size", new ByteSizeValue(5, ByteSizeUnit.MB));
    this.flushInterval = settings.getAsTime("flush_interval", TimeValue.timeValueSeconds(5));
    this.concurrentRequests = settings.getAsInt("concurrent_requests", 4);
    bulkProcessor = BulkProcessor.builder(client, new BulkListener())
            .setBulkActions(bulkActions)
            .setBulkSize(bulkSize)
            .setFlushInterval(flushInterval)
            .setConcurrentRequests(concurrentRequests)
            .build();
    return this;
}
Example #28
Source File: BulkNodeClient.java From elasticsearch-helper with Apache License 2.0
@Override
public BulkNodeClient bulkUpdate(UpdateRequest updateRequest) {
    if (closed) {
        throw new ElasticsearchException("client is closed");
    }
    try {
        if (metric != null) {
            metric.getCurrentIngest().inc(updateRequest.index(), updateRequest.type(), updateRequest.id());
        }
        bulkProcessor.add(updateRequest);
    } catch (Exception e) {
        throwable = e;
        closed = true;
        logger.error("bulk add of update request failed: " + e.getMessage(), e);
    }
    return this;
}
Example #29
Source File: AbstractTransportExportAction.java From elasticsearch-inout-plugin with Apache License 2.0
@Override
protected ShardExportResponse shardOperation(ShardExportRequest request) throws ElasticSearchException {
    IndexService indexService = indicesService.indexServiceSafe(request.index());
    IndexShard indexShard = indexService.shardSafe(request.shardId());
    SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), request.index(), request.shardId());
    ExportContext context = new ExportContext(0,
            new ShardSearchRequest().types(request.types()).filteringAliases(request.filteringAliases()),
            shardTarget, indexShard.searcher(), indexService, indexShard, scriptService, cacheRecycler, nodePath);
    ExportContext.setCurrent(context);
    try {
        BytesReference source = request.source();
        exportParser.parseSource(context, source);
        context.preProcess();
        exporter.check(context);
        try {
            if (context.explain()) {
                return new ShardExportResponse(shardTarget.nodeIdText(), request.index(), request.shardId(),
                        context.outputCmd(), context.outputCmdArray(), context.outputFile());
            } else {
                Exporter.Result res = exporter.execute(context);
                return new ShardExportResponse(shardTarget.nodeIdText(), request.index(), request.shardId(),
                        context.outputCmd(), context.outputCmdArray(), context.outputFile(),
                        res.outputResult.stdErr, res.outputResult.stdOut, res.outputResult.exit, res.numExported);
            }
        } catch (Exception e) {
            throw new QueryPhaseExecutionException(context, "failed to execute export", e);
        }
    } finally {
        // this will also release the index searcher
        context.release();
        SearchContext.removeCurrent();
    }
}
Example #30
Source File: AbstractTransportImportAction.java From elasticsearch-inout-plugin with Apache License 2.0
@Override
protected NodeImportResponse nodeOperation(NodeImportRequest request) throws ElasticSearchException {
    ImportContext context = new ImportContext(nodePath);
    BytesReference source = request.source();
    importParser.parseSource(context, source);
    Importer.Result result = importer.execute(context, request);
    return new NodeImportResponse(clusterService.state().nodes().localNode(), result);
}