org.elasticsearch.node.NodeClosedException Java Examples
The following examples show how to use org.elasticsearch.node.NodeClosedException. Each example is taken from an open-source project; the source file, project, and license are listed above each snippet, and the original source file can be consulted for the full context.
Example #1
Source File: TransportMasterNodeAction.java, from crate (Apache License 2.0)
private void retry(final Throwable failure, final Predicate<ClusterState> statePredicate) {
    observer.waitForNextChange(
        new ClusterStateObserver.Listener() {
            @Override
            public void onNewClusterState(ClusterState state) {
                doStart(state);
            }

            @Override
            public void onClusterServiceClose() {
                listener.onFailure(new NodeClosedException(clusterService.localNode()));
            }

            @Override
            public void onTimeout(TimeValue timeout) {
                logger.debug(() -> new ParameterizedMessage(
                    "timed out while retrying [{}] after failure (timeout [{}])", actionName, timeout), failure);
                listener.onFailure(new MasterNotDiscoveredException(failure));
            }
        }, statePredicate
    );
}
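Most of the examples on this page repeat the pattern shown above: wait for the next cluster state change, retry when it arrives, and fail the caller's ActionListener with a NodeClosedException when the local node is shutting down. The following minimal sketch shows that pattern in isolation. It is illustrative only, not code from any of the projects listed here; the class RetryOnNewStateExample and its parameters are our own, and the import paths follow recent Elasticsearch server versions (for example, ClusterService moved packages between releases), so exact signatures may differ in your version.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.node.NodeClosedException;

class RetryOnNewStateExample {

    /**
     * Waits for the next cluster state change and retries the given action.
     * If the local node closes while waiting, the listener fails with a
     * NodeClosedException, which callers commonly treat as retriable.
     */
    void retryOnNextClusterState(ClusterStateObserver observer,
                                 ClusterService clusterService,
                                 ActionListener<Void> listener,
                                 Runnable retry) {
        observer.waitForNextChange(new ClusterStateObserver.Listener() {
            @Override
            public void onNewClusterState(ClusterState state) {
                retry.run(); // the cluster changed, try the operation again
            }

            @Override
            public void onClusterServiceClose() {
                // The local node is closing; surface that as a NodeClosedException.
                listener.onFailure(new NodeClosedException(clusterService.localNode()));
            }

            @Override
            public void onTimeout(TimeValue timeout) {
                listener.onFailure(new IllegalStateException("timed out after " + timeout));
            }
        });
    }
}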
Example #2
Source File: ReplicationOperation.java, from crate (Apache License 2.0)
private void onNoLongerPrimary(Exception failure) {
    final Throwable cause = ExceptionsHelper.unwrapCause(failure);
    final boolean nodeIsClosing = cause instanceof NodeClosedException
        || (cause instanceof TransportException
            && "TransportService is closed stopped can't send request".equals(cause.getMessage()));
    final String message;
    if (nodeIsClosing) {
        message = String.format(Locale.ROOT,
            "node with primary [%s] is shutting down while failing replica shard", primary.routingEntry());
        // We prefer not to fail the primary to avoid unnecessary warning log
        // when the node with the primary shard is gracefully shutting down.
    } else {
        if (Assertions.ENABLED) {
            if (failure instanceof ShardStateAction.NoLongerPrimaryShardException == false) {
                throw new AssertionError("unexpected failure", failure);
            }
        }
        // we are no longer the primary, fail ourselves and start over
        message = String.format(Locale.ROOT,
            "primary shard [%s] was demoted while failing replica shard", primary.routingEntry());
        primary.failShard(message, failure);
    }
    finishAsFailed(new RetryOnPrimaryException(primary.routingEntry().shardId(), message, failure));
}
Example #3
Source File: TransportLeaderShardIngestAction.java, from elasticsearch-helper (Apache License 2.0)
private void retry(@Nullable final Throwable failure) {
    if (observer.isTimedOut()) {
        listener.onFailure(failure);
        return;
    }
    request.operationThreaded(true);
    observer.waitForNextChange(new ClusterStateObserver.Listener() {
        @Override
        public void onNewClusterState(ClusterState state) {
            doStart();
        }

        @Override
        public void onClusterServiceClose() {
            listener.onFailure(new NodeClosedException(clusterService.localNode()));
        }

        @Override
        public void onTimeout(TimeValue timeout) {
            if (doStart()) {
                return;
            }
            raiseTimeoutFailure(timeout, failure);
        }
    });
}
Example #4
Source File: TransportReplicaShardIngestAction.java, from elasticsearch-helper (Apache License 2.0)
private void retry(@Nullable final Throwable failure) {
    if (observer.isTimedOut()) {
        listener.onFailure(failure);
        return;
    }
    request.operationThreaded(true);
    observer.waitForNextChange(new ClusterStateObserver.Listener() {
        @Override
        public void onNewClusterState(ClusterState state) {
            doStart();
        }

        @Override
        public void onClusterServiceClose() {
            listener.onFailure(new NodeClosedException(clusterService.localNode()));
        }

        @Override
        public void onTimeout(TimeValue timeout) {
            if (doStart()) {
                return;
            }
            raiseTimeoutFailure(timeout, failure);
        }
    });
}
Example #5
Source File: TransportMasterNodeAction.java, from Elasticsearch (Apache License 2.0)
private void retry(final Throwable failure, final ClusterStateObserver.ChangePredicate changePredicate) {
    observer.waitForNextChange(
        new ClusterStateObserver.Listener() {
            @Override
            public void onNewClusterState(ClusterState state) {
                doStart();
            }

            @Override
            public void onClusterServiceClose() {
                listener.onFailure(new NodeClosedException(clusterService.localNode()));
            }

            @Override
            public void onTimeout(TimeValue timeout) {
                logger.debug("timed out while retrying [{}] after failure (timeout [{}])", failure, actionName, timeout);
                listener.onFailure(new MasterNotDiscoveredException(failure));
            }
        }, changePredicate
    );
}
Example #6
Source File: TransportReplicationAction.java, from crate (Apache License 2.0)
private void performAction(final DiscoveryNode node, final String action, final boolean isPrimaryAction,
                           final TransportRequest requestToPerform) {
    transportService.sendRequest(node, action, requestToPerform, transportOptions, new TransportResponseHandler<Response>() {

        @Override
        public Response read(StreamInput in) throws IOException {
            return TransportReplicationAction.this.read(in);
        }

        @Override
        public String executor() {
            return ThreadPool.Names.SAME;
        }

        @Override
        public void handleResponse(Response response) {
            finishOnSuccess(response);
        }

        @Override
        public void handleException(TransportException exp) {
            try {
                // if we got disconnected from the node, or the node / shard is not in the right state (being closed)
                final Throwable cause = exp.unwrapCause();
                if (cause instanceof ConnectTransportException || cause instanceof NodeClosedException ||
                        (isPrimaryAction && retryPrimaryException(cause))) {
                    logger.trace(() -> new ParameterizedMessage(
                        "received an error from node [{}] for request [{}], scheduling a retry",
                        node.getId(), requestToPerform), exp);
                    retry(exp);
                } else {
                    finishAsFailed(exp);
                }
            } catch (Exception e) {
                e.addSuppressed(exp);
                finishWithUnexpectedFailure(e);
            }
        }
    });
}
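The handleException logic above reduces to one question: does the failure only tell us that the target node went away or is closing, in which case a retry may succeed? A hedged sketch of that check as a standalone helper is shown below; the class and method names are ours, not part of the Elasticsearch API, while ConnectTransportException, NodeClosedException, and unwrapCause() are standard Elasticsearch types and methods.

import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.TransportException;

final class ReplicationRetryPolicy {

    private ReplicationRetryPolicy() {}

    /**
     * Returns true when the transport failure indicates the target node
     * disconnected or is shutting down, i.e. the request itself may still
     * succeed if it is retried against the cluster later.
     */
    static boolean isRetryable(TransportException exp) {
        Throwable cause = exp.unwrapCause();
        return cause instanceof ConnectTransportException
            || cause instanceof NodeClosedException;
    }
}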
Example #7
Source File: TransportReplicationAction.java, from crate (Apache License 2.0)
@Override
public void onFailure(Exception e) {
    if (e instanceof RetryOnReplicaException) {
        logger.trace(
            () -> new ParameterizedMessage(
                "Retrying operation on replica, action [{}], request [{}]",
                transportReplicaAction,
                request
            ), e
        );
        request.onRetry();
        observer.waitForNextChange(new ClusterStateObserver.Listener() {
            @Override
            public void onNewClusterState(ClusterState state) {
                // Forking a thread on local node via transport service so that custom transport service have an
                // opportunity to execute custom logic before the replica operation begins
                String extraMessage = "action [" + transportReplicaAction + "], request[" + request + "]";
                TransportChannelResponseHandler<TransportResponse.Empty> handler =
                    new TransportChannelResponseHandler<>(logger, channel, extraMessage, in -> TransportResponse.Empty.INSTANCE);
                transportService.sendRequest(clusterService.localNode(), transportReplicaAction,
                    new ConcreteReplicaRequest<>(request, targetAllocationID, primaryTerm, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes),
                    handler);
            }

            @Override
            public void onClusterServiceClose() {
                responseWithFailure(new NodeClosedException(clusterService.localNode()));
            }

            @Override
            public void onTimeout(TimeValue timeout) {
                throw new AssertionError("Cannot happen: there is not timeout");
            }
        });
    } else {
        responseWithFailure(e);
    }
}
Example #8
Source File: TransportReplicationAction.java, from Elasticsearch (Apache License 2.0)
@Override
public void onFailure(Throwable t) {
    if (t instanceof RetryOnReplicaException) {
        logger.trace("Retrying operation on replica, action [{}], request [{}]", t, transportReplicaAction, request);
        observer.waitForNextChange(new ClusterStateObserver.Listener() {
            @Override
            public void onNewClusterState(ClusterState state) {
                // Forking a thread on local node via transport service so that custom transport service have an
                // opportunity to execute custom logic before the replica operation begins
                String extraMessage = "action [" + transportReplicaAction + "], request[" + request + "]";
                TransportChannelResponseHandler<TransportResponse.Empty> handler =
                    TransportChannelResponseHandler.emptyResponseHandler(logger, channel, extraMessage);
                transportService.sendRequest(clusterService.localNode(), transportReplicaAction, request, handler);
            }

            @Override
            public void onClusterServiceClose() {
                responseWithFailure(new NodeClosedException(clusterService.localNode()));
            }

            @Override
            public void onTimeout(TimeValue timeout) {
                throw new AssertionError("Cannot happen: there is not timeout");
            }
        });
    } else {
        try {
            failReplicaIfNeeded(t);
        } catch (Throwable unexpected) {
            logger.error("{} unexpected error while failing replica", unexpected, request.shardId().id());
        } finally {
            responseWithFailure(t);
        }
    }
}
Example #9
Source File: TransportReplicationAction.java, from Elasticsearch (Apache License 2.0)
private void performAction(final DiscoveryNode node, final String action, final boolean isPrimaryAction) {
    transportService.sendRequest(node, action, request, transportOptions, new BaseTransportResponseHandler<Response>() {

        @Override
        public Response newInstance() {
            return newResponseInstance();
        }

        @Override
        public String executor() {
            return ThreadPool.Names.SAME;
        }

        @Override
        public void handleResponse(Response response) {
            finishOnSuccess(response);
        }

        @Override
        public void handleException(TransportException exp) {
            try {
                // if we got disconnected from the node, or the node / shard is not in the right state (being closed)
                if (exp.unwrapCause() instanceof ConnectTransportException || exp.unwrapCause() instanceof NodeClosedException ||
                        (isPrimaryAction && retryPrimaryException(exp.unwrapCause()))) {
                    logger.trace("received an error from node [{}] for request [{}], scheduling a retry", exp, node.id(), request);
                    request.setCanHaveDuplicates();
                    retry(exp);
                } else {
                    finishAsFailed(exp);
                }
            } catch (Throwable t) {
                finishWithUnexpectedFailure(t);
            }
        }
    });
}
Example #10
Source File: TestPutElasticsearch.java, from nifi (Apache License 2.0)
@Test
public void testPutElasticsearchOnTriggerWithExceptions() throws IOException {
    PutElasticsearchTestProcessor processor = new PutElasticsearchTestProcessor(false);
    runner = TestRunners.newTestRunner(processor);
    runner.setProperty(AbstractElasticsearchTransportClientProcessor.CLUSTER_NAME, "elasticsearch");
    runner.setProperty(AbstractElasticsearchTransportClientProcessor.HOSTS, "127.0.0.1:9300");
    runner.setProperty(AbstractElasticsearchTransportClientProcessor.PING_TIMEOUT, "5s");
    runner.setProperty(AbstractElasticsearchTransportClientProcessor.SAMPLER_INTERVAL, "5s");
    runner.setProperty(PutElasticsearch.INDEX, "doc");
    runner.setProperty(PutElasticsearch.TYPE, "status");
    runner.setProperty(PutElasticsearch.ID_ATTRIBUTE, "doc_id");

    // No Node Available exception
    processor.setExceptionToThrow(new NoNodeAvailableException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652140"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch.REL_RETRY, 1);
    runner.clearTransferState();

    // Elasticsearch Timeout exception
    processor.setExceptionToThrow(new ElasticsearchTimeoutException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652141"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch.REL_RETRY, 1);
    runner.clearTransferState();

    // Receive Timeout Transport exception
    processor.setExceptionToThrow(new ReceiveTimeoutTransportException(mock(StreamInput.class)));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652142"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch.REL_RETRY, 1);
    runner.clearTransferState();

    // Node Closed exception
    processor.setExceptionToThrow(new NodeClosedException(mock(StreamInput.class)));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652143"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch.REL_RETRY, 1);
    runner.clearTransferState();

    // Elasticsearch Parse exception
    processor.setExceptionToThrow(new ElasticsearchParseException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652144"); }});
    runner.run(1, true, true);
    // This test generates an exception on execute(), routes to failure
    runner.assertTransferCount(PutElasticsearch.REL_FAILURE, 1);
}
Example #11
Source File: TestFetchElasticsearch5.java, from nifi (Apache License 2.0)
@Test
public void testFetchElasticsearch5OnTriggerWithExceptions() throws IOException {
    FetchElasticsearch5TestProcessor processor = new FetchElasticsearch5TestProcessor(true);
    runner = TestRunners.newTestRunner(processor);
    runner.setProperty(AbstractElasticsearch5TransportClientProcessor.CLUSTER_NAME, "elasticsearch");
    runner.setProperty(AbstractElasticsearch5TransportClientProcessor.HOSTS, "127.0.0.1:9300");
    runner.setProperty(AbstractElasticsearch5TransportClientProcessor.PING_TIMEOUT, "5s");
    runner.setProperty(AbstractElasticsearch5TransportClientProcessor.SAMPLER_INTERVAL, "5s");
    runner.setProperty(FetchElasticsearch5.INDEX, "doc");
    runner.setProperty(FetchElasticsearch5.TYPE, "status");
    runner.setProperty(FetchElasticsearch5.DOC_ID, "${doc_id}");

    // No Node Available exception
    processor.setExceptionToThrow(new NoNodeAvailableException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652140"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch5.REL_RETRY, 1);
    runner.clearTransferState();

    // Elasticsearch5 Timeout exception
    processor.setExceptionToThrow(new ElasticsearchTimeoutException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652141"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch5.REL_RETRY, 1);
    runner.clearTransferState();

    // Receive Timeout Transport exception
    processor.setExceptionToThrow(new ReceiveTimeoutTransportException(mock(StreamInput.class)));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652141"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch5.REL_RETRY, 1);
    runner.clearTransferState();

    // Node Closed exception
    processor.setExceptionToThrow(new NodeClosedException(mock(StreamInput.class)));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652141"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch5.REL_RETRY, 1);
    runner.clearTransferState();

    // Elasticsearch5 Parse exception
    processor.setExceptionToThrow(new ElasticsearchParseException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652141"); }});
    runner.run(1, true, true);
    // This test generates an exception on execute(), routes to failure
    runner.assertTransferCount(FetchElasticsearch5.REL_FAILURE, 1);
}
Example #12
Source File: TestPutElasticsearch5.java, from nifi (Apache License 2.0)
@Test
public void testPutElasticsearch5OnTriggerWithExceptions() throws IOException {
    PutElasticsearch5TestProcessor processor = new PutElasticsearch5TestProcessor(false);
    runner = TestRunners.newTestRunner(processor);
    runner.setProperty(AbstractElasticsearch5TransportClientProcessor.CLUSTER_NAME, "elasticsearch");
    runner.setProperty(AbstractElasticsearch5TransportClientProcessor.HOSTS, "127.0.0.1:9300");
    runner.setProperty(AbstractElasticsearch5TransportClientProcessor.PING_TIMEOUT, "5s");
    runner.setProperty(AbstractElasticsearch5TransportClientProcessor.SAMPLER_INTERVAL, "5s");
    runner.setProperty(PutElasticsearch5.INDEX, "doc");
    runner.setProperty(PutElasticsearch5.TYPE, "status");
    runner.setProperty(PutElasticsearch5.ID_ATTRIBUTE, "doc_id");

    // No Node Available exception
    processor.setExceptionToThrow(new NoNodeAvailableException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652140"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch5.REL_RETRY, 1);
    runner.clearTransferState();

    // Elasticsearch5 Timeout exception
    processor.setExceptionToThrow(new ElasticsearchTimeoutException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652141"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch5.REL_RETRY, 1);
    runner.clearTransferState();

    // Receive Timeout Transport exception
    processor.setExceptionToThrow(new ReceiveTimeoutTransportException(mock(StreamInput.class)));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652142"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch5.REL_RETRY, 1);
    runner.clearTransferState();

    // Node Closed exception
    processor.setExceptionToThrow(new NodeClosedException(mock(StreamInput.class)));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652143"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch5.REL_RETRY, 1);
    runner.clearTransferState();

    // Elasticsearch5 Parse exception
    processor.setExceptionToThrow(new ElasticsearchParseException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652144"); }});
    runner.run(1, true, true);
    // This test generates an exception on execute(), routes to failure
    runner.assertTransferCount(PutElasticsearch5.REL_FAILURE, 1);
}
Example #13
Source File: FetchElasticsearch5.java, from nifi (Apache License 2.0)
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    synchronized (esClient) {
        if (esClient.get() == null) {
            super.setup(context);
        }
    }
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final String index = context.getProperty(INDEX).evaluateAttributeExpressions(flowFile).getValue();
    final String docId = context.getProperty(DOC_ID).evaluateAttributeExpressions(flowFile).getValue();
    final String docType = context.getProperty(TYPE).evaluateAttributeExpressions(flowFile).getValue();
    final Charset charset = Charset.forName(context.getProperty(CHARSET).evaluateAttributeExpressions(flowFile).getValue());

    final ComponentLog logger = getLogger();

    try {
        logger.debug("Fetching {}/{}/{} from Elasticsearch", new Object[]{index, docType, docId});
        GetRequestBuilder getRequestBuilder = esClient.get().prepareGet(index, docType, docId);
        final GetResponse getResponse = getRequestBuilder.execute().actionGet();

        if (getResponse == null || !getResponse.isExists()) {
            logger.debug("Failed to read {}/{}/{} from Elasticsearch: Document not found", new Object[]{index, docType, docId});

            // We couldn't find the document, so penalize it and send it to "not found"
            flowFile = session.penalize(flowFile);
            session.transfer(flowFile, REL_NOT_FOUND);
        } else {
            flowFile = session.putAllAttributes(flowFile, new HashMap<String, String>() {{
                put("filename", docId);
                put("es.index", index);
                put("es.type", docType);
            }});
            flowFile = session.write(flowFile, new OutputStreamCallback() {
                @Override
                public void process(OutputStream out) throws IOException {
                    out.write(getResponse.getSourceAsString().getBytes(charset));
                }
            });
            logger.debug("Elasticsearch document " + docId + " fetched, routing to success");

            // The document is JSON, so update the MIME type of the flow file
            flowFile = session.putAttribute(flowFile, CoreAttributes.MIME_TYPE.key(), "application/json");
            session.getProvenanceReporter().fetch(flowFile, getResponse.remoteAddress().getAddress());
            session.transfer(flowFile, REL_SUCCESS);
        }
    } catch (NoNodeAvailableException
            | ElasticsearchTimeoutException
            | ReceiveTimeoutTransportException
            | NodeClosedException exceptionToRetry) {
        logger.error("Failed to read into Elasticsearch due to {}, this may indicate an error in configuration "
                        + "(hosts, username/password, etc.), or this issue may be transient. Routing to retry",
                new Object[]{exceptionToRetry.getLocalizedMessage()}, exceptionToRetry);
        session.transfer(flowFile, REL_RETRY);
        context.yield();
    } catch (Exception e) {
        logger.error("Failed to read {} from Elasticsearch due to {}", new Object[]{flowFile, e.getLocalizedMessage()}, e);
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
    }
}
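The catch clauses in the NiFi processors on this page all draw the same line: NoNodeAvailableException, ElasticsearchTimeoutException, ReceiveTimeoutTransportException, and NodeClosedException are treated as potentially transient and routed to retry, while anything else (such as ElasticsearchParseException) goes to failure. The sketch below isolates that triage; the helper class name and enum are our own invention, while the exception types are the real Elasticsearch classes used above.

import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.transport.ReceiveTimeoutTransportException;

final class ElasticsearchFailureTriage {

    enum Route { RETRY, FAILURE }

    private ElasticsearchFailureTriage() {}

    /** Classifies an Elasticsearch client failure the same way the processors above do. */
    static Route routeFor(Exception e) {
        if (e instanceof NoNodeAvailableException
                || e instanceof ElasticsearchTimeoutException
                || e instanceof ReceiveTimeoutTransportException
                || e instanceof NodeClosedException) {
            return Route.RETRY;   // likely transient: misconfiguration, timeout, or a node restarting
        }
        return Route.FAILURE;     // e.g. ElasticsearchParseException: retrying will not help
    }
}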
Example #14
Source File: TestFetchElasticsearch.java, from nifi (Apache License 2.0)
@Test
public void testFetchElasticsearchOnTriggerWithExceptions() throws IOException {
    FetchElasticsearchTestProcessor processor = new FetchElasticsearchTestProcessor(true);
    runner = TestRunners.newTestRunner(processor);
    runner.setProperty(AbstractElasticsearchTransportClientProcessor.CLUSTER_NAME, "elasticsearch");
    runner.setProperty(AbstractElasticsearchTransportClientProcessor.HOSTS, "127.0.0.1:9300");
    runner.setProperty(AbstractElasticsearchTransportClientProcessor.PING_TIMEOUT, "5s");
    runner.setProperty(AbstractElasticsearchTransportClientProcessor.SAMPLER_INTERVAL, "5s");
    runner.setProperty(FetchElasticsearch.INDEX, "doc");
    runner.setProperty(FetchElasticsearch.TYPE, "status");
    runner.setProperty(FetchElasticsearch.DOC_ID, "${doc_id}");

    // No Node Available exception
    processor.setExceptionToThrow(new NoNodeAvailableException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652140"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch.REL_RETRY, 1);
    runner.clearTransferState();

    // Elasticsearch Timeout exception
    processor.setExceptionToThrow(new ElasticsearchTimeoutException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652141"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch.REL_RETRY, 1);
    runner.clearTransferState();

    // Receive Timeout Transport exception
    processor.setExceptionToThrow(new ReceiveTimeoutTransportException(mock(StreamInput.class)));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652141"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch.REL_RETRY, 1);
    runner.clearTransferState();

    // Node Closed exception
    processor.setExceptionToThrow(new NodeClosedException(mock(StreamInput.class)));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652141"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch.REL_RETRY, 1);
    runner.clearTransferState();

    // Elasticsearch Parse exception
    processor.setExceptionToThrow(new ElasticsearchParseException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652141"); }});
    runner.run(1, true, true);
    // This test generates an exception on execute(), routes to failure
    runner.assertTransferCount(FetchElasticsearch.REL_FAILURE, 1);
}
Example #15
Source File: FetchElasticsearch.java, from nifi (Apache License 2.0)
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final String index = context.getProperty(INDEX).evaluateAttributeExpressions(flowFile).getValue();
    final String docId = context.getProperty(DOC_ID).evaluateAttributeExpressions(flowFile).getValue();
    final String docType = context.getProperty(TYPE).evaluateAttributeExpressions(flowFile).getValue();
    final Charset charset = Charset.forName(context.getProperty(CHARSET).evaluateAttributeExpressions(flowFile).getValue());

    final ComponentLog logger = getLogger();

    try {
        logger.debug("Fetching {}/{}/{} from Elasticsearch", new Object[]{index, docType, docId});
        final long startNanos = System.nanoTime();
        GetRequestBuilder getRequestBuilder = esClient.get().prepareGet(index, docType, docId);
        if (authToken != null) {
            getRequestBuilder.putHeader("Authorization", authToken);
        }
        final GetResponse getResponse = getRequestBuilder.execute().actionGet();

        if (getResponse == null || !getResponse.isExists()) {
            logger.debug("Failed to read {}/{}/{} from Elasticsearch: Document not found", new Object[]{index, docType, docId});

            // We couldn't find the document, so penalize it and send it to "not found"
            flowFile = session.penalize(flowFile);
            session.transfer(flowFile, REL_NOT_FOUND);
        } else {
            flowFile = session.putAttribute(flowFile, "filename", docId);
            flowFile = session.putAttribute(flowFile, "es.index", index);
            flowFile = session.putAttribute(flowFile, "es.type", docType);
            flowFile = session.write(flowFile, new OutputStreamCallback() {
                @Override
                public void process(OutputStream out) throws IOException {
                    out.write(getResponse.getSourceAsString().getBytes(charset));
                }
            });
            logger.debug("Elasticsearch document " + docId + " fetched, routing to success");
            final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
            final String uri = context.getProperty(HOSTS).evaluateAttributeExpressions().getValue()
                    + "/" + index + "/" + docType + "/" + docId;
            session.getProvenanceReporter().fetch(flowFile, uri, millis);
            session.transfer(flowFile, REL_SUCCESS);
        }
    } catch (NoNodeAvailableException
            | ElasticsearchTimeoutException
            | ReceiveTimeoutTransportException
            | NodeClosedException exceptionToRetry) {
        logger.error("Failed to read into Elasticsearch due to {}, this may indicate an error in configuration "
                        + "(hosts, username/password, etc.). Routing to retry",
                new Object[]{exceptionToRetry.getLocalizedMessage()}, exceptionToRetry);
        session.transfer(flowFile, REL_RETRY);
        context.yield();
    } catch (Exception e) {
        logger.error("Failed to read {} from Elasticsearch due to {}", new Object[]{flowFile, e.getLocalizedMessage()}, e);
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
    }
}
Example #16
Source File: TestFetchElasticsearch5.java, from localization_nifi (Apache License 2.0)
@Test
public void testFetchElasticsearch5OnTriggerWithExceptions() throws IOException {
    FetchElasticsearch5TestProcessor processor = new FetchElasticsearch5TestProcessor(true);
    runner = TestRunners.newTestRunner(processor);
    runner.setProperty(AbstractElasticsearch5TransportClientProcessor.CLUSTER_NAME, "elasticsearch");
    runner.setProperty(AbstractElasticsearch5TransportClientProcessor.HOSTS, "127.0.0.1:9300");
    runner.setProperty(AbstractElasticsearch5TransportClientProcessor.PING_TIMEOUT, "5s");
    runner.setProperty(AbstractElasticsearch5TransportClientProcessor.SAMPLER_INTERVAL, "5s");
    runner.setProperty(FetchElasticsearch5.INDEX, "doc");
    runner.setProperty(FetchElasticsearch5.TYPE, "status");
    runner.setValidateExpressionUsage(true);
    runner.setProperty(FetchElasticsearch5.DOC_ID, "${doc_id}");

    // No Node Available exception
    processor.setExceptionToThrow(new NoNodeAvailableException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652140"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch5.REL_RETRY, 1);
    runner.clearTransferState();

    // Elasticsearch5 Timeout exception
    processor.setExceptionToThrow(new ElasticsearchTimeoutException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652141"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch5.REL_RETRY, 1);
    runner.clearTransferState();

    // Receive Timeout Transport exception
    processor.setExceptionToThrow(new ReceiveTimeoutTransportException(mock(StreamInput.class)));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652141"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch5.REL_RETRY, 1);
    runner.clearTransferState();

    // Node Closed exception
    processor.setExceptionToThrow(new NodeClosedException(mock(StreamInput.class)));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652141"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch5.REL_RETRY, 1);
    runner.clearTransferState();

    // Elasticsearch5 Parse exception
    processor.setExceptionToThrow(new ElasticsearchParseException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652141"); }});
    runner.run(1, true, true);
    // This test generates an exception on execute(), routes to failure
    runner.assertTransferCount(FetchElasticsearch5.REL_FAILURE, 1);
}
Example #17
Source File: TestPutElasticsearch5.java, from localization_nifi (Apache License 2.0)
@Test
public void testPutElasticsearch5OnTriggerWithExceptions() throws IOException {
    PutElasticsearch5TestProcessor processor = new PutElasticsearch5TestProcessor(false);
    runner = TestRunners.newTestRunner(processor);
    runner.setProperty(AbstractElasticsearch5TransportClientProcessor.CLUSTER_NAME, "elasticsearch");
    runner.setProperty(AbstractElasticsearch5TransportClientProcessor.HOSTS, "127.0.0.1:9300");
    runner.setProperty(AbstractElasticsearch5TransportClientProcessor.PING_TIMEOUT, "5s");
    runner.setProperty(AbstractElasticsearch5TransportClientProcessor.SAMPLER_INTERVAL, "5s");
    runner.setProperty(PutElasticsearch5.INDEX, "doc");
    runner.setProperty(PutElasticsearch5.TYPE, "status");
    runner.setValidateExpressionUsage(true);
    runner.setProperty(PutElasticsearch5.ID_ATTRIBUTE, "doc_id");

    // No Node Available exception
    processor.setExceptionToThrow(new NoNodeAvailableException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652140"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch5.REL_RETRY, 1);
    runner.clearTransferState();

    // Elasticsearch5 Timeout exception
    processor.setExceptionToThrow(new ElasticsearchTimeoutException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652141"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch5.REL_RETRY, 1);
    runner.clearTransferState();

    // Receive Timeout Transport exception
    processor.setExceptionToThrow(new ReceiveTimeoutTransportException(mock(StreamInput.class)));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652142"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch5.REL_RETRY, 1);
    runner.clearTransferState();

    // Node Closed exception
    processor.setExceptionToThrow(new NodeClosedException(mock(StreamInput.class)));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652143"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch5.REL_RETRY, 1);
    runner.clearTransferState();

    // Elasticsearch5 Parse exception
    processor.setExceptionToThrow(new ElasticsearchParseException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652144"); }});
    runner.run(1, true, true);
    // This test generates an exception on execute(), routes to failure
    runner.assertTransferCount(PutElasticsearch5.REL_FAILURE, 1);
}
Example #18
Source File: FetchElasticsearch5.java, from localization_nifi (Apache License 2.0)
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    synchronized (esClient) {
        if (esClient.get() == null) {
            super.setup(context);
        }
    }
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final String index = context.getProperty(INDEX).evaluateAttributeExpressions(flowFile).getValue();
    final String docId = context.getProperty(DOC_ID).evaluateAttributeExpressions(flowFile).getValue();
    final String docType = context.getProperty(TYPE).evaluateAttributeExpressions(flowFile).getValue();
    final Charset charset = Charset.forName(context.getProperty(CHARSET).evaluateAttributeExpressions(flowFile).getValue());

    final ComponentLog logger = getLogger();

    try {
        logger.debug("Fetching {}/{}/{} from Elasticsearch", new Object[]{index, docType, docId});
        GetRequestBuilder getRequestBuilder = esClient.get().prepareGet(index, docType, docId);
        final GetResponse getResponse = getRequestBuilder.execute().actionGet();

        if (getResponse == null || !getResponse.isExists()) {
            logger.warn("Failed to read {}/{}/{} from Elasticsearch: Document not found", new Object[]{index, docType, docId});

            // We couldn't find the document, so penalize it and send it to "not found"
            flowFile = session.penalize(flowFile);
            session.transfer(flowFile, REL_NOT_FOUND);
        } else {
            flowFile = session.putAllAttributes(flowFile, new HashMap<String, String>() {{
                put("filename", docId);
                put("es.index", index);
                put("es.type", docType);
            }});
            flowFile = session.write(flowFile, new OutputStreamCallback() {
                @Override
                public void process(OutputStream out) throws IOException {
                    out.write(getResponse.getSourceAsString().getBytes(charset));
                }
            });
            logger.debug("Elasticsearch document " + docId + " fetched, routing to success");

            // The document is JSON, so update the MIME type of the flow file
            flowFile = session.putAttribute(flowFile, CoreAttributes.MIME_TYPE.key(), "application/json");
            session.getProvenanceReporter().fetch(flowFile, getResponse.remoteAddress().getAddress());
            session.transfer(flowFile, REL_SUCCESS);
        }
    } catch (NoNodeAvailableException
            | ElasticsearchTimeoutException
            | ReceiveTimeoutTransportException
            | NodeClosedException exceptionToRetry) {
        logger.error("Failed to read into Elasticsearch due to {}, this may indicate an error in configuration "
                        + "(hosts, username/password, etc.), or this issue may be transient. Routing to retry",
                new Object[]{exceptionToRetry.getLocalizedMessage()}, exceptionToRetry);
        session.transfer(flowFile, REL_RETRY);
        context.yield();
    } catch (Exception e) {
        logger.error("Failed to read {} from Elasticsearch due to {}", new Object[]{flowFile, e.getLocalizedMessage()}, e);
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
    }
}
Example #19
Source File: TestFetchElasticsearch.java, from localization_nifi (Apache License 2.0)
@Test
public void testFetchElasticsearchOnTriggerWithExceptions() throws IOException {
    FetchElasticsearchTestProcessor processor = new FetchElasticsearchTestProcessor(true);
    runner = TestRunners.newTestRunner(processor);
    runner.setProperty(AbstractElasticsearchTransportClientProcessor.CLUSTER_NAME, "elasticsearch");
    runner.setProperty(AbstractElasticsearchTransportClientProcessor.HOSTS, "127.0.0.1:9300");
    runner.setProperty(AbstractElasticsearchTransportClientProcessor.PING_TIMEOUT, "5s");
    runner.setProperty(AbstractElasticsearchTransportClientProcessor.SAMPLER_INTERVAL, "5s");
    runner.setProperty(FetchElasticsearch.INDEX, "doc");
    runner.setProperty(FetchElasticsearch.TYPE, "status");
    runner.setValidateExpressionUsage(true);
    runner.setProperty(FetchElasticsearch.DOC_ID, "${doc_id}");

    // No Node Available exception
    processor.setExceptionToThrow(new NoNodeAvailableException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652140"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch.REL_RETRY, 1);
    runner.clearTransferState();

    // Elasticsearch Timeout exception
    processor.setExceptionToThrow(new ElasticsearchTimeoutException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652141"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch.REL_RETRY, 1);
    runner.clearTransferState();

    // Receive Timeout Transport exception
    processor.setExceptionToThrow(new ReceiveTimeoutTransportException(mock(StreamInput.class)));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652141"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch.REL_RETRY, 1);
    runner.clearTransferState();

    // Node Closed exception
    processor.setExceptionToThrow(new NodeClosedException(mock(StreamInput.class)));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652141"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch.REL_RETRY, 1);
    runner.clearTransferState();

    // Elasticsearch Parse exception
    processor.setExceptionToThrow(new ElasticsearchParseException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652141"); }});
    runner.run(1, true, true);
    // This test generates an exception on execute(), routes to failure
    runner.assertTransferCount(FetchElasticsearch.REL_FAILURE, 1);
}
Example #20
Source File: TestPutElasticsearch.java, from localization_nifi (Apache License 2.0)
@Test
public void testPutElasticsearchOnTriggerWithExceptions() throws IOException {
    PutElasticsearchTestProcessor processor = new PutElasticsearchTestProcessor(false);
    runner = TestRunners.newTestRunner(processor);
    runner.setProperty(AbstractElasticsearchTransportClientProcessor.CLUSTER_NAME, "elasticsearch");
    runner.setProperty(AbstractElasticsearchTransportClientProcessor.HOSTS, "127.0.0.1:9300");
    runner.setProperty(AbstractElasticsearchTransportClientProcessor.PING_TIMEOUT, "5s");
    runner.setProperty(AbstractElasticsearchTransportClientProcessor.SAMPLER_INTERVAL, "5s");
    runner.setProperty(PutElasticsearch.INDEX, "doc");
    runner.setProperty(PutElasticsearch.TYPE, "status");
    runner.setValidateExpressionUsage(true);
    runner.setProperty(PutElasticsearch.ID_ATTRIBUTE, "doc_id");

    // No Node Available exception
    processor.setExceptionToThrow(new NoNodeAvailableException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652140"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch.REL_RETRY, 1);
    runner.clearTransferState();

    // Elasticsearch Timeout exception
    processor.setExceptionToThrow(new ElasticsearchTimeoutException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652141"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch.REL_RETRY, 1);
    runner.clearTransferState();

    // Receive Timeout Transport exception
    processor.setExceptionToThrow(new ReceiveTimeoutTransportException(mock(StreamInput.class)));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652142"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch.REL_RETRY, 1);
    runner.clearTransferState();

    // Node Closed exception
    processor.setExceptionToThrow(new NodeClosedException(mock(StreamInput.class)));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652143"); }});
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch.REL_RETRY, 1);
    runner.clearTransferState();

    // Elasticsearch Parse exception
    processor.setExceptionToThrow(new ElasticsearchParseException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {{ put("doc_id", "28039652144"); }});
    runner.run(1, true, true);
    // This test generates an exception on execute(), routes to failure
    runner.assertTransferCount(PutElasticsearch.REL_FAILURE, 1);
}
Example #21
Source File: FetchElasticsearch.java, from localization_nifi (Apache License 2.0)
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final String index = context.getProperty(INDEX).evaluateAttributeExpressions(flowFile).getValue();
    final String docId = context.getProperty(DOC_ID).evaluateAttributeExpressions(flowFile).getValue();
    final String docType = context.getProperty(TYPE).evaluateAttributeExpressions(flowFile).getValue();
    final Charset charset = Charset.forName(context.getProperty(CHARSET).evaluateAttributeExpressions(flowFile).getValue());

    final ComponentLog logger = getLogger();

    try {
        logger.debug("Fetching {}/{}/{} from Elasticsearch", new Object[]{index, docType, docId});
        final long startNanos = System.nanoTime();
        GetRequestBuilder getRequestBuilder = esClient.get().prepareGet(index, docType, docId);
        if (authToken != null) {
            getRequestBuilder.putHeader("Authorization", authToken);
        }
        final GetResponse getResponse = getRequestBuilder.execute().actionGet();

        if (getResponse == null || !getResponse.isExists()) {
            logger.warn("Failed to read {}/{}/{} from Elasticsearch: Document not found", new Object[]{index, docType, docId});

            // We couldn't find the document, so penalize it and send it to "not found"
            flowFile = session.penalize(flowFile);
            session.transfer(flowFile, REL_NOT_FOUND);
        } else {
            flowFile = session.putAttribute(flowFile, "filename", docId);
            flowFile = session.putAttribute(flowFile, "es.index", index);
            flowFile = session.putAttribute(flowFile, "es.type", docType);
            flowFile = session.write(flowFile, new OutputStreamCallback() {
                @Override
                public void process(OutputStream out) throws IOException {
                    out.write(getResponse.getSourceAsString().getBytes(charset));
                }
            });
            logger.debug("Elasticsearch document " + docId + " fetched, routing to success");
            final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
            final String uri = context.getProperty(HOSTS).evaluateAttributeExpressions().getValue()
                    + "/" + index + "/" + docType + "/" + docId;
            session.getProvenanceReporter().fetch(flowFile, uri, millis);
            session.transfer(flowFile, REL_SUCCESS);
        }
    } catch (NoNodeAvailableException
            | ElasticsearchTimeoutException
            | ReceiveTimeoutTransportException
            | NodeClosedException exceptionToRetry) {
        logger.error("Failed to read into Elasticsearch due to {}, this may indicate an error in configuration "
                        + "(hosts, username/password, etc.). Routing to retry",
                new Object[]{exceptionToRetry.getLocalizedMessage()}, exceptionToRetry);
        session.transfer(flowFile, REL_RETRY);
        context.yield();
    } catch (Exception e) {
        logger.error("Failed to read {} from Elasticsearch due to {}", new Object[]{flowFile, e.getLocalizedMessage()}, e);
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
    }
}
Example #22
Source File: AnomalyResultTransportAction.java, from anomaly-detection (Apache License 2.0)
/**
 * Check if the input exception indicates connection issues.
 *
 * @param e exception
 * @return true if we get disconnected from the node or the node is not in the
 *         right state (being closed) or transport request times out (sent from TimeoutHandler.run)
 */
private boolean hasConnectionIssue(Throwable e) {
    return e instanceof ConnectTransportException
        || e instanceof NodeClosedException
        || e instanceof ReceiveTimeoutTransportException;
}
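A predicate like hasConnectionIssue is typically used to decide whether an operation is worth retrying at all. The following hedged sketch shows one way such a check could gate a bounded retry; the retry loop, class name, and attempt limit are our own illustration, not code from the anomaly-detection project, while the exception types are the real Elasticsearch classes.

import java.util.concurrent.Callable;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.ReceiveTimeoutTransportException;

final class ConnectionAwareRetry {

    private ConnectionAwareRetry() {}

    /** Same classification as the example above: connectivity problems only. */
    static boolean hasConnectionIssue(Throwable e) {
        return e instanceof ConnectTransportException
            || e instanceof NodeClosedException
            || e instanceof ReceiveTimeoutTransportException;
    }

    /** Runs the action, retrying only while the failure looks like a connection issue. */
    static <T> T runWithRetry(Callable<T> action, int maxAttempts) throws Exception {
        Exception last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                return action.call();
            } catch (Exception e) {
                if (!hasConnectionIssue(e)) {
                    throw e;    // non-transient failure: fail fast
                }
                last = e;       // transient connectivity failure: try again
            }
        }
        throw last;             // exhausted the retry budget
    }
}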