org.elasticsearch.common.util.concurrent.EsRejectedExecutionException Java Examples
The following examples show how to use
org.elasticsearch.common.util.concurrent.EsRejectedExecutionException.
Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
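Most of the examples below follow the same basic pattern: EsRejectedExecutionException signals back-pressure or an executor that is shutting down, so the caller drops, retries, or rethrows the work rather than treating it as a fatal error. The following minimal sketch is not taken from any of the projects below; the class name is illustrative, and the isExecutorShutdown() check assumes an Elasticsearch version that provides it (it appears in the ThreadPool and ConnectionManager examples further down).

import java.util.concurrent.Executor;

import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;

public final class RejectionAwareSubmitter {

    private final Executor executor;

    public RejectionAwareSubmitter(Executor executor) {
        this.executor = executor;
    }

    // Runs the task on the wrapped executor. A rejection caused by the executor
    // shutting down is swallowed (there is nothing left to run the task on);
    // any other rejection is rethrown so the caller can apply back-pressure.
    public void executeUnlessShuttingDown(Runnable task) {
        try {
            executor.execute(task);
        } catch (EsRejectedExecutionException e) {
            if (!e.isExecutorShutdown()) {
                throw e;
            }
        }
    }
}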
Example #1
Source File: RetryRequestFailureHandler.java, from flink-learning (Apache License 2.0)
@Override
public void onFailure(ActionRequest actionRequest, Throwable throwable, int i, RequestIndexer requestIndexer) throws Throwable {
    if (ExceptionUtils.findThrowable(throwable, EsRejectedExecutionException.class).isPresent()) {
        requestIndexer.add(new ActionRequest[]{actionRequest});
    } else {
        if (ExceptionUtils.findThrowable(throwable, SocketTimeoutException.class).isPresent()) {
            return;
        } else {
            Optional<IOException> exp = ExceptionUtils.findThrowable(throwable, IOException.class);
            if (exp.isPresent()) {
                IOException ioExp = exp.get();
                if (ioExp != null && ioExp.getMessage() != null && ioExp.getMessage().contains("max retry timeout")) {
                    log.error(ioExp.getMessage());
                    return;
                }
            }
        }
        throw throwable;
    }
}
Example #2
Source File: TransportService.java, from crate (Apache License 2.0)
@Override
public void onConnectionClosed(Transport.Connection connection) {
    try {
        List<Transport.ResponseContext> pruned =
            responseHandlers.prune(h -> h.connection().getCacheKey().equals(connection.getCacheKey()));
        // callback that an exception happened, but on a different thread since we don't
        // want handlers to worry about stack overflows
        getExecutorService().execute(() -> {
            for (Transport.ResponseContext holderToNotify : pruned) {
                holderToNotify.handler().handleException(
                    new NodeDisconnectedException(connection.getNode(), holderToNotify.action()));
            }
        });
    } catch (EsRejectedExecutionException ex) {
        LOGGER.debug("Rejected execution on onConnectionClosed", ex);
    }
}
Example #3
Source File: BulkRequestHandler.java, from Elasticsearch (Apache License 2.0)
@Override
public void execute(BulkRequest bulkRequest, long executionId) {
    boolean afterCalled = false;
    try {
        listener.beforeBulk(executionId, bulkRequest);
        BulkResponse bulkResponse = Retry
            .on(EsRejectedExecutionException.class)
            .policy(backoffPolicy)
            .withSyncBackoff(client, bulkRequest);
        afterCalled = true;
        listener.afterBulk(executionId, bulkRequest, bulkResponse);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        logger.info("Bulk request {} has been cancelled.", e, executionId);
        if (!afterCalled) {
            listener.afterBulk(executionId, bulkRequest, e);
        }
    } catch (Throwable t) {
        logger.warn("Failed to execute bulk request {}.", t, executionId);
        if (!afterCalled) {
            listener.afterBulk(executionId, bulkRequest, t);
        }
    }
}
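The backoffPolicy used above is an Elasticsearch BackoffPolicy. As a rough sketch of how such a policy is typically constructed (the delay and retry count are illustrative, not taken from the example):

import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.common.unit.TimeValue;

final class BackoffPolicies {

    // Illustrative values: retry a rejected bulk request up to 8 times,
    // starting at 100 ms and increasing the delay exponentially.
    static BackoffPolicy rejectionBackoff() {
        return BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(100), 8);
    }
}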
Example #4
Source File: InternalClusterInfoService.java, from crate (Apache License 2.0)
@Override
public void onMaster() {
    this.isMaster = true;
    if (LOGGER.isTraceEnabled()) {
        LOGGER.trace("I have been elected master, scheduling a ClusterInfoUpdateJob");
    }
    // Submit a job that will start after DEFAULT_STARTING_INTERVAL, and reschedule itself after running
    threadPool.scheduleUnlessShuttingDown(updateFrequency, executorName(), new SubmitReschedulingClusterInfoUpdatedJob());
    try {
        if (clusterService.state().getNodes().getDataNodes().size() > 1) {
            // Submit an info update job to be run immediately
            threadPool.executor(executorName()).execute(this::maybeRefresh);
        }
    } catch (EsRejectedExecutionException ex) {
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("Couldn't schedule cluster info update task - node might be shutting down", ex);
        }
    }
}
Example #5
Source File: InternalClusterInfoService.java, from Elasticsearch (Apache License 2.0)
@Override
public void onMaster() {
    this.isMaster = true;
    if (logger.isTraceEnabled()) {
        logger.trace("I have been elected master, scheduling a ClusterInfoUpdateJob");
    }
    try {
        // Submit a job that will start after DEFAULT_STARTING_INTERVAL, and reschedule itself after running
        threadPool.schedule(updateFrequency, executorName(), new SubmitReschedulingClusterInfoUpdatedJob());
        if (clusterService.state().getNodes().getDataNodes().size() > 1) {
            // Submit an info update job to be run immediately
            threadPool.executor(executorName()).execute(new Runnable() {
                @Override
                public void run() {
                    maybeRefresh();
                }
            });
        }
    } catch (EsRejectedExecutionException ex) {
        if (logger.isDebugEnabled()) {
            logger.debug("Couldn't schedule cluster info update task - node might be shutting down", ex);
        }
    }
}
Example #6
Source File: ClusterApplierService.java, from crate (Apache License 2.0)
private void submitStateUpdateTask(final String source, final ClusterStateTaskConfig config,
                                   final Function<ClusterState, ClusterState> executor,
                                   final ClusterApplyListener listener) {
    if (!lifecycle.started()) {
        return;
    }
    try {
        UpdateTask updateTask = new UpdateTask(config.priority(), source, new SafeClusterApplyListener(listener, LOGGER), executor);
        if (config.timeout() != null) {
            threadPoolExecutor.execute(updateTask, config.timeout(), () ->
                threadPool.generic().execute(
                    () -> listener.onFailure(source, new ProcessClusterEventTimeoutException(config.timeout(), source))));
        } else {
            threadPoolExecutor.execute(updateTask);
        }
    } catch (EsRejectedExecutionException e) {
        // ignore cases where we are shutting down..., there is really nothing interesting
        // to be done here...
        if (!lifecycle.stoppedOrClosed()) {
            throw e;
        }
    }
}
Example #7
Source File: IndicesRequestCache.java, from Elasticsearch (Apache License 2.0)
@Override
public void run() {
    if (closed) {
        return;
    }
    if (keysToClean.isEmpty()) {
        schedule();
        return;
    }
    try {
        threadPool.executor(ThreadPool.Names.GENERIC).execute(new Runnable() {
            @Override
            public void run() {
                reap();
                schedule();
            }
        });
    } catch (EsRejectedExecutionException ex) {
        logger.debug("Can not run ReaderCleaner - execution rejected", ex);
    }
}
Example #8
Source File: InternalClusterInfoService.java, from crate (Apache License 2.0)
@Override
public void run() {
    if (LOGGER.isTraceEnabled()) {
        LOGGER.trace("Submitting new rescheduling cluster info update job");
    }
    try {
        threadPool.executor(executorName()).execute(() -> {
            try {
                maybeRefresh();
            } finally {
                // schedule again after we refreshed
                if (isMaster) {
                    if (LOGGER.isTraceEnabled()) {
                        LOGGER.trace("Scheduling next run for updating cluster info in: {}", updateFrequency.toString());
                    }
                    threadPool.scheduleUnlessShuttingDown(updateFrequency, executorName(), this);
                }
            }
        });
    } catch (EsRejectedExecutionException ex) {
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("Couldn't re-schedule cluster info update task - node might be shutting down", ex);
        }
    }
}
Example #9
Source File: ExceptionsHelper.java, from crate (Apache License 2.0)
public static RestStatus status(Throwable t) {
    if (t != null) {
        if (t instanceof ElasticsearchException) {
            return ((ElasticsearchException) t).status();
        } else if (t instanceof IllegalArgumentException) {
            return RestStatus.BAD_REQUEST;
        } else if (t instanceof EsRejectedExecutionException) {
            return RestStatus.TOO_MANY_REQUESTS;
        }
    }
    return RestStatus.INTERNAL_SERVER_ERROR;
}
Example #10
Source File: IncrementalPageBucketReceiver.java, from crate (Apache License 2.0)
@Override
public void setBucket(int bucketIdx, Bucket rows, boolean isLast, PageResultListener pageResultListener) {
    if (processingFuture.isCompletedExceptionally()) {
        pageResultListener.needMore(false);
        return;
    } else {
        pageResultListener.needMore(!isLast);
    }
    // We make sure only one accumulation operation runs at a time because the state is not thread-safe.
    synchronized (state) {
        if (currentlyAccumulating == null) {
            try {
                currentlyAccumulating = CompletableFuture.runAsync(() -> processRows(rows), executor);
            } catch (EsRejectedExecutionException e) {
                processingFuture.completeExceptionally(e);
            }
        } else {
            currentlyAccumulating = currentlyAccumulating.whenComplete((r, t) -> {
                if (t == null) {
                    processRows(rows);
                } else if (t instanceof RuntimeException) {
                    processingFuture.completeExceptionally(t);
                    throw (RuntimeException) t;
                } else {
                    processingFuture.completeExceptionally(t);
                    throw new RuntimeException(t);
                }
            });
        }
    }
    if (isLast) {
        if (remainingUpstreams.decrementAndGet() == 0) {
            currentlyAccumulating.whenComplete((r, t) -> consumeRows());
        }
    }
}
Example #11
Source File: ConnectionManager.java, from crate (Apache License 2.0)
@Override
protected void onAfterInLifecycle() {
    try {
        threadPool.schedule(this, pingSchedule, ThreadPool.Names.GENERIC);
    } catch (EsRejectedExecutionException ex) {
        if (ex.isExecutorShutdown()) {
            LOGGER.debug("couldn't schedule new ping execution, executor is shutting down", ex);
        } else {
            throw ex;
        }
    }
}
Example #12
Source File: TaskBatcher.java, from crate (Apache License 2.0)
public void submitTasks(List<? extends BatchedTask> tasks, @Nullable TimeValue timeout) throws EsRejectedExecutionException {
    if (tasks.isEmpty()) {
        return;
    }
    final BatchedTask firstTask = tasks.get(0);
    assert tasks.stream().allMatch(t -> t.batchingKey == firstTask.batchingKey) :
        "tasks submitted in a batch should share the same batching key: " + tasks;
    // convert to an identity map to check for dups based on task identity
    final Map<Object, BatchedTask> tasksIdentity = tasks.stream().collect(Collectors.toMap(
        BatchedTask::getTask,
        Function.identity(),
        (a, b) -> { throw new IllegalStateException("cannot add duplicate task: " + a); },
        IdentityHashMap::new)
    );
    synchronized (tasksPerBatchingKey) {
        LinkedHashSet<BatchedTask> existingTasks = tasksPerBatchingKey.computeIfAbsent(firstTask.batchingKey,
            k -> new LinkedHashSet<>(tasks.size()));
        for (BatchedTask existing : existingTasks) {
            // check that there won't be two tasks with the same identity for the same batching key
            BatchedTask duplicateTask = tasksIdentity.get(existing.getTask());
            if (duplicateTask != null) {
                throw new IllegalStateException("task [" + duplicateTask.describeTasks(
                    Collections.singletonList(existing)) + "] with source [" + duplicateTask.source + "] is already queued");
            }
        }
        existingTasks.addAll(tasks);
    }
    if (timeout != null) {
        threadExecutor.execute(firstTask, timeout, () -> onTimeoutInternal(tasks, timeout));
    } else {
        threadExecutor.execute(firstTask);
    }
}
Example #13
Source File: ClusterApplierService.java, from crate (Apache License 2.0)
/**
 * Adds a cluster state listener that is expected to be removed during a short period of time.
 * If provided, the listener will be notified once a specific time has elapsed.
 *
 * NOTE: the listener is not removed on timeout. This is the responsibility of the caller.
 */
public void addTimeoutListener(@Nullable final TimeValue timeout, final TimeoutClusterStateListener listener) {
    if (lifecycle.stoppedOrClosed()) {
        listener.onClose();
        return;
    }
    // call the post added notification on the same event thread
    try {
        threadPoolExecutor.execute(new SourcePrioritizedRunnable(Priority.HIGH, "_add_listener_") {
            @Override
            public void run() {
                if (timeout != null) {
                    NotifyTimeout notifyTimeout = new NotifyTimeout(listener, timeout);
                    notifyTimeout.cancellable = threadPool.schedule(notifyTimeout, timeout, ThreadPool.Names.GENERIC);
                    onGoingTimeouts.add(notifyTimeout);
                }
                timeoutClusterStateListeners.add(listener);
                listener.postAdded();
            }
        });
    } catch (EsRejectedExecutionException e) {
        if (lifecycle.stoppedOrClosed()) {
            listener.onClose();
        } else {
            throw e;
        }
    }
}
Example #14
Source File: ThreadPool.java, from crate (Apache License 2.0)
public void scheduleUnlessShuttingDown(TimeValue delay, String executor, Runnable command) {
    try {
        schedule(command, delay, executor);
    } catch (EsRejectedExecutionException e) {
        if (e.isExecutorShutdown()) {
            LOGGER.debug(new ParameterizedMessage("could not schedule execution of [{}] after [{}] on [{}] as executor is shut down",
                command, delay, executor), e);
        } else {
            throw e;
        }
    }
}
Example #15
Source File: ThreadPool.java, from crate (Apache License 2.0)
@Override
public void run() {
    try {
        executor.execute(runnable);
    } catch (EsRejectedExecutionException e) {
        if (e.isExecutorShutdown()) {
            LOGGER.debug(new ParameterizedMessage("could not schedule execution of [{}] on [{}] as executor is shut down",
                runnable, executor), e);
        } else {
            throw e;
        }
    }
}
Example #16
Source File: Scheduler.java, from crate (Apache License 2.0)
@Override
public void onAfter() {
    // if this has not been cancelled reschedule it to run again
    if (run) {
        try {
            scheduler.schedule(this, interval, executor);
        } catch (final EsRejectedExecutionException e) {
            onRejection(e);
        }
    }
}
Example #17
Source File: DistributingConsumer.java, from crate (Apache License 2.0)
private void countdownAndMaybeContinue(BatchIterator<Row> it, AtomicInteger numActiveRequests, boolean sameExecutor) {
    if (numActiveRequests.decrementAndGet() == 0) {
        if (downstreams.stream().anyMatch(Downstream::needsMoreData)) {
            if (failure == null) {
                if (sameExecutor) {
                    consumeIt(it);
                } else {
                    // try to dispatch to different executor, if it fails, forward the error in the same thread
                    try {
                        responseExecutor.execute(() -> consumeIt(it));
                    } catch (EsRejectedExecutionException e) {
                        failure = e;
                        forwardFailure(it, failure);
                    }
                }
            } else {
                forwardFailure(it, failure);
            }
        } else {
            // If we've a failure we either communicated it to the other downstreams already,
            // or were able to send results to all downstreams. In either case, *this* operation succeeded and the
            // downstreams need to deal with failures.
            // The NodeDisconnectJobMonitorService takes care of node disconnects, so we don't have to manage
            // that scenario.
            it.close();
            completionFuture.complete(null);
        }
    }
}
Example #18
Source File: RetryListener.java, from crate (Apache License 2.0)
@Override
public void onFailure(Exception e) {
    Throwable throwable = SQLExceptions.unwrap(e);
    if (throwable instanceof EsRejectedExecutionException && delay.hasNext()) {
        TimeValue currentDelay = delay.next();
        scheduler.schedule(retryCommand, currentDelay.millis(), TimeUnit.MILLISECONDS);
    } else {
        delegate.onFailure(e);
    }
}
Example #19
Source File: RetryRejectedExecutionFailureHandler.java, from Flink-CEPplus (Apache License 2.0)
@Override
public void onFailure(ActionRequest action, Throwable failure, int restStatusCode, RequestIndexer indexer) throws Throwable {
    if (ExceptionUtils.findThrowable(failure, EsRejectedExecutionException.class).isPresent()) {
        indexer.add(action);
    } else {
        // rethrow all other failures
        throw failure;
    }
}
Example #20
Source File: UserRequestHandler.java, from elasticsearch-taste (Apache License 2.0)
private void doUserUpdate(final Params params,
                          final RequestHandler.OnErrorListener listener,
                          final Map<String, Object> requestMap,
                          final Map<String, Object> paramMap,
                          final Map<String, Object> userMap,
                          final String index,
                          final String type,
                          final String userIdField,
                          final String timestampField,
                          final Long userId,
                          final OpType opType,
                          final RequestHandlerChain chain) {
    userMap.put(userIdField, userId);
    userMap.put(timestampField, new Date());
    final OnResponseListener<IndexResponse> responseListener = response -> {
        paramMap.put(userIdField, userId);
        chain.execute(params, listener, requestMap, paramMap);
    };
    final OnFailureListener failureListener = t -> {
        if (t instanceof DocumentAlreadyExistsException || t instanceof EsRejectedExecutionException) {
            sleep(t);
            execute(params, listener, requestMap, paramMap, chain);
        } else {
            listener.onError(t);
        }
    };
    client.prepareIndex(index, type, userId.toString()).setSource(userMap)
        .setRefresh(true).setOpType(opType)
        .execute(on(responseListener, failureListener));
}
Example #21
Source File: ItemRequestHandler.java, from elasticsearch-taste (Apache License 2.0)
private void doItemUpdate(final Params params,
                          final RequestHandler.OnErrorListener listener,
                          final Map<String, Object> requestMap,
                          final Map<String, Object> paramMap,
                          final Map<String, Object> itemMap,
                          final String index,
                          final String type,
                          final String itemIdField,
                          final String timestampField,
                          final Long itemId,
                          final OpType opType,
                          final RequestHandlerChain chain) {
    itemMap.put(itemIdField, itemId);
    itemMap.put(timestampField, new Date());
    final OnResponseListener<IndexResponse> responseListener = response -> {
        paramMap.put(itemIdField, itemId);
        chain.execute(params, listener, requestMap, paramMap);
    };
    final OnFailureListener failureListener = t -> {
        sleep(t);
        if (t instanceof DocumentAlreadyExistsException || t instanceof EsRejectedExecutionException) {
            execute(params, listener, requestMap, paramMap, chain);
        } else {
            listener.onError(t);
        }
    };
    client.prepareIndex(index, type, itemId.toString()).setSource(itemMap)
        .setRefresh(true).setOpType(opType)
        .execute(on(responseListener, failureListener));
}
Example #22
Source File: RetryRejectedExecutionFailureHandler.java, from flink (Apache License 2.0)
@Override
public void onFailure(ActionRequest action, Throwable failure, int restStatusCode, RequestIndexer indexer) throws Throwable {
    LOG.error("Failed Elasticsearch item request: {}", failure.getMessage(), failure);
    if (ExceptionUtils.findThrowable(failure, EsRejectedExecutionException.class).isPresent()) {
        indexer.add(action);
    } else {
        // rethrow all other failures
        throw failure;
    }
}
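In a Flink job, a handler like the one above is attached when the Elasticsearch sink is built. A minimal sketch of that wiring, assuming the elasticsearch6 connector packages (the host, index name, and element type are illustrative, not taken from the example):

import java.util.Collections;
import java.util.List;

import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
import org.apache.flink.streaming.connectors.elasticsearch.util.RetryRejectedExecutionFailureHandler;
import org.apache.flink.streaming.connectors.elasticsearch6.ElasticsearchSink;
import org.apache.http.HttpHost;
import org.elasticsearch.client.Requests;

final class SinkFactory {

    // Builds a sink that indexes each String into "my-index" and re-adds requests
    // that failed with EsRejectedExecutionException instead of failing the job.
    static ElasticsearchSink<String> createSink() {
        List<HttpHost> hosts = Collections.singletonList(new HttpHost("localhost", 9200, "http"));
        ElasticsearchSinkFunction<String> sinkFunction = (element, ctx, indexer) ->
            indexer.add(Requests.indexRequest().index("my-index").source(Collections.singletonMap("data", element)));
        ElasticsearchSink.Builder<String> builder = new ElasticsearchSink.Builder<>(hosts, sinkFunction);
        builder.setFailureHandler(new RetryRejectedExecutionFailureHandler());
        return builder.build();
    }
}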
Example #23
Source File: InternalClusterService.java, from Elasticsearch (Apache License 2.0)
@Override
public void add(@Nullable final TimeValue timeout, final TimeoutClusterStateListener listener) {
    if (lifecycle.stoppedOrClosed()) {
        listener.onClose();
        return;
    }
    // call the post added notification on the same event thread
    try {
        updateTasksExecutor.execute(new SourcePrioritizedRunnable(Priority.HIGH, "_add_listener_") {
            @Override
            public void run() {
                if (timeout != null) {
                    NotifyTimeout notifyTimeout = new NotifyTimeout(listener, timeout);
                    notifyTimeout.future = threadPool.schedule(timeout, ThreadPool.Names.GENERIC, notifyTimeout);
                    onGoingTimeouts.add(notifyTimeout);
                }
                postAppliedListeners.add(listener);
                listener.postAdded();
            }
        });
    } catch (EsRejectedExecutionException e) {
        if (lifecycle.stoppedOrClosed()) {
            listener.onClose();
        } else {
            throw e;
        }
    }
}
Example #24
Source File: NodesFaultDetection.java, from Elasticsearch (Apache License 2.0)
private void notifyNodeFailure(final DiscoveryNode node, final String reason) {
    try {
        threadPool.generic().execute(new Runnable() {
            @Override
            public void run() {
                for (Listener listener : listeners) {
                    listener.onNodeFailure(node, reason);
                }
            }
        });
    } catch (EsRejectedExecutionException ex) {
        logger.trace("[node ] [{}] ignoring node failure (reason [{}]). Local node is shutting down", ex, node, reason);
    }
}
Example #25
Source File: ZenPingService.java, from Elasticsearch (Apache License 2.0)
@Override
public void ping(PingListener listener, TimeValue timeout) {
    List<? extends ZenPing> zenPings = this.zenPings;
    CompoundPingListener compoundPingListener = new CompoundPingListener(listener, zenPings);
    for (ZenPing zenPing : zenPings) {
        try {
            zenPing.ping(compoundPingListener, timeout);
        } catch (EsRejectedExecutionException ex) {
            logger.debug("Ping execution rejected", ex);
            compoundPingListener.onPing(null);
        }
    }
}