org.elasticsearch.common.util.concurrent.EsExecutors Java Examples
The following examples show how to use org.elasticsearch.common.util.concurrent.EsExecutors.
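Most of the examples below combine the same handful of EsExecutors helpers: daemonThreadFactory(...) to create named daemon threads, newFixed(...) or newScaling(...) to build a bounded executor on top of them, and numberOfProcessors(...) or allocatedProcessors(...) to size the pool. Here is a minimal sketch of that pattern; the pool name and sizes are illustrative, and exact method signatures vary between Elasticsearch versions (both the four- and five-argument forms of newFixed appear in the examples below):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsExecutors;

public class EsExecutorsSketch {
    public static void main(String[] args) throws InterruptedException {
        Settings settings = Settings.EMPTY;
        // Named daemon threads: pool threads never keep the JVM alive on their own.
        ThreadFactory factory = EsExecutors.daemonThreadFactory(settings, "demo");
        // Fixed-size pool with a bounded queue, sized from the processors ES sees
        // (newer versions expose this as allocatedProcessors(...) instead).
        int size = EsExecutors.numberOfProcessors(settings);
        ExecutorService pool = EsExecutors.newFixed("demo", size, 100, factory);
        pool.execute(() -> System.out.println(Thread.currentThread().getName()));
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}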
Example #1
Source File: MockTcpTransport.java From crate with Apache License 2.0
public MockTcpTransport(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
                        CircuitBreakerService circuitBreakerService,
                        NamedWriteableRegistry namedWriteableRegistry,
                        NetworkService networkService, Version mockVersion) {
    super("mock-tcp-transport", settings, threadPool, bigArrays, circuitBreakerService,
          namedWriteableRegistry, networkService);
    // We have our own cached thread pool; this one is not bounded at all.
    // Using the ES thread factory here is crucial for tests, otherwise
    // disruption tests won't block that thread.
    executor = Executors.newCachedThreadPool(
        EsExecutors.daemonThreadFactory(settings, Transports.TEST_MOCK_TRANSPORT_THREAD_PREFIX));
    this.mockVersion = mockVersion;
}
Example #2
Source File: ProjectionToProjectorVisitor.java From crate with Apache License 2.0
public ProjectionToProjectorVisitor(ClusterService clusterService,
                                    NodeJobsCounter nodeJobsCounter,
                                    Functions functions,
                                    ThreadPool threadPool,
                                    Settings settings,
                                    TransportActionProvider transportActionProvider,
                                    InputFactory inputFactory,
                                    EvaluatingNormalizer normalizer,
                                    Function<RelationName, SysRowUpdater<?>> sysUpdaterGetter,
                                    Function<RelationName, StaticTableDefinition<?>> staticTableDefinitionGetter,
                                    Version indexVersionCreated,
                                    @Nullable ShardId shardId) {
    this.clusterService = clusterService;
    this.nodeJobsCounter = nodeJobsCounter;
    this.functions = functions;
    this.threadPool = threadPool;
    this.settings = settings;
    this.transportActionProvider = transportActionProvider;
    this.inputFactory = inputFactory;
    this.normalizer = normalizer;
    this.sysUpdaterGetter = sysUpdaterGetter;
    this.staticTableDefinitionGetter = staticTableDefinitionGetter;
    this.indexVersionCreated = indexVersionCreated;
    this.shardId = shardId;
    this.numProcessors = EsExecutors.numberOfProcessors(settings);
}
Example #3
Source File: ScalingExecutorBuilder.java From crate with Apache License 2.0
@Override
ThreadPool.ExecutorHolder build(final ScalingExecutorSettings settings) {
    TimeValue keepAlive = settings.keepAlive;
    int core = settings.core;
    int max = settings.max;
    final ThreadPool.Info info =
        new ThreadPool.Info(name(), ThreadPool.ThreadPoolType.SCALING, core, max, keepAlive, null);
    final ThreadFactory threadFactory =
        EsExecutors.daemonThreadFactory(EsExecutors.threadName(settings.nodeName, name()));
    final ExecutorService executor = EsExecutors.newScaling(
        settings.nodeName + "/" + name(),
        core,
        max,
        keepAlive.millis(),
        TimeUnit.MILLISECONDS,
        threadFactory);
    return new ThreadPool.ExecutorHolder(executor, info);
}
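The same newScaling(...) call can also be used on its own, outside the builder. A hedged standalone sketch with illustrative names and sizes (the six-argument signature matches the one used in this example):

ThreadFactory factory = EsExecutors.daemonThreadFactory(EsExecutors.threadName("node-1", "demo-scaling"));
ExecutorService scaling = EsExecutors.newScaling(
    "node-1/demo-scaling", // executor name, reported in rejection errors
    0,                     // core: keep no threads while idle
    4,                     // max: grow to at most four threads under load
    30L, TimeUnit.SECONDS, // keep-alive before surplus threads exit
    factory);
scaling.execute(() -> { /* work */ });
scaling.shutdown();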
Example #4
Source File: BulkProcessor.java From elasticsearch-helper with Apache License 2.0
BulkProcessor(Client client, Listener listener, @Nullable String name, int concurrentRequests,
              int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) {
    this.bulkActions = bulkActions;
    this.bulkSize = bulkSize.bytes();
    this.bulkRequest = new BulkRequest();
    this.bulkRequestHandler = concurrentRequests == 0
        ? new SyncBulkRequestHandler(client, listener)
        : new AsyncBulkRequestHandler(client, listener, concurrentRequests);
    if (flushInterval != null) {
        this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1,
            EsExecutors.daemonThreadFactory(client.settings(),
                (name != null ? "[" + name + "]" : "") + "bulk_processor"));
        this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
        this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
        this.scheduledFuture = this.scheduler.scheduleWithFixedDelay(new Flush(),
            flushInterval.millis(), flushInterval.millis(), TimeUnit.MILLISECONDS);
    } else {
        this.scheduler = null;
        this.scheduledFuture = null;
    }
}
Example #5
Source File: InternalClusterService.java From Elasticsearch with Apache License 2.0
@Override
protected void doStart() {
    add(localNodeMasterListeners);
    add(taskManager);
    this.clusterState = ClusterState.builder(clusterState).blocks(initialBlocks).build();
    this.updateTasksExecutor = EsExecutors.newSinglePrioritizing(
        UPDATE_THREAD_NAME, daemonThreadFactory(settings, UPDATE_THREAD_NAME));
    this.reconnectToNodes = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, new ReconnectToNodes());
    Map<String, String> nodeAttributes = discoveryNodeService.buildAttributes();
    // Note: we rely on the fact that it's a new id each time we start; see FD and "kill -9" handling.
    final String nodeId = DiscoveryService.generateNodeId(settings);
    final TransportAddress publishAddress = transportService.boundAddress().publishAddress();
    DiscoveryNode localNode = new DiscoveryNode(settings.get("name"), nodeId, publishAddress, nodeAttributes, version);
    DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder().put(localNode).localNodeId(localNode.id());
    this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).blocks(initialBlocks).build();
    this.transportService.setLocalNode(localNode);
}
Example #6
Source File: HttpBulkProcessor.java From elasticsearch-helper with Apache License 2.0
HttpBulkProcessor(Client client, Listener listener, @Nullable String name, int concurrentRequests,
                  int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) {
    this.client = client;
    this.listener = listener;
    this.concurrentRequests = concurrentRequests;
    this.bulkActions = bulkActions;
    this.bulkSize = bulkSize.bytes();
    this.semaphore = new Semaphore(concurrentRequests);
    this.bulkRequest = new BulkRequest();
    if (flushInterval != null) {
        this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1,
            EsExecutors.daemonThreadFactory(client.settings(),
                (name != null ? "[" + name + "]" : "") + "bulk_processor"));
        this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
        this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
        this.scheduledFuture = this.scheduler.scheduleWithFixedDelay(new Flush(),
            flushInterval.millis(), flushInterval.millis(), TimeUnit.MILLISECONDS);
    } else {
        this.scheduler = null;
        this.scheduledFuture = null;
    }
}
Example #7
Source File: KafkaRiver.java From elasticsearch-river-kafka with Apache License 2.0
@Override
public void start() {
    try {
        logger.info("creating kafka river: zookeeper = {}, broker = {}, broker_port = {}, message_handler_factory_class = {}",
            riverConfig.zookeeper, riverConfig.brokerHost, riverConfig.brokerPort, riverConfig.factoryClass);
        logger.info("part = {}, topic = {}", riverConfig.partition, riverConfig.topic);
        logger.info("bulkSize = {}, bulkTimeout = {}", riverConfig.bulkSize, riverConfig.bulkTimeout);
        KafkaRiverWorker worker = new KafkaRiverWorker(this.createMessageHandler(client, riverConfig), riverConfig, client);
        thread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "kafka_river").newThread(worker);
        thread.start();
    } catch (Exception e) {
        logger.error("Unexpected Error occurred", e);
        throw new RuntimeException(e);
    }
}
Example #8
Source File: BulkProcessor.java From Elasticsearch with Apache License 2.0
BulkProcessor(Client client, BackoffPolicy backoffPolicy, Listener listener, @Nullable String name,
              int concurrentRequests, int bulkActions, ByteSizeValue bulkSize,
              @Nullable TimeValue flushInterval) {
    this.bulkActions = bulkActions;
    this.bulkSize = bulkSize.bytes();
    this.bulkRequest = new BulkRequest();
    this.bulkRequestHandler = (concurrentRequests == 0)
        ? BulkRequestHandler.syncHandler(client, backoffPolicy, listener)
        : BulkRequestHandler.asyncHandler(client, backoffPolicy, listener, concurrentRequests);
    if (flushInterval != null) {
        this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1,
            EsExecutors.daemonThreadFactory(client.settings(),
                (name != null ? "[" + name + "]" : "") + "bulk_processor"));
        this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
        this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
        this.scheduledFuture = this.scheduler.scheduleWithFixedDelay(new Flush(),
            flushInterval.millis(), flushInterval.millis(), TimeUnit.MILLISECONDS);
    } else {
        this.scheduler = null;
        this.scheduledFuture = null;
    }
}
Example #9
Source File: NodeTestConfig.java From elastic-crud with Apache License 2.0
@Bean(destroyMethod = "close")
Node newNode() throws NodeValidationException {
    final Path tempDir = createTempDir().toPath();
    final Settings settings = Settings.builder()
        .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), new ClusterName("single-node-cluster" + System.nanoTime()))
        .put(Environment.PATH_HOME_SETTING.getKey(), tempDir)
        .put(Environment.PATH_REPO_SETTING.getKey(), tempDir.resolve("repo"))
        .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), createTempDir().getParent())
        .put("node.name", "single-node")
        .put("script.inline", "true")
        .put("script.stored", "true")
        .put(ScriptService.SCRIPT_MAX_COMPILATIONS_PER_MINUTE.getKey(), 1000)
        .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1)
        .put(NetworkModule.HTTP_ENABLED.getKey(), false)
        .put("discovery.type", "zen")
        .put("transport.type", "local")
        .put(Node.NODE_DATA_SETTING.getKey(), true)
        .put(NODE_ID_SEED_SETTING.getKey(), System.nanoTime())
        .build();
    return new Node(settings).start(); // NOSONAR
}
Example #10
Source File: GraphiteService.java From elasticsearch-graphite-plugin with Do What The F*ck You Want To Public License
@Override
protected void doStart() throws ElasticsearchException {
    if (graphiteHost != null && graphiteHost.length() > 0) {
        graphiteReporterThread = EsExecutors.daemonThreadFactory(settings, "graphite_reporter")
            .newThread(new GraphiteReporterThread(graphiteInclusionRegex, graphiteExclusionRegex));
        graphiteReporterThread.start();
        StringBuilder sb = new StringBuilder();
        if (graphiteInclusionRegex != null) {
            sb.append("include [").append(graphiteInclusionRegex).append("] ");
        }
        if (graphiteExclusionRegex != null) {
            sb.append("exclude [").append(graphiteExclusionRegex).append("] ");
        }
        logger.info("Graphite reporting triggered every [{}] to host [{}:{}] with metric prefix [{}] {}",
            graphiteRefreshInternal, graphiteHost, graphitePort, graphitePrefix, sb);
    } else {
        logger.error("Graphite reporting disabled, no graphite host configured");
    }
}
Example #11
Source File: AnomalyDetectorJobRunnerTests.java From anomaly-detection with Apache License 2.0
@Before
public void setup() throws Exception {
    super.setUp();
    super.setUpLog4jForJUnit(AnomalyDetectorJobRunner.class);
    MockitoAnnotations.initMocks(this);
    ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(EsExecutors.threadName("node1", "test-ad"));
    ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
    executorService = EsExecutors.newFixed("test-ad", 4, 100, threadFactory, threadContext);
    doReturn(executorService).when(mockedThreadPool).executor(anyString());
    runner.setThreadPool(mockedThreadPool);
    runner.setClient(client);
    runner.setClientUtil(clientUtil);
    runner.setAnomalyResultHandler(anomalyResultHandler);
    setUpJobParameter();
    runner.setSettings(
        Settings.builder()
            .put("opendistro.anomaly_detection.max_retry_for_backoff", 2)
            .put("opendistro.anomaly_detection.backoff_initial_delay", TimeValue.timeValueMillis(1))
            .put("opendistro.anomaly_detection.max_retry_for_end_run_exception", 3)
            .build());
    lockService = new LockService(client, clusterService);
    doReturn(lockService).when(context).getLockService();
}
Example #12
Source File: SeedHostsResolver.java From crate with Apache License 2.0
@Override
protected void doStart() {
    LOGGER.debug("using max_concurrent_resolvers [{}], resolver timeout [{}]", concurrentConnects, resolveTimeout);
    final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings, "[unicast_configured_hosts_resolver]");
    executorService.set(EsExecutors.newScaling(
        nodeName + "/" + "unicast_configured_hosts_resolver",
        0,
        concurrentConnects,
        60,
        TimeUnit.SECONDS,
        threadFactory));
}
Example #13
Source File: ElasticsearchConcurrentMergeScheduler.java From crate with Apache License 2.0
@Override
@SuppressWarnings("sync-override")
protected MergeThread getMergeThread(IndexWriter writer, MergePolicy.OneMerge merge) throws IOException {
    MergeThread thread = super.getMergeThread(writer, merge);
    thread.setName(EsExecutors.threadName(indexSettings,
        "[" + shardId.getIndexName() + "][" + shardId.id() + "]: " + thread.getName()));
    return thread;
}
Example #14
Source File: ExecutorBuilder.java From crate with Apache License 2.0
protected int applyHardSizeLimit(final Settings settings, final String name) {
    if (name.equals(ThreadPool.Names.WRITE)) {
        return 1 + EsExecutors.numberOfProcessors(settings);
    } else {
        return Integer.MAX_VALUE;
    }
}
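For example, with default settings on an eight-processor machine this caps the WRITE pool at nine threads (1 + 8), while every other pool is left effectively unbounded.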
Example #15
Source File: FixedExecutorBuilder.java From crate with Apache License 2.0
@Override
ThreadPool.ExecutorHolder build(final FixedExecutorSettings settings) {
    int size = settings.size;
    int queueSize = settings.queueSize;
    final ThreadFactory threadFactory =
        EsExecutors.daemonThreadFactory(EsExecutors.threadName(settings.nodeName, name()));
    final ExecutorService executor = EsExecutors.newFixed(
        settings.nodeName + "/" + name(),
        size,
        queueSize,
        threadFactory);
    final ThreadPool.Info info = new ThreadPool.Info(name(), ThreadPool.ThreadPoolType.FIXED,
        size, size, null, queueSize < 0 ? null : new SizeValue(queueSize));
    return new ThreadPool.ExecutorHolder(executor, info);
}
Example #16
Source File: Scheduler.java From crate with Apache License 2.0
/**
 * Create a scheduler that can be used client side. Server side, please use
 * <code>ThreadPool.schedule</code> instead.
 *
 * Notice that if any scheduled jobs fail with an exception, these will bubble up to the
 * uncaught exception handler where they will be logged as a warning. This includes jobs
 * started using execute, submit and schedule.
 *
 * @param settings the settings to use
 * @return executor
 */
static ScheduledThreadPoolExecutor initScheduler(Settings settings) {
    final ScheduledThreadPoolExecutor scheduler = new SafeScheduledThreadPoolExecutor(1,
        EsExecutors.daemonThreadFactory(settings, "scheduler"), new EsAbortPolicy());
    scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
    scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
    scheduler.setRemoveOnCancelPolicy(true);
    return scheduler;
}
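A short usage sketch of the scheduler returned by initScheduler; the task and delay are illustrative. Note that the shutdown policies set above mean pending delayed tasks are dropped once shutdown() is called:

ScheduledThreadPoolExecutor scheduler = Scheduler.initScheduler(Settings.EMPTY);
ScheduledFuture<?> tick = scheduler.schedule(
    () -> System.out.println("tick"), 500, TimeUnit.MILLISECONDS);
// ... later, during shutdown: remaining delayed tasks will not run.
scheduler.shutdown();
scheduler.awaitTermination(5, TimeUnit.SECONDS);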
Example #17
Source File: Scheduler.java From crate with Apache License 2.0
@Override
protected void afterExecute(Runnable r, Throwable t) {
    if (t != null) return;
    // The scheduler only allows Runnables, so we expect no checked exceptions here. If anyone
    // uses submit directly on `this`, we accept the wrapped exception in the output.
    ExceptionsHelper.reThrowIfNotNull(EsExecutors.rethrowErrors(r));
}
Example #18
Source File: OsService.java From crate with Apache License 2.0
public OsService(Settings settings) {
    this.probe = OsProbe.getInstance();
    TimeValue refreshInterval = REFRESH_INTERVAL_SETTING.get(settings);
    this.info = probe.osInfo(refreshInterval.millis(), EsExecutors.numberOfProcessors(settings));
    this.osStatsCache = new OsStatsCache(refreshInterval, probe.osStats());
    LOGGER.debug("using refresh_interval [{}]", refreshInterval);
}
Example #19
Source File: InternalCountOperation.java From crate with Apache License 2.0
@Inject
public InternalCountOperation(Settings settings,
                              Schemas schemas,
                              LuceneQueryBuilder queryBuilder,
                              ClusterService clusterService,
                              ThreadPool threadPool,
                              IndicesService indicesService) {
    this.schemas = schemas;
    this.queryBuilder = queryBuilder;
    this.clusterService = clusterService;
    executor = (ThreadPoolExecutor) threadPool.executor(ThreadPool.Names.SEARCH);
    this.indicesService = indicesService;
    this.numProcessors = EsExecutors.numberOfProcessors(settings);
}
Example #20
Source File: TransportFetchNodeAction.java From crate with Apache License 2.0
@Inject
public TransportFetchNodeAction(Settings settings,
                                TransportService transportService,
                                Transports transports,
                                ThreadPool threadPool,
                                JobsLogs jobsLogs,
                                TasksService tasksService,
                                CircuitBreakerService circuitBreakerService) {
    this.transports = transports;
    this.nodeFetchOperation = new NodeFetchOperation(
        (ThreadPoolExecutor) threadPool.executor(ThreadPool.Names.SEARCH),
        EsExecutors.numberOfProcessors(settings),
        jobsLogs,
        tasksService,
        circuitBreakerService.getBreaker(HierarchyCircuitBreakerService.QUERY)
    );
    transportService.registerRequestHandler(
        TRANSPORT_ACTION,
        NodeFetchRequest::new,
        EXECUTOR_NAME,
        // Force execution because this handler might receive empty close requests which
        // need to be processed to not leak the FetchTask. This shouldn't cause too much
        // of an issue because fetch requests always happen after a query phase; if the
        // thread pool is overloaded, the query phase would fail first.
        true,
        false,
        new NodeActionRequestHandler<>(this)
    );
}
Example #21
Source File: AnomalyDetectorPlugin.java From anomaly-detection with Apache License 2.0
@Override
public List<ExecutorBuilder<?>> getExecutorBuilders(Settings settings) {
    return Collections.singletonList(
        new FixedExecutorBuilder(
            settings,
            AD_THREAD_POOL_NAME,
            Math.max(1, EsExecutors.allocatedProcessors(settings) / 4),
            AnomalyDetectorSettings.AD_THEAD_POOL_QUEUE_SIZE,
            "opendistro.ad." + AD_THREAD_POOL_NAME
        )
    );
}
Example #22
Source File: IndicesTTLService.java From Elasticsearch with Apache License 2.0
@Inject
public IndicesTTLService(Settings settings, ClusterService clusterService, IndicesService indicesService,
                         NodeSettingsService nodeSettingsService, TransportBulkAction bulkAction) {
    super(settings);
    this.clusterService = clusterService;
    this.indicesService = indicesService;
    TimeValue interval = this.settings.getAsTime("indices.ttl.interval", DEFAULT_TTL_INTERVAL);
    this.bulkAction = bulkAction;
    this.bulkSize = this.settings.getAsInt("indices.ttl.bulk_size", 10000);
    this.purgerThread = new PurgerThread(EsExecutors.threadName(settings, "[ttl_expire]"), interval);
    nodeSettingsService.addListener(new ApplySettings());
}
Example #23
Source File: OsService.java From Elasticsearch with Apache License 2.0
@Inject
public OsService(Settings settings, OsProbe probe) {
    super(settings);
    this.probe = probe;
    TimeValue refreshInterval = settings.getAsTime("monitor.os.refresh_interval", TimeValue.timeValueSeconds(1));
    this.info = probe.osInfo();
    this.info.refreshInterval = refreshInterval.millis();
    this.info.allocatedProcessors = EsExecutors.boundedNumberOfProcessors(settings);
    osStatsCache = new OsStatsCache(refreshInterval, probe.osStats());
    logger.debug("Using probe [{}] with refresh_interval [{}]", probe, refreshInterval);
}
Example #24
Source File: ThreadPool.java From Elasticsearch with Apache License 2.0
private int applyHardSizeLimit(String name, int size) {
    int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);
    if ((name.equals(Names.BULK) || name.equals(Names.INDEX)) && size > availableProcessors) {
        // We use a hard max size for the indexing pools, because if too many threads enter
        // Lucene's IndexWriter, it means too many segments written, too frequently, too
        // much merging, etc.
        // TODO: I would love to be loud here (throw an exception if you ask for a too-big
        // size), but I think this is dangerous because on upgrade this setting could be in
        // cluster state and hard for the user to correct?
        logger.warn("requested thread pool size [{}] for [{}] is too large; setting to maximum [{}] instead",
            size, name, availableProcessors);
        size = availableProcessors;
    }
    return size;
}
Example #25
Source File: KafkaRiver.java From elasticsearch-river-kafka with Apache License 2.0
@Override
public void start() {
    try {
        logger.debug("Index: {}: Starting Kafka River...", riverConfig.getIndexName());
        final KafkaWorker kafkaWorker = new KafkaWorker(kafkaConsumer, elasticsearchProducer, riverConfig, stats);
        thread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "Kafka River Worker").newThread(kafkaWorker);
        thread.start();
    } catch (Exception ex) {
        logger.error("Index: {}: Unexpected Error occurred", ex, riverConfig.getIndexName());
        throw new RuntimeException(ex);
    }
}
Example #26
Source File: LocalTransport.java From Elasticsearch with Apache License 2.0
@Inject
public LocalTransport(Settings settings, ThreadPool threadPool, Version version,
                      NamedWriteableRegistry namedWriteableRegistry) {
    super(settings);
    this.threadPool = threadPool;
    this.version = version;
    int workerCount = this.settings.getAsInt(TRANSPORT_LOCAL_WORKERS, EsExecutors.boundedNumberOfProcessors(settings));
    int queueSize = this.settings.getAsInt(TRANSPORT_LOCAL_QUEUE, -1);
    logger.debug("creating [{}] workers, queue_size [{}]", workerCount, queueSize);
    final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(this.settings, LOCAL_TRANSPORT_THREAD_NAME_PREFIX);
    this.workers = EsExecutors.newFixed(LOCAL_TRANSPORT_THREAD_NAME_PREFIX, workerCount, queueSize, threadFactory);
    this.namedWriteableRegistry = namedWriteableRegistry;
}
Example #27
Source File: NettyTransport.java From Elasticsearch with Apache License 2.0
@Inject
public NettyTransport(Settings settings, ThreadPool threadPool, NetworkService networkService,
                      BigArrays bigArrays, Version version, NamedWriteableRegistry namedWriteableRegistry) {
    super(settings);
    this.threadPool = threadPool;
    this.networkService = networkService;
    this.bigArrays = bigArrays;
    this.version = version;
    if (settings.getAsBoolean("netty.epollBugWorkaround", false)) {
        System.setProperty("org.jboss.netty.epollBugWorkaround", "true");
    }
    this.workerCount = settings.getAsInt(WORKER_COUNT, EsExecutors.boundedNumberOfProcessors(settings) * 2);
    this.blockingClient = settings.getAsBoolean("transport.netty.transport.tcp.blocking_client",
        settings.getAsBoolean(TCP_BLOCKING_CLIENT, settings.getAsBoolean(TCP_BLOCKING, false)));
    this.connectTimeout = this.settings.getAsTime("transport.netty.connect_timeout",
        settings.getAsTime("transport.tcp.connect_timeout",
            settings.getAsTime(TCP_CONNECT_TIMEOUT, TCP_DEFAULT_CONNECT_TIMEOUT)));
    this.maxCumulationBufferCapacity = this.settings.getAsBytesSize("transport.netty.max_cumulation_buffer_capacity", null);
    this.maxCompositeBufferComponents = this.settings.getAsInt("transport.netty.max_composite_buffer_components", -1);
    this.compress = settings.getAsBoolean(TransportSettings.TRANSPORT_TCP_COMPRESS, false);
    this.connectionsPerNodeRecovery = this.settings.getAsInt("transport.netty.connections_per_node.recovery",
        settings.getAsInt(CONNECTIONS_PER_NODE_RECOVERY, 2));
    this.connectionsPerNodeBulk = this.settings.getAsInt("transport.netty.connections_per_node.bulk",
        settings.getAsInt(CONNECTIONS_PER_NODE_BULK, 3));
    this.connectionsPerNodeReg = this.settings.getAsInt("transport.netty.connections_per_node.reg",
        settings.getAsInt(CONNECTIONS_PER_NODE_REG, 6));
    this.connectionsPerNodeState = this.settings.getAsInt("transport.netty.connections_per_node.high",
        settings.getAsInt(CONNECTIONS_PER_NODE_STATE, 1));
    this.connectionsPerNodePing = this.settings.getAsInt("transport.netty.connections_per_node.ping",
        settings.getAsInt(CONNECTIONS_PER_NODE_PING, 1));
    // we want to have at least 1 for reg/state/ping
    if (this.connectionsPerNodeReg == 0) {
        throw new IllegalArgumentException("can't set [connection_per_node.reg] to 0");
    }
    if (this.connectionsPerNodePing == 0) {
        throw new IllegalArgumentException("can't set [connection_per_node.ping] to 0");
    }
    if (this.connectionsPerNodeState == 0) {
        throw new IllegalArgumentException("can't set [connection_per_node.state] to 0");
    }
    long defaultReceiverPredictor = 512 * 1024;
    if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) {
        // we can guess a better default...
        long l = (long) ((0.3 * JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes()) / workerCount);
        defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024));
    }
    // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty;
    // we can use higher ones for us, even a fixed one.
    this.receivePredictorMin = this.settings.getAsBytesSize("transport.netty.receive_predictor_min",
        this.settings.getAsBytesSize("transport.netty.receive_predictor_size", new ByteSizeValue(defaultReceiverPredictor)));
    this.receivePredictorMax = this.settings.getAsBytesSize("transport.netty.receive_predictor_max",
        this.settings.getAsBytesSize("transport.netty.receive_predictor_size", new ByteSizeValue(defaultReceiverPredictor)));
    if (receivePredictorMax.bytes() == receivePredictorMin.bytes()) {
        receiveBufferSizePredictorFactory = new FixedReceiveBufferSizePredictorFactory((int) receivePredictorMax.bytes());
    } else {
        receiveBufferSizePredictorFactory = new AdaptiveReceiveBufferSizePredictorFactory(
            (int) receivePredictorMin.bytes(), (int) receivePredictorMin.bytes(), (int) receivePredictorMax.bytes());
    }
    this.scheduledPing = new ScheduledPing();
    this.pingSchedule = settings.getAsTime(PING_SCHEDULE, DEFAULT_PING_SCHEDULE);
    if (pingSchedule.millis() > 0) {
        threadPool.schedule(pingSchedule, ThreadPool.Names.GENERIC, scheduledPing);
    }
    this.namedWriteableRegistry = namedWriteableRegistry;
}
Example #28
Source File: BlobHeadRequestHandlerTests.java From crate with Apache License 2.0
@Test
public void testPutHeadChunkRunnableFileGrowth() throws Exception {
    File file = File.createTempFile("test", "");
    try (final FileOutputStream outputStream = new FileOutputStream(file)) {
        outputStream.write(new byte[]{0x65});
        UUID transferId = UUID.randomUUID();
        BlobTransferTarget blobTransferTarget = mock(BlobTransferTarget.class);
        TransportService transportService = mock(TransportService.class);
        DiscoveryNode discoveryNode = mock(DiscoveryNode.class);
        DigestBlob digestBlob = mock(DigestBlob.class);
        when(digestBlob.file()).thenReturn(file);
        ScheduledExecutorService scheduledExecutor =
            Executors.newSingleThreadScheduledExecutor(EsExecutors.daemonThreadFactory("blob-head"));
        try {
            scheduledExecutor.schedule(new Runnable() {
                @Override
                public void run() {
                    try {
                        outputStream.write(new byte[]{0x66, 0x67, 0x68, 0x69});
                    } catch (IOException ex) {
                        // pass
                    }
                }
            }, 800, TimeUnit.MILLISECONDS);
            PutHeadChunkRunnable runnable = new PutHeadChunkRunnable(
                digestBlob, 5, transportService, blobTransferTarget, discoveryNode, transferId);
            @SuppressWarnings("unchecked")
            TransportFuture<TransportResponse.Empty> result = mock(TransportFuture.class);
            when(transportService.submitRequest(
                eq(discoveryNode),
                eq(BlobHeadRequestHandler.Actions.PUT_BLOB_HEAD_CHUNK),
                any(TransportRequest.class),
                any(TransportRequestOptions.class),
                eq(EmptyTransportResponseHandler.INSTANCE_SAME)
            )).thenReturn(result);
            runnable.run();
            verify(blobTransferTarget).putHeadChunkTransferFinished(transferId);
            verify(transportService, times(2)).submitRequest(
                eq(discoveryNode),
                eq(BlobHeadRequestHandler.Actions.PUT_BLOB_HEAD_CHUNK),
                any(TransportRequest.class),
                any(TransportRequestOptions.class),
                eq(EmptyTransportResponseHandler.INSTANCE_SAME));
        } finally {
            scheduledExecutor.awaitTermination(1, TimeUnit.SECONDS);
            scheduledExecutor.shutdownNow();
        }
    }
}
Example #29
Source File: InternalTestCluster.java From crate with Apache License 2.0
private static Settings getRandomNodeSettings(long seed) {
    Random random = new Random(seed);
    Builder builder = Settings.builder();
    builder.put(TransportSettings.TRANSPORT_COMPRESS.getKey(), rarely(random));
    if (random.nextBoolean()) {
        builder.put("cache.recycler.page.type", RandomPicks.randomFrom(random, PageCacheRecycler.Type.values()));
    }
    builder.put(EsExecutors.PROCESSORS_SETTING.getKey(), 1 + random.nextInt(3));
    // randomize tcp settings
    if (random.nextBoolean()) {
        builder.put(TransportSettings.CONNECTIONS_PER_NODE_RECOVERY.getKey(), random.nextInt(2) + 1);
        builder.put(TransportSettings.CONNECTIONS_PER_NODE_BULK.getKey(), random.nextInt(3) + 1);
        builder.put(TransportSettings.CONNECTIONS_PER_NODE_REG.getKey(), random.nextInt(6) + 1);
    }
    if (random.nextBoolean()) {
        builder.put(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.getKey(),
            timeValueSeconds(RandomNumbers.randomIntBetween(random, 10, 30)).getStringRep());
    }
    if (random.nextInt(10) == 0) {
        builder.put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), "noop");
        builder.put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), "noop");
    }
    if (random.nextBoolean()) {
        if (random.nextInt(10) == 0) {
            // do something crazy slow here
            builder.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(),
                new ByteSizeValue(RandomNumbers.randomIntBetween(random, 1, 10), ByteSizeUnit.MB));
        } else {
            builder.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(),
                new ByteSizeValue(RandomNumbers.randomIntBetween(random, 10, 200), ByteSizeUnit.MB));
        }
    }
    if (random.nextBoolean()) {
        builder.put(TransportSettings.PING_SCHEDULE.getKey(), RandomNumbers.randomIntBetween(random, 100, 2000) + "ms");
    }
    return builder.build();
}
Example #30
Source File: UnicastZenPing.java From Elasticsearch with Apache License 2.0
@Inject
public UnicastZenPing(Settings settings, ThreadPool threadPool, TransportService transportService,
                      ClusterName clusterName, Version version, ElectMasterService electMasterService,
                      @Nullable Set<UnicastHostsProvider> unicastHostsProviders) {
    super(settings);
    this.threadPool = threadPool;
    this.transportService = transportService;
    this.clusterName = clusterName;
    this.electMasterService = electMasterService;
    if (unicastHostsProviders != null) {
        for (UnicastHostsProvider unicastHostsProvider : unicastHostsProviders) {
            addHostsProvider(unicastHostsProvider);
        }
    }
    this.concurrentConnects = this.settings.getAsInt("discovery.zen.ping.unicast.concurrent_connects", 10);
    String[] hostArr = this.settings.getAsArray(DISCOVERY_ZEN_PING_UNICAST_HOSTS);
    // trim the hosts
    for (int i = 0; i < hostArr.length; i++) {
        hostArr[i] = hostArr[i].trim();
    }
    List<String> hosts = CollectionUtils.arrayAsArrayList(hostArr);
    final int limitPortCounts;
    if (hosts.isEmpty()) {
        // if unicast hosts are not specified, fill with simple defaults on the local machine
        limitPortCounts = LIMIT_LOCAL_PORTS_COUNT;
        hosts.addAll(transportService.getLocalAddresses());
    } else {
        // we limit to one address per configured host; it makes no sense to ping 100 ports
        limitPortCounts = LIMIT_FOREIGN_PORTS_COUNT;
    }
    logger.debug("using initial hosts {}, with concurrent_connects [{}]", hosts, concurrentConnects);
    List<DiscoveryNode> configuredTargetNodes = new ArrayList<>();
    for (String host : hosts) {
        try {
            TransportAddress[] addresses = transportService.addressesFromString(host, limitPortCounts);
            for (TransportAddress address : addresses) {
                configuredTargetNodes.add(new DiscoveryNode(
                    UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + "#",
                    address, version.minimumCompatibilityVersion()));
            }
        } catch (Exception e) {
            throw new IllegalArgumentException("Failed to resolve address for [" + host + "]", e);
        }
    }
    this.configuredTargetNodes = configuredTargetNodes.toArray(new DiscoveryNode[configuredTargetNodes.size()]);
    transportService.registerRequestHandler(ACTION_NAME, UnicastPingRequest.class, ThreadPool.Names.SAME,
        new UnicastPingRequestHandler());
    ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings, "[unicast_connect]");
    unicastConnectExecutor = EsExecutors.newScaling("unicast_connect", 0, concurrentConnects,
        60, TimeUnit.SECONDS, threadFactory);
}