Java Code Examples for org.elasticsearch.common.util.concurrent.EsExecutors#newFixed()
The following examples show how to use
org.elasticsearch.common.util.concurrent.EsExecutors#newFixed().
Each example names the project and source file it was taken from, so you can follow the link above each example back to the original code.
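Before the project examples, here is a minimal, self-contained sketch of what newFixed() provides: a fixed-size thread pool backed by a bounded task queue. The exact overloads differ between Elasticsearch versions; this sketch assumes the five-argument variant that also takes a ThreadContext (the one used in Example 1 below). The class name NewFixedSketch, the pool name "sketch", the node name "node1", and the sizes 4 and 100 are illustrative only.

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.ThreadContext;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;

public class NewFixedSketch {
    public static void main(String[] args) throws InterruptedException {
        // Daemon threads named after the node and pool, the same pattern Example 1 uses.
        ThreadFactory threadFactory =
                EsExecutors.daemonThreadFactory(EsExecutors.threadName("node1", "sketch"));
        ThreadContext threadContext = new ThreadContext(Settings.EMPTY);

        // Fixed pool of 4 threads with a bounded queue of 100 pending tasks.
        ExecutorService executor =
                EsExecutors.newFixed("sketch", 4, 100, threadFactory, threadContext);
        try {
            executor.execute(() ->
                    System.out.println("running on " + Thread.currentThread().getName()));
        } finally {
            executor.shutdown();
            executor.awaitTermination(5, TimeUnit.SECONDS);
        }
    }
}

Examples 2 through 7 come from codebases built against older Elasticsearch versions and use a four-argument overload without the ThreadContext; in those versions a negative queue capacity (as in Example 2) appears to mean an unbounded queue, while a positive capacity bounds how many tasks may wait.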
Example 1
Source File: AnomalyDetectorJobRunnerTests.java From anomaly-detection with Apache License 2.0
@Before
public void setup() throws Exception {
    super.setUp();
    super.setUpLog4jForJUnit(AnomalyDetectorJobRunner.class);
    MockitoAnnotations.initMocks(this);
    ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(EsExecutors.threadName("node1", "test-ad"));
    ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
    executorService = EsExecutors.newFixed("test-ad", 4, 100, threadFactory, threadContext);
    doReturn(executorService).when(mockedThreadPool).executor(anyString());
    runner.setThreadPool(mockedThreadPool);
    runner.setClient(client);
    runner.setClientUtil(clientUtil);
    runner.setAnomalyResultHandler(anomalyResultHandler);
    setUpJobParameter();
    runner
        .setSettings(
            Settings
                .builder()
                .put("opendistro.anomaly_detection.max_retry_for_backoff", 2)
                .put("opendistro.anomaly_detection.backoff_initial_delay", TimeValue.timeValueMillis(1))
                .put("opendistro.anomaly_detection.max_retry_for_end_run_exception", 3)
                .build()
        );
    lockService = new LockService(client, clusterService);
    doReturn(lockService).when(context).getLockService();
}
Example 2
Source File: LocalTransport.java From Elasticsearch with Apache License 2.0
@Inject
public LocalTransport(Settings settings, ThreadPool threadPool, Version version,
                      NamedWriteableRegistry namedWriteableRegistry) {
    super(settings);
    this.threadPool = threadPool;
    this.version = version;
    int workerCount = this.settings.getAsInt(TRANSPORT_LOCAL_WORKERS, EsExecutors.boundedNumberOfProcessors(settings));
    int queueSize = this.settings.getAsInt(TRANSPORT_LOCAL_QUEUE, -1);
    logger.debug("creating [{}] workers, queue_size [{}]", workerCount, queueSize);
    final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(this.settings, LOCAL_TRANSPORT_THREAD_NAME_PREFIX);
    this.workers = EsExecutors.newFixed(LOCAL_TRANSPORT_THREAD_NAME_PREFIX, workerCount, queueSize, threadFactory);
    this.namedWriteableRegistry = namedWriteableRegistry;
}
Example 3
Source File: FixedExecutorBuilder.java From crate with Apache License 2.0
@Override
ThreadPool.ExecutorHolder build(final FixedExecutorSettings settings) {
    int size = settings.size;
    int queueSize = settings.queueSize;
    final ThreadFactory threadFactory =
        EsExecutors.daemonThreadFactory(EsExecutors.threadName(settings.nodeName, name()));
    final ExecutorService executor = EsExecutors.newFixed(
        settings.nodeName + "/" + name(),
        size,
        queueSize,
        threadFactory
    );
    final ThreadPool.Info info = new ThreadPool.Info(
        name(),
        ThreadPool.ThreadPoolType.FIXED,
        size,
        size,
        null,
        queueSize < 0 ? null : new SizeValue(queueSize)
    );
    return new ThreadPool.ExecutorHolder(executor, info);
}
Example 4
Source File: HttpBulkNodeClientTest.java From elasticsearch-helper with Apache License 2.0
@Test
public void testThreadedRandomDocs() throws Exception {
    int maxthreads = Runtime.getRuntime().availableProcessors();
    long maxactions = MAX_ACTIONS;
    final long maxloop = NUM_ACTIONS;
    logger.info("HttpBulkNodeClient max={} maxactions={} maxloop={}", maxthreads, maxactions, maxloop);
    final HttpBulkNodeClient client = ClientBuilder.builder()
            .put("host", "127.0.0.1")
            .put("port", 9200)
            .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, maxactions)
            .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60))
            .setMetric(new LongAdderIngestMetric())
            .toHttpBulkNodeClient();
    try {
        client.newIndex("test")
                .startBulk("test", -1, 1000);
        ThreadPoolExecutor pool = EsExecutors.newFixed("http-bulk-nodeclient-test", maxthreads, 30,
                EsExecutors.daemonThreadFactory("http-bulk-nodeclient-test"));
        final CountDownLatch latch = new CountDownLatch(maxthreads);
        for (int i = 0; i < maxthreads; i++) {
            pool.execute(new Runnable() {
                public void run() {
                    for (int i = 0; i < maxloop; i++) {
                        client.index("test", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}");
                    }
                    latch.countDown();
                }
            });
        }
        logger.info("waiting for max 30 seconds...");
        latch.await(30, TimeUnit.SECONDS);
        logger.info("flush...");
        client.flushIngest();
        client.waitForResponses(TimeValue.timeValueSeconds(30));
        logger.info("got all responses, thread pool shutdown...");
        pool.shutdown();
        logger.info("pool is shut down");
    } catch (NoNodeAvailableException e) {
        logger.warn("skipping, no node available");
    } finally {
        client.stopBulk("test");
        assertEquals(maxthreads * maxloop, client.getMetric().getSucceeded().getCount());
        if (client.hasThrowable()) {
            logger.error("error", client.getThrowable());
        }
        assertFalse(client.hasThrowable());
        client.refreshIndex("test");
        SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE)
                .setQuery(QueryBuilders.matchAllQuery()).setSize(0);
        assertEquals(maxthreads * maxloop, searchRequestBuilder.execute().actionGet().getHits().getTotalHits());
        client.shutdown();
    }
}
Example 5
Source File: IngestTransportClientTest.java From elasticsearch-helper with Apache License 2.0
@Test
public void testThreadedRandomDocsIngestClient() throws Exception {
    int maxthreads = Runtime.getRuntime().availableProcessors();
    long maxactions = MAX_ACTIONS;
    final long maxloop = NUM_ACTIONS;
    Settings settings = Settings.settingsBuilder()
            .put("index.number_of_shards", 2)
            .put("index.number_of_replicas", 1)
            .build();
    final IngestTransportClient ingest = ClientBuilder.builder()
            .put(getSettings())
            .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, maxactions)
            .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60))
            .setMetric(new LongAdderIngestMetric())
            .toIngestTransportClient();
    try {
        ingest.newIndex("test", settings, null)
                .startBulk("test", -1, 1000);
        ThreadPoolExecutor pool = EsExecutors.newFixed("ingestclient-test", maxthreads, 30,
                EsExecutors.daemonThreadFactory("ingestclient-test"));
        final CountDownLatch latch = new CountDownLatch(maxthreads);
        for (int i = 0; i < maxthreads; i++) {
            pool.execute(new Runnable() {
                public void run() {
                    for (int i = 0; i < maxloop; i++) {
                        ingest.index("test", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}");
                    }
                    latch.countDown();
                }
            });
        }
        logger.info("waiting for max 30 seconds...");
        latch.await(30, TimeUnit.SECONDS);
        logger.info("client flush ...");
        ingest.flushIngest();
        ingest.waitForResponses(TimeValue.timeValueSeconds(30));
        logger.info("thread pool to be shut down ...");
        pool.shutdown();
        logger.info("thread pool shut down");
    } catch (NoNodeAvailableException e) {
        logger.warn("skipping, no node available");
    } finally {
        ingest.stopBulk("test");
        assertEquals(maxthreads * maxloop, ingest.getMetric().getSucceeded().getCount());
        if (ingest.hasThrowable()) {
            logger.error("error", ingest.getThrowable());
        }
        assertFalse(ingest.hasThrowable());
        ingest.refreshIndex("test");
        SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(ingest.client(), SearchAction.INSTANCE)
                .setIndices("_all") // to avoid NPE
                .setQuery(QueryBuilders.matchAllQuery())
                .setSize(0);
        assertEquals(maxthreads * maxloop, searchRequestBuilder.execute().actionGet().getHits().getTotalHits());
        ingest.shutdown();
    }
}
Example 6
Source File: BulkTransportClientTest.java From elasticsearch-helper with Apache License 2.0
@Test
public void testThreadedRandomDocsBulkClient() throws Exception {
    int maxthreads = Runtime.getRuntime().availableProcessors();
    long maxactions = MAX_ACTIONS;
    final long maxloop = NUM_ACTIONS;
    Settings settingsForIndex = Settings.settingsBuilder()
            .put("index.number_of_shards", 2)
            .put("index.number_of_replicas", 1)
            .build();
    final BulkTransportClient client = ClientBuilder.builder()
            .put(getSettings())
            .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, maxactions)
            .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60)) // = disable autoflush for this test
            .setMetric(new LongAdderIngestMetric())
            .toBulkTransportClient();
    try {
        client.newIndex("test", settingsForIndex, null)
                .startBulk("test", -1, 1000);
        ThreadPoolExecutor pool = EsExecutors.newFixed("bulkclient-test", maxthreads, 30,
                EsExecutors.daemonThreadFactory("bulkclient-test"));
        final CountDownLatch latch = new CountDownLatch(maxthreads);
        for (int i = 0; i < maxthreads; i++) {
            pool.execute(new Runnable() {
                public void run() {
                    for (int i = 0; i < maxloop; i++) {
                        client.index("test", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}");
                    }
                    latch.countDown();
                }
            });
        }
        logger.info("waiting for max 30 seconds...");
        latch.await(30, TimeUnit.SECONDS);
        logger.info("client flush ...");
        client.flushIngest();
        client.waitForResponses(TimeValue.timeValueSeconds(30));
        logger.info("thread pool to be shut down ...");
        pool.shutdown();
        logger.info("pool shut down");
    } catch (NoNodeAvailableException e) {
        logger.warn("skipping, no node available");
    } finally {
        client.stopBulk("test");
        assertEquals(maxthreads * maxloop, client.getMetric().getSucceeded().getCount());
        if (client.hasThrowable()) {
            logger.error("error", client.getThrowable());
        }
        assertFalse(client.hasThrowable());
        client.refreshIndex("test");
        SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE)
                .setIndices("_all") // to avoid NPE at org.elasticsearch.action.search.SearchRequest.writeTo(SearchRequest.java:580)
                .setQuery(QueryBuilders.matchAllQuery())
                .setSize(0);
        assertEquals(maxthreads * maxloop, searchRequestBuilder.execute().actionGet().getHits().getTotalHits());
        client.shutdown();
    }
}
Example 7
Source File: BulkNodeClientTest.java From elasticsearch-helper with Apache License 2.0
@Test
public void testThreadedRandomDocsNodeClient() throws Exception {
    int maxthreads = Runtime.getRuntime().availableProcessors();
    Long maxactions = MAX_ACTIONS;
    final Long maxloop = NUM_ACTIONS;
    logger.info("NodeClient max={} maxactions={} maxloop={}", maxthreads, maxactions, maxloop);
    final BulkNodeClient client = ClientBuilder.builder()
            .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, maxactions)
            .put(ClientBuilder.FLUSH_INTERVAL, TimeValue.timeValueSeconds(60)) // disable auto flush for this test
            .setMetric(new LongAdderIngestMetric())
            .toBulkNodeClient(client("1"));
    try {
        client.newIndex("test")
                .startBulk("test", -1, 1000);
        ThreadPoolExecutor pool = EsExecutors.newFixed("bulk-nodeclient-test", maxthreads, 30,
                EsExecutors.daemonThreadFactory("bulk-nodeclient-test"));
        final CountDownLatch latch = new CountDownLatch(maxthreads);
        for (int i = 0; i < maxthreads; i++) {
            pool.execute(new Runnable() {
                public void run() {
                    for (int i = 0; i < maxloop; i++) {
                        client.index("test", "test", null, "{ \"name\" : \"" + randomString(32) + "\"}");
                    }
                    latch.countDown();
                }
            });
        }
        logger.info("waiting for max 30 seconds...");
        latch.await(30, TimeUnit.SECONDS);
        logger.info("flush...");
        client.flushIngest();
        client.waitForResponses(TimeValue.timeValueSeconds(30));
        logger.info("got all responses, thread pool shutdown...");
        pool.shutdown();
        logger.info("pool is shut down");
    } catch (NoNodeAvailableException e) {
        logger.warn("skipping, no node available");
    } finally {
        client.stopBulk("test");
        assertEquals(maxthreads * maxloop, client.getMetric().getSucceeded().getCount());
        if (client.hasThrowable()) {
            logger.error("error", client.getThrowable());
        }
        assertFalse(client.hasThrowable());
        client.refreshIndex("test");
        SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client.client(), SearchAction.INSTANCE)
                .setQuery(QueryBuilders.matchAllQuery()).setSize(0);
        assertEquals(maxthreads * maxloop, searchRequestBuilder.execute().actionGet().getHits().getTotalHits());
        client.shutdown();
    }
}
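The tests above submit at most maxthreads tasks into a pool of maxthreads workers, so their bounded queue of 30 never fills. As a contrast, here is a small sketch, not taken from any of the projects above, of what can happen when the queue does fill up. It assumes the four-argument overload used in Examples 2 and 4 through 7, and that the returned executor rejects submissions with an EsRejectedExecutionException once both the workers and the queue are occupied; the class and pool names are illustrative.

import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

public class RejectionSketch {

    public static void main(String[] args) throws InterruptedException {
        // One worker thread and room for a single queued task.
        ExecutorService executor = EsExecutors.newFixed("rejection-sketch", 1, 1,
                EsExecutors.daemonThreadFactory("rejection-sketch"));
        final CountDownLatch release = new CountDownLatch(1);
        Runnable blocker = new Runnable() {
            public void run() {
                try {
                    release.await();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
        };
        try {
            executor.execute(blocker); // occupies the single worker
            executor.execute(blocker); // fills the one-slot queue
            executor.execute(blocker); // assumed to be rejected here
        } catch (EsRejectedExecutionException e) {
            System.out.println("third task rejected: " + e.getMessage());
        } finally {
            release.countDown();
            executor.shutdown();
            executor.awaitTermination(5, TimeUnit.SECONDS);
        }
    }
}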