Java Code Examples for java.util.concurrent.ExecutorCompletionService#take()
The following examples show how to use
java.util.concurrent.ExecutorCompletionService#take().
They are drawn from open source projects; the source file, originating project, and license are listed above each example.
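Before the project examples, here is a minimal, self-contained sketch of the pattern most of them follow: submit a batch of tasks to an ExecutorCompletionService and drain the results with take(), which blocks until the Future of the next completed task is available, yielding results in completion order rather than submission order. The class name, pool size, and task bodies below are illustrative only and do not come from any of the projects listed here.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class TakeExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        ExecutorCompletionService<Integer> ecs = new ExecutorCompletionService<>(pool);
        int tasks = 8;
        try {
            // submit all tasks; each one computes a square after a short delay
            for (int i = 0; i < tasks; i++) {
                final int n = i;
                ecs.submit(new Callable<Integer>() {
                    @Override
                    public Integer call() throws Exception {
                        Thread.sleep(50);
                        return n * n;
                    }
                });
            }
            // take() blocks until some submitted task has completed
            for (int i = 0; i < tasks; i++) {
                Future<Integer> done = ecs.take();
                try {
                    System.out.println("completed: " + done.get());
                } catch (ExecutionException e) {
                    System.err.println("task failed: " + e.getCause());
                }
            }
        } finally {
            pool.shutdown();
            pool.awaitTermination(5, TimeUnit.SECONDS);
        }
    }
}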
Example 1
Source File: TestIdLock.java From hbase with Apache License 2.0 | 6 votes |
@Test
public void testMultipleClients() throws Exception {
  ExecutorService exec = Executors.newFixedThreadPool(NUM_THREADS);
  try {
    ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<>(exec);
    for (int i = 0; i < NUM_THREADS; ++i) {
      ecs.submit(new IdLockTestThread("client_" + i));
    }
    for (int i = 0; i < NUM_THREADS; ++i) {
      Future<Boolean> result = ecs.take();
      assertTrue(result.get());
    }
    idLock.assertMapEmpty();
  } finally {
    exec.shutdown();
    exec.awaitTermination(5000, TimeUnit.MILLISECONDS);
  }
}
Example 2
Source File: StartMojo.java From docker-maven-plugin with Apache License 2.0 | 6 votes |
private void waitForStartedContainer(
    final ExecutorCompletionService<StartedContainer> containerStartupService,
    final Set<String> startedContainerAliases,
    final Queue<ImageConfiguration> imagesStarting)
    throws InterruptedException, IOException, ExecException {
  final Future<StartedContainer> startedContainerFuture = containerStartupService.take();
  try {
    final StartedContainer startedContainer = startedContainerFuture.get();
    final ImageConfiguration imageConfig = startedContainer.imageConfig;
    updateAliasesSet(startedContainerAliases, imageConfig.getAlias());
    // All done with this image
    imagesStarting.remove(imageConfig);
  } catch (ExecutionException e) {
    rethrowCause(e);
  }
}
Example 3
Source File: ExecutorCompletionServiceTest.java From j2objc with Apache License 2.0 | 5 votes |
/**
 * A taken submitted task is completed
 */
public void testTake() throws InterruptedException {
  final ExecutorService e = Executors.newCachedThreadPool();
  final ExecutorCompletionService ecs = new ExecutorCompletionService(e);
  try (PoolCleaner cleaner = cleaner(e)) {
    Callable c = new StringTask();
    ecs.submit(c);
    Future f = ecs.take();
    assertTrue(f.isDone());
  }
}
Example 4
Source File: ExecutorCompletionServiceTest.java From j2objc with Apache License 2.0 | 5 votes |
/**
 * Take returns the same future object returned by submit
 */
public void testTake2() throws InterruptedException {
  final ExecutorService e = Executors.newCachedThreadPool();
  final ExecutorCompletionService ecs = new ExecutorCompletionService(e);
  try (PoolCleaner cleaner = cleaner(e)) {
    Callable c = new StringTask();
    Future f1 = ecs.submit(c);
    Future f2 = ecs.take();
    assertSame(f1, f2);
  }
}
Example 5
Source File: TestIdReadWriteLockWithObjectPool.java From hbase with Apache License 2.0 | 5 votes |
@Test
public void testMultipleClients() throws Exception {
  ExecutorService exec = Executors.newFixedThreadPool(NUM_THREADS);
  try {
    ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<>(exec);
    for (int i = 0; i < NUM_THREADS; ++i) {
      ecs.submit(new IdLockTestThread("client_" + i));
    }
    for (int i = 0; i < NUM_THREADS; ++i) {
      Future<Boolean> result = ecs.take();
      assertTrue(result.get());
    }
    int entryPoolSize = idLock.purgeAndGetEntryPoolSize();
    LOG.debug("Size of entry pool after gc and purge: " + entryPoolSize);
    ReferenceType refType = idLock.getReferenceType();
    switch (refType) {
      case WEAK:
        // make sure the entry pool will be cleared after GC and purge call
        assertEquals(0, entryPoolSize);
        break;
      case SOFT:
        // make sure the entry pool won't be cleared when JVM memory is enough
        // even after GC and purge call
        assertEquals(NUM_IDS, entryPoolSize);
        break;
      default:
        break;
    }
  } finally {
    exec.shutdown();
    exec.awaitTermination(5000, TimeUnit.MILLISECONDS);
  }
}
Example 6
Source File: AbstractExperimentRunner.java From quaerite with Apache License 2.0 | 4 votes |
void runExperiment(Experiment experiment, List<Scorer> scorers, int maxRows,
    ExperimentDB experimentDB, JudgmentList judgmentList,
    String judgmentListId, boolean logResults)
    throws SQLException, IOException, SearchClientException {
  if (experimentDB.hasScores(experiment.getName())) {
    LOG.info("Already has scores for " + experiment.getName() + "; skipping. " +
        "Use the -freshStart commandline option to clear all scores");
    return;
  }
  experimentDB.initScoreTable(scorers);
  SearchClient searchClient = SearchClientFactory.getClient(experiment.getSearchServerUrl());
  if (StringUtils.isBlank(experimentConfig.getIdField())) {
    LOG.info("default document 'idField' not set in experiment config. " +
        "Will use default: '" + searchClient.getDefaultIdField() + "'");
    experimentConfig.setIdField(searchClient.getDefaultIdField());
  }

  JudgmentList validated = searchServerValidatedMap.get(
      experiment.getSearchServerUrl() + "_" + judgmentListId);
  if (validated == null) {
    validated = validate(searchClient, judgmentList);
    searchServerValidatedMap.put(
        experiment.getSearchServerUrl() + "_" + judgmentListId, validated);
  }

  ExecutorService executorService = Executors.newFixedThreadPool(
      experimentConfig.getNumThreads());
  ExecutorCompletionService<Integer> executorCompletionService =
      new ExecutorCompletionService<>(executorService);
  ArrayBlockingQueue<Judgments> queue = new ArrayBlockingQueue<>(
      validated.getJudgmentsList().size() + experimentConfig.getNumThreads());

  queue.addAll(validated.getJudgmentsList());
  for (int i = 0; i < experimentConfig.getNumThreads(); i++) {
    queue.add(POISON);
  }

  for (int i = 0; i < experimentConfig.getNumThreads(); i++) {
    executorCompletionService.submit(
        new QueryRunner(experimentConfig.getIdField(), maxRows,
            queue, experiment, experimentDB, scorers));
  }

  int completed = 0;
  while (completed < experimentConfig.getNumThreads()) {
    try {
      Future<Integer> future = executorCompletionService.take();
      future.get();
    } catch (Exception e) {
      e.printStackTrace();
    } finally {
      completed++;
    }
  }
  executorService.shutdown();
  executorService.shutdownNow();
  //insertScores(experimentDB, experimentName, scoreAggregators);
  experimentDB.insertScoresAggregated(experiment.getName(), scorers);

  if (logResults) {
    logResults(experiment.getName(), scorers);
  }
}
Example 7
Source File: TestInterProcessSemaphore.java From xian with Apache License 2.0 | 4 votes |
@Test
public void testReleaseInChunks() throws Exception {
  final Timing timing = new Timing();
  final int MAX_LEASES = 11;
  final int THREADS = 100;

  final CuratorFramework client = CuratorFrameworkFactory.newClient(
      server.getConnectString(), timing.session(), timing.connection(), new RetryOneTime(1));
  client.start();
  try {
    final Stepper latch = new Stepper();
    final Random random = new Random();
    final Counter counter = new Counter();
    ExecutorService service = Executors.newCachedThreadPool();
    ExecutorCompletionService<Object> completionService =
        new ExecutorCompletionService<Object>(service);
    for ( int i = 0; i < THREADS; ++i ) {
      completionService.submit(
          new Callable<Object>() {
            @Override
            public Object call() throws Exception {
              InterProcessSemaphoreV2 semaphore =
                  new InterProcessSemaphoreV2(client, "/test", MAX_LEASES);
              Lease lease = semaphore.acquire(timing.forWaiting().seconds(), TimeUnit.SECONDS);
              if ( lease == null ) {
                throw new Exception("timed out");
              }
              try {
                synchronized(counter) {
                  ++counter.currentCount;
                  if ( counter.currentCount > counter.maxCount ) {
                    counter.maxCount = counter.currentCount;
                  }
                  counter.notifyAll();
                }
                latch.await();
              } finally {
                synchronized(counter) {
                  --counter.currentCount;
                }
                semaphore.returnLease(lease);
              }
              return null;
            }
          }
      );
    }

    int remaining = THREADS;
    while ( remaining > 0 ) {
      int times = Math.min(random.nextInt(5) + 1, remaining);
      latch.countDown(times);
      remaining -= times;
      Thread.sleep(random.nextInt(100) + 1);
    }

    for ( int i = 0; i < THREADS; ++i ) {
      completionService.take();
    }

    timing.sleepABit();

    synchronized(counter) {
      Assert.assertTrue(counter.currentCount == 0);
      Assert.assertTrue(counter.maxCount > 0);
      Assert.assertTrue(counter.maxCount <= MAX_LEASES);
      System.out.println(counter.maxCount);
    }
  } finally {
    client.close();
  }
}
Example 8
Source File: TestS3ADeleteManyFiles.java From hadoop with Apache License 2.0 | 4 votes |
@Test
public void testBulkRenameAndDelete() throws Throwable {
  final Path scaleTestDir = getTestPath();
  final Path srcDir = new Path(scaleTestDir, "src");
  final Path finalDir = new Path(scaleTestDir, "final");
  final long count = getOperationCount();
  ContractTestUtils.rm(fs, scaleTestDir, true, false);
  fs.mkdirs(srcDir);
  fs.mkdirs(finalDir);

  int testBufferSize = fs.getConf()
      .getInt(ContractTestUtils.IO_CHUNK_BUFFER_SIZE,
          ContractTestUtils.DEFAULT_IO_CHUNK_BUFFER_SIZE);
  // use Executor to speed up file creation
  ExecutorService exec = Executors.newFixedThreadPool(16);
  final ExecutorCompletionService<Boolean> completionService =
      new ExecutorCompletionService<Boolean>(exec);
  try {
    final byte[] data = ContractTestUtils.dataset(testBufferSize, 'a', 'z');

    for (int i = 0; i < count; ++i) {
      final String fileName = "foo-" + i;
      completionService.submit(new Callable<Boolean>() {
        @Override
        public Boolean call() throws IOException {
          ContractTestUtils.createFile(fs, new Path(srcDir, fileName), false, data);
          return fs.exists(new Path(srcDir, fileName));
        }
      });
    }
    for (int i = 0; i < count; ++i) {
      final Future<Boolean> future = completionService.take();
      try {
        if (!future.get()) {
          LOG.warn("cannot create file");
        }
      } catch (ExecutionException e) {
        LOG.warn("Error while uploading file", e.getCause());
        throw e;
      }
    }
  } finally {
    exec.shutdown();
  }

  int nSrcFiles = fs.listStatus(srcDir).length;
  fs.rename(srcDir, finalDir);
  assertEquals(nSrcFiles, fs.listStatus(finalDir).length);
  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
      new Path(srcDir, "foo-" + 0));
  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
      new Path(srcDir, "foo-" + count / 2));
  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
      new Path(srcDir, "foo-" + (count - 1)));
  ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
      new Path(finalDir, "foo-" + 0));
  ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
      new Path(finalDir, "foo-" + count / 2));
  ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
      new Path(finalDir, "foo-" + (count - 1)));
  ContractTestUtils.assertDeleted(fs, finalDir, true, false);
}
Example 9
Source File: TestS3ADeleteManyFiles.java From big-c with Apache License 2.0 | 4 votes |
@Test
public void testBulkRenameAndDelete() throws Throwable {
  final Path scaleTestDir = getTestPath();
  final Path srcDir = new Path(scaleTestDir, "src");
  final Path finalDir = new Path(scaleTestDir, "final");
  final long count = getOperationCount();
  ContractTestUtils.rm(fs, scaleTestDir, true, false);
  fs.mkdirs(srcDir);
  fs.mkdirs(finalDir);

  int testBufferSize = fs.getConf()
      .getInt(ContractTestUtils.IO_CHUNK_BUFFER_SIZE,
          ContractTestUtils.DEFAULT_IO_CHUNK_BUFFER_SIZE);
  // use Executor to speed up file creation
  ExecutorService exec = Executors.newFixedThreadPool(16);
  final ExecutorCompletionService<Boolean> completionService =
      new ExecutorCompletionService<Boolean>(exec);
  try {
    final byte[] data = ContractTestUtils.dataset(testBufferSize, 'a', 'z');

    for (int i = 0; i < count; ++i) {
      final String fileName = "foo-" + i;
      completionService.submit(new Callable<Boolean>() {
        @Override
        public Boolean call() throws IOException {
          ContractTestUtils.createFile(fs, new Path(srcDir, fileName), false, data);
          return fs.exists(new Path(srcDir, fileName));
        }
      });
    }
    for (int i = 0; i < count; ++i) {
      final Future<Boolean> future = completionService.take();
      try {
        if (!future.get()) {
          LOG.warn("cannot create file");
        }
      } catch (ExecutionException e) {
        LOG.warn("Error while uploading file", e.getCause());
        throw e;
      }
    }
  } finally {
    exec.shutdown();
  }

  int nSrcFiles = fs.listStatus(srcDir).length;
  fs.rename(srcDir, finalDir);
  assertEquals(nSrcFiles, fs.listStatus(finalDir).length);
  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
      new Path(srcDir, "foo-" + 0));
  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
      new Path(srcDir, "foo-" + count / 2));
  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
      new Path(srcDir, "foo-" + (count - 1)));
  ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
      new Path(finalDir, "foo-" + 0));
  ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
      new Path(finalDir, "foo-" + count / 2));
  ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
      new Path(finalDir, "foo-" + (count - 1)));
  ContractTestUtils.assertDeleted(fs, finalDir, true, false);
}
Example 10
Source File: TestHFileBlock.java From hbase with Apache License 2.0 | 4 votes |
protected void testConcurrentReadingInternals() throws IOException,
    InterruptedException, ExecutionException {
  for (Compression.Algorithm compressAlgo : COMPRESSION_ALGORITHMS) {
    Path path = new Path(TEST_UTIL.getDataTestDir(), "concurrent_reading");
    Random rand = defaultRandom();
    List<Long> offsets = new ArrayList<>();
    List<BlockType> types = new ArrayList<>();
    writeBlocks(rand, compressAlgo, path, offsets, null, types, null);
    FSDataInputStream is = fs.open(path);
    long fileSize = fs.getFileStatus(path).getLen();
    HFileContext meta = new HFileContextBuilder()
        .withHBaseCheckSum(true)
        .withIncludesMvcc(includesMemstoreTS)
        .withIncludesTags(includesTag)
        .withCompression(compressAlgo)
        .build();
    ReaderContext context = new ReaderContextBuilder()
        .withInputStreamWrapper(new FSDataInputStreamWrapper(is))
        .withFileSize(fileSize)
        .withFilePath(path)
        .withFileSystem(fs)
        .build();
    HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta, alloc);

    Executor exec = Executors.newFixedThreadPool(NUM_READER_THREADS);
    ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<>(exec);

    for (int i = 0; i < NUM_READER_THREADS; ++i) {
      ecs.submit(new BlockReaderThread("reader_" + (char) ('A' + i),
          hbr, offsets, types, fileSize));
    }

    for (int i = 0; i < NUM_READER_THREADS; ++i) {
      Future<Boolean> result = ecs.take();
      assertTrue(result.get());
      if (detailedLogging) {
        LOG.info(String.valueOf(i + 1)
            + " reader threads finished successfully (algo=" + compressAlgo + ")");
      }
    }
    is.close();
  }
}
Example 11
Source File: TestInterProcessSemaphore.java From curator with Apache License 2.0 | 4 votes |
@Test
public void testReleaseInChunks() throws Exception {
  final Timing timing = new Timing();
  final int MAX_LEASES = 11;
  final int THREADS = 100;

  final CuratorFramework client = CuratorFrameworkFactory.newClient(
      server.getConnectString(), timing.session(), timing.connection(), new RetryOneTime(1));
  client.start();
  try {
    final Stepper latch = new Stepper();
    final Random random = new Random();
    final Counter counter = new Counter();
    ExecutorService service = Executors.newCachedThreadPool();
    ExecutorCompletionService<Object> completionService =
        new ExecutorCompletionService<Object>(service);
    for ( int i = 0; i < THREADS; ++i ) {
      completionService.submit(
          new Callable<Object>() {
            @Override
            public Object call() throws Exception {
              InterProcessSemaphoreV2 semaphore =
                  new InterProcessSemaphoreV2(client, "/test", MAX_LEASES);
              Lease lease = semaphore.acquire(timing.forWaiting().seconds(), TimeUnit.SECONDS);
              if ( lease == null ) {
                throw new Exception("timed out");
              }
              try {
                synchronized(counter) {
                  ++counter.currentCount;
                  if ( counter.currentCount > counter.maxCount ) {
                    counter.maxCount = counter.currentCount;
                  }
                  counter.notifyAll();
                }
                latch.await();
              } finally {
                synchronized(counter) {
                  --counter.currentCount;
                }
                semaphore.returnLease(lease);
              }
              return null;
            }
          }
      );
    }

    int remaining = THREADS;
    while ( remaining > 0 ) {
      int times = Math.min(random.nextInt(5) + 1, remaining);
      latch.countDown(times);
      remaining -= times;
      Thread.sleep(random.nextInt(100) + 1);
    }

    for ( int i = 0; i < THREADS; ++i ) {
      completionService.take();
    }

    timing.sleepABit();

    synchronized(counter) {
      Assert.assertTrue(counter.currentCount == 0);
      Assert.assertTrue(counter.maxCount > 0);
      Assert.assertTrue(counter.maxCount <= MAX_LEASES);
      System.out.println(counter.maxCount);
    }
  } finally {
    TestCleanState.closeAndTestClean(client);
  }
}
Example 12
Source File: TestThriftSource.java From mt-flume with Apache License 2.0 | 4 votes |
@Test
public void testMultipleClients() throws Exception {
  ExecutorService submitter = Executors.newCachedThreadPool();
  client = RpcClientFactory.getThriftInstance(props);
  Context context = new Context();
  context.put("capacity", "1000");
  context.put("transactionCapacity", "1000");
  channel.configure(context);
  configureSource();
  context.put(ThriftSource.CONFIG_BIND, "0.0.0.0");
  context.put(ThriftSource.CONFIG_PORT, String.valueOf(port));
  Configurables.configure(source, context);
  source.start();
  ExecutorCompletionService<Void> completionService =
      new ExecutorCompletionService(submitter);
  for (int i = 0; i < 30; i++) {
    completionService.submit(new SubmitHelper(i), null);
  }
  //wait for all threads to be done
  for (int i = 0; i < 30; i++) {
    completionService.take();
  }

  Transaction transaction = channel.getTransaction();
  transaction.begin();
  long after = System.currentTimeMillis();
  List<Integer> events = Lists.newArrayList();
  for (int i = 0; i < 300; i++) {
    Event event = channel.take();
    Assert.assertNotNull(event);
    Assert.assertTrue(Long.valueOf(event.getHeaders().get("time")) < after);
    events.add(Integer.parseInt(new String(event.getBody())));
  }
  transaction.commit();
  transaction.close();

  Collections.sort(events);

  int index = 0;
  //30 batches of 10
  for (int i = 0; i < 30; i++) {
    for (int j = 0; j < 10; j++) {
      Assert.assertEquals(i, events.get(index++).intValue());
    }
  }
}
Example 13
Source File: BlobOutputStreamTests.java From azure-storage-android with Apache License 2.0 | 4 votes |
@Test
public void testWritesDoubleConcurrency() throws URISyntaxException, StorageException,
    IOException, InterruptedException {
  String blobName = BlobTestHelper.generateRandomBlobNameWithPrefix("concurrency");
  CloudBlockBlob blockBlob = this.container.getBlockBlobReference(blobName);

  // setup the blob output stream with a concurrency of 5
  BlobRequestOptions options = new BlobRequestOptions();
  options.setConcurrentRequestCount(5);
  BlobOutputStream blobOutputStream = blockBlob.openOutputStream(null, options, null);

  // set up the execution completion service
  ExecutorService threadExecutor = Executors.newFixedThreadPool(5);
  ExecutorCompletionService<Void> completion =
      new ExecutorCompletionService<Void>(threadExecutor);

  int tasks = 10;
  int writes = 10;
  int length = 512;

  // submit tasks to write and flush many blocks
  for (int i = 0; i < tasks; i++) {
    completion.submit(new WriteTask(blobOutputStream, length, writes, 4 /*flush period*/));
  }

  // wait for all tasks to complete
  for (int i = 0; i < tasks; i++) {
    completion.take();
  }

  // shut down the thread executor for this method
  threadExecutor.shutdown();

  // check that blocks were committed
  ArrayList<BlockEntry> blocks = blockBlob.downloadBlockList(
      BlockListingFilter.UNCOMMITTED, null, null, null);
  assertTrue(blocks.size() != 0);

  // close the stream and check that the blob is the expected length
  blobOutputStream.close();
  blockBlob.downloadAttributes();
  assertTrue(blockBlob.getProperties().getLength() == length * writes * tasks);
}
Example 14
Source File: SortMergeJoinPlan.java From phoenix with Apache License 2.0 | 4 votes |
/**
 * Parallel init, when:
 * 1. {@link #lhsTuple} is null for inner join or left join.
 * 2. {@link #rhsTuple} is null for inner join.
 * we could conclude that the join result is null early, set
 * {@link #joinResultNullBecauseOneSideNull} true.
 * @throws SQLException
 */
private void init() throws SQLException {
  ExecutorService threadPoolExecutor = Executors.newFixedThreadPool(2);
  ExecutorCompletionService<Boolean> executorCompletionService =
      new ExecutorCompletionService<Boolean>(threadPoolExecutor);
  List<Future<Boolean>> futures = new ArrayList<Future<Boolean>>(2);
  futures.add(executorCompletionService.submit(new Callable<Boolean>() {
    @Override
    public Boolean call() throws Exception {
      doInit(true);
      return lhsTuple == null &&
          ((joinType == JoinType.Inner) || (joinType == JoinType.Left));
    }
  }));
  futures.add(executorCompletionService.submit(new Callable<Boolean>() {
    @Override
    public Boolean call() throws Exception {
      doInit(false);
      return rhsTuple == null && joinType == JoinType.Inner;
    }
  }));

  try {
    Future<Boolean> future = executorCompletionService.take();
    if (future.get()) {
      this.joinResultNullBecauseOneSideNull = true;
      this.initialized = true;
      return;
    }
    future = executorCompletionService.take();
    if (future.get()) {
      this.joinResultNullBecauseOneSideNull = true;
    }
    initialized = true;
  } catch (Throwable throwable) {
    throw new SQLException("failed in init join iterators", throwable);
  } finally {
    clearThreadPoolExecutor(threadPoolExecutor, futures);
  }
}
Example 15
Source File: SortMergeJoinPlan.java From phoenix with Apache License 2.0 | 4 votes |
/**
 * Parallel init, when:
 * 1. {@link #lhsTuple} is null.
 * 2. {@link #rhsTuple} is null for left semi join.
 * we could conclude that the join result is null early, set
 * {@link #joinResultNullBecauseOneSideNull} true.
 * @throws SQLException
 */
private void init() throws SQLException {
  ExecutorService threadPoolExecutor = Executors.newFixedThreadPool(2);
  ExecutorCompletionService<Boolean> executorCompletionService =
      new ExecutorCompletionService<Boolean>(threadPoolExecutor);
  List<Future<Boolean>> futures = new ArrayList<Future<Boolean>>(2);
  futures.add(executorCompletionService.submit(new Callable<Boolean>() {
    @Override
    public Boolean call() throws Exception {
      advance(true);
      return lhsTuple == null;
    }
  }));
  futures.add(executorCompletionService.submit(new Callable<Boolean>() {
    @Override
    public Boolean call() throws Exception {
      advance(false);
      return (rhsTuple == null && isSemi);
    }
  }));

  try {
    Future<Boolean> future = executorCompletionService.take();
    if (future.get()) {
      this.joinResultNullBecauseOneSideNull = true;
      this.initialized = true;
      return;
    }
    future = executorCompletionService.take();
    if (future.get()) {
      this.joinResultNullBecauseOneSideNull = true;
    }
    initialized = true;
  } catch (Throwable throwable) {
    throw new SQLException("failed in init join iterators", throwable);
  } finally {
    clearThreadPoolExecutor(threadPoolExecutor, futures);
  }
}