Java Code Examples for org.apache.flink.runtime.concurrent.FutureUtils#combineAll()
The following examples show how to use org.apache.flink.runtime.concurrent.FutureUtils#combineAll().
You can go to the original project or source file by following the links above each example.
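All of the examples below follow the same basic pattern: collect a number of CompletableFutures, hand them to FutureUtils.combineAll(), and consume the resulting future of a collection. Here is a minimal, self-contained sketch of that pattern; it assumes a dependency on a Flink version that still ships FutureUtils in org.apache.flink.runtime.concurrent (newer releases moved it to org.apache.flink.util.concurrent), and the class name below is made up for illustration.

import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.CompletableFuture;

import org.apache.flink.runtime.concurrent.FutureUtils;

public class CombineAllSketch {

    public static void main(String[] args) throws Exception {
        // Two independent asynchronous computations.
        List<CompletableFuture<Integer>> futures = Arrays.asList(
            CompletableFuture.supplyAsync(() -> 1),
            CompletableFuture.supplyAsync(() -> 2));

        // combineAll() returns a ConjunctFuture that completes with the collection of
        // all results once every input future has completed, and completes
        // exceptionally as soon as one of the inputs fails.
        FutureUtils.ConjunctFuture<Collection<Integer>> all = FutureUtils.combineAll(futures);

        Collection<Integer> results = all.get(); // blocking get() for demonstration only
        System.out.println(results);             // [1, 2]
    }
}

Since ConjunctFuture extends CompletableFuture, the combined result can just as well be consumed non-blockingly with thenApply or thenCompose, which is what most of the examples below do.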
Example 1
Source File: Dispatcher.java From Flink-CEPplus with Apache License 2.0
@Override
public CompletableFuture<MultipleJobsDetails> requestMultipleJobDetails(Time timeout) {
    // ask every job master for the details of its job
    List<CompletableFuture<Optional<JobDetails>>> individualOptionalJobDetails =
        queryJobMastersForInformation(
            (JobMasterGateway jobMasterGateway) -> jobMasterGateway.requestJobDetails(timeout));

    // combine the individual answers into a single future and drop empty results
    CompletableFuture<Collection<Optional<JobDetails>>> optionalCombinedJobDetails =
        FutureUtils.combineAll(individualOptionalJobDetails);

    CompletableFuture<Collection<JobDetails>> combinedJobDetails =
        optionalCombinedJobDetails.thenApply(this::flattenOptionalCollection);

    // merge with the details of already completed jobs from the archive
    final Collection<JobDetails> completedJobDetails =
        archivedExecutionGraphStore.getAvailableJobDetails();

    return combinedJobDetails.thenApply(
        (Collection<JobDetails> runningJobDetails) -> {
            final Collection<JobDetails> allJobDetails =
                new ArrayList<>(completedJobDetails.size() + runningJobDetails.size());

            allJobDetails.addAll(runningJobDetails);
            allJobDetails.addAll(completedJobDetails);

            return new MultipleJobsDetails(allJobDetails);
        });
}
Example 2
Source File: BulkSlotProviderImpl.java From flink with Apache License 2.0
@Override
public CompletableFuture<Collection<PhysicalSlotRequest.Result>> allocatePhysicalSlots(
        final Collection<PhysicalSlotRequest> physicalSlotRequests,
        final Time timeout) {

    componentMainThreadExecutor.assertRunningInMainThread();

    LOG.debug("Received {} slot requests.", physicalSlotRequests.size());

    // track the whole bulk of requests so fulfilment and timeouts can be checked together
    final PhysicalSlotRequestBulk slotRequestBulk =
        slotRequestBulkChecker.createPhysicalSlotRequestBulk(physicalSlotRequests);

    final List<CompletableFuture<PhysicalSlotRequest.Result>> resultFutures =
        new ArrayList<>(physicalSlotRequests.size());
    for (PhysicalSlotRequest request : physicalSlotRequests) {
        final CompletableFuture<PhysicalSlotRequest.Result> resultFuture =
            allocatePhysicalSlot(request).thenApply(result -> {
                // mark each request as fulfilled once its physical slot arrives
                slotRequestBulk.markRequestFulfilled(
                    result.getSlotRequestId(),
                    result.getPhysicalSlot().getAllocationId());

                return result;
            });
        resultFutures.add(resultFuture);
    }

    schedulePendingRequestBulkTimeoutCheck(slotRequestBulk, timeout);

    // the combined future completes once all slot requests have been fulfilled
    return FutureUtils.combineAll(resultFutures);
}
Example 3
Source File: Execution.java From flink with Apache License 2.0
/**
 * Calculates the preferred locations based on the location preference constraint.
 *
 * @param locationPreferenceConstraint constraint for the location preference
 * @return Future containing the collection of preferred locations. This might not be completed if not all inputs
 *         have been assigned a resource.
 */
@VisibleForTesting
public CompletableFuture<Collection<TaskManagerLocation>> calculatePreferredLocations(LocationPreferenceConstraint locationPreferenceConstraint) {
    final Collection<CompletableFuture<TaskManagerLocation>> preferredLocationFutures = getVertex().getPreferredLocations();
    final CompletableFuture<Collection<TaskManagerLocation>> preferredLocationsFuture;

    switch (locationPreferenceConstraint) {
        case ALL:
            // wait until all input locations are known
            preferredLocationsFuture = FutureUtils.combineAll(preferredLocationFutures);
            break;
        case ANY:
            // use whatever locations are already known at this point
            final ArrayList<TaskManagerLocation> completedTaskManagerLocations = new ArrayList<>(preferredLocationFutures.size());

            for (CompletableFuture<TaskManagerLocation> preferredLocationFuture : preferredLocationFutures) {
                if (preferredLocationFuture.isDone() && !preferredLocationFuture.isCompletedExceptionally()) {
                    final TaskManagerLocation taskManagerLocation = preferredLocationFuture.getNow(null);

                    if (taskManagerLocation == null) {
                        throw new FlinkRuntimeException("TaskManagerLocationFuture was completed with null. This indicates a programming bug.");
                    }

                    completedTaskManagerLocations.add(taskManagerLocation);
                }
            }

            preferredLocationsFuture = CompletableFuture.completedFuture(completedTaskManagerLocations);
            break;
        default:
            throw new RuntimeException("Unknown LocationPreferenceConstraint " + locationPreferenceConstraint + '.');
    }

    return preferredLocationsFuture;
}
Example 4
Source File: ExecutionGraphCacheTest.java From flink with Apache License 2.0
/**
 * Tests that concurrent accesses only trigger a single AccessExecutionGraph request.
 */
@Test
public void testConcurrentAccess() throws Exception {
    final Time timeout = Time.milliseconds(100L);
    final Time timeToLive = Time.hours(1L);

    final CountingRestfulGateway restfulGateway = createCountingRestfulGateway(expectedJobId, CompletableFuture.completedFuture(expectedExecutionGraph));

    final int numConcurrentAccesses = 10;

    final ArrayList<CompletableFuture<AccessExecutionGraph>> executionGraphFutures = new ArrayList<>(numConcurrentAccesses);

    final ExecutorService executor = java.util.concurrent.Executors.newFixedThreadPool(numConcurrentAccesses);

    try (ExecutionGraphCache executionGraphCache = new ExecutionGraphCache(timeout, timeToLive)) {
        for (int i = 0; i < numConcurrentAccesses; i++) {
            CompletableFuture<AccessExecutionGraph> executionGraphFuture = CompletableFuture
                .supplyAsync(
                    () -> executionGraphCache.getExecutionGraph(expectedJobId, restfulGateway),
                    executor)
                .thenCompose(Function.identity());

            executionGraphFutures.add(executionGraphFuture);
        }

        final CompletableFuture<Collection<AccessExecutionGraph>> allExecutionGraphFutures = FutureUtils.combineAll(executionGraphFutures);

        Collection<AccessExecutionGraph> allExecutionGraphs = allExecutionGraphFutures.get();

        for (AccessExecutionGraph executionGraph : allExecutionGraphs) {
            assertEquals(expectedExecutionGraph, executionGraph);
        }

        assertThat(restfulGateway.getNumRequestJobCalls(), Matchers.equalTo(1));
    } finally {
        ExecutorUtils.gracefulShutdown(5000L, TimeUnit.MILLISECONDS, executor);
    }
}
Example 5
Source File: AdaptedRestartPipelinedRegionStrategyNG.java From flink with Apache License 2.0
@VisibleForTesting
protected CompletableFuture<?> cancelTasks(final Set<ExecutionVertexID> vertices) {
    // cancel each vertex and combine the cancellation futures into a single one
    final List<CompletableFuture<?>> cancelFutures = vertices.stream()
        .map(this::cancelExecutionVertex)
        .collect(Collectors.toList());

    return FutureUtils.combineAll(cancelFutures);
}
Example 6
Source File: DefaultScheduler.java From flink with Apache License 2.0
private CompletableFuture<?> cancelTasksAsync(final Set<ExecutionVertexID> verticesToRestart) {
    // cancel each vertex to restart and combine the cancellation futures into a single one
    final List<CompletableFuture<?>> cancelFutures = verticesToRestart.stream()
        .map(this::cancelExecutionVertex)
        .collect(Collectors.toList());

    return FutureUtils.combineAll(cancelFutures);
}
Example 7
Source File: BlobServerPutTest.java From Flink-CEPplus with Apache License 2.0
/**
 * [FLINK-6020]
 * Tests that concurrent put operations will only upload the file once to the {@link BlobStore}
 * and that the files are not corrupt at any time.
 *
 * @param jobId
 *        job ID to use (or <tt>null</tt> if job-unrelated)
 * @param blobType
 *        whether the BLOB should become permanent or transient
 */
private void testConcurrentPutOperations(
        @Nullable final JobID jobId, final BlobKey.BlobType blobType)
        throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    BlobStore blobStore = mock(BlobStore.class);
    int concurrentPutOperations = 2;
    int dataSize = 1024;

    final CountDownLatch countDownLatch = new CountDownLatch(concurrentPutOperations);
    final byte[] data = new byte[dataSize];

    ArrayList<CompletableFuture<BlobKey>> allFutures = new ArrayList<>(concurrentPutOperations);

    ExecutorService executor = Executors.newFixedThreadPool(concurrentPutOperations);

    try (final BlobServer server = new BlobServer(config, blobStore)) {
        server.start();

        for (int i = 0; i < concurrentPutOperations; i++) {
            CompletableFuture<BlobKey> putFuture = CompletableFuture
                .supplyAsync(
                    () -> {
                        try {
                            BlockingInputStream inputStream = new BlockingInputStream(countDownLatch, data);
                            BlobKey uploadedKey = put(server, jobId, inputStream, blobType);
                            // check the uploaded file's contents (concurrently)
                            verifyContents(server, jobId, uploadedKey, data);
                            return uploadedKey;
                        } catch (IOException e) {
                            throw new CompletionException(new FlinkException("Could not upload blob.", e));
                        }
                    },
                    executor);

            allFutures.add(putFuture);
        }

        FutureUtils.ConjunctFuture<Collection<BlobKey>> conjunctFuture = FutureUtils.combineAll(allFutures);

        // wait until all operations have completed and check that no exception was thrown
        Collection<BlobKey> blobKeys = conjunctFuture.get();

        Iterator<BlobKey> blobKeyIterator = blobKeys.iterator();

        assertTrue(blobKeyIterator.hasNext());

        BlobKey blobKey = blobKeyIterator.next();

        // make sure that all blob keys are the same
        while (blobKeyIterator.hasNext()) {
            verifyKeyDifferentHashEquals(blobKey, blobKeyIterator.next());
        }

        // check the uploaded file's contents
        verifyContents(server, jobId, blobKey, data);

        // check that we only uploaded the file once to the blob store
        if (blobType == PERMANENT_BLOB) {
            verify(blobStore, times(1)).put(any(File.class), eq(jobId), eq(blobKey));
        } else {
            // can't really verify much in the other cases other than that the put operations
            // should work and not corrupt files
            verify(blobStore, times(0)).put(any(File.class), eq(jobId), eq(blobKey));
        }
    } finally {
        executor.shutdownNow();
    }
}
Example 8
Source File: BlobServerGetTest.java From Flink-CEPplus with Apache License 2.0
/**
 * [FLINK-6020] Tests that concurrent get operations don't concurrently access the BlobStore to
 * download a blob.
 *
 * @param jobId
 *        job ID to use (or <tt>null</tt> if job-unrelated)
 * @param blobType
 *        whether the BLOB should become permanent or transient
 */
private void testConcurrentGetOperations(
        @Nullable final JobID jobId, final BlobKey.BlobType blobType)
        throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    final BlobStore blobStore = mock(BlobStore.class);

    final int numberConcurrentGetOperations = 3;
    final List<CompletableFuture<File>> getOperations = new ArrayList<>(numberConcurrentGetOperations);

    final byte[] data = {1, 2, 3, 4, 99, 42};

    doAnswer(
        new Answer() {
            @Override
            public Object answer(InvocationOnMock invocation) throws Throwable {
                File targetFile = (File) invocation.getArguments()[2];

                FileUtils.writeByteArrayToFile(targetFile, data);

                return null;
            }
        }
    ).when(blobStore).get(any(JobID.class), any(BlobKey.class), any(File.class));

    final ExecutorService executor = Executors.newFixedThreadPool(numberConcurrentGetOperations);

    try (final BlobServer server = new BlobServer(config, blobStore)) {
        server.start();

        // upload data first
        final BlobKey blobKey = put(server, jobId, data, blobType);

        // now try accessing it concurrently (only HA mode will be able to retrieve it from HA store!)
        if (blobType == PERMANENT_BLOB) {
            // remove local copy so that a transfer from HA store takes place
            assertTrue(server.getStorageLocation(jobId, blobKey).delete());
        }

        for (int i = 0; i < numberConcurrentGetOperations; i++) {
            CompletableFuture<File> getOperation = CompletableFuture.supplyAsync(
                () -> {
                    try {
                        File file = get(server, jobId, blobKey);
                        // check that we have read the right data
                        validateGetAndClose(new FileInputStream(file), data);
                        return file;
                    } catch (IOException e) {
                        throw new CompletionException(new FlinkException(
                            "Could not read blob for key " + blobKey + '.', e));
                    }
                },
                executor);

            getOperations.add(getOperation);
        }

        CompletableFuture<Collection<File>> filesFuture = FutureUtils.combineAll(getOperations);
        filesFuture.get();
    } finally {
        executor.shutdownNow();
    }
}
Example 9
Source File: BlobCacheGetTest.java From flink with Apache License 2.0
/**
 * [FLINK-6020] Tests that concurrent get operations don't concurrently access the BlobStore to
 * download a blob.
 *
 * @param jobId
 *        job ID to use (or <tt>null</tt> if job-unrelated)
 * @param blobType
 *        whether the BLOB should become permanent or transient
 * @param cacheAccessesHAStore
 *        whether the cache has access to the {@link BlobServer}'s HA store or not
 */
private void testConcurrentGetOperations(final JobID jobId, final BlobKey.BlobType blobType,
        final boolean cacheAccessesHAStore) throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    final BlobStore blobStoreServer = mock(BlobStore.class);
    final BlobStore blobStoreCache = mock(BlobStore.class);

    final int numberConcurrentGetOperations = 3;
    final List<CompletableFuture<File>> getOperations = new ArrayList<>(numberConcurrentGetOperations);

    final byte[] data = {1, 2, 3, 4, 99, 42};

    final ExecutorService executor = Executors.newFixedThreadPool(numberConcurrentGetOperations);

    try (
        final BlobServer server = new BlobServer(config, blobStoreServer);
        final BlobCacheService cache = new BlobCacheService(config,
            cacheAccessesHAStore ? blobStoreServer : blobStoreCache,
            new InetSocketAddress("localhost", server.getPort())
        )) {

        server.start();

        // upload data first
        final BlobKey blobKey = put(server, jobId, data, blobType);

        // now try accessing it concurrently (only HA mode will be able to retrieve it from HA store!)
        for (int i = 0; i < numberConcurrentGetOperations; i++) {
            CompletableFuture<File> getOperation = CompletableFuture
                .supplyAsync(
                    () -> {
                        try {
                            File file = get(cache, jobId, blobKey);
                            // check that we have read the right data
                            validateGetAndClose(new FileInputStream(file), data);
                            return file;
                        } catch (IOException e) {
                            throw new CompletionException(new FlinkException(
                                "Could not read blob for key " + blobKey + '.', e));
                        }
                    },
                    executor);

            getOperations.add(getOperation);
        }

        FutureUtils.ConjunctFuture<Collection<File>> filesFuture = FutureUtils.combineAll(getOperations);

        if (blobType == PERMANENT_BLOB) {
            // wait until all operations have completed and check that no exception was thrown
            filesFuture.get();
        } else {
            // wait for all futures to complete (do not abort on expected exceptions) and check
            // that at least one succeeded
            int completedSuccessfully = 0;
            for (CompletableFuture<File> op : getOperations) {
                try {
                    op.get();
                    ++completedSuccessfully;
                } catch (Throwable t) {
                    // transient BLOBs get deleted upon first access and only one request will be
                    // successful while all others will have an IOException caused by a FileNotFoundException
                    if (!(ExceptionUtils.getRootCause(t) instanceof FileNotFoundException)) {
                        // ignore
                        org.apache.flink.util.ExceptionUtils.rethrowIOException(t);
                    }
                }
            }

            // multiple clients may have accessed the BLOB successfully before it was
            // deleted, but always at least one:
            assertThat(completedSuccessfully, greaterThanOrEqualTo(1));
        }
    } finally {
        executor.shutdownNow();
    }
}
Example 10
Source File: BlobCacheCleanupTest.java From Flink-CEPplus with Apache License 2.0
/**
 * Tests that {@link TransientBlobCache} cleans up after a default TTL and keeps files which are
 * constantly accessed.
 */
private void testTransientBlobCleanup(@Nullable final JobID jobId)
        throws IOException, InterruptedException, ExecutionException {

    // 1s should be a safe-enough buffer to still check for existence after a BLOB's last access
    long cleanupInterval = 1L; // in seconds
    final int numberConcurrentGetOperations = 3;

    final List<CompletableFuture<Void>> getOperations = new ArrayList<>(numberConcurrentGetOperations);

    byte[] data = new byte[2000000];
    rnd.nextBytes(data);
    byte[] data2 = Arrays.copyOfRange(data, 10, 54);

    Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());
    config.setLong(BlobServerOptions.CLEANUP_INTERVAL, cleanupInterval);

    long cleanupLowerBound;

    try (
        BlobServer server = new BlobServer(config, new VoidBlobStore());
        final BlobCacheService cache = new BlobCacheService(
            config, new VoidBlobStore(), new InetSocketAddress("localhost", server.getPort())
        )) {
        ConcurrentMap<Tuple2<JobID, TransientBlobKey>, Long> transientBlobExpiryTimes =
            cache.getTransientBlobService().getBlobExpiryTimes();

        server.start();

        final TransientBlobKey key1 = (TransientBlobKey) put(server, jobId, data, TRANSIENT_BLOB);
        final TransientBlobKey key2 = (TransientBlobKey) put(server, jobId, data2, TRANSIENT_BLOB);

        // access key1, verify expiry times
        cleanupLowerBound = System.currentTimeMillis() + cleanupInterval;
        verifyContents(cache, jobId, key1, data);
        final Long key1ExpiryFirstAccess = transientBlobExpiryTimes.get(Tuple2.of(jobId, key1));
        assertThat(key1ExpiryFirstAccess, greaterThanOrEqualTo(cleanupLowerBound));
        assertNull(transientBlobExpiryTimes.get(Tuple2.of(jobId, key2)));

        // access key2, verify expiry times (delay at least 1ms to also verify key1 expiry is unchanged)
        Thread.sleep(1);
        cleanupLowerBound = System.currentTimeMillis() + cleanupInterval;
        verifyContents(cache, jobId, key2, data2);
        assertEquals(key1ExpiryFirstAccess, transientBlobExpiryTimes.get(Tuple2.of(jobId, key1)));
        assertThat(transientBlobExpiryTimes.get(Tuple2.of(jobId, key2)),
            greaterThanOrEqualTo(cleanupLowerBound));

        // files are cached now for the given TTL - remove from server so that they are not re-downloaded
        if (jobId != null) {
            server.cleanupJob(jobId, true);
        } else {
            server.deleteFromCache(key1);
            server.deleteFromCache(key2);
        }
        checkFileCountForJob(0, jobId, server);

        // cleanup task is run every cleanupInterval seconds
        // => unaccessed file should remain at most 2*cleanupInterval seconds
        // (use 3*cleanupInterval to check that we can still access it)
        final long finishTime = System.currentTimeMillis() + 3 * cleanupInterval;

        final ExecutorService executor = Executors.newFixedThreadPool(numberConcurrentGetOperations);
        for (int i = 0; i < numberConcurrentGetOperations; i++) {
            CompletableFuture<Void> getOperation = CompletableFuture
                .supplyAsync(
                    () -> {
                        try {
                            // constantly access key1 so this should not get deleted
                            while (System.currentTimeMillis() < finishTime) {
                                get(cache, jobId, key1);
                            }

                            return null;
                        } catch (IOException e) {
                            throw new CompletionException(new FlinkException(
                                "Could not retrieve blob.", e));
                        }
                    },
                    executor);

            getOperations.add(getOperation);
        }

        FutureUtils.ConjunctFuture<Collection<Void>> filesFuture = FutureUtils.combineAll(getOperations);
        filesFuture.get();

        verifyDeletedEventually(server, jobId, key1, key2);
    }
}