org.apache.flink.api.common.time.Deadline Java Examples
The following examples show how to use
org.apache.flink.api.common.time.Deadline.
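Before diving into the examples, here is a minimal, self-contained sketch of the core Deadline calls that recur throughout this page: Deadline.fromNow(...) and Deadline.now().plus(...) to create a deadline, and hasTimeLeft(), timeLeft(), and isOverdue() to query it. The pollCondition() helper is hypothetical and stands in for whatever condition your code waits on.

import java.time.Duration;

import org.apache.flink.api.common.time.Deadline;

public class DeadlineSketch {

    public static void main(String[] args) throws InterruptedException {
        // A deadline five seconds from now; Deadline.now().plus(Duration.ofSeconds(5)) is equivalent.
        final Deadline deadline = Deadline.fromNow(Duration.ofSeconds(5));

        boolean done = false;
        while (!done && deadline.hasTimeLeft()) {
            done = pollCondition(); // hypothetical stand-in for an application-specific check

            // sleep briefly, but never longer than the remaining time (and never a negative duration)
            Thread.sleep(Math.max(1L, Math.min(50L, deadline.timeLeft().toMillis())));
        }

        if (!done && deadline.isOverdue()) {
            throw new IllegalStateException("Condition not met before the deadline");
        }
    }

    private static boolean pollCondition() {
        // Hypothetical: replace with a real check (job status, file closed, slots available, etc.).
        return System.nanoTime() % 1024 == 0;
    }
}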
Example #1
Source File: SystemProcessingTimeService.java From Flink-CEPplus with Apache License 2.0

@Override
public boolean shutdownServiceUninterruptible(long timeoutMs) {

    final Deadline deadline = Deadline.fromNow(Duration.ofMillis(timeoutMs));

    boolean shutdownComplete = false;
    boolean receivedInterrupt = false;

    do {
        try {
            // wait for a reasonable time for all pending timer threads to finish
            shutdownComplete = shutdownAndAwaitPending(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        } catch (InterruptedException iex) {
            receivedInterrupt = true;
            LOG.trace("Intercepted attempt to interrupt timer service shutdown.", iex);
        }
    } while (deadline.hasTimeLeft() && !shutdownComplete);

    if (receivedInterrupt) {
        Thread.currentThread().interrupt();
    }

    return shutdownComplete;
}
Example #2
Source File: YARNITCase.java From flink with Apache License 2.0

private void waitApplicationFinishedElseKillIt(
        ApplicationId applicationId,
        Duration timeout,
        YarnClusterDescriptor yarnClusterDescriptor) throws Exception {
    Deadline deadline = Deadline.now().plus(timeout);
    YarnApplicationState state = getYarnClient().getApplicationReport(applicationId).getYarnApplicationState();

    while (state != YarnApplicationState.FINISHED) {
        if (state == YarnApplicationState.FAILED || state == YarnApplicationState.KILLED) {
            Assert.fail("Application became FAILED or KILLED while expecting FINISHED");
        }

        if (deadline.isOverdue()) {
            yarnClusterDescriptor.killCluster(applicationId);
            Assert.fail("Application didn't finish before timeout");
        }

        sleep(sleepIntervalInMS);
        state = getYarnClient().getApplicationReport(applicationId).getYarnApplicationState();
    }
}
Example #3
Source File: LeaderChangeClusterComponentsTest.java From flink with Apache License 2.0

@Test
public void testTaskExecutorsReconnectToClusterWithLeadershipChange() throws Exception {
    final Deadline deadline = Deadline.fromNow(TESTING_TIMEOUT);

    waitUntilTaskExecutorsHaveConnected(NUM_TMS, deadline);
    highAvailabilityServices.revokeResourceManagerLeadership().get();
    highAvailabilityServices.grantResourceManagerLeadership();

    // wait for the ResourceManager to confirm the leadership
    assertThat(
        LeaderRetrievalUtils.retrieveLeaderConnectionInfo(
            highAvailabilityServices.getResourceManagerLeaderRetriever(),
            TESTING_TIMEOUT).getLeaderSessionId(),
        is(notNullValue()));

    waitUntilTaskExecutorsHaveConnected(NUM_TMS, deadline);
}
Example #4
Source File: FutureUtils.java From flink with Apache License 2.0

/**
 * Retry the given operation with the given delay in between successful completions where the
 * result does not match a given predicate.
 *
 * @param operation to retry
 * @param retryDelay delay between retries
 * @param deadline A deadline that specifies at what point we should stop retrying
 * @param acceptancePredicate Predicate to test whether the result is acceptable
 * @param scheduledExecutor executor to be used for the retry operation
 * @param <T> type of the result
 * @return Future which retries the given operation a given amount of times and delays the retry
 *   in case the predicate isn't matched
 */
public static <T> CompletableFuture<T> retrySuccessfulWithDelay(
        final Supplier<CompletableFuture<T>> operation,
        final Time retryDelay,
        final Deadline deadline,
        final Predicate<T> acceptancePredicate,
        final ScheduledExecutor scheduledExecutor) {
    final CompletableFuture<T> resultFuture = new CompletableFuture<>();

    retrySuccessfulOperationWithDelay(
        resultFuture,
        operation,
        retryDelay,
        deadline,
        acceptancePredicate,
        scheduledExecutor);

    return resultFuture;
}
Example #5
Source File: AbstractOperatorRestoreTestBase.java From flink with Apache License 2.0

private void restoreJob(ClusterClient<?> clusterClient, Deadline deadline, String savepointPath) throws Exception {
    JobGraph jobToRestore = createJobGraph(ExecutionMode.RESTORE);
    jobToRestore.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(savepointPath, allowNonRestoredState));

    assertNotNull("Job doesn't have a JobID.", jobToRestore.getJobID());

    ClientUtils.submitJob(clusterClient, jobToRestore);

    CompletableFuture<JobStatus> jobStatusFuture = FutureUtils.retrySuccessfulWithDelay(
        () -> clusterClient.getJobStatus(jobToRestore.getJobID()),
        Time.milliseconds(50),
        deadline,
        (jobStatus) -> jobStatus == JobStatus.FINISHED,
        TestingUtils.defaultScheduledExecutor());
    assertEquals(
        JobStatus.FINISHED,
        jobStatusFuture.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS));
}
Example #6
Source File: AbstractOperatorRestoreTestBase.java From flink with Apache License 2.0

private void restoreJob(ClassLoader classLoader, ClusterClient<?> clusterClient, Deadline deadline, String savepointPath) throws Exception {
    JobGraph jobToRestore = createJobGraph(ExecutionMode.RESTORE);
    jobToRestore.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(savepointPath, allowNonRestoredState));

    assertNotNull("Job doesn't have a JobID.", jobToRestore.getJobID());

    clusterClient.submitJob(jobToRestore, classLoader);

    CompletableFuture<JobStatus> jobStatusFuture = FutureUtils.retrySuccessfulWithDelay(
        () -> clusterClient.getJobStatus(jobToRestore.getJobID()),
        Time.milliseconds(50),
        deadline,
        (jobStatus) -> jobStatus == JobStatus.FINISHED,
        TestingUtils.defaultScheduledExecutor());
    assertEquals(
        JobStatus.FINISHED,
        jobStatusFuture.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS));
}
Example #7
Source File: HadoopRecoverableFsDataOutputStream.java From Flink-CEPplus with Apache License 2.0

/**
 * Called when resuming execution after a failure and waits until the lease
 * of the file we are resuming is free.
 *
 * <p>The lease of the file we are resuming writing/committing to may still
 * belong to the process that failed previously and whose state we are
 * recovering.
 *
 * @param path The path to the file we want to resume writing to.
 */
private static boolean waitUntilLeaseIsRevoked(final FileSystem fs, final Path path) throws IOException {
    Preconditions.checkState(fs instanceof DistributedFileSystem);

    final DistributedFileSystem dfs = (DistributedFileSystem) fs;
    dfs.recoverLease(path);

    final Deadline deadline = Deadline.now().plus(Duration.ofMillis(LEASE_TIMEOUT));

    final StopWatch sw = new StopWatch();
    sw.start();

    boolean isClosed = dfs.isFileClosed(path);
    while (!isClosed && deadline.hasTimeLeft()) {
        try {
            Thread.sleep(500L);
        } catch (InterruptedException e1) {
            throw new IOException("Recovering the lease failed: ", e1);
        }
        isClosed = dfs.isFileClosed(path);
    }
    return isClosed;
}
Example #8
Source File: SystemProcessingTimeService.java From flink with Apache License 2.0

@Override
public boolean shutdownServiceUninterruptible(long timeoutMs) {

    final Deadline deadline = Deadline.fromNow(Duration.ofMillis(timeoutMs));

    boolean shutdownComplete = false;
    boolean receivedInterrupt = false;

    do {
        try {
            // wait for a reasonable time for all pending timer threads to finish
            shutdownComplete = shutdownAndAwaitPending(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        } catch (InterruptedException iex) {
            receivedInterrupt = true;
            LOG.trace("Intercepted attempt to interrupt timer service shutdown.", iex);
        }
    } while (deadline.hasTimeLeft() && !shutdownComplete);

    if (receivedInterrupt) {
        Thread.currentThread().interrupt();
    }

    return shutdownComplete;
}
Example #9
Source File: ExecutionGraphTestUtils.java From flink with Apache License 2.0

/**
 * Waits until all executions fulfill the given predicate.
 *
 * @param executionGraph for which to check the executions
 * @param executionPredicate predicate which is to be fulfilled
 * @param maxWaitMillis timeout for the wait operation
 * @throws TimeoutException if the executions did not reach the target state in time
 */
public static void waitForAllExecutionsPredicate(
        ExecutionGraph executionGraph,
        Predicate<AccessExecution> executionPredicate,
        long maxWaitMillis) throws TimeoutException {
    final Predicate<AccessExecutionGraph> allExecutionsPredicate = allExecutionsPredicate(executionPredicate);
    final Deadline deadline = Deadline.fromNow(Duration.ofMillis(maxWaitMillis));
    boolean predicateResult;

    do {
        predicateResult = allExecutionsPredicate.test(executionGraph);

        if (!predicateResult) {
            try {
                Thread.sleep(2L);
            } catch (InterruptedException ignored) {
                Thread.currentThread().interrupt();
            }
        }
    } while (!predicateResult && deadline.hasTimeLeft());

    if (!predicateResult) {
        throw new TimeoutException("Not all executions fulfilled the predicate in time.");
    }
}
Example #10
Source File: AutoClosableProcess.java From flink with Apache License 2.0

public void runBlockingWithRetry(final int maxRetries, final Duration attemptTimeout, final Duration globalTimeout) throws IOException {
    int retries = 0;
    final Deadline globalDeadline = Deadline.fromNow(globalTimeout);

    while (true) {
        try {
            runBlocking(attemptTimeout);
            break;
        } catch (Exception e) {
            if (++retries > maxRetries || !globalDeadline.hasTimeLeft()) {
                String errMsg = String.format(
                    "Process (%s) exceeded timeout (%s) or number of retries (%s).",
                    Arrays.toString(commands), globalTimeout.toMillis(), maxRetries);
                throw new IOException(errMsg, e);
            }
        }
    }
}
Example #11
Source File: YarnTestBase.java From flink with Apache License 2.0

protected void waitApplicationFinishedElseKillIt(
        ApplicationId applicationId,
        Duration timeout,
        YarnClusterDescriptor yarnClusterDescriptor,
        int sleepIntervalInMS) throws Exception {
    Deadline deadline = Deadline.now().plus(timeout);
    YarnApplicationState state = getYarnClient().getApplicationReport(applicationId).getYarnApplicationState();

    while (state != YarnApplicationState.FINISHED) {
        if (state == YarnApplicationState.FAILED || state == YarnApplicationState.KILLED) {
            Assert.fail("Application became FAILED or KILLED while expecting FINISHED");
        }

        if (deadline.isOverdue()) {
            yarnClusterDescriptor.killCluster(applicationId);
            Assert.fail("Application didn't finish before timeout");
        }

        sleep(sleepIntervalInMS);
        state = getYarnClient().getApplicationReport(applicationId).getYarnApplicationState();
    }
}
Example #12
Source File: AbstractQueryableStateTestBase.java From flink with Apache License 2.0

private static <K, S extends State, V> CompletableFuture<S> getKvState(
        final Deadline deadline,
        final QueryableStateClient client,
        final JobID jobId,
        final String queryName,
        final K key,
        final TypeInformation<K> keyTypeInfo,
        final StateDescriptor<S, V> stateDescriptor,
        final boolean failForUnknownKeyOrNamespace,
        final ScheduledExecutor executor) {

    final CompletableFuture<S> resultFuture = new CompletableFuture<>();
    getKvStateIgnoringCertainExceptions(
        deadline, resultFuture, client, jobId, queryName, key,
        keyTypeInfo, stateDescriptor, failForUnknownKeyOrNamespace, executor);
    return resultFuture;
}
Example #13
Source File: AbstractQueryableStateTestBase.java From flink with Apache License 2.0

/**
 * Tests simple value state queryable state instance. Each source emits
 * (subtaskIndex, 0)..(subtaskIndex, numElements) tuples, which are then
 * queried. The test succeeds after each subtask index is queried with
 * value numElements (the latest element updated the state).
 */
@Test
public void testValueState() throws Exception {
    final Deadline deadline = Deadline.now().plus(TEST_TIMEOUT);
    final long numElements = 1024L;

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStateBackend(stateBackend);
    env.setParallelism(maxParallelism);
    // Very important, because cluster is shared between tests and we
    // don't explicitly check that all slots are available before
    // submitting.
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000L));

    DataStream<Tuple2<Integer, Long>> source = env.addSource(new TestAscendingValueSource(numElements));

    // Value state
    ValueStateDescriptor<Tuple2<Integer, Long>> valueState = new ValueStateDescriptor<>("any", source.getType());

    source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
        private static final long serialVersionUID = 7662520075515707428L;

        @Override
        public Integer getKey(Tuple2<Integer, Long> value) {
            return value.f0;
        }
    }).asQueryableState("hakuna", valueState);

    try (AutoCancellableJob autoCancellableJob = new AutoCancellableJob(deadline, clusterClient, env)) {

        final JobID jobId = autoCancellableJob.getJobId();
        final JobGraph jobGraph = autoCancellableJob.getJobGraph();

        ClientUtils.submitJob(clusterClient, jobGraph);

        executeValueQuery(deadline, client, jobId, "hakuna", valueState, numElements);
    }
}
Example #14
Source File: YARNHighAvailabilityITCase.java From flink with Apache License 2.0

private static void waitUntilJobIsRunning(RestClusterClient<ApplicationId> restClusterClient, JobID jobId) throws Exception {
    CommonTestUtils.waitUntilCondition(
        () -> {
            final JobDetailsInfo jobDetails = restClusterClient.getJobDetails(jobId).get();
            return jobDetails.getJobVertexInfos()
                .stream()
                .map(toExecutionState())
                .allMatch(isRunning());
        },
        Deadline.fromNow(TIMEOUT));
}
Example #15
Source File: YARNHighAvailabilityITCase.java From flink with Apache License 2.0

private void waitForApplicationAttempt(final ApplicationId applicationId, final int attemptId) throws Exception {
    final YarnClient yarnClient = getYarnClient();
    checkState(yarnClient != null, "yarnClient must be initialized");

    CommonTestUtils.waitUntilCondition(() -> {
        final ApplicationReport applicationReport = yarnClient.getApplicationReport(applicationId);
        return applicationReport.getCurrentApplicationAttemptId().getAttemptId() >= attemptId;
    }, Deadline.fromNow(TIMEOUT));
}
Example #16
Source File: YarnTestBase.java From flink with Apache License 2.0

@Override
public void close() throws Exception {
    Deadline deadline = Deadline.now().plus(Duration.ofSeconds(10));

    boolean isAnyJobRunning = yarnClient.getApplications().stream()
        .anyMatch(YarnTestBase::isApplicationRunning);

    while (deadline.hasTimeLeft() && isAnyJobRunning) {
        try {
            Thread.sleep(500);
        } catch (InterruptedException e) {
            Assert.fail("Should not happen");
        }
        isAnyJobRunning = yarnClient.getApplications().stream()
            .anyMatch(YarnTestBase::isApplicationRunning);
    }

    if (isAnyJobRunning) {
        final List<String> runningApps = yarnClient.getApplications().stream()
            .filter(YarnTestBase::isApplicationRunning)
            .map(app -> "App " + app.getApplicationId() + " is in state " + app.getYarnApplicationState() + '.')
            .collect(Collectors.toList());
        if (!runningApps.isEmpty()) {
            Assert.fail("There is at least one application on the cluster that is not finished." + runningApps);
        }
    }
}
Example #17
Source File: AbstractQueryableStateTestBase.java From flink with Apache License 2.0

private static <K, S extends State, V> void getKvStateIgnoringCertainExceptions(
        final Deadline deadline,
        final CompletableFuture<S> resultFuture,
        final QueryableStateClient client,
        final JobID jobId,
        final String queryName,
        final K key,
        final TypeInformation<K> keyTypeInfo,
        final StateDescriptor<S, V> stateDescriptor,
        final boolean failForUnknownKeyOrNamespace,
        final ScheduledExecutor executor) {

    if (!resultFuture.isDone()) {
        CompletableFuture<S> expected = client.getKvState(jobId, queryName, key, keyTypeInfo, stateDescriptor);
        expected.whenCompleteAsync((result, throwable) -> {
            if (throwable != null) {
                if (
                    throwable.getCause() instanceof CancellationException ||
                    throwable.getCause() instanceof AssertionError ||
                    (failForUnknownKeyOrNamespace && throwable.getCause() instanceof UnknownKeyOrNamespaceException)
                ) {
                    resultFuture.completeExceptionally(throwable.getCause());
                } else if (deadline.hasTimeLeft()) {
                    getKvStateIgnoringCertainExceptions(
                        deadline, resultFuture, client, jobId, queryName, key,
                        keyTypeInfo, stateDescriptor, failForUnknownKeyOrNamespace, executor);
                }
            } else {
                resultFuture.complete(result);
            }
        }, executor);

        resultFuture.whenComplete((result, throwable) -> expected.cancel(false));
    }
}
Example #18
Source File: AbstractOperatorRestoreTestBase.java From flink with Apache License 2.0

@Test
public void testMigrationAndRestore() throws Throwable {
    ClusterClient<?> clusterClient = cluster.getClusterClient();
    final Deadline deadline = Deadline.now().plus(TEST_TIMEOUT);

    // submit job with old version savepoint and create a migrated savepoint in the new version
    String savepointPath = migrateJob(clusterClient, deadline);

    // restore from migrated new version savepoint
    restoreJob(clusterClient, deadline, savepointPath);
}
Example #19
Source File: TaskExecutorITCase.java From flink with Apache License 2.0

private CompletableFuture<JobResult> submitJobAndWaitUntilRunning(JobGraph jobGraph) throws Exception {
    miniCluster.submitJob(jobGraph).get();

    final CompletableFuture<JobResult> jobResultFuture = miniCluster.requestJobResult(jobGraph.getJobID());

    assertThat(jobResultFuture.isDone(), is(false));

    CommonTestUtils.waitUntilCondition(
        jobIsRunning(() -> miniCluster.getExecutionGraph(jobGraph.getJobID())),
        Deadline.fromNow(TESTING_TIMEOUT),
        50L);

    return jobResultFuture;
}
Example #20
Source File: YARNHighAvailabilityITCase.java From flink with Apache License 2.0

private static void waitUntilJobIsRestarted(
        final RestClusterClient<ApplicationId> restClusterClient,
        final JobID jobId,
        final int expectedFullRestarts) throws Exception {
    CommonTestUtils.waitUntilCondition(
        () -> getJobFullRestarts(restClusterClient, jobId) >= expectedFullRestarts,
        Deadline.fromNow(TIMEOUT));
}
Example #21
Source File: YARNHighAvailabilityITCase.java From Flink-CEPplus with Apache License 2.0

private static void waitUntilJobIsRunning(RestClusterClient<ApplicationId> restClusterClient, JobID jobId) throws Exception {
    CommonTestUtils.waitUntilCondition(
        () -> {
            final JobDetailsInfo jobDetails = restClusterClient.getJobDetails(jobId).get();
            return jobDetails.getJobVertexInfos()
                .stream()
                .map(toExecutionState())
                .allMatch(isRunning());
        },
        Deadline.fromNow(TIMEOUT));
}
Example #22
Source File: AbstractQueryableStateTestBase.java From flink with Apache License 2.0

/**
 * Tests simple value state queryable state instance. Each source emits
 * (subtaskIndex, 0)..(subtaskIndex, numElements) tuples, which are then
 * queried. The test succeeds after each subtask index is queried with
 * value numElements (the latest element updated the state).
 */
@Test
public void testValueState() throws Exception {
    final Deadline deadline = Deadline.now().plus(TEST_TIMEOUT);
    final long numElements = 1024L;

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStateBackend(stateBackend);
    env.setParallelism(maxParallelism);
    // Very important, because cluster is shared between tests and we
    // don't explicitly check that all slots are available before
    // submitting.
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000L));

    DataStream<Tuple2<Integer, Long>> source = env.addSource(new TestAscendingValueSource(numElements));

    // Value state
    ValueStateDescriptor<Tuple2<Integer, Long>> valueState = new ValueStateDescriptor<>("any", source.getType());

    source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
        private static final long serialVersionUID = 7662520075515707428L;

        @Override
        public Integer getKey(Tuple2<Integer, Long> value) {
            return value.f0;
        }
    }).asQueryableState("hakuna", valueState);

    try (AutoCancellableJob autoCancellableJob = new AutoCancellableJob(deadline, clusterClient, env)) {

        final JobID jobId = autoCancellableJob.getJobId();
        final JobGraph jobGraph = autoCancellableJob.getJobGraph();

        clusterClient.setDetached(true);
        clusterClient.submitJob(jobGraph, AbstractQueryableStateTestBase.class.getClassLoader());

        executeValueQuery(deadline, client, jobId, "hakuna", valueState, numElements);
    }
}
Example #23
Source File: ProcessFailureCancelingITCase.java From flink with Apache License 2.0

private Collection<JobID> waitForRunningJobs(ClusterClient<?> clusterClient, Time timeout) throws ExecutionException, InterruptedException {
    return FutureUtils.retrySuccessfulWithDelay(
            CheckedSupplier.unchecked(clusterClient::listJobs),
            Time.milliseconds(50L),
            Deadline.fromNow(Duration.ofMillis(timeout.toMilliseconds())),
            jobs -> !jobs.isEmpty(),
            TestingUtils.defaultScheduledExecutor())
        .get()
        .stream()
        .map(JobStatusMessage::getJobId)
        .collect(Collectors.toList());
}
Example #24
Source File: ProcessFailureCancelingITCase.java From flink with Apache License 2.0

private void waitUntilAllSlotsAreUsed(DispatcherGateway dispatcherGateway, Time timeout) throws ExecutionException, InterruptedException {
    FutureUtils.retrySuccessfulWithDelay(
        () -> dispatcherGateway.requestClusterOverview(timeout),
        Time.milliseconds(50L),
        Deadline.fromNow(Duration.ofMillis(timeout.toMilliseconds())),
        clusterOverview -> clusterOverview.getNumTaskManagersConnected() >= 1 &&
            clusterOverview.getNumSlotsAvailable() == 0 &&
            clusterOverview.getNumSlotsTotal() == 2,
        TestingUtils.defaultScheduledExecutor())
        .get();
}
Example #25
Source File: BackPressureITCase.java From flink with Apache License 2.0

private void assertJobVertexSubtasksAreBackPressured(final JobVertex jobVertex) throws Exception {
    try {
        final Deadline timeout = Deadline.fromNow(Duration.ofMillis(TASKS_BECOMING_BACK_PRESSURED_TIMEOUT_MS));
        waitUntilCondition(
            isJobVertexBackPressured(jobVertex),
            timeout,
            BACK_PRESSURE_REQUEST_INTERVAL_MS);
    } catch (final TimeoutException e) {
        final String errorMessage = String.format("Subtasks of job vertex %s were not back pressured within timeout", jobVertex);
        throw new AssertionError(errorMessage, e);
    }
}
Example #26
Source File: SavepointITCase.java From flink with Apache License 2.0

private void restoreJobAndVerifyState(String savepointPath, MiniClusterResourceFactory clusterFactory, int parallelism) throws Exception {
    final JobGraph jobGraph = createJobGraph(parallelism, 0, 1000);
    jobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(savepointPath));
    final JobID jobId = jobGraph.getJobID();
    StatefulCounter.resetForTest(parallelism);

    MiniClusterWithClientResource cluster = clusterFactory.get();
    cluster.before();
    ClusterClient<?> client = cluster.getClusterClient();

    try {
        client.setDetached(true);
        client.submitJob(jobGraph, SavepointITCase.class.getClassLoader());

        // Await state is restored
        StatefulCounter.getRestoreLatch().await();

        // Await some progress after restore
        StatefulCounter.getProgressLatch().await();

        client.cancel(jobId);

        FutureUtils.retrySuccessfulWithDelay(
            () -> client.getJobStatus(jobId),
            Time.milliseconds(50),
            Deadline.now().plus(Duration.ofSeconds(30)),
            status -> status == JobStatus.CANCELED,
            TestingUtils.defaultScheduledExecutor()
        );

        client.disposeSavepoint(savepointPath)
            .get();

        assertFalse("Savepoint not properly cleaned up.", new File(savepointPath).exists());
    } finally {
        cluster.after();
        StatefulCounter.resetForTest(parallelism);
    }
}
Example #27
Source File: AbstractOperatorRestoreTestBase.java From flink with Apache License 2.0

@Test
public void testMigrationAndRestore() throws Throwable {
    ClassLoader classLoader = this.getClass().getClassLoader();
    ClusterClient<?> clusterClient = cluster.getClusterClient();
    clusterClient.setDetached(true);
    final Deadline deadline = Deadline.now().plus(TEST_TIMEOUT);

    // submit job with old version savepoint and create a migrated savepoint in the new version
    String savepointPath = migrateJob(classLoader, clusterClient, deadline);

    // restore from migrated new version savepoint
    restoreJob(classLoader, clusterClient, deadline, savepointPath);
}
Example #28
Source File: YARNHighAvailabilityITCase.java From Flink-CEPplus with Apache License 2.0

private static void waitUntilJobIsRestarted(
        final RestClusterClient<ApplicationId> restClusterClient,
        final JobID jobId,
        final int expectedFullRestarts) throws Exception {
    CommonTestUtils.waitUntilCondition(
        () -> getJobFullRestarts(restClusterClient, jobId) >= expectedFullRestarts,
        Deadline.fromNow(TIMEOUT));
}
Example #29
Source File: AbstractOperatorRestoreTestBase.java From Flink-CEPplus with Apache License 2.0

@Test
public void testMigrationAndRestore() throws Throwable {
    ClassLoader classLoader = this.getClass().getClassLoader();
    ClusterClient<?> clusterClient = MINI_CLUSTER_RESOURCE.getClusterClient();
    clusterClient.setDetached(true);
    final Deadline deadline = Deadline.now().plus(TEST_TIMEOUT);

    // submit job with old version savepoint and create a migrated savepoint in the new version
    String savepointPath = migrateJob(classLoader, clusterClient, deadline);

    // restore from migrated new version savepoint
    restoreJob(classLoader, clusterClient, deadline, savepointPath);
}