Java Code Examples for org.apache.flink.api.common.time.Deadline#fromNow()
The following examples show how to use org.apache.flink.api.common.time.Deadline#fromNow().
They are extracted from open source projects; the source file, project, and license are noted above each example.
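Before the individual examples, here is a minimal sketch of the idiom most of them share: create a Deadline up front, then poll a condition until it holds or the deadline runs out. Only the Deadline calls are the real API; the class name, conditionHolds(), and the poll interval are hypothetical scaffolding for illustration.

import org.apache.flink.api.common.time.Deadline;

import java.time.Duration;

public class DeadlinePollingSketch {

    private static int polls = 0;

    public static void main(String[] args) throws InterruptedException {
        // A single deadline bounds the whole wait, regardless of how often we poll.
        Deadline deadline = Deadline.fromNow(Duration.ofSeconds(10));

        boolean done = false;
        while (deadline.hasTimeLeft() && !done) {
            done = conditionHolds();
            if (!done) {
                Thread.sleep(100); // back off between polls
            }
        }

        if (!done) {
            throw new IllegalStateException("Condition did not hold within the deadline.");
        }
        System.out.println("Condition held with " + deadline.timeLeft() + " to spare.");
    }

    // Hypothetical stand-in for whatever the caller is waiting on;
    // here it simply succeeds on the third poll.
    private static boolean conditionHolds() {
        return ++polls >= 3;
    }
}

Several of the examples below stretch the same deadline across blocking calls as well, passing deadline.timeLeft() to methods such as CountDownLatch.await or Future.get so that every step shares one overall time budget.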
Example 1
Source File: LeaderChangeClusterComponentsTest.java From flink with Apache License 2.0
@Test
public void testTaskExecutorsReconnectToClusterWithLeadershipChange() throws Exception {
    final Deadline deadline = Deadline.fromNow(TESTING_TIMEOUT);

    waitUntilTaskExecutorsHaveConnected(NUM_TMS, deadline);

    highAvailabilityServices.revokeResourceManagerLeadership().get();
    highAvailabilityServices.grantResourceManagerLeadership();

    // wait for the ResourceManager to confirm the leadership
    assertThat(
        LeaderRetrievalUtils.retrieveLeaderConnectionInfo(
            highAvailabilityServices.getResourceManagerLeaderRetriever(),
            TESTING_TIMEOUT).getLeaderSessionId(),
        is(notNullValue()));

    waitUntilTaskExecutorsHaveConnected(NUM_TMS, deadline);
}
Example 2
Source File: SystemProcessingTimeService.java From flink with Apache License 2.0
@Override
public boolean shutdownServiceUninterruptible(long timeoutMs) {

    final Deadline deadline = Deadline.fromNow(Duration.ofMillis(timeoutMs));

    boolean shutdownComplete = false;
    boolean receivedInterrupt = false;

    do {
        try {
            // wait for a reasonable time for all pending timer threads to finish
            shutdownComplete = shutdownAndAwaitPending(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        } catch (InterruptedException iex) {
            receivedInterrupt = true;
            LOG.trace("Intercepted attempt to interrupt timer service shutdown.", iex);
        }
    } while (deadline.hasTimeLeft() && !shutdownComplete);

    if (receivedInterrupt) {
        Thread.currentThread().interrupt();
    }

    return shutdownComplete;
}
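A detail worth noting in this example: interrupts are caught and remembered inside the retry loop rather than aborting the shutdown, and the interrupt flag is restored with Thread.currentThread().interrupt() once the loop exits, so callers still observe the interruption. The deadline guarantees that the loop itself cannot spin forever.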
Example 3
Source File: LeaderChangeClusterComponentsTest.java From Flink-CEPplus with Apache License 2.0
@Test
public void testTaskExecutorsReconnectToClusterWithLeadershipChange() throws Exception {
    final Deadline deadline = Deadline.fromNow(TESTING_TIMEOUT);

    waitUntilTaskExecutorsHaveConnected(NUM_TMS, deadline);

    highAvailabilityServices.revokeResourceManagerLeadership().get();
    highAvailabilityServices.grantResourceManagerLeadership();

    // wait for the ResourceManager to confirm the leadership
    assertThat(
        LeaderRetrievalUtils.retrieveLeaderConnectionInfo(
            highAvailabilityServices.getResourceManagerLeaderRetriever(),
            Time.minutes(TESTING_TIMEOUT.toMinutes())).getLeaderSessionID(),
        is(notNullValue()));

    waitUntilTaskExecutorsHaveConnected(NUM_TMS, deadline);
}
Example 4
Source File: FlinkPortableClientEntryPoint.java From beam with Apache License 2.0
private void startJobService() throws Exception {
    jobInvokerFactory = new DetachedJobInvokerFactory();
    jobServer =
        FlinkJobServerDriver.fromConfig(
            FlinkJobServerDriver.parseArgs(
                new String[] {"--job-port=" + jobPort, "--artifact-port=0", "--expansion-port=0"}),
            jobInvokerFactory);
    jobServerThread = new Thread(jobServer);
    jobServerThread.start();

    Deadline deadline = Deadline.fromNow(JOB_SERVICE_STARTUP_TIMEOUT);
    while (jobServer.getJobServerUrl() == null && deadline.hasTimeLeft()) {
        try {
            Thread.sleep(500);
        } catch (InterruptedException interruptEx) {
            Thread.currentThread().interrupt();
            throw new RuntimeException(interruptEx);
        }
    }

    if (!jobServerThread.isAlive()) {
        throw new IllegalStateException("Job service thread is not alive");
    }

    if (jobServer.getJobServerUrl() == null) {
        String msg = String.format("Timeout of %s waiting for job service to start.", deadline);
        throw new TimeoutException(msg);
    }
}
Example 5
Source File: JobMasterStopWithSavepointIT.java From flink with Apache License 2.0
private void throwingExceptionOnCallbackWithRestartsHelper(final boolean terminate) throws Exception {
    final Deadline deadline = Deadline.fromNow(Duration.ofSeconds(10));
    final int numberOfCheckpointsToExpect = 10;

    numberOfRestarts = new CountDownLatch(2);
    checkpointsToWaitFor = new CountDownLatch(numberOfCheckpointsToExpect);

    setUpJobGraph(ExceptionOnCallbackStreamTask.class, RestartStrategies.fixedDelayRestart(15, Time.milliseconds(10)));
    assertThat(getJobStatus(), equalTo(JobStatus.RUNNING));
    try {
        stopWithSavepoint(terminate).get(50, TimeUnit.MILLISECONDS);
        fail();
    } catch (Exception e) {
        // expected
    }

    // wait until we restart at least 2 times and until we see at least 10 checkpoints.
    numberOfRestarts.await(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
    checkpointsToWaitFor.await(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);

    // verifying that we actually received a synchronous checkpoint
    assertTrue(syncSavepointId.get() > 0);

    assertThat(getJobStatus(), equalTo(JobStatus.RUNNING));

    // make sure that we saw the synchronous savepoint and
    // that after that we saw more checkpoints due to restarts.
    final long syncSavepoint = syncSavepointId.get();
    assertTrue(syncSavepoint > 0 && syncSavepoint < numberOfCheckpointsToExpect);

    clusterClient.cancel(jobGraph.getJobID());
    assertThat(getJobStatus(), either(equalTo(JobStatus.CANCELLING)).or(equalTo(JobStatus.CANCELED)));
}
Example 6
Source File: BackPressureITCase.java From flink with Apache License 2.0
private void assertJobVertexSubtasksAreBackPressured(final JobVertex jobVertex) throws Exception {
    try {
        final Deadline timeout = Deadline.fromNow(Duration.ofMillis(TASKS_BECOMING_BACK_PRESSURED_TIMEOUT_MS));
        waitUntilCondition(isJobVertexBackPressured(jobVertex), timeout, BACK_PRESSURE_REQUEST_INTERVAL_MS);
    } catch (final TimeoutException e) {
        final String errorMessage = String.format("Subtasks of job vertex %s were not back pressured within timeout", jobVertex);
        throw new AssertionError(errorMessage, e);
    }
}
Example 7
Source File: JobMasterStopWithSavepointIT.java From flink with Apache License 2.0
private void throwingExceptionOnCallbackWithRestartsHelper(final boolean terminate) throws Exception {
    final Deadline deadline = Deadline.fromNow(Duration.ofSeconds(10));
    final int numberOfCheckpointsToExpect = 10;

    numberOfRestarts = new CountDownLatch(2);
    checkpointsToWaitFor = new CountDownLatch(numberOfCheckpointsToExpect);

    setUpJobGraph(ExceptionOnCallbackStreamTask.class, RestartStrategies.fixedDelayRestart(15, Time.milliseconds(10)));
    assertThat(getJobStatus(), equalTo(JobStatus.RUNNING));
    try {
        stopWithSavepoint(terminate).get(50, TimeUnit.MILLISECONDS);
        fail();
    } catch (Exception e) {
        // expected
    }

    // wait until we restart at least 2 times and until we see at least 10 checkpoints.
    assertTrue(numberOfRestarts.await(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS));
    assertTrue(checkpointsToWaitFor.await(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS));

    // verifying that we actually received a synchronous checkpoint
    assertTrue(syncSavepointId.get() > 0);

    assertThat(getJobStatus(), equalTo(JobStatus.RUNNING));

    // make sure that we saw the synchronous savepoint and
    // that after that we saw more checkpoints due to restarts.
    final long syncSavepoint = syncSavepointId.get();
    assertTrue(syncSavepoint > 0 && syncSavepoint < numberOfCheckpointsToExpect);

    clusterClient.cancel(jobGraph.getJobID()).get();
    assertThat(getJobStatus(), either(equalTo(JobStatus.CANCELLING)).or(equalTo(JobStatus.CANCELED)));
}
Example 8
Source File: RecordEmitterTest.java From flink with Apache License 2.0
@Test
public void test() throws Exception {
    TestRecordEmitter emitter = new TestRecordEmitter();

    final TimestampedValue<String> one = new TimestampedValue<>("one", 1);
    final TimestampedValue<String> two = new TimestampedValue<>("two", 2);
    final TimestampedValue<String> five = new TimestampedValue<>("five", 5);
    final TimestampedValue<String> ten = new TimestampedValue<>("ten", 10);

    final RecordEmitter.RecordQueue<TimestampedValue> queue0 = emitter.getQueue(0);
    final RecordEmitter.RecordQueue<TimestampedValue> queue1 = emitter.getQueue(1);

    queue0.put(one);
    queue0.put(five);
    queue0.put(ten);
    queue1.put(two);

    ExecutorService executor = Executors.newSingleThreadExecutor();
    executor.submit(emitter);

    Deadline dl = Deadline.fromNow(Duration.ofSeconds(10));
    while (emitter.results.size() != 4 && dl.hasTimeLeft()) {
        Thread.sleep(10);
    }

    emitter.stop();
    executor.shutdownNow();

    Assert.assertThat(emitter.results, Matchers.contains(one, five, two, ten));
}
Example 9
Source File: LeaderChangeClusterComponentsTest.java From flink with Apache License 2.0
@Test
public void testTaskExecutorsReconnectToClusterWithLeadershipChange() throws Exception {
    final Deadline deadline = Deadline.fromNow(TESTING_TIMEOUT);

    waitUntilTaskExecutorsHaveConnected(NUM_TMS, deadline);

    highAvailabilityServices.revokeResourceManagerLeadership().get();
    highAvailabilityServices.grantResourceManagerLeadership();

    // wait for the ResourceManager to confirm the leadership
    assertThat(
        LeaderRetrievalUtils.retrieveLeaderConnectionInfo(
            highAvailabilityServices.getResourceManagerLeaderRetriever(),
            Time.minutes(TESTING_TIMEOUT.toMinutes())).getLeaderSessionID(),
        is(notNullValue()));

    waitUntilTaskExecutorsHaveConnected(NUM_TMS, deadline);
}
Example 10
Source File: SQLClientHBaseITCase.java From flink with Apache License 2.0
private void checkHBaseSinkResult() throws Exception {
    boolean success = false;
    final Deadline deadline = Deadline.fromNow(Duration.ofSeconds(120));
    while (deadline.hasTimeLeft()) {
        final List<String> lines = hbase.scanTable("sink");
        if (lines.size() == 6) {
            success = true;
            assertThat(
                lines.toArray(new String[0]),
                arrayContainingInAnyOrder(
                    CoreMatchers.allOf(containsString("row1"), containsString("family1"), containsString("f1c1"), containsString("value1")),
                    CoreMatchers.allOf(containsString("row1"), containsString("family2"), containsString("f2c1"), containsString("v2")),
                    CoreMatchers.allOf(containsString("row1"), containsString("family2"), containsString("f2c2"), containsString("v3")),
                    CoreMatchers.allOf(containsString("row2"), containsString("family1"), containsString("f1c1"), containsString("value4")),
                    CoreMatchers.allOf(containsString("row2"), containsString("family2"), containsString("f2c1"), containsString("v5")),
                    CoreMatchers.allOf(containsString("row2"), containsString("family2"), containsString("f2c2"), containsString("v6"))
                )
            );
            break;
        } else {
            LOG.info("The HBase sink table does not contain enough records, current {} records, left time: {}s",
                lines.size(), deadline.timeLeft().getSeconds());
        }
        Thread.sleep(500);
    }
    Assert.assertTrue("Did not get expected results before timeout.", success);
}
Example 11
Source File: JobMasterTest.java From flink with Apache License 2.0
private void waitUntilAllExecutionsAreScheduled(final JobMasterGateway jobMasterGateway) throws Exception {
    final Duration duration = Duration.ofMillis(testingTimeout.toMilliseconds());
    final Deadline deadline = Deadline.fromNow(duration);

    CommonTestUtils.waitUntilCondition(
        () -> getExecutions(jobMasterGateway).stream().allMatch(execution -> execution.getState() == ExecutionState.SCHEDULED),
        deadline);
}
Example 12
Source File: KinesisExampleTest.java From flink with Apache License 2.0
public static void main(String[] args) throws Exception {
    LOG.info("System properties: {}", System.getProperties());
    final ParameterTool parameterTool = ParameterTool.fromArgs(args);

    String inputStream = parameterTool.getRequired("input-stream");
    String outputStream = parameterTool.getRequired("output-stream");

    KinesisPubsubClient pubsub = new KinesisPubsubClient(parameterTool.getProperties());
    pubsub.createTopic(inputStream, 2, parameterTool.getProperties());
    pubsub.createTopic(outputStream, 2, parameterTool.getProperties());

    // The example job needs to start after streams are created and run in parallel to the validation logic.
    // The thread that runs the job won't terminate, we don't have a job reference to cancel it.
    // Once results are validated, the driver main thread will exit; job/cluster will be terminated from script.
    final AtomicReference<Exception> executeException = new AtomicReference<>();
    Thread executeThread =
        new Thread(
            () -> {
                try {
                    KinesisExample.main(args);
                    // this message won't appear in the log,
                    // job is terminated when shutting down cluster
                    LOG.info("executed program");
                } catch (Exception e) {
                    executeException.set(e);
                }
            });
    executeThread.start();

    // generate input
    String[] messages = {
        "elephant,5,45218",
        "squirrel,12,46213",
        "bee,3,51348",
        "squirrel,22,52444",
        "bee,10,53412",
        "elephant,9,54867"
    };
    for (String msg : messages) {
        pubsub.sendMessage(inputStream, msg);
    }
    LOG.info("generated records");

    Deadline deadline = Deadline.fromNow(Duration.ofSeconds(60));
    List<String> results = pubsub.readAllMessages(outputStream);
    while (deadline.hasTimeLeft() && executeException.get() == null && results.size() < messages.length) {
        LOG.info("waiting for results..");
        Thread.sleep(1000);
        results = pubsub.readAllMessages(outputStream);
    }

    if (executeException.get() != null) {
        throw executeException.get();
    }

    LOG.info("results: {}", results);
    Assert.assertEquals("Results received from '" + outputStream + "': " + results, messages.length, results.size());

    String[] expectedResults = {
        "elephant,5,45218",
        "elephant,14,54867",
        "squirrel,12,46213",
        "squirrel,34,52444",
        "bee,3,51348",
        "bee,13,53412"
    };

    for (String expectedResult : expectedResults) {
        Assert.assertTrue(expectedResult, results.contains(expectedResult));
    }

    // TODO: main thread needs to create job or CLI fails with:
    // "The program didn't contain a Flink job. Perhaps you forgot to call execute() on the execution environment."
    System.out.println("test finished");
    System.exit(0);
}
Example 13
Source File: NetworkBufferPool.java From flink with Apache License 2.0
@Override
public List<MemorySegment> requestMemorySegments() throws IOException {
    synchronized (factoryLock) {
        if (isDestroyed) {
            throw new IllegalStateException("Network buffer pool has already been destroyed.");
        }

        tryRedistributeBuffers();
    }

    final List<MemorySegment> segments = new ArrayList<>(numberOfSegmentsToRequest);
    try {
        final Deadline deadline = Deadline.fromNow(requestSegmentsTimeout);
        while (true) {
            if (isDestroyed) {
                throw new IllegalStateException("Buffer pool is destroyed.");
            }

            MemorySegment segment;
            synchronized (availableMemorySegments) {
                if ((segment = internalRequestMemorySegment()) == null) {
                    availableMemorySegments.wait(2000);
                }
            }

            if (segment != null) {
                segments.add(segment);
            }

            if (segments.size() >= numberOfSegmentsToRequest) {
                break;
            }

            if (!deadline.hasTimeLeft()) {
                throw new IOException(String.format("Timeout triggered when requesting exclusive buffers: %s, " +
                        " or you may increase the timeout which is %dms by setting the key '%s'.",
                    getConfigDescription(),
                    requestSegmentsTimeout.toMillis(),
                    NettyShuffleEnvironmentOptions.NETWORK_EXCLUSIVE_BUFFERS_REQUEST_TIMEOUT_MILLISECONDS.key()));
            }
        }
    } catch (Throwable e) {
        try {
            recycleMemorySegments(segments, numberOfSegmentsToRequest);
        } catch (IOException inner) {
            e.addSuppressed(inner);
        }
        ExceptionUtils.rethrowIOException(e);
    }

    return segments;
}
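Here the deadline bounds a sequence of waits rather than a single one: each pass blocks on availableMemorySegments for at most two seconds, so the loop wakes up regularly to re-check the destroyed flag and deadline.hasTimeLeft(), and on timeout or failure the partially acquired segments are recycled before the exception is rethrown.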
Example 14
Source File: KinesisExampleTest.java From flink with Apache License 2.0
public static void main(String[] args) throws Exception {
    LOG.info("System properties: {}", System.getProperties());
    final ParameterTool parameterTool = ParameterTool.fromArgs(args);

    String inputStream = parameterTool.getRequired("input-stream");
    String outputStream = parameterTool.getRequired("output-stream");

    PubsubClient pubsub = new KinesisPubsubClient(parameterTool.getProperties());
    pubsub.createTopic(inputStream, 2, parameterTool.getProperties());
    pubsub.createTopic(outputStream, 2, parameterTool.getProperties());

    // The example job needs to start after streams are created and run in parallel to the validation logic.
    // The thread that runs the job won't terminate, we don't have a job reference to cancel it.
    // Once results are validated, the driver main thread will exit; job/cluster will be terminated from script.
    final AtomicReference<Exception> executeException = new AtomicReference<>();
    Thread executeThread =
        new Thread(
            () -> {
                try {
                    KinesisExample.main(args);
                    // this message won't appear in the log,
                    // job is terminated when shutting down cluster
                    LOG.info("executed program");
                } catch (Exception e) {
                    executeException.set(e);
                }
            });
    executeThread.start();

    // generate input
    String[] messages = {
        "elephant,5,45218",
        "squirrel,12,46213",
        "bee,3,51348",
        "squirrel,22,52444",
        "bee,10,53412",
        "elephant,9,54867"
    };
    for (String msg : messages) {
        pubsub.sendMessage(inputStream, msg);
    }
    LOG.info("generated records");

    Deadline deadline = Deadline.fromNow(Duration.ofSeconds(60));
    List<String> results = pubsub.readAllMessages(outputStream);
    while (deadline.hasTimeLeft() && executeException.get() == null && results.size() < messages.length) {
        LOG.info("waiting for results..");
        Thread.sleep(1000);
        results = pubsub.readAllMessages(outputStream);
    }

    if (executeException.get() != null) {
        throw executeException.get();
    }

    LOG.info("results: {}", results);
    Assert.assertEquals("Results received from '" + outputStream + "': " + results, messages.length, results.size());

    String[] expectedResults = {
        "elephant,5,45218",
        "elephant,14,54867",
        "squirrel,12,46213",
        "squirrel,34,52444",
        "bee,3,51348",
        "bee,13,53412"
    };

    for (String expectedResult : expectedResults) {
        Assert.assertTrue(expectedResult, results.contains(expectedResult));
    }

    // TODO: main thread needs to create job or CLI fails with:
    // "The program didn't contain a Flink job. Perhaps you forgot to call execute() on the execution environment."
    System.out.println("test finished");
    System.exit(0);
}
Example 15
Source File: ZooKeeperLeaderElectionTest.java From flink with Apache License 2.0
/**
 * Tests repeatedly the reelection of still available LeaderContender. After a contender has
 * been elected as the leader, it is removed. This forces the ZooKeeperLeaderElectionService
 * to elect a new leader.
 */
@Test
public void testZooKeeperReelection() throws Exception {
    Deadline deadline = Deadline.fromNow(Duration.ofMinutes(5L));

    int num = 10;

    ZooKeeperLeaderElectionService[] leaderElectionService = new ZooKeeperLeaderElectionService[num];
    TestingContender[] contenders = new TestingContender[num];
    ZooKeeperLeaderRetrievalService leaderRetrievalService = null;

    TestingListener listener = new TestingListener();

    try {
        leaderRetrievalService = ZooKeeperUtils.createLeaderRetrievalService(client, configuration);

        LOG.debug("Start leader retrieval service for the TestingListener.");

        leaderRetrievalService.start(listener);

        for (int i = 0; i < num; i++) {
            leaderElectionService[i] = ZooKeeperUtils.createLeaderElectionService(client, configuration);
            contenders[i] = new TestingContender(createAddress(i), leaderElectionService[i]);

            LOG.debug("Start leader election service for contender #{}.", i);

            leaderElectionService[i].start(contenders[i]);
        }

        String pattern = TEST_URL + "_" + "(\\d+)";
        Pattern regex = Pattern.compile(pattern);

        int numberSeenLeaders = 0;

        while (deadline.hasTimeLeft() && numberSeenLeaders < num) {
            LOG.debug("Wait for new leader #{}.", numberSeenLeaders);
            String address = listener.waitForNewLeader(deadline.timeLeft().toMillis());

            Matcher m = regex.matcher(address);

            if (m.find()) {
                int index = Integer.parseInt(m.group(1));

                TestingContender contender = contenders[index];

                // check that the retrieval service has retrieved the correct leader
                if (address.equals(createAddress(index)) && listener.getLeaderSessionID().equals(contender.getLeaderSessionID())) {
                    // kill the election service of the leader
                    LOG.debug("Stop leader election service of contender #{}.", numberSeenLeaders);
                    leaderElectionService[index].stop();
                    leaderElectionService[index] = null;

                    numberSeenLeaders++;
                }
            } else {
                fail("Did not find the leader's index.");
            }
        }

        assertFalse("Did not complete the leader reelection in time.", deadline.isOverdue());
        assertEquals(num, numberSeenLeaders);

    } finally {
        if (leaderRetrievalService != null) {
            leaderRetrievalService.stop();
        }

        for (ZooKeeperLeaderElectionService electionService : leaderElectionService) {
            if (electionService != null) {
                electionService.stop();
            }
        }
    }
}
Example 16
Source File: SavepointReaderITTestBase.java From flink with Apache License 2.0
private String takeSavepoint(JobGraph jobGraph) throws Exception {
    SavepointSource.initializeForTest();

    ClusterClient<?> client = miniClusterResource.getClusterClient();
    client.setDetached(true);

    JobID jobId = jobGraph.getJobID();

    Deadline deadline = Deadline.fromNow(Duration.ofMinutes(5));

    String dirPath = getTempDirPath(new AbstractID().toHexString());

    try {
        client.setDetached(true);
        JobSubmissionResult result = client.submitJob(jobGraph, SavepointReaderITCase.class.getClassLoader());

        boolean finished = false;
        while (deadline.hasTimeLeft()) {
            if (SavepointSource.isFinished()) {
                finished = true;
                break;
            }

            try {
                Thread.sleep(2L);
            } catch (InterruptedException ignored) {
                Thread.currentThread().interrupt();
            }
        }

        if (!finished) {
            Assert.fail("Failed to initialize state within deadline");
        }

        CompletableFuture<String> path = client.triggerSavepoint(result.getJobID(), dirPath);
        return path.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
    } finally {
        client.cancel(jobId);
    }
}
Example 17
Source File: FlinkKinesisConsumerTest.java From flink with Apache License 2.0
private void awaitRecordCount(ConcurrentLinkedQueue<? extends Object> queue, int count) throws Exception {
    Deadline deadline = Deadline.fromNow(Duration.ofSeconds(10));
    while (deadline.hasTimeLeft() && queue.size() < count) {
        Thread.sleep(10);
    }
}
Example 18
Source File: SavepointMigrationTestBase.java From flink with Apache License 2.0
@SafeVarargs
protected final void executeAndSavepoint(
        StreamExecutionEnvironment env,
        String savepointPath,
        Tuple2<String, Integer>... expectedAccumulators) throws Exception {

    final Deadline deadLine = Deadline.fromNow(Duration.ofMinutes(5));

    ClusterClient<?> client = miniClusterResource.getClusterClient();

    // Submit the job
    JobGraph jobGraph = env.getStreamGraph().getJobGraph();

    JobSubmissionResult jobSubmissionResult = ClientUtils.submitJob(client, jobGraph);

    LOG.info("Submitted job {} and waiting...", jobSubmissionResult.getJobID());

    boolean done = false;
    while (deadLine.hasTimeLeft()) {
        Thread.sleep(100);
        Map<String, Object> accumulators = client.getAccumulators(jobSubmissionResult.getJobID()).get();

        boolean allDone = true;
        for (Tuple2<String, Integer> acc : expectedAccumulators) {
            Object accumOpt = accumulators.get(acc.f0);
            if (accumOpt == null) {
                allDone = false;
                break;
            }

            Integer numFinished = (Integer) accumOpt;
            if (!numFinished.equals(acc.f1)) {
                allDone = false;
                break;
            }
        }

        if (allDone) {
            done = true;
            break;
        }
    }

    if (!done) {
        fail("Did not see the expected accumulator results within time limit.");
    }

    LOG.info("Triggering savepoint.");

    CompletableFuture<String> savepointPathFuture = client.triggerSavepoint(jobSubmissionResult.getJobID(), null);

    String jobmanagerSavepointPath = savepointPathFuture.get(deadLine.timeLeft().toMillis(), TimeUnit.MILLISECONDS);

    File jobManagerSavepoint = new File(new URI(jobmanagerSavepointPath).getPath());
    // savepoints were changed to be directories in Flink 1.3
    if (jobManagerSavepoint.isDirectory()) {
        FileUtils.moveDirectory(jobManagerSavepoint, new File(savepointPath));
    } else {
        FileUtils.moveFile(jobManagerSavepoint, new File(savepointPath));
    }
}
Example 19
Source File: KinesisExampleTest.java From Flink-CEPplus with Apache License 2.0
public static void main(String[] args) throws Exception {
    LOG.info("System properties: {}", System.getProperties());
    final ParameterTool parameterTool = ParameterTool.fromArgs(args);

    String inputStream = parameterTool.getRequired("input-stream");
    String outputStream = parameterTool.getRequired("output-stream");

    PubsubClient pubsub = new KinesisPubsubClient(parameterTool.getProperties());
    pubsub.createTopic(inputStream, 2, parameterTool.getProperties());
    pubsub.createTopic(outputStream, 2, parameterTool.getProperties());

    // The example job needs to start after streams are created and run in parallel to the validation logic.
    // The thread that runs the job won't terminate, we don't have a job reference to cancel it.
    // Once results are validated, the driver main thread will exit; job/cluster will be terminated from script.
    final AtomicReference<Exception> executeException = new AtomicReference<>();
    Thread executeThread =
        new Thread(
            () -> {
                try {
                    KinesisExample.main(args);
                    // this message won't appear in the log,
                    // job is terminated when shutting down cluster
                    LOG.info("executed program");
                } catch (Exception e) {
                    executeException.set(e);
                }
            });
    executeThread.start();

    // generate input
    String[] messages = {
        "elephant,5,45218",
        "squirrel,12,46213",
        "bee,3,51348",
        "squirrel,22,52444",
        "bee,10,53412",
        "elephant,9,54867"
    };
    for (String msg : messages) {
        pubsub.sendMessage(inputStream, msg);
    }
    LOG.info("generated records");

    Deadline deadline = Deadline.fromNow(Duration.ofSeconds(60));
    List<String> results = pubsub.readAllMessages(outputStream);
    while (deadline.hasTimeLeft() && executeException.get() == null && results.size() < messages.length) {
        LOG.info("waiting for results..");
        Thread.sleep(1000);
        results = pubsub.readAllMessages(outputStream);
    }

    if (executeException.get() != null) {
        throw executeException.get();
    }

    LOG.info("results: {}", results);
    Assert.assertEquals("Results received from '" + outputStream + "': " + results, messages.length, results.size());

    String[] expectedResults = {
        "elephant,5,45218",
        "elephant,14,54867",
        "squirrel,12,46213",
        "squirrel,34,52444",
        "bee,3,51348",
        "bee,13,53412"
    };

    for (String expectedResult : expectedResults) {
        Assert.assertTrue(expectedResult, results.contains(expectedResult));
    }

    // TODO: main thread needs to create job or CLI fails with:
    // "The program didn't contain a Flink job. Perhaps you forgot to call execute() on the execution environment."
    System.out.println("test finished");
    System.exit(0);
}
Example 20
Source File: SavepointMigrationTestBase.java From flink with Apache License 2.0
@SafeVarargs
protected final void restoreAndExecute(
        StreamExecutionEnvironment env,
        String savepointPath,
        Tuple2<String, Integer>... expectedAccumulators) throws Exception {

    final Deadline deadLine = Deadline.fromNow(Duration.ofMinutes(5));

    ClusterClient<?> client = miniClusterResource.getClusterClient();

    // Submit the job
    JobGraph jobGraph = env.getStreamGraph().getJobGraph();
    jobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(savepointPath));

    JobSubmissionResult jobSubmissionResult = ClientUtils.submitJob(client, jobGraph);

    boolean done = false;
    while (deadLine.hasTimeLeft()) {

        // try and get a job result, this will fail if the job already failed. Use this
        // to get out of this loop
        JobID jobId = jobSubmissionResult.getJobID();

        try {
            CompletableFuture<JobStatus> jobStatusFuture = client.getJobStatus(jobSubmissionResult.getJobID());

            JobStatus jobStatus = jobStatusFuture.get(5, TimeUnit.SECONDS);

            assertNotEquals(JobStatus.FAILED, jobStatus);
        } catch (Exception e) {
            fail("Could not connect to job: " + e);
        }

        Thread.sleep(100);
        Map<String, Object> accumulators = client.getAccumulators(jobId).get();

        boolean allDone = true;
        for (Tuple2<String, Integer> acc : expectedAccumulators) {
            Object numFinished = accumulators.get(acc.f0);
            if (numFinished == null) {
                allDone = false;
                break;
            }
            if (!numFinished.equals(acc.f1)) {
                allDone = false;
                break;
            }
        }

        if (allDone) {
            done = true;
            break;
        }
    }

    if (!done) {
        fail("Did not see the expected accumulator results within time limit.");
    }
}