org.apache.flink.client.program.MiniClusterClient Java Examples
The following examples show how to use
org.apache.flink.client.program.MiniClusterClient.
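Most of the examples below follow the same pattern: build a Configuration, start a MiniCluster from a MiniClusterConfiguration, wrap it in a MiniClusterClient, submit a JobGraph, and wait for the JobResult. The following is a minimal sketch of that pattern, distilled from the examples; the imports, the runJob method name, and the single-TaskManager sizing are illustrative assumptions rather than part of any one example. Note that submitJob returns a CompletableFuture of JobID in newer Flink versions but a CompletableFuture of JobSubmissionResult in older ones (compare Example #1 with Example #2).

import org.apache.flink.api.common.JobID;
import org.apache.flink.client.program.MiniClusterClient;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.runtime.jobmaster.JobResult;
import org.apache.flink.runtime.minicluster.MiniCluster;
import org.apache.flink.runtime.minicluster.MiniClusterConfiguration;

import java.util.concurrent.CompletableFuture;

// Minimal sketch (not from the original examples): start a MiniCluster, submit a
// JobGraph through MiniClusterClient, and wait for the job result.
public class MiniClusterClientSketch {

    static void runJob(JobGraph jobGraph) throws Exception {
        Configuration configuration = new Configuration();

        MiniClusterConfiguration miniClusterConfiguration = new MiniClusterConfiguration.Builder()
            .setConfiguration(configuration)
            .setNumTaskManagers(1)          // sizing is illustrative; the examples below use test-specific values
            .setNumSlotsPerTaskManager(1)
            .build();

        try (MiniCluster miniCluster = new MiniCluster(miniClusterConfiguration)) {
            miniCluster.start();

            MiniClusterClient client = new MiniClusterClient(configuration, miniCluster);

            // In newer Flink versions submitJob(...) completes with the JobID;
            // older versions complete with a JobSubmissionResult instead.
            JobID jobId = client.submitJob(jobGraph).get();

            // requestJobResult(...) completes once the job reaches a terminal state.
            CompletableFuture<JobResult> resultFuture = client.requestJobResult(jobId);
            JobResult jobResult = resultFuture.get();

            if (jobResult.getSerializedThrowable().isPresent()) {
                throw new RuntimeException("Job failed", jobResult.getSerializedThrowable().get());
            }
        }
    }
}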
Example #1
Source File: ShuffleCompressionITCase.java From flink with Apache License 2.0
private void executeTest(JobGraph jobGraph) throws Exception {
    Configuration configuration = new Configuration();
    configuration.set(TaskManagerOptions.TOTAL_FLINK_MEMORY, MemorySize.parse("1g"));
    configuration.setBoolean(NettyShuffleEnvironmentOptions.BLOCKING_SHUFFLE_COMPRESSION_ENABLED, true);

    final MiniClusterConfiguration miniClusterConfiguration = new MiniClusterConfiguration.Builder()
        .setConfiguration(configuration)
        .setNumTaskManagers(NUM_TASKMANAGERS)
        .setNumSlotsPerTaskManager(NUM_SLOTS)
        .build();

    try (MiniCluster miniCluster = new MiniCluster(miniClusterConfiguration)) {
        miniCluster.start();
        MiniClusterClient miniClusterClient = new MiniClusterClient(configuration, miniCluster);

        // wait for the submission to succeed
        JobID jobID = miniClusterClient.submitJob(jobGraph).get();

        CompletableFuture<JobResult> resultFuture = miniClusterClient.requestJobResult(jobID);

        assertFalse(resultFuture.get().getSerializedThrowable().isPresent());
    }
}
Example #2
Source File: SchedulingITCase.java From Flink-CEPplus with Apache License 2.0
private void executeSchedulingTest(Configuration configuration) throws Exception {
    configuration.setString(RestOptions.BIND_PORT, "0");

    final long slotIdleTimeout = 50L;
    configuration.setLong(JobManagerOptions.SLOT_IDLE_TIMEOUT, slotIdleTimeout);

    final int parallelism = 4;
    final MiniClusterConfiguration miniClusterConfiguration = new MiniClusterConfiguration.Builder()
        .setConfiguration(configuration)
        .setNumTaskManagers(parallelism)
        .setNumSlotsPerTaskManager(1)
        .build();

    try (MiniCluster miniCluster = new MiniCluster(miniClusterConfiguration)) {
        miniCluster.start();
        MiniClusterClient miniClusterClient = new MiniClusterClient(configuration, miniCluster);

        JobGraph jobGraph = createJobGraph(slotIdleTimeout << 1, parallelism);

        CompletableFuture<JobSubmissionResult> submissionFuture = miniClusterClient.submitJob(jobGraph);

        // wait for the submission to succeed
        JobSubmissionResult jobSubmissionResult = submissionFuture.get();

        CompletableFuture<JobResult> resultFuture = miniClusterClient.requestJobResult(jobSubmissionResult.getJobID());
        JobResult jobResult = resultFuture.get();

        assertThat(jobResult.getSerializedThrowable().isPresent(), is(false));
    }
}
Example #3
Source File: SchedulingITCase.java From flink with Apache License 2.0
private void executeSchedulingTest(Configuration configuration) throws Exception {
    configuration.setString(RestOptions.BIND_PORT, "0");

    final long slotIdleTimeout = 50L;
    configuration.setLong(JobManagerOptions.SLOT_IDLE_TIMEOUT, slotIdleTimeout);

    final int parallelism = 4;
    final MiniClusterConfiguration miniClusterConfiguration = new MiniClusterConfiguration.Builder()
        .setConfiguration(configuration)
        .setNumTaskManagers(parallelism)
        .setNumSlotsPerTaskManager(1)
        .build();

    try (MiniCluster miniCluster = new MiniCluster(miniClusterConfiguration)) {
        miniCluster.start();
        MiniClusterClient miniClusterClient = new MiniClusterClient(configuration, miniCluster);

        JobGraph jobGraph = createJobGraph(slotIdleTimeout << 1, parallelism);

        CompletableFuture<JobSubmissionResult> submissionFuture = miniClusterClient.submitJob(jobGraph);

        // wait for the submission to succeed
        JobSubmissionResult jobSubmissionResult = submissionFuture.get();

        CompletableFuture<JobResult> resultFuture = miniClusterClient.requestJobResult(jobSubmissionResult.getJobID());
        JobResult jobResult = resultFuture.get();

        assertThat(jobResult.getSerializedThrowable().isPresent(), is(false));
    }
}
Example #4
Source File: FileBufferReaderITCase.java From flink with Apache License 2.0
@Test
public void testSequentialReading() throws Exception {
    // setup
    final Configuration configuration = new Configuration();
    configuration.setString(RestOptions.BIND_PORT, "0");
    configuration.setString(NettyShuffleEnvironmentOptions.NETWORK_BOUNDED_BLOCKING_SUBPARTITION_TYPE, "file");

    final MiniClusterConfiguration miniClusterConfiguration = new MiniClusterConfiguration.Builder()
        .setConfiguration(configuration)
        .setNumTaskManagers(parallelism)
        .setNumSlotsPerTaskManager(1)
        .build();

    try (final MiniCluster miniCluster = new MiniCluster(miniClusterConfiguration)) {
        miniCluster.start();
        final MiniClusterClient client = new MiniClusterClient(configuration, miniCluster);
        final JobGraph jobGraph = createJobGraph();

        final CompletableFuture<JobSubmissionResult> submitFuture = client.submitJob(jobGraph);

        // wait for the submission to succeed
        final JobSubmissionResult result = submitFuture.get();

        final CompletableFuture<JobResult> resultFuture = client.requestJobResult(result.getJobID());
        final JobResult jobResult = resultFuture.get();

        assertThat(jobResult.getSerializedThrowable().isPresent(), is(false));
    }
}
Example #5
Source File: SchedulingITCase.java From flink with Apache License 2.0
private void executeSchedulingTest(Configuration configuration) throws Exception {
    configuration.setString(RestOptions.BIND_PORT, "0");

    final long slotIdleTimeout = 50L;
    configuration.setLong(JobManagerOptions.SLOT_IDLE_TIMEOUT, slotIdleTimeout);
    configuration.set(TaskManagerOptions.TOTAL_FLINK_MEMORY, MemorySize.parse("1g"));

    final int parallelism = 4;
    final MiniClusterConfiguration miniClusterConfiguration = new MiniClusterConfiguration.Builder()
        .setConfiguration(configuration)
        .setNumTaskManagers(parallelism)
        .setNumSlotsPerTaskManager(1)
        .build();

    try (MiniCluster miniCluster = new MiniCluster(miniClusterConfiguration)) {
        miniCluster.start();
        MiniClusterClient miniClusterClient = new MiniClusterClient(configuration, miniCluster);

        JobGraph jobGraph = createJobGraph(slotIdleTimeout << 1, parallelism);

        // wait for the submission to succeed
        JobID jobID = miniClusterClient.submitJob(jobGraph).get();

        CompletableFuture<JobResult> resultFuture = miniClusterClient.requestJobResult(jobID);
        JobResult jobResult = resultFuture.get();

        assertThat(jobResult.getSerializedThrowable().isPresent(), is(false));
    }
}
Example #6
Source File: FileBufferReaderITCase.java From flink with Apache License 2.0
@Test
public void testSequentialReading() throws Exception {
    // setup
    final Configuration configuration = new Configuration();
    configuration.setString(RestOptions.BIND_PORT, "0");
    configuration.setString(NettyShuffleEnvironmentOptions.NETWORK_BLOCKING_SHUFFLE_TYPE, "file");
    configuration.set(TaskManagerOptions.TOTAL_FLINK_MEMORY, MemorySize.parse("1g"));

    final MiniClusterConfiguration miniClusterConfiguration = new MiniClusterConfiguration.Builder()
        .setConfiguration(configuration)
        .setNumTaskManagers(parallelism)
        .setNumSlotsPerTaskManager(1)
        .build();

    try (final MiniCluster miniCluster = new MiniCluster(miniClusterConfiguration)) {
        miniCluster.start();
        final MiniClusterClient client = new MiniClusterClient(configuration, miniCluster);
        final JobGraph jobGraph = createJobGraph();

        // wait for the submission to succeed
        final JobID jobID = client.submitJob(jobGraph).get();

        final CompletableFuture<JobResult> resultFuture = client.requestJobResult(jobID);
        final JobResult jobResult = resultFuture.get();

        assertThat(jobResult.getSerializedThrowable().isPresent(), is(false));
    }
}
Example #7
Source File: MiniClusterWithClientResource.java From Flink-CEPplus with Apache License 2.0
private MiniClusterClient createMiniClusterClient() {
    return new MiniClusterClient(getClientConfiguration(), getMiniCluster());
}
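For context (not part of the original example): test code normally does not call this private helper or construct a MiniClusterClient directly; it declares a MiniClusterWithClientResource as a JUnit rule and asks it for a client. The sketch below assumes the rule and its MiniClusterResourceConfiguration builder as found in Flink's test utilities; package locations and method names can differ between Flink versions.

import org.apache.flink.client.program.ClusterClient;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration;
import org.apache.flink.test.util.MiniClusterWithClientResource;
import org.junit.ClassRule;
import org.junit.Test;

public class MiniClusterResourceUsageSketch {

    // Shared mini cluster for all tests in this class; the resource builds the
    // MiniClusterClient internally, via the createMiniClusterClient() helper shown above.
    @ClassRule
    public static final MiniClusterWithClientResource MINI_CLUSTER =
        new MiniClusterWithClientResource(
            new MiniClusterResourceConfiguration.Builder()
                .setConfiguration(new Configuration())
                .setNumberTaskManagers(1)
                .setNumberSlotsPerTaskManager(2)
                .build());

    @Test
    public void useClient() throws Exception {
        // Hand out the client backed by the mini cluster and use it to submit
        // job graphs or query job status, as in the other examples.
        ClusterClient<?> client = MINI_CLUSTER.getClusterClient();
    }
}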
Example #8
Source File: ClassLoaderITCase.java From Flink-CEPplus with Apache License 2.0
/**
 * Tests disposal of a savepoint, which contains custom user code KvState.
 */
@Test
public void testDisposeSavepointWithCustomKvState() throws Exception {
    ClusterClient<?> clusterClient = new MiniClusterClient(new Configuration(), miniClusterResource.getMiniCluster());

    Deadline deadline = new FiniteDuration(100, TimeUnit.SECONDS).fromNow();

    File checkpointDir = FOLDER.newFolder();
    File outputDir = FOLDER.newFolder();

    final PackagedProgram program = new PackagedProgram(
        new File(CUSTOM_KV_STATE_JAR_PATH),
        new String[] {
            String.valueOf(parallelism),
            checkpointDir.toURI().toString(),
            "5000",
            outputDir.toURI().toString()
        });

    TestStreamEnvironment.setAsContext(
        miniClusterResource.getMiniCluster(),
        parallelism,
        Collections.singleton(new Path(CUSTOM_KV_STATE_JAR_PATH)),
        Collections.<URL>emptyList());

    // Execute detached
    Thread invokeThread = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                program.invokeInteractiveModeForExecution();
            } catch (ProgramInvocationException ignored) {
                if (ignored.getCause() == null ||
                        !(ignored.getCause() instanceof JobCancellationException)) {
                    ignored.printStackTrace();
                }
            }
        }
    });

    LOG.info("Starting program invoke thread");
    invokeThread.start();

    // The job ID
    JobID jobId = null;

    LOG.info("Waiting for job status running.");

    // Wait for running job
    while (jobId == null && deadline.hasTimeLeft()) {
        Collection<JobStatusMessage> jobs = clusterClient.listJobs().get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        for (JobStatusMessage job : jobs) {
            if (job.getJobState() == JobStatus.RUNNING) {
                jobId = job.getJobId();
                LOG.info("Job running. ID: " + jobId);
                break;
            }
        }

        // Retry if job is not available yet
        if (jobId == null) {
            Thread.sleep(100L);
        }
    }

    // Trigger savepoint
    String savepointPath = null;
    for (int i = 0; i < 20; i++) {
        LOG.info("Triggering savepoint (" + (i + 1) + "/20).");
        try {
            savepointPath = clusterClient.triggerSavepoint(jobId, null)
                .get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        } catch (Exception cause) {
            LOG.info("Failed to trigger savepoint. Retrying...", cause);
            // This can fail if the operators are not opened yet
            Thread.sleep(500);
        }
    }
    assertNotNull("Failed to trigger savepoint", savepointPath);

    clusterClient.disposeSavepoint(savepointPath).get();

    clusterClient.cancel(jobId);

    // make sure, the execution is finished to not influence other test methods
    invokeThread.join(deadline.timeLeft().toMillis());
    assertFalse("Program invoke thread still running", invokeThread.isAlive());
}
Example #9
Source File: MiniClusterWithClientResource.java From flink with Apache License 2.0
private MiniClusterClient createMiniClusterClient() {
    return new MiniClusterClient(getClientConfiguration(), getMiniCluster());
}
Example #10
Source File: ClassLoaderITCase.java From flink with Apache License 2.0
/**
 * Tests disposal of a savepoint, which contains custom user code KvState.
 */
@Test
public void testDisposeSavepointWithCustomKvState() throws Exception {
    ClusterClient<?> clusterClient = new MiniClusterClient(new Configuration(), miniClusterResource.getMiniCluster());

    Deadline deadline = new FiniteDuration(100, TimeUnit.SECONDS).fromNow();

    File checkpointDir = FOLDER.newFolder();
    File outputDir = FOLDER.newFolder();

    final PackagedProgram program = new PackagedProgram(
        new File(CUSTOM_KV_STATE_JAR_PATH),
        new String[] {
            String.valueOf(parallelism),
            checkpointDir.toURI().toString(),
            "5000",
            outputDir.toURI().toString()
        });

    TestStreamEnvironment.setAsContext(
        miniClusterResource.getMiniCluster(),
        parallelism,
        Collections.singleton(new Path(CUSTOM_KV_STATE_JAR_PATH)),
        Collections.<URL>emptyList());

    // Execute detached
    Thread invokeThread = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                program.invokeInteractiveModeForExecution();
            } catch (ProgramInvocationException ignored) {
                if (ignored.getCause() == null ||
                        !(ignored.getCause() instanceof JobCancellationException)) {
                    ignored.printStackTrace();
                }
            }
        }
    });

    LOG.info("Starting program invoke thread");
    invokeThread.start();

    // The job ID
    JobID jobId = null;

    LOG.info("Waiting for job status running.");

    // Wait for running job
    while (jobId == null && deadline.hasTimeLeft()) {
        Collection<JobStatusMessage> jobs = clusterClient.listJobs().get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        for (JobStatusMessage job : jobs) {
            if (job.getJobState() == JobStatus.RUNNING) {
                jobId = job.getJobId();
                LOG.info("Job running. ID: " + jobId);
                break;
            }
        }

        // Retry if job is not available yet
        if (jobId == null) {
            Thread.sleep(100L);
        }
    }

    // Trigger savepoint
    String savepointPath = null;
    for (int i = 0; i < 20; i++) {
        LOG.info("Triggering savepoint (" + (i + 1) + "/20).");
        try {
            savepointPath = clusterClient.triggerSavepoint(jobId, null)
                .get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        } catch (Exception cause) {
            LOG.info("Failed to trigger savepoint. Retrying...", cause);
            // This can fail if the operators are not opened yet
            Thread.sleep(500);
        }
    }
    assertNotNull("Failed to trigger savepoint", savepointPath);

    clusterClient.disposeSavepoint(savepointPath).get();

    clusterClient.cancel(jobId);

    // make sure, the execution is finished to not influence other test methods
    invokeThread.join(deadline.timeLeft().toMillis());
    assertFalse("Program invoke thread still running", invokeThread.isAlive());
}
Example #11
Source File: MiniClusterWithClientResource.java From flink with Apache License 2.0
private MiniClusterClient createMiniClusterClient() {
    return new MiniClusterClient(getClientConfiguration(), getMiniCluster());
}
Example #12
Source File: ClassLoaderITCase.java From flink with Apache License 2.0
/**
 * Tests disposal of a savepoint, which contains custom user code KvState.
 */
@Test
public void testDisposeSavepointWithCustomKvState() throws Exception {
    ClusterClient<?> clusterClient = new MiniClusterClient(new Configuration(), miniClusterResource.getMiniCluster());

    Deadline deadline = new FiniteDuration(100, TimeUnit.SECONDS).fromNow();

    File checkpointDir = FOLDER.newFolder();
    File outputDir = FOLDER.newFolder();

    final PackagedProgram program = PackagedProgram.newBuilder()
        .setJarFile(new File(CUSTOM_KV_STATE_JAR_PATH))
        .setArguments(new String[] {
            String.valueOf(parallelism),
            checkpointDir.toURI().toString(),
            "5000",
            outputDir.toURI().toString(),
            "false" // Disable unaligned checkpoints as this test is triggering concurrent savepoints/checkpoints
        })
        .build();

    TestStreamEnvironment.setAsContext(
        miniClusterResource.getMiniCluster(),
        parallelism,
        Collections.singleton(new Path(CUSTOM_KV_STATE_JAR_PATH)),
        Collections.emptyList());

    // Execute detached
    Thread invokeThread = new Thread(() -> {
        try {
            program.invokeInteractiveModeForExecution();
        } catch (ProgramInvocationException ex) {
            if (ex.getCause() == null || !(ex.getCause() instanceof JobCancellationException)) {
                ex.printStackTrace();
            }
        }
    });

    LOG.info("Starting program invoke thread");
    invokeThread.start();

    // The job ID
    JobID jobId = null;

    LOG.info("Waiting for job status running.");

    // Wait for running job
    while (jobId == null && deadline.hasTimeLeft()) {
        Collection<JobStatusMessage> jobs = clusterClient.listJobs().get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        for (JobStatusMessage job : jobs) {
            if (job.getJobState() == JobStatus.RUNNING) {
                jobId = job.getJobId();
                LOG.info("Job running. ID: " + jobId);
                break;
            }
        }

        // Retry if job is not available yet
        if (jobId == null) {
            Thread.sleep(100L);
        }
    }

    // Trigger savepoint
    String savepointPath = null;
    for (int i = 0; i < 20; i++) {
        LOG.info("Triggering savepoint (" + (i + 1) + "/20).");
        try {
            savepointPath = clusterClient.triggerSavepoint(jobId, null)
                .get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        } catch (Exception cause) {
            LOG.info("Failed to trigger savepoint. Retrying...", cause);
            // This can fail if the operators are not opened yet
            Thread.sleep(500);
        }
    }
    assertNotNull("Failed to trigger savepoint", savepointPath);

    clusterClient.disposeSavepoint(savepointPath).get();

    clusterClient.cancel(jobId).get();

    // make sure, the execution is finished to not influence other test methods
    invokeThread.join(deadline.timeLeft().toMillis());
    assertFalse("Program invoke thread still running", invokeThread.isAlive());
}