Java Code Examples for org.apache.flink.api.common.JobID#generate()
The following examples show how to use
org.apache.flink.api.common.JobID#generate().
You can go to the original project or source file by following the links above each example.
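Before the project examples, here is a minimal, self-contained sketch of the method itself. The class name and the round-trip through the hex form are illustrative and not taken from any of the projects below: JobID.generate() returns a randomly generated job identifier, which can be rendered with toHexString() and parsed back with JobID.fromHexString().

import org.apache.flink.api.common.JobID;

// Minimal illustrative sketch (not from the projects listed below):
// generate a random JobID, render it as hex, and parse it back.
public class JobIdGenerateSketch {
    public static void main(String[] args) {
        JobID jobId = JobID.generate();           // randomly generated 16-byte ID
        String hex = jobId.toHexString();         // 32-character hex representation
        JobID parsed = JobID.fromHexString(hex);  // round-trips to an equal JobID
        System.out.println(jobId.equals(parsed)); // prints: true
    }
}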
Example 1
Source File: LocalEnvironment.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
@Override
@PublicEvolving
public void startNewSession() throws Exception {
    if (executor != null) {
        // we need to end the previous session
        executor.stop();
        // create also a new JobID
        jobID = JobID.generate();
    }

    // create a new local executor
    executor = PlanExecutor.createLocalExecutor(configuration);
    executor.setPrintStatusDuringExecution(getConfig().isSysoutLoggingEnabled());

    // if we have a session, start the mini cluster eagerly to have it available across sessions
    if (getSessionTimeout() > 0) {
        executor.start();

        // also install the reaper that will shut it down eventually
        executorReaper = new ExecutorReaper(executor);
    }
}
Example 2
Source File: PrometheusReporterTest.java From flink with Apache License 2.0 | 6 votes |
@Test
public void metricIsRemovedWhenCollectorIsNotUnregisteredYet() throws UnirestException {
    TaskManagerMetricGroup tmMetricGroup = new TaskManagerMetricGroup(registry, HOST_NAME, TASK_MANAGER);

    String metricName = "metric";

    Counter metric1 = new SimpleCounter();
    FrontMetricGroup<TaskManagerJobMetricGroup> metricGroup1 = new FrontMetricGroup<>(
        createReporterScopedSettings(),
        new TaskManagerJobMetricGroup(registry, tmMetricGroup, JobID.generate(), "job_1"));
    reporter.notifyOfAddedMetric(metric1, metricName, metricGroup1);

    Counter metric2 = new SimpleCounter();
    FrontMetricGroup<TaskManagerJobMetricGroup> metricGroup2 = new FrontMetricGroup<>(
        createReporterScopedSettings(),
        new TaskManagerJobMetricGroup(registry, tmMetricGroup, JobID.generate(), "job_2"));
    reporter.notifyOfAddedMetric(metric2, metricName, metricGroup2);

    reporter.notifyOfRemovedMetric(metric1, metricName, metricGroup1);

    String response = pollMetrics(reporter.getPort()).getBody();
    assertThat(response, not(containsString("job_1")));
}
Example 3
Source File: PrometheusReporterTest.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
@Test
public void metricIsRemovedWhenCollectorIsNotUnregisteredYet() throws UnirestException {
    TaskManagerMetricGroup tmMetricGroup = new TaskManagerMetricGroup(registry, HOST_NAME, TASK_MANAGER);

    String metricName = "metric";

    Counter metric1 = new SimpleCounter();
    FrontMetricGroup<TaskManagerJobMetricGroup> metricGroup1 = new FrontMetricGroup<>(0,
        new TaskManagerJobMetricGroup(registry, tmMetricGroup, JobID.generate(), "job_1"));
    reporter.notifyOfAddedMetric(metric1, metricName, metricGroup1);

    Counter metric2 = new SimpleCounter();
    FrontMetricGroup<TaskManagerJobMetricGroup> metricGroup2 = new FrontMetricGroup<>(0,
        new TaskManagerJobMetricGroup(registry, tmMetricGroup, JobID.generate(), "job_2"));
    reporter.notifyOfAddedMetric(metric2, metricName, metricGroup2);

    reporter.notifyOfRemovedMetric(metric1, metricName, metricGroup1);

    String response = pollMetrics(reporter.getPort()).getBody();
    assertThat(response, not(containsString("job_1")));
}
Example 4
Source File: PrometheusReporterTest.java From flink with Apache License 2.0 | 6 votes |
@Test
public void metricIsRemovedWhenCollectorIsNotUnregisteredYet() throws UnirestException {
    TaskManagerMetricGroup tmMetricGroup = new TaskManagerMetricGroup(registry, HOST_NAME, TASK_MANAGER);

    String metricName = "metric";

    Counter metric1 = new SimpleCounter();
    FrontMetricGroup<TaskManagerJobMetricGroup> metricGroup1 = new FrontMetricGroup<>(0,
        new TaskManagerJobMetricGroup(registry, tmMetricGroup, JobID.generate(), "job_1"));
    reporter.notifyOfAddedMetric(metric1, metricName, metricGroup1);

    Counter metric2 = new SimpleCounter();
    FrontMetricGroup<TaskManagerJobMetricGroup> metricGroup2 = new FrontMetricGroup<>(0,
        new TaskManagerJobMetricGroup(registry, tmMetricGroup, JobID.generate(), "job_2"));
    reporter.notifyOfAddedMetric(metric2, metricName, metricGroup2);

    reporter.notifyOfRemovedMetric(metric1, metricName, metricGroup1);

    String response = pollMetrics(reporter.getPort()).getBody();
    assertThat(response, not(containsString("job_1")));
}
Example 5
Source File: StandaloneApplicationClusterConfigurationParserFactoryTest.java From flink with Apache License 2.0 | 5 votes |
@Test
public void testEntrypointClusterConfigurationToConfigurationParsing() throws FlinkParseException {
    final JobID jobID = JobID.generate();
    final SavepointRestoreSettings savepointRestoreSettings = SavepointRestoreSettings.forPath("/test/savepoint/path", true);
    final String key = DeploymentOptions.TARGET.key();
    final String value = "testDynamicExecutorConfig";
    final int restPort = 1234;
    final String arg1 = "arg1";
    final String arg2 = "arg2";
    final String[] args = {
        "--configDir", confDirPath,
        "--job-id", jobID.toHexString(),
        "--fromSavepoint", savepointRestoreSettings.getRestorePath(),
        "--allowNonRestoredState",
        "--webui-port", String.valueOf(restPort),
        "--job-classname", JOB_CLASS_NAME,
        String.format("-D%s=%s", key, value),
        arg1, arg2};

    final StandaloneApplicationClusterConfiguration clusterConfiguration = commandLineParser.parse(args);
    assertThat(clusterConfiguration.getJobClassName(), is(equalTo(JOB_CLASS_NAME)));
    assertThat(clusterConfiguration.getArgs(), arrayContaining(arg1, arg2));

    final Configuration configuration = StandaloneApplicationClusterEntryPoint
            .loadConfigurationFromClusterConfig(clusterConfiguration);

    final String strJobId = configuration.get(PipelineOptionsInternal.PIPELINE_FIXED_JOB_ID);
    assertThat(JobID.fromHexString(strJobId), is(equalTo(jobID)));
    assertThat(SavepointRestoreSettings.fromConfiguration(configuration), is(equalTo(savepointRestoreSettings)));

    assertThat(configuration.get(RestOptions.PORT), is(equalTo(restPort)));
    assertThat(configuration.get(DeploymentOptions.TARGET), is(equalTo(value)));
}
Example 6
Source File: RemoteEnvironment.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Override
@PublicEvolving
public void startNewSession() throws Exception {
    dispose();
    jobID = JobID.generate();
    installShutdownHook();
}
Example 7
Source File: CliClientTest.java From flink with Apache License 2.0 | 5 votes |
@Override
public ProgramTargetDescriptor executeUpdate(String sessionId, String statement) throws SqlExecutionException {
    receivedContext = sessionMap.get(sessionId);
    receivedStatement = statement;
    if (failExecution) {
        throw new SqlExecutionException("Fail execution.");
    }
    JobID jobID = JobID.generate();
    return new ProgramTargetDescriptor(jobID);
}
Example 8
Source File: StatefulFunctionsClusterEntryPoint.java From flink-statefun with Apache License 2.0 | 5 votes |
@Nonnull
private static JobID createJobIdForCluster(Configuration globalConfiguration) {
    if (HighAvailabilityMode.isHighAvailabilityModeActivated(globalConfiguration)) {
        return ZERO_JOB_ID;
    } else {
        return JobID.generate();
    }
}
Example 9
Source File: StatefulFunctionsClusterEntryPoint.java From stateful-functions with Apache License 2.0 | 5 votes |
@Nonnull
private static JobID createJobIdForCluster(Configuration globalConfiguration) {
    if (HighAvailabilityMode.isHighAvailabilityModeActivated(globalConfiguration)) {
        return ZERO_JOB_ID;
    } else {
        return JobID.generate();
    }
}
Example 10
Source File: ZooKeeperRegistryTest.java From flink with Apache License 2.0 | 5 votes |
/**
 * Tests that the function of ZookeeperRegistry, setJobRunning(), setJobFinished(), isJobRunning()
 */
@Test
public void testZooKeeperRegistry() throws Exception {
    Configuration configuration = new Configuration();
    configuration.setString(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, testingServer.getConnectString());
    configuration.setString(HighAvailabilityOptions.HA_MODE, "zookeeper");

    final HighAvailabilityServices zkHaService = new ZooKeeperHaServices(
            ZooKeeperUtils.startCuratorFramework(configuration),
            Executors.directExecutor(),
            configuration,
            new VoidBlobStore());

    final RunningJobsRegistry zkRegistry = zkHaService.getRunningJobsRegistry();

    try {
        JobID jobID = JobID.generate();
        assertEquals(JobSchedulingStatus.PENDING, zkRegistry.getJobSchedulingStatus(jobID));

        zkRegistry.setJobRunning(jobID);
        assertEquals(JobSchedulingStatus.RUNNING, zkRegistry.getJobSchedulingStatus(jobID));

        zkRegistry.setJobFinished(jobID);
        assertEquals(JobSchedulingStatus.DONE, zkRegistry.getJobSchedulingStatus(jobID));

        zkRegistry.clearJob(jobID);
        assertEquals(JobSchedulingStatus.PENDING, zkRegistry.getJobSchedulingStatus(jobID));
    } finally {
        zkHaService.close();
    }
}
Example 11
Source File: StandaloneJobClusterEntryPoint.java From flink with Apache License 2.0 | 5 votes |
@Nonnull
private static JobID createJobIdForCluster(Configuration globalConfiguration) {
    if (HighAvailabilityMode.isHighAvailabilityModeActivated(globalConfiguration)) {
        return ZERO_JOB_ID;
    } else {
        return JobID.generate();
    }
}
Example 12
Source File: ZooKeeperRegistryTest.java From flink with Apache License 2.0 | 5 votes |
/**
 * Tests that the function of ZookeeperRegistry, setJobRunning(), setJobFinished(), isJobRunning()
 */
@Test
public void testZooKeeperRegistry() throws Exception {
    Configuration configuration = new Configuration();
    configuration.setString(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, testingServer.getConnectString());
    configuration.setString(HighAvailabilityOptions.HA_MODE, "zookeeper");

    final HighAvailabilityServices zkHaService = new ZooKeeperHaServices(
            ZooKeeperUtils.startCuratorFramework(configuration),
            Executors.directExecutor(),
            configuration,
            new VoidBlobStore());

    final RunningJobsRegistry zkRegistry = zkHaService.getRunningJobsRegistry();

    try {
        JobID jobID = JobID.generate();
        assertEquals(JobSchedulingStatus.PENDING, zkRegistry.getJobSchedulingStatus(jobID));

        zkRegistry.setJobRunning(jobID);
        assertEquals(JobSchedulingStatus.RUNNING, zkRegistry.getJobSchedulingStatus(jobID));

        zkRegistry.setJobFinished(jobID);
        assertEquals(JobSchedulingStatus.DONE, zkRegistry.getJobSchedulingStatus(jobID));

        zkRegistry.clearJob(jobID);
        assertEquals(JobSchedulingStatus.PENDING, zkRegistry.getJobSchedulingStatus(jobID));
    } finally {
        zkHaService.close();
    }
}
Example 13
Source File: ZooKeeperRegistryTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
/**
 * Tests that the function of ZookeeperRegistry, setJobRunning(), setJobFinished(), isJobRunning()
 */
@Test
public void testZooKeeperRegistry() throws Exception {
    Configuration configuration = new Configuration();
    configuration.setString(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, testingServer.getConnectString());
    configuration.setString(HighAvailabilityOptions.HA_MODE, "zookeeper");

    final HighAvailabilityServices zkHaService = new ZooKeeperHaServices(
            ZooKeeperUtils.startCuratorFramework(configuration),
            Executors.directExecutor(),
            configuration,
            new VoidBlobStore());

    final RunningJobsRegistry zkRegistry = zkHaService.getRunningJobsRegistry();

    try {
        JobID jobID = JobID.generate();
        assertEquals(JobSchedulingStatus.PENDING, zkRegistry.getJobSchedulingStatus(jobID));

        zkRegistry.setJobRunning(jobID);
        assertEquals(JobSchedulingStatus.RUNNING, zkRegistry.getJobSchedulingStatus(jobID));

        zkRegistry.setJobFinished(jobID);
        assertEquals(JobSchedulingStatus.DONE, zkRegistry.getJobSchedulingStatus(jobID));

        zkRegistry.clearJob(jobID);
        assertEquals(JobSchedulingStatus.PENDING, zkRegistry.getJobSchedulingStatus(jobID));
    } finally {
        zkHaService.close();
    }
}
Example 14
Source File: StandaloneJobClusterEntryPoint.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Nonnull
private static JobID createJobIdForCluster(Configuration globalConfiguration) {
    if (HighAvailabilityMode.isHighAvailabilityModeActivated(globalConfiguration)) {
        return ZERO_JOB_ID;
    } else {
        return JobID.generate();
    }
}
Example 15
Source File: ExecutionEnvironment.java From flink with Apache License 2.0 | 4 votes |
/**
 * Creates a new Execution Environment.
 */
protected ExecutionEnvironment() {
    jobID = JobID.generate();
}
Example 16
Source File: HistoryServerTest.java From flink with Apache License 2.0 | 4 votes |
private static void createLegacyArchive(Path directory) throws IOException {
    JobID jobID = JobID.generate();

    StringWriter sw = new StringWriter();
    try (JsonGenerator gen = JACKSON_FACTORY.createGenerator(sw)) {
        try (JsonObject root = new JsonObject(gen)) {
            try (JsonArray finished = new JsonArray(gen, "finished")) {
                try (JsonObject job = new JsonObject(gen)) {
                    gen.writeStringField("jid", jobID.toString());
                    gen.writeStringField("name", "testjob");
                    gen.writeStringField("state", JobStatus.FINISHED.name());

                    gen.writeNumberField("start-time", 0L);
                    gen.writeNumberField("end-time", 1L);
                    gen.writeNumberField("duration", 1L);
                    gen.writeNumberField("last-modification", 1L);

                    try (JsonObject tasks = new JsonObject(gen, "tasks")) {
                        gen.writeNumberField("total", 0);

                        if (versionLessThan14) {
                            gen.writeNumberField("pending", 0);
                        } else {
                            gen.writeNumberField("created", 0);
                            gen.writeNumberField("deploying", 0);
                            gen.writeNumberField("scheduled", 0);
                        }

                        gen.writeNumberField("running", 0);
                        gen.writeNumberField("finished", 0);
                        gen.writeNumberField("canceling", 0);
                        gen.writeNumberField("canceled", 0);
                        gen.writeNumberField("failed", 0);
                    }
                }
            }
        }
    }
    String json = sw.toString();

    ArchivedJson archivedJson = new ArchivedJson("/joboverview", json);

    FsJobArchivist.archiveJob(new org.apache.flink.core.fs.Path(directory.toUri()), jobID, Collections.singleton(archivedJson));
}
Example 17
Source File: HistoryServerTest.java From flink with Apache License 2.0 | 4 votes |
private static String createLegacyArchive(Path directory) throws IOException {
    JobID jobId = JobID.generate();

    StringWriter sw = new StringWriter();
    try (JsonGenerator gen = JACKSON_FACTORY.createGenerator(sw)) {
        try (JsonObject root = new JsonObject(gen)) {
            try (JsonArray finished = new JsonArray(gen, "finished")) {
                try (JsonObject job = new JsonObject(gen)) {
                    gen.writeStringField("jid", jobId.toString());
                    gen.writeStringField("name", "testjob");
                    gen.writeStringField("state", JobStatus.FINISHED.name());

                    gen.writeNumberField("start-time", 0L);
                    gen.writeNumberField("end-time", 1L);
                    gen.writeNumberField("duration", 1L);
                    gen.writeNumberField("last-modification", 1L);

                    try (JsonObject tasks = new JsonObject(gen, "tasks")) {
                        gen.writeNumberField("total", 0);

                        if (versionLessThan14) {
                            gen.writeNumberField("pending", 0);
                        } else {
                            gen.writeNumberField("created", 0);
                            gen.writeNumberField("deploying", 0);
                            gen.writeNumberField("scheduled", 0);
                        }

                        gen.writeNumberField("running", 0);
                        gen.writeNumberField("finished", 0);
                        gen.writeNumberField("canceling", 0);
                        gen.writeNumberField("canceled", 0);
                        gen.writeNumberField("failed", 0);
                    }
                }
            }
        }
    }
    String json = sw.toString();

    ArchivedJson archivedJson = new ArchivedJson("/joboverview", json);

    FsJobArchivist.archiveJob(new org.apache.flink.core.fs.Path(directory.toUri()), jobId, Collections.singleton(archivedJson));

    return jobId.toString();
}
Example 18
Source File: ContextEnvironment.java From flink with Apache License 2.0 | 4 votes |
@Override
public void startNewSession() throws Exception {
    jobID = JobID.generate();
}
Example 19
Source File: ContextEnvironment.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
@Override
public void startNewSession() throws Exception {
    client.endSession(jobID);
    jobID = JobID.generate();
}
Example 20
Source File: ExecutionEnvironment.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
/**
 * Creates a new Execution Environment.
 */
protected ExecutionEnvironment() {
    jobID = JobID.generate();
}