Java Code Examples for org.apache.flink.streaming.api.graph.StreamGraph#setJobName()
The following examples show how to use org.apache.flink.streaming.api.graph.StreamGraph#setJobName().
Each example notes the original project and source file it was taken from.
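Before the individual examples, here is a minimal sketch of the pattern they all share: build a program on a StreamExecutionEnvironment, translate it into a StreamGraph, and call setJobName() on that graph before it is turned into a JobGraph and submitted. The class name, the tiny fromElements pipeline, and the job name below are illustrative only, and the sketch targets the older Flink APIs used in these examples (in recent releases StreamGraph is treated as internal and jobs are usually named via execute(jobName) or the pipeline.name option).

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.graph.StreamGraph;

public class SetJobNameSketch {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // a trivial, purely illustrative pipeline so the StreamGraph is non-empty
        env.fromElements(1, 2, 3).print();

        // translate the recorded transformations into a StreamGraph and name the job
        StreamGraph streamGraph = env.getStreamGraph();
        streamGraph.setJobName("my-named-job");

        // the JobGraph derived from the StreamGraph carries the name to the cluster
        System.out.println(streamGraph.getJobGraph().getName());
    }
}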
Example 1
Source File: StreamPlanEnvironment.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
@Override
public JobExecutionResult execute(String jobName) throws Exception {
    StreamGraph streamGraph = getStreamGraph();
    streamGraph.setJobName(jobName);

    transformations.clear();

    if (env instanceof OptimizerPlanEnvironment) {
        ((OptimizerPlanEnvironment) env).setPlan(streamGraph);
    } else if (env instanceof PreviewPlanEnvironment) {
        ((PreviewPlanEnvironment) env).setPreview(streamGraph.getStreamingPlanAsJSON());
    }

    throw new OptimizerPlanEnvironment.ProgramAbortException();
}
Example 2
Source File: RemoteStreamEnvironment.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
/**
 * Executes the job remotely.
 *
 * <p>This method can be used independent of the {@link StreamExecutionEnvironment} type.
 *
 * @return The result of the job execution, containing elapsed time and accumulators.
 */
@PublicEvolving
public static JobExecutionResult executeRemotely(StreamExecutionEnvironment streamExecutionEnvironment,
        List<URL> jarFiles,
        String host,
        int port,
        Configuration clientConfiguration,
        List<URL> globalClasspaths,
        String jobName,
        SavepointRestoreSettings savepointRestoreSettings) throws ProgramInvocationException {
    StreamGraph streamGraph = streamExecutionEnvironment.getStreamGraph();
    streamGraph.setJobName(jobName);
    return executeRemotely(streamGraph,
        streamExecutionEnvironment.getClass().getClassLoader(),
        streamExecutionEnvironment.getConfig(),
        jarFiles,
        host, port,
        clientConfiguration,
        globalClasspaths,
        savepointRestoreSettings);
}
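As a rough illustration of how this static helper might be called, the sketch below submits a program built on an existing StreamExecutionEnvironment; the jar path, host, port, and job name are placeholders rather than values from the original project.

import java.io.File;
import java.net.URL;
import java.util.Collections;
import java.util.List;

import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings;
import org.apache.flink.streaming.api.environment.RemoteStreamEnvironment;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class ExecuteRemotelySketch {

    // submits the program recorded on 'env' to a remote JobManager under the given name
    static JobExecutionResult submit(StreamExecutionEnvironment env) throws Exception {
        List<URL> jars = Collections.singletonList(new File("/path/to/job.jar").toURI().toURL());
        return RemoteStreamEnvironment.executeRemotely(
            env,
            jars,                             // user-code jars shipped to the cluster
            "jobmanager-host", 6123,          // placeholder JobManager address
            new Configuration(),              // client configuration
            Collections.emptyList(),          // no extra global classpaths
            "remote-job-name",                // becomes the StreamGraph's job name
            SavepointRestoreSettings.none()); // do not restore from a savepoint
    }
}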
Example 3
Source File: StreamContextEnvironment.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
@Override
public JobExecutionResult execute(String jobName) throws Exception {
    Preconditions.checkNotNull(jobName, "Streaming Job name should not be null.");

    StreamGraph streamGraph = this.getStreamGraph();
    streamGraph.setJobName(jobName);

    transformations.clear();

    // execute the programs
    if (ctx instanceof DetachedEnvironment) {
        LOG.warn("Job was executed in detached mode, the results will be available on completion.");
        ((DetachedEnvironment) ctx).setDetachedPlan(streamGraph);
        return DetachedEnvironment.DetachedJobExecutionResult.INSTANCE;
    } else {
        return ctx
            .getClient()
            .run(streamGraph, ctx.getJars(), ctx.getClasspaths(), ctx.getUserCodeClassLoader(), ctx.getSavepointRestoreSettings())
            .getJobExecutionResult();
    }
}
Example 4
Source File: LocalStreamEnvironmentWithAsyncExecution.java From flink-crawler with Apache License 2.0 | 6 votes |
/**
 * This method lets you start a job and immediately return.
 *
 * @param jobName name of the job
 * @return the submission result of the detached job
 * @throws Exception
 */
public JobSubmissionResult executeAsync(String jobName) throws Exception {
    // transform the streaming program into a JobGraph
    StreamGraph streamGraph = getStreamGraph();
    streamGraph.setJobName(jobName);

    JobGraph jobGraph = streamGraph.getJobGraph();

    Configuration configuration = new Configuration();
    configuration.addAll(jobGraph.getJobConfiguration());
    configuration.setInteger(TaskManagerOptions.NUM_TASK_SLOTS, jobGraph.getMaximumParallelism());

    // add (and override) the settings with what the user defined
    configuration.addAll(_conf);

    _exec = new LocalFlinkMiniCluster(configuration, true);
    _exec.start(true);

    // The above code is all basically the same as Flink's LocalStreamEnvironment.
    // The change is that here we call submitJobDetached vs. submitJobAndWait.
    // We assume that eventually someone calls stop(job id), which then terminates
    // the LocalFlinkMiniCluster.
    return _exec.submitJobDetached(jobGraph);
}
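A possible caller of this detached-submission path is sketched below. The stop(JobID) call is only an assumption taken from the closing comment in executeAsync() (the snippet above does not show such a method), and the job name is a placeholder.

// Hypothetical caller: submit detached, keep working, then tear the job down.
// env.stop(jobId) is assumed from the comment in executeAsync(), not shown above.
static void runDetached(LocalStreamEnvironmentWithAsyncExecution env) throws Exception {
    JobSubmissionResult submission = env.executeAsync("my-async-crawl-job");
    JobID jobId = submission.getJobID();

    // ... do other work while the job runs in the mini cluster ...

    env.stop(jobId); // assumed to terminate the LocalFlinkMiniCluster
}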
Example 5
Source File: ExecutionContext.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
private FlinkPlan createPlan(String name, Configuration flinkConfig) {
    if (streamExecEnv != null) {
        final StreamGraph graph = streamExecEnv.getStreamGraph();
        graph.setJobName(name);
        return graph;
    } else {
        final int parallelism = execEnv.getParallelism();
        final Plan unoptimizedPlan = execEnv.createProgramPlan();
        unoptimizedPlan.setJobName(name);
        final Optimizer compiler = new Optimizer(new DataStatistics(), new DefaultCostEstimator(), flinkConfig);
        return ClusterClient.getOptimizedPlan(compiler, unoptimizedPlan, parallelism);
    }
}
Example 6
Source File: TestStreamEnvironment.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Override
public JobExecutionResult execute(String jobName) throws Exception {
    final StreamGraph streamGraph = getStreamGraph();
    streamGraph.setJobName(jobName);

    final JobGraph jobGraph = streamGraph.getJobGraph();

    for (Path jarFile : jarFiles) {
        jobGraph.addJar(jarFile);
    }

    jobGraph.setClasspaths(new ArrayList<>(classPaths));

    return jobExecutor.executeJobBlocking(jobGraph);
}
Example 7
Source File: RemoteStreamEnvironment.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Override
public JobExecutionResult execute(String jobName) throws ProgramInvocationException {
    StreamGraph streamGraph = getStreamGraph();
    streamGraph.setJobName(jobName);
    transformations.clear();
    return executeRemotely(streamGraph, jarFiles);
}
Example 8
Source File: FlinkTestUtil.java From AthenaX with Apache License 2.0 | 5 votes |
static LocalFlinkMiniCluster execute(LocalStreamEnvironment env, Configuration conf, String jobName) throws Exception {
    StreamGraph streamGraph = env.getStreamGraph();
    streamGraph.setJobName(jobName);
    JobGraph jobGraph = streamGraph.getJobGraph();

    Configuration configuration = new Configuration(conf);
    configuration.addAll(jobGraph.getJobConfiguration());
    configuration.setLong("taskmanager.memory.size", -1L);
    configuration.setInteger("taskmanager.numberOfTaskSlots", jobGraph.getMaximumParallelism());

    LocalFlinkMiniCluster cluster = new LocalFlinkMiniCluster(configuration, true);
    cluster.start();
    cluster.submitJobDetached(jobGraph);
    return cluster;
}
Example 9
Source File: LocalStreamEnvironmentWithAsyncExecution.java From flink-crawler with Apache License 2.0 | 5 votes |
/**
 * Executes the JobGraph on a mini cluster of ClusterUtil with a user-specified name.
 *
 * @param jobName
 *            name of the job
 * @return The result of the job execution, containing elapsed time and accumulators.
 */
@Override
public JobExecutionResult execute(String jobName) throws Exception {
    // transform the streaming program into a JobGraph
    StreamGraph streamGraph = getStreamGraph();
    streamGraph.setJobName(jobName);

    JobGraph jobGraph = streamGraph.getJobGraph();

    Configuration configuration = new Configuration();
    configuration.addAll(jobGraph.getJobConfiguration());
    configuration.setInteger(TaskManagerOptions.NUM_TASK_SLOTS, jobGraph.getMaximumParallelism());

    // add (and override) the settings with what the user defined
    configuration.addAll(_conf);

    _exec = new LocalFlinkMiniCluster(configuration, true);

    try {
        _exec.start();
        return _exec.submitJobAndWait(jobGraph, getConfig().isSysoutLoggingEnabled());
    } finally {
        transformations.clear();
        _exec.stop();
        _exec = null;
    }
}
Example 10
Source File: DFRemoteStreamEnvironment.java From df_data_service with Apache License 2.0 | 5 votes |
@Override
public JobExecutionResult execute(String jobName) throws ProgramInvocationException {
    StreamGraph streamGraph = getStreamGraph();
    streamGraph.setJobName(jobName);
    transformations.clear();
    return executeRemotely(streamGraph, jarFiles);
}
Example 11
Source File: BatchExecutor.java From flink with Apache License 2.0 | 5 votes |
@Override
public Pipeline createPipeline(List<Transformation<?>> transformations, TableConfig tableConfig, String jobName) {
    StreamExecutionEnvironment execEnv = getExecutionEnvironment();
    ExecutorUtils.setBatchProperties(execEnv, tableConfig);
    StreamGraph streamGraph = ExecutorUtils.generateStreamGraph(execEnv, transformations);
    streamGraph.setJobName(getNonEmptyJobName(jobName));
    ExecutorUtils.setBatchProperties(streamGraph, tableConfig);
    return streamGraph;
}
Example 12
Source File: LocalStreamEnvironment.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
/**
 * Executes the JobGraph on a mini cluster of ClusterUtil with a user-specified name.
 *
 * @param jobName
 *            name of the job
 * @return The result of the job execution, containing elapsed time and accumulators.
 */
@Override
public JobExecutionResult execute(String jobName) throws Exception {
    // transform the streaming program into a JobGraph
    StreamGraph streamGraph = getStreamGraph();
    streamGraph.setJobName(jobName);

    JobGraph jobGraph = streamGraph.getJobGraph();
    jobGraph.setAllowQueuedScheduling(true);

    Configuration configuration = new Configuration();
    configuration.addAll(jobGraph.getJobConfiguration());
    configuration.setString(TaskManagerOptions.MANAGED_MEMORY_SIZE, "0");

    // add (and override) the settings with what the user defined
    configuration.addAll(this.configuration);

    if (!configuration.contains(RestOptions.BIND_PORT)) {
        configuration.setString(RestOptions.BIND_PORT, "0");
    }

    int numSlotsPerTaskManager = configuration.getInteger(TaskManagerOptions.NUM_TASK_SLOTS, jobGraph.getMaximumParallelism());

    MiniClusterConfiguration cfg = new MiniClusterConfiguration.Builder()
        .setConfiguration(configuration)
        .setNumSlotsPerTaskManager(numSlotsPerTaskManager)
        .build();

    if (LOG.isInfoEnabled()) {
        LOG.info("Running job on local embedded Flink mini cluster");
    }

    MiniCluster miniCluster = new MiniCluster(cfg);

    try {
        miniCluster.start();
        configuration.setInteger(RestOptions.PORT, miniCluster.getRestAddress().get().getPort());

        return miniCluster.executeJobBlocking(jobGraph);
    } finally {
        transformations.clear();
        miniCluster.close();
    }
}
Example 13
Source File: DFRemoteStreamEnvironment.java From df_data_service with Apache License 2.0 | 4 votes |
public JobExecutionResult executeWithDFObj(String jobName, DFJobPOPJ dfJobPOPJ) throws ProgramInvocationException {
    StreamGraph streamGraph = getStreamGraph();
    streamGraph.setJobName(jobName);
    transformations.clear();
    return executeRemotely(streamGraph, jarFiles, dfJobPOPJ);
}
Example 14
Source File: StreamExecutor.java From flink with Apache License 2.0 | 4 votes |
@Override
public Pipeline createPipeline(List<Transformation<?>> transformations, TableConfig tableConfig, String jobName) {
    StreamGraph streamGraph = ExecutorUtils.generateStreamGraph(getExecutionEnvironment(), transformations);
    streamGraph.setJobName(getNonEmptyJobName(jobName));
    return streamGraph;
}