Java Code Examples for org.apache.flink.streaming.api.environment.StreamExecutionEnvironment#getExecutionEnvironment()
The following examples show how to use org.apache.flink.streaming.api.environment.StreamExecutionEnvironment#getExecutionEnvironment(). You can go to the original project or source file by following the link above each example.
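All of the examples below follow the same basic pattern: obtain the environment with getExecutionEnvironment(), define a dataflow on it, and submit the job with execute(). As a minimal, self-contained sketch of that pattern (the class name and job name here are illustrative, not taken from any of the projects below):

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class GetExecutionEnvironmentExample {

    public static void main(String[] args) throws Exception {
        // getExecutionEnvironment() picks the environment that matches the context:
        // a local environment when the program is run from an IDE, and the cluster
        // environment when it is submitted as a packaged job.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<String> stream = env.fromElements("hello", "world");
        stream.print();

        // Nothing runs until execute() is called; the calls above only build the dataflow graph.
        env.execute("Minimal getExecutionEnvironment Example");
    }
}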
Example 1
Source File: SplitSideOutputTest.java From flink with Apache License 2.0
@Test
public void testSelectAfterSideOutputIsForbidden() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    SingleOutputStreamOperator<String> processInput = env.fromElements("foo")
        .process(new DummyProcessFunction());

    processInput.getSideOutput(outputTag);

    try {
        processInput.split(Collections::singleton);
        Assert.fail("Should have failed early with an exception.");
    } catch (UnsupportedOperationException expected) {
        // expected
    }
}
Example 2
Source File: CassandraTupleSinkExample.java From flink with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStreamSource<Tuple2<String, Integer>> source = env.fromCollection(collection);

    CassandraSink.addSink(source)
        .setQuery(INSERT)
        .setClusterBuilder(new ClusterBuilder() {
            @Override
            protected Cluster buildCluster(Builder builder) {
                return builder.addContactPoint("127.0.0.1").build();
            }
        })
        .build();

    env.execute("WriteTupleIntoCassandra");
}
Example 3
Source File: AllWindowTranslationTest.java From flink with Apache License 2.0
@Test
@SuppressWarnings("rawtypes")
public void testFoldProcessingTime() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime);

    DataStream<Tuple2<String, Integer>> source = env.fromElements(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

    DataStream<Tuple3<String, String, Integer>> window = source
        .windowAll(SlidingProcessingTimeWindows.of(Time.of(1, TimeUnit.SECONDS), Time.of(100, TimeUnit.MILLISECONDS)))
        .fold(new Tuple3<>("", "", 0), new DummyFolder());

    OneInputTransformation<Tuple2<String, Integer>, Tuple3<String, String, Integer>> transform =
        (OneInputTransformation<Tuple2<String, Integer>, Tuple3<String, String, Integer>>) window.getTransformation();
    OneInputStreamOperator<Tuple2<String, Integer>, Tuple3<String, String, Integer>> operator = transform.getOperator();
    Assert.assertTrue(operator instanceof WindowOperator);
    WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator =
        (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
    Assert.assertTrue(winOperator.getTrigger() instanceof ProcessingTimeTrigger);
    Assert.assertTrue(winOperator.getWindowAssigner() instanceof SlidingProcessingTimeWindows);
    Assert.assertTrue(winOperator.getStateDescriptor() instanceof FoldingStateDescriptor);

    processElementAndEnsureOutput(winOperator, winOperator.getKeySelector(), BasicTypeInfo.STRING_TYPE_INFO, new Tuple2<>("hello", 1));
}
Example 4
Source File: AllWindowTranslationTest.java From flink with Apache License 2.0
@Test
@SuppressWarnings("rawtypes")
public void testReduceProcessingTime() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime);

    DataStream<Tuple2<String, Integer>> source = env.fromElements(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

    DataStream<Tuple2<String, Integer>> window1 = source
        .windowAll(SlidingProcessingTimeWindows.of(Time.of(1, TimeUnit.SECONDS), Time.of(100, TimeUnit.MILLISECONDS)))
        .reduce(new DummyReducer());

    OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>> transform =
        (OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>>) window1.getTransformation();
    OneInputStreamOperator<Tuple2<String, Integer>, Tuple2<String, Integer>> operator = transform.getOperator();
    Assert.assertTrue(operator instanceof WindowOperator);
    WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator =
        (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
    Assert.assertTrue(winOperator.getTrigger() instanceof ProcessingTimeTrigger);
    Assert.assertTrue(winOperator.getWindowAssigner() instanceof SlidingProcessingTimeWindows);
    Assert.assertTrue(winOperator.getStateDescriptor() instanceof ReducingStateDescriptor);

    processElementAndEnsureOutput(winOperator, winOperator.getKeySelector(), BasicTypeInfo.STRING_TYPE_INFO, new Tuple2<>("hello", 1));
}
Example 5
Source File: DataStreamTest.java From flink with Apache License 2.0
@Test
public void testTupleNestedArrayKeyRejection() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStream<Tuple2<Integer[], String>> input = env.fromElements(
        new Tuple2<>(new Integer[] {1, 2}, "test-test"));

    TypeInformation<?> expectedTypeInfo = new TupleTypeInfo<Tuple2<Integer[], String>>(
        BasicArrayTypeInfo.INT_ARRAY_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO);

    // adjust the rule
    expectedException.expect(InvalidProgramException.class);
    expectedException.expectMessage(new StringStartsWith("Type " + expectedTypeInfo + " cannot be used as key."));

    input.keyBy(new KeySelector<Tuple2<Integer[], String>, Tuple2<Integer[], String>>() {
        @Override
        public Tuple2<Integer[], String> getKey(Tuple2<Integer[], String> value) throws Exception {
            return value;
        }
    });
}
Example 6
Source File: WindowTranslationTest.java From Flink-CEPplus with Apache License 2.0
@Test
@SuppressWarnings("rawtypes")
public void testReduceEventTime() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.IngestionTime);

    DataStream<Tuple2<String, Integer>> source = env.fromElements(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

    DataStream<Tuple2<String, Integer>> window1 = source
        .keyBy(new TupleKeySelector())
        .window(SlidingEventTimeWindows.of(Time.of(1, TimeUnit.SECONDS), Time.of(100, TimeUnit.MILLISECONDS)))
        .reduce(new DummyReducer());

    OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>> transform =
        (OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>>) window1.getTransformation();
    OneInputStreamOperator<Tuple2<String, Integer>, Tuple2<String, Integer>> operator = transform.getOperator();
    Assert.assertTrue(operator instanceof WindowOperator);
    WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator =
        (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
    Assert.assertTrue(winOperator.getTrigger() instanceof EventTimeTrigger);
    Assert.assertTrue(winOperator.getWindowAssigner() instanceof SlidingEventTimeWindows);
    Assert.assertTrue(winOperator.getStateDescriptor() instanceof ReducingStateDescriptor);

    processElementAndEnsureOutput(winOperator, winOperator.getKeySelector(), BasicTypeInfo.STRING_TYPE_INFO, new Tuple2<>("hello", 1));
}
Example 7
Source File: IncrementalLearningSkeleton.java From flink-learning with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

    DataStream<Integer> trainingData = env.addSource(new FiniteTrainingDataSource());
    DataStream<Integer> newData = env.addSource(new FiniteNewDataSource());

    DataStream<Double[]> model = trainingData
        .assignTimestampsAndWatermarks(new LinearTimestamp())
        .timeWindowAll(Time.of(5000, TimeUnit.MILLISECONDS))
        .apply(new PartialModelBuilder());

    newData.connect(model).map(new Predictor()).print();

    env.execute("Streaming Incremental Learning");
}
Example 8
Source File: CassandraTupleSinkExample.java From flink-learning with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStreamSource<Tuple2<String, Integer>> source = env.fromCollection(collection);

    CassandraSink.addSink(source)
        .setQuery(INSERT)
        .setClusterBuilder(new ClusterBuilder() {
            @Override
            protected Cluster buildCluster(Cluster.Builder builder) {
                return builder.addContactPoint("127.0.0.1").build();
            }
        })
        .build();

    env.execute("WriteTupleIntoCassandra");
}
Example 9
Source File: Main.java From flink-learning with Apache License 2.0
public static void main(String[] args) throws Exception {
    // check the input parameters
    if (args.length != 2) {
        System.err.println("USAGE:\nSocketTextStreamWordCount <hostname> <port>");
        return;
    }

    String hostname = args[0];
    Integer port = Integer.parseInt(args[1]);

    // set up the streaming execution environment
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    // get the input data
    DataStreamSource<String> stream = env.socketTextStream(hostname, port);

    // count the words
    SingleOutputStreamOperator<Tuple2<String, Integer>> sum = stream.flatMap(new LineSplitter())
        .keyBy(0)
        .sum(1);
    sum.print();

    env.execute("Java WordCount from SocketTextStream Example");
}
Example 10
Source File: AllWindowTranslationTest.java From flink with Apache License 2.0
/**
 * .reduce() does not support RichReduceFunction, since the reduce function is used internally
 * in a {@code ReducingState}.
 */
@Test(expected = UnsupportedOperationException.class)
public void testReduceWithRichReducerFails() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<Tuple2<String, Integer>> source = env.fromElements(Tuple2.of("hello", 1), Tuple2.of("hello", 2));
    env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime);

    source
        .windowAll(SlidingEventTimeWindows.of(Time.of(1, TimeUnit.SECONDS), Time.of(100, TimeUnit.MILLISECONDS)))
        .reduce(new RichReduceFunction<Tuple2<String, Integer>>() {
            private static final long serialVersionUID = -6448847205314995812L;

            @Override
            public Tuple2<String, Integer> reduce(Tuple2<String, Integer> value1,
                    Tuple2<String, Integer> value2) throws Exception {
                return null;
            }
        });

    fail("exception was not thrown");
}
Example 11
Source File: CustomTableSinkMain.java From flink-learning with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment blinkStreamEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    blinkStreamEnv.setParallelism(1);
    EnvironmentSettings blinkStreamSettings = EnvironmentSettings.newInstance()
        .useBlinkPlanner()
        .inStreamingMode()
        .build();
    StreamTableEnvironment blinkStreamTableEnv = StreamTableEnvironment.create(blinkStreamEnv, blinkStreamSettings);

    String path = SQLExampleWordCount.class.getClassLoader().getResource("words.txt").getPath();

    CsvTableSource csvTableSource = CsvTableSource.builder()
        .field("word", Types.STRING)
        .path(path)
        .build();
    blinkStreamTableEnv.registerTableSource("zhisheng", csvTableSource);

    RetractStreamTableSink<Row> retractStreamTableSink = new MyRetractStreamTableSink(new String[]{"c", "word"}, new TypeInformation[]{Types.LONG, Types.STRING});
    // or alternatively:
    // RetractStreamTableSink<Row> retractStreamTableSink = new MyRetractStreamTableSink(new String[]{"c", "word"}, new DataType[]{DataTypes.BIGINT(), DataTypes.STRING()});
    blinkStreamTableEnv.registerTableSink("sinkTable", retractStreamTableSink);

    Table wordWithCount = blinkStreamTableEnv.sqlQuery("SELECT count(word) AS c, word FROM zhisheng GROUP BY word");
    wordWithCount.insertInto("sinkTable");

    blinkStreamTableEnv.execute("Blink Custom Table Sink");
}
Example 12
Source File: IncrementalLearningSkeleton.java From flink with Apache License 2.0
public static void main(String[] args) throws Exception {
    // Checking input parameters
    final ParameterTool params = ParameterTool.fromArgs(args);

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

    DataStream<Integer> trainingData = env.addSource(new FiniteTrainingDataSource());
    DataStream<Integer> newData = env.addSource(new FiniteNewDataSource());

    // build new model on every second of new data
    DataStream<Double[]> model = trainingData
        .assignTimestampsAndWatermarks(new LinearTimestamp())
        .timeWindowAll(Time.of(5000, TimeUnit.MILLISECONDS))
        .apply(new PartialModelBuilder());

    // use partial model for newData
    DataStream<Integer> prediction = newData.connect(model).map(new Predictor());

    // emit result
    if (params.has("output")) {
        prediction.writeAsText(params.get("output"));
    } else {
        System.out.println("Printing result to stdout. Use --output to specify output path.");
        prediction.print();
    }

    // execute program
    env.execute("Streaming Incremental Learning");
}
Example 13
Source File: Harness.java From flink-statefun with Apache License 2.0
public void start() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    configureStrictlyRequiredFlinkConfigs(flinkConfig);

    // Configure will change the value of a setting only if a corresponding option was set in the
    // underlying configuration. If a key is not present, the current value of a field will remain
    // untouched.
    env.configure(flinkConfig, Thread.currentThread().getContextClassLoader());

    StatefulFunctionsConfig stateFunConfig = new StatefulFunctionsConfig(flinkConfig);
    stateFunConfig.addAllGlobalConfigurations(globalConfigurations);
    stateFunConfig.setProvider(new HarnessProvider(overrideIngress, overrideEgress));
    StatefulFunctionsJob.main(env, stateFunConfig);
}
Example 14
Source File: YARNITCase.java From flink with Apache License 2.0
private JobGraph getTestingJobGraph() {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(2);

    env.addSource(new NoDataSource())
        .shuffle()
        .addSink(new DiscardingSink<>());

    return env.getStreamGraph().getJobGraph();
}
Example 15
Source File: WriteToKafka.java From kafka-flink-101 with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    Properties properties = new Properties();
    properties.setProperty("bootstrap.servers", "localhost:9092");

    DataStream<String> stream = env.addSource(new SimpleStringGenerator());
    stream.addSink(new FlinkKafkaProducer09<>("flink-demo", new SimpleStringSchema(), properties));

    env.execute();
}
Example 16
Source File: SessionWindowITCase.java From flink with Apache License 2.0
private void runTest(
        SourceFunction<SessionEvent<Integer, TestEventPayload>> dataSource,
        WindowFunction<SessionEvent<Integer, TestEventPayload>, String, Tuple, TimeWindow> windowFunction) throws Exception {

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

    WindowedStream<SessionEvent<Integer, TestEventPayload>, Tuple, TimeWindow> windowedStream =
        env.addSource(dataSource).keyBy("sessionKey")
            .window(EventTimeSessionWindows.withGap(Time.milliseconds(MAX_SESSION_EVENT_GAP_MS)));

    if (ALLOWED_LATENESS_MS != Long.MAX_VALUE) {
        windowedStream = windowedStream.allowedLateness(Time.milliseconds(ALLOWED_LATENESS_MS));
    }

    if (PURGE_WINDOW_ON_FIRE) {
        windowedStream = windowedStream.trigger(PurgingTrigger.of(EventTimeTrigger.create()));
    }

    windowedStream.apply(windowFunction).print();
    JobExecutionResult result = env.execute();

    // check that overall event counts match with our expectations. remember that late events within lateness will
    // each trigger a window!
    Assert.assertEquals(
        (LATE_EVENTS_PER_SESSION + 1) * NUMBER_OF_SESSIONS * EVENTS_PER_SESSION,
        (long) result.getAccumulatorResult(SESSION_COUNTER_ON_TIME_KEY));
    Assert.assertEquals(
        NUMBER_OF_SESSIONS * (LATE_EVENTS_PER_SESSION * (LATE_EVENTS_PER_SESSION + 1) / 2),
        (long) result.getAccumulatorResult(SESSION_COUNTER_LATE_KEY));
}
Example 17
Source File: AbstractQueryableStateTestBase.java From Flink-CEPplus with Apache License 2.0
/**
 * Tests a simple value state queryable state instance with a default value
 * set. Each source emits (subtaskIndex, 0)..(subtaskIndex, numElements)
 * tuples, the key is mapped to 1 but key 0 is queried, which should throw
 * a {@link UnknownKeyOrNamespaceException} exception.
 *
 * @throws UnknownKeyOrNamespaceException thrown due to querying a non-existent key
 */
@Test(expected = UnknownKeyOrNamespaceException.class)
public void testValueStateDefault() throws Throwable {
    final Deadline deadline = Deadline.now().plus(TEST_TIMEOUT);
    final long numElements = 1024L;

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStateBackend(stateBackend);
    env.setParallelism(maxParallelism);
    // Very important, because cluster is shared between tests and we
    // don't explicitly check that all slots are available before
    // submitting.
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000L));

    DataStream<Tuple2<Integer, Long>> source = env.addSource(new TestAscendingValueSource(numElements));

    ValueStateDescriptor<Tuple2<Integer, Long>> valueState = new ValueStateDescriptor<>(
        "any", source.getType(), Tuple2.of(0, 1337L));

    // only expose key "1"
    QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState = source.keyBy(
        new KeySelector<Tuple2<Integer, Long>, Integer>() {
            private static final long serialVersionUID = 4509274556892655887L;

            @Override
            public Integer getKey(Tuple2<Integer, Long> value) {
                return 1;
            }
        }).asQueryableState("hakuna", valueState);

    try (AutoCancellableJob autoCancellableJob = new AutoCancellableJob(deadline, clusterClient, env)) {
        final JobID jobId = autoCancellableJob.getJobId();
        final JobGraph jobGraph = autoCancellableJob.getJobGraph();

        clusterClient.setDetached(true);
        clusterClient.submitJob(jobGraph, AbstractQueryableStateTestBase.class.getClassLoader());

        // Now query
        int key = 0;
        CompletableFuture<ValueState<Tuple2<Integer, Long>>> future = getKvState(
            deadline, client, jobId, queryableState.getQueryableStateName(),
            key, BasicTypeInfo.INT_TYPE_INFO, valueState, true, executor);

        try {
            future.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        } catch (ExecutionException | CompletionException e) {
            // get() on a completedExceptionally future wraps the
            // exception in an ExecutionException.
            throw e.getCause();
        }
    }
}
Example 18
Source File: StreamCheckpointNotifierITCase.java From flink with Apache License 2.0
/**
 * Runs the following program.
 * <pre>
 *     [ (source)->(filter) ] -> [ (co-map) ] -> [ (map) ] -> [ (groupBy/reduce)->(sink) ]
 * </pre>
 */
@Test
public void testProgram() {
    try {
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        assertEquals("test setup broken", PARALLELISM, env.getParallelism());

        env.enableCheckpointing(500);
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 0L));

        final int numElements = 10000;
        final int numTaskTotal = PARALLELISM * 5;

        DataStream<Long> stream = env.addSource(new GeneratingSourceFunction(numElements, numTaskTotal));

        stream
            // -------------- first vertex, chained to the src ----------------
            .filter(new LongRichFilterFunction())

            // -------------- second vertex, applying the co-map ----------------
            .connect(stream).flatMap(new LeftIdentityCoRichFlatMapFunction())

            // -------------- third vertex - the stateful one that also fails ----------------
            .map(new IdentityMapFunction())
            .startNewChain()

            // -------------- fourth vertex - reducer and the sink ----------------
            .keyBy(0)
            .reduce(new OnceFailingReducer(numElements))
            .addSink(new DiscardingSink<Tuple1<Long>>());

        env.execute();

        final long failureCheckpointID = OnceFailingReducer.failureCheckpointID;
        assertNotEquals(0L, failureCheckpointID);

        List<List<Long>[]> allLists = Arrays.asList(
            GeneratingSourceFunction.COMPLETED_CHECKPOINTS,
            LongRichFilterFunction.COMPLETED_CHECKPOINTS,
            LeftIdentityCoRichFlatMapFunction.COMPLETED_CHECKPOINTS,
            IdentityMapFunction.COMPLETED_CHECKPOINTS,
            OnceFailingReducer.COMPLETED_CHECKPOINTS
        );

        for (List<Long>[] parallelNotifications : allLists) {
            for (List<Long> notifications : parallelNotifications) {
                assertTrue("No checkpoint notification was received.",
                    notifications.size() > 0);
                assertFalse("Failure checkpoint was marked as completed.",
                    notifications.contains(failureCheckpointID));
                assertFalse("No checkpoint received after failure.",
                    notifications.get(notifications.size() - 1) == failureCheckpointID);
                assertTrue("Checkpoint notification was received multiple times",
                    notifications.size() == new HashSet<Long>(notifications).size());
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example 19
Source File: StreamGraphGeneratorTest.java From flink with Apache License 2.0
@Test
public void testOutputTypeConfigurationWithTwoInputTransformation() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStream<Integer> source1 = env.fromElements(1, 10);
    DataStream<Integer> source2 = env.fromElements(2, 11);

    ConnectedStreams<Integer, Integer> connectedSource = source1.connect(source2);

    OutputTypeConfigurableOperationWithTwoInputs outputTypeConfigurableOperation =
        new OutputTypeConfigurableOperationWithTwoInputs();

    DataStream<Integer> result = connectedSource.transform(
        "Two input and output type configurable operation",
        BasicTypeInfo.INT_TYPE_INFO,
        outputTypeConfigurableOperation);

    result.addSink(new DiscardingSink<>());

    env.getStreamGraph();

    assertEquals(BasicTypeInfo.INT_TYPE_INFO, outputTypeConfigurableOperation.getTypeInformation());
}
Example 20
Source File: DataStreamTest.java From Flink-CEPplus with Apache License 2.0
@Test
public void testSelectBetweenConsecutiveSplitRejection() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStreamSource<Integer> src = env.fromElements(0, 0);

    OutputSelector<Integer> outputSelector = new DummyOutputSelector<>();

    src.split(outputSelector).select("dummy").split(outputSelector).addSink(new DiscardingSink<>());

    expectedException.expect(IllegalStateException.class);
    expectedException.expectMessage("Consecutive multiple splits are not supported. Splits are deprecated. Please use side-outputs.");

    env.getStreamGraph();
}