org.apache.flink.streaming.connectors.cassandra.CassandraSink Java Examples
The following examples show how to use org.apache.flink.streaming.connectors.cassandra.CassandraSink. They are drawn from several open-source projects; the source file, project, and license are noted above each example.
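Three usage patterns recur in the examples. POJO sinks hand annotated objects to the DataStax object mapper (setClusterBuilder plus setMapperOptions); tuple sinks bind the fields of a Tuple2 to the placeholders of a parameterized CQL INSERT supplied via setQuery; and write-ahead-log sinks (enableWriteAheadLog) stage records with Flink's checkpointing mechanism, so that writes are committed only after a checkpoint completes.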
Example #1
Source File: CassandraPojoSinkExample.java From flink with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStreamSource<Message> source = env.fromCollection(messages);

    CassandraSink.addSink(source)
        .setClusterBuilder(new ClusterBuilder() {
            @Override
            protected Cluster buildCluster(Builder builder) {
                return builder.addContactPoint("127.0.0.1").build();
            }
        })
        .setMapperOptions(() -> new Mapper.Option[]{Mapper.Option.saveNullFields(true)})
        .build();

    env.execute("Cassandra Sink example");
}
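Example #1 references a Message POJO and a messages collection that are defined elsewhere in the source file. The POJO variant of CassandraSink relies on the DataStax object mapper, so the class must carry mapping annotations. A minimal sketch of plausible definitions follows; the keyspace, table, and column names are illustrative assumptions, not taken from the listing above.

import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;

import com.datastax.driver.mapping.annotations.Column;
import com.datastax.driver.mapping.annotations.Table;

// Hypothetical POJO for the example above; keyspace, table, and column names are assumptions.
@Table(keyspace = "test", name = "message")
public class Message implements Serializable {

    private static final long serialVersionUID = 1L;

    @Column(name = "body")
    private String body;

    public Message() {
        // the DataStax object mapper requires a no-arg constructor
    }

    public Message(String body) {
        this.body = body;
    }

    public String getBody() {
        return body;
    }

    public void setBody(String body) {
        this.body = body;
    }
}

// A matching input collection for env.fromCollection(messages):
private static final List<Message> messages = new ArrayList<>();

static {
    for (int i = 0; i < 20; i++) {
        messages.add(new Message("cassandra-" + i));
    }
}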
Example #2
Source File: CassandraTupleSinkExample.java From flink with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStreamSource<Tuple2<String, Integer>> source = env.fromCollection(collection);

    CassandraSink.addSink(source)
        .setQuery(INSERT)
        .setClusterBuilder(new ClusterBuilder() {
            @Override
            protected Cluster buildCluster(Builder builder) {
                return builder.addContactPoint("127.0.0.1").build();
            }
        })
        .build();

    env.execute("WriteTupleIntoCassandra");
}
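The INSERT constant and the collection field used above are declared in the same example file. A sketch of compatible definitions follows, with the table and column names as assumptions; note that the two CQL placeholders line up positionally with the tuple's f0 and f1 fields.

import java.util.ArrayList;
import java.util.List;

import org.apache.flink.api.java.tuple.Tuple2;

// Hypothetical definitions for the fields the example references.
private static final String INSERT =
    "INSERT INTO test.writetuple (element1, element2) VALUES (?, ?)";

private static final List<Tuple2<String, Integer>> collection = new ArrayList<>(20);

static {
    for (int i = 0; i < 20; i++) {
        collection.add(new Tuple2<>("cassandra-" + i, i));
    }
}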
Example #3
Source File: CassandraTupleWriteAheadSinkExample.java From flink with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(1000);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 1000));
    env.setStateBackend(new FsStateBackend("file:///" + System.getProperty("java.io.tmpdir") + "/flink/backend"));

    CassandraSink<Tuple2<String, Integer>> sink = CassandraSink.addSink(env.addSource(new MySource()))
        .setQuery("INSERT INTO example.values (id, counter) values (?, ?);")
        .enableWriteAheadLog()
        .setClusterBuilder(new ClusterBuilder() {

            private static final long serialVersionUID = 2793938419775311824L;

            @Override
            public Cluster buildCluster(Cluster.Builder builder) {
                return builder.addContactPoint("127.0.0.1").build();
            }
        })
        .build();

    sink.name("Cassandra Sink").disableChaining().setParallelism(1).uid("hello");

    env.execute();
}
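enableWriteAheadLog() makes the sink buffer records in the state backend and commit them to Cassandra only once a checkpoint completes, which is why this example turns on checkpointing, a restart strategy, and an FsStateBackend. The MySource it reads from is defined elsewhere in the file; below is a minimal stand-in that fits the query's (id, counter) placeholders, with the class name, bounds, and emission logic all assumptions.

import java.util.UUID;

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.functions.source.SourceFunction;

// Hypothetical stand-in for MySource: a bounded stream of (id, counter) pairs.
public class MySource implements SourceFunction<Tuple2<String, Integer>> {

    private static final long serialVersionUID = 1L;

    private volatile boolean running = true;

    @Override
    public void run(SourceContext<Tuple2<String, Integer>> ctx) throws Exception {
        int counter = 0;
        while (running && counter < 100) {
            // emit under the checkpoint lock so records do not interleave with checkpoints
            synchronized (ctx.getCheckpointLock()) {
                ctx.collect(Tuple2.of(UUID.randomUUID().toString(), counter++));
            }
            Thread.sleep(50);
        }
    }

    @Override
    public void cancel() {
        running = false;
    }
}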
Example #4
Source File: CassandraPojoSinkExample.java From flink-learning with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStreamSource<Message> source = env.fromCollection(messages);

    CassandraSink.addSink(source)
        .setClusterBuilder(new ClusterBuilder() {
            @Override
            protected Cluster buildCluster(Cluster.Builder builder) {
                return builder.addContactPoint("127.0.0.1").build();
            }
        })
        .setMapperOptions(() -> new Mapper.Option[]{Mapper.Option.saveNullFields(true)})
        .build();

    env.execute("Cassandra Sink example");
}
Example #5
Source File: CassandraTupleSinkExample.java From flink-learning with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStreamSource<Tuple2<String, Integer>> source = env.fromCollection(collection);

    CassandraSink.addSink(source)
        .setQuery(INSERT)
        .setClusterBuilder(new ClusterBuilder() {
            @Override
            protected Cluster buildCluster(Cluster.Builder builder) {
                return builder.addContactPoint("127.0.0.1").build();
            }
        })
        .build();

    env.execute("WriteTupleIntoCassandra");
}
Example #6
Source File: CassandraTupleWriteAheadSinkExample.java From flink-learning with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(1000);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 1000));
    env.setStateBackend(new FsStateBackend("file:///" + System.getProperty("java.io.tmpdir") + "/flink/backend"));

    CassandraSink<Tuple2<String, Integer>> sink = CassandraSink.addSink(env.addSource(new MySource()))
        .setQuery("INSERT INTO zhisheng.values (id, counter) values (?, ?);")
        .enableWriteAheadLog()
        .setClusterBuilder(new ClusterBuilder() {

            private static final long serialVersionUID = 2793938419775311824L;

            @Override
            public Cluster buildCluster(Cluster.Builder builder) {
                return builder.addContactPoint("127.0.0.1").build();
            }
        })
        .build();

    sink.name("Cassandra Sink").disableChaining().setParallelism(1).uid("hello");

    env.execute();
}
Example #7
Source File: CassandraTupleWriteAheadSinkExample.java From Flink-CEPplus with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(1000);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 1000));
    env.setStateBackend(new FsStateBackend("file:///" + System.getProperty("java.io.tmpdir") + "/flink/backend"));

    CassandraSink<Tuple2<String, Integer>> sink = CassandraSink.addSink(env.addSource(new MySource()))
        .setQuery("INSERT INTO example.values (id, counter) values (?, ?);")
        .enableWriteAheadLog()
        .setClusterBuilder(new ClusterBuilder() {

            private static final long serialVersionUID = 2793938419775311824L;

            @Override
            public Cluster buildCluster(Cluster.Builder builder) {
                return builder.addContactPoint("127.0.0.1").build();
            }
        })
        .build();

    sink.name("Cassandra Sink").disableChaining().setParallelism(1).uid("hello");

    env.execute();
}
Example #8
Source File: CassandraPojoSinkExample.java From Flink-CEPplus with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStreamSource<Message> source = env.fromCollection(messages);

    CassandraSink.addSink(source)
        .setClusterBuilder(new ClusterBuilder() {
            @Override
            protected Cluster buildCluster(Builder builder) {
                return builder.addContactPoint("127.0.0.1").build();
            }
        })
        .setMapperOptions(() -> new Mapper.Option[]{Mapper.Option.saveNullFields(true)})
        .build();

    env.execute("Cassandra Sink example");
}
Example #9
Source File: CassandraTupleSinkExample.java From Flink-CEPplus with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStreamSource<Tuple2<String, Integer>> source = env.fromCollection(collection);

    CassandraSink.addSink(source)
        .setQuery(INSERT)
        .setClusterBuilder(new ClusterBuilder() {
            @Override
            protected Cluster buildCluster(Builder builder) {
                return builder.addContactPoint("127.0.0.1").build();
            }
        })
        .build();

    env.execute("WriteTupleIntoCassandra");
}
Example #10
Source File: CassandraPojoSink.java From blog_demos with Apache License 2.0
public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    // set the parallelism
    env.setParallelism(1);

    // properties used to connect to Kafka
    Properties properties = new Properties();
    // broker address
    properties.setProperty("bootstrap.servers", "192.168.50.43:9092");
    // ZooKeeper address
    properties.setProperty("zookeeper.connect", "192.168.50.43:2181");
    // consumer group id
    properties.setProperty("group.id", "flink-connector");

    // instantiate the consumer
    FlinkKafkaConsumer<String> flinkKafkaConsumer = new FlinkKafkaConsumer<>(
        "test001",
        new SimpleStringSchema(),
        properties
    );

    // start from the latest offset, effectively discarding historical messages
    flinkKafkaConsumer.setStartFromLatest();

    // obtain the DataStream via addSource
    DataStream<String> dataStream = env.addSource(flinkKafkaConsumer);

    DataStream<WordCount> result = dataStream
        .flatMap(new FlatMapFunction<String, WordCount>() {
            @Override
            public void flatMap(String s, Collector<WordCount> collector) throws Exception {
                String[] words = s.toLowerCase().split("\\s");
                for (String word : words) {
                    if (!word.isEmpty()) {
                        // every word is a primary key in the Cassandra table, so it must not be empty
                        collector.collect(new WordCount(word, 1L));
                    }
                }
            }
        })
        .keyBy("word")
        .timeWindow(Time.seconds(5))
        .reduce(new ReduceFunction<WordCount>() {
            @Override
            public WordCount reduce(WordCount wordCount, WordCount t1) throws Exception {
                return new WordCount(wordCount.getWord(), wordCount.getCount() + t1.getCount());
            }
        });

    result.addSink(new PrintSinkFunction<>())
        .name("print Sink")
        .disableChaining();

    CassandraSink.addSink(result)
        .setHost("192.168.133.168")
        .setMapperOptions(() -> new Mapper.Option[] { Mapper.Option.saveNullFields(true) })
        .build()
        .name("cassandra Sink")
        .disableChaining();

    env.execute("kafka-2.4 source, cassandra-3.11.6 sink, pojo");
}
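WordCount here is the repository's own mapped POJO. Since the companion Tuple2 job (Example #11 below) writes to example.wordcount, the mapping presumably targets the same table; the sketch below reflects that assumption, and the annotations and names are not taken from the repository.

import com.datastax.driver.mapping.annotations.Column;
import com.datastax.driver.mapping.annotations.Table;

// Hypothetical sketch of the WordCount POJO; keyspace and table are assumed
// to match the example.wordcount table used by the Tuple2 variant.
@Table(keyspace = "example", name = "wordcount")
public class WordCount {

    @Column(name = "word")
    private String word = "";

    @Column(name = "count")
    private long count = 0L;

    public WordCount() {
    }

    public WordCount(String word, long count) {
        this.word = word;
        this.count = count;
    }

    public String getWord() {
        return word;
    }

    public void setWord(String word) {
        this.word = word;
    }

    public long getCount() {
        return count;
    }

    public void setCount(long count) {
        this.count = count;
    }
}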
Example #11
Source File: CassandraTuple2Sink.java From blog_demos with Apache License 2.0
public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    // set the parallelism
    env.setParallelism(1);

    // properties used to connect to Kafka
    Properties properties = new Properties();
    // broker address
    properties.setProperty("bootstrap.servers", "192.168.50.43:9092");
    // ZooKeeper address
    properties.setProperty("zookeeper.connect", "192.168.50.43:2181");
    // consumer group id
    properties.setProperty("group.id", "flink-connector");

    // instantiate the consumer
    FlinkKafkaConsumer<String> flinkKafkaConsumer = new FlinkKafkaConsumer<>(
        "test001",
        new SimpleStringSchema(),
        properties
    );

    // start from the latest offset, effectively discarding historical messages
    flinkKafkaConsumer.setStartFromLatest();

    // obtain the DataStream via addSource
    DataStream<String> dataStream = env.addSource(flinkKafkaConsumer);

    DataStream<Tuple2<String, Long>> result = dataStream
        .flatMap(new FlatMapFunction<String, Tuple2<String, Long>>() {
            @Override
            public void flatMap(String value, Collector<Tuple2<String, Long>> out) {
                String[] words = value.toLowerCase().split("\\s");
                for (String word : words) {
                    // every word is a primary key in the Cassandra table, so it must not be empty
                    if (!word.isEmpty()) {
                        out.collect(new Tuple2<String, Long>(word, 1L));
                    }
                }
            }
        })
        .keyBy(0)
        .timeWindow(Time.seconds(5))
        .sum(1);

    result.addSink(new PrintSinkFunction<>())
        .name("print Sink")
        .disableChaining();

    CassandraSink.addSink(result)
        .setQuery("INSERT INTO example.wordcount(word, count) values (?, ?);")
        .setHost("192.168.133.168")
        .build()
        .name("cassandra Sink")
        .disableChaining();

    env.execute("kafka-2.4 source, cassandra-3.11.6 sink, tuple2");
}
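Both blog_demos jobs assume the target keyspace and table already exist in Cassandra. A schema along the lines of CREATE TABLE example.wordcount (word text PRIMARY KEY, count bigint) would match the query above and the comment that each word serves as the primary key, though the exact keyspace definition and replication settings are deployment-specific assumptions.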