com.datastax.driver.mapping.Mapper Java Examples
The following examples show how to use com.datastax.driver.mapping.Mapper. Each example is taken from an open-source project; the Source File line above each snippet names the file, the project it comes from, and that project's license.
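For orientation, here is a minimal sketch of the basic Mapper workflow with the DataStax Java driver 3.x. The keyspace, table, and User entity are hypothetical placeholders, not taken from the examples below:

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Session;
import com.datastax.driver.mapping.Mapper;
import com.datastax.driver.mapping.MappingManager;
import java.util.UUID;

// assumes an annotated entity such as:
// @Table(keyspace = "demo", name = "users")
// public class User { @PartitionKey private UUID id; private String name; ... }

Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
Session session = cluster.connect("demo");

MappingManager manager = new MappingManager(session);
Mapper<User> mapper = manager.mapper(User.class);

User user = new User(UUID.randomUUID(), "alice");
mapper.save(user);                      // INSERT
User loaded = mapper.get(user.getId()); // SELECT by full primary key
mapper.delete(loaded);                  // DELETE

cluster.close();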
Example #1
Source File: CassandraPojoSinkExample.java From flink-learning with Apache License 2.0

public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStreamSource<Message> source = env.fromCollection(messages);

    CassandraSink.addSink(source)
        .setClusterBuilder(new ClusterBuilder() {
            @Override
            protected Cluster buildCluster(Cluster.Builder builder) {
                return builder.addContactPoint("127.0.0.1").build();
            }
        })
        .setMapperOptions(() -> new Mapper.Option[]{Mapper.Option.saveNullFields(true)})
        .build();

    env.execute("Cassandra Sink example");
}
Example #2
Source File: CassandraPojoSinkExample.java From flink with Apache License 2.0

public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStreamSource<Message> source = env.fromCollection(messages);

    CassandraSink.addSink(source)
        .setClusterBuilder(new ClusterBuilder() {
            @Override
            protected Cluster buildCluster(Builder builder) {
                return builder.addContactPoint("127.0.0.1").build();
            }
        })
        .setMapperOptions(() -> new Mapper.Option[]{Mapper.Option.saveNullFields(true)})
        .build();

    env.execute("Cassandra Sink example");
}
Example #3
Source File: CassandraPojoSink.java From flink with Apache License 2.0

@Override
public void open(Configuration configuration) {
    super.open(configuration);
    try {
        this.mappingManager = new MappingManager(session);
        this.mapper = mappingManager.mapper(clazz);
        if (options != null) {
            Mapper.Option[] optionsArray = options.getMapperOptions();
            if (optionsArray != null) {
                this.mapper.setDefaultSaveOptions(optionsArray);
            }
        }
    } catch (Exception e) {
        throw new RuntimeException("Cannot create CassandraPojoSink with input: " + clazz.getSimpleName(), e);
    }
}
Example #4
Source File: CassandraPojoOutputFormat.java From flink with Apache License 2.0

/**
 * Opens a Session to Cassandra and initializes the prepared statement.
 *
 * @param taskNumber The number of the parallel instance.
 */
@Override
public void open(int taskNumber, int numTasks) {
    this.session = cluster.connect();
    MappingManager mappingManager = new MappingManager(session);
    this.mapper = mappingManager.mapper(outputClass);
    if (mapperOptions != null) {
        Mapper.Option[] optionsArray = mapperOptions.getMapperOptions();
        if (optionsArray != null) {
            mapper.setDefaultSaveOptions(optionsArray);
        }
    }
    this.callback = new FutureCallback<Void>() {
        @Override
        public void onSuccess(Void ignored) {
            onWriteSuccess();
        }

        @Override
        public void onFailure(Throwable t) {
            onWriteFailure(t);
        }
    };
}
Example #5
Source File: CassandraPojoSinkExample.java From Flink-CEPplus with Apache License 2.0

public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStreamSource<Message> source = env.fromCollection(messages);

    CassandraSink.addSink(source)
        .setClusterBuilder(new ClusterBuilder() {
            @Override
            protected Cluster buildCluster(Builder builder) {
                return builder.addContactPoint("127.0.0.1").build();
            }
        })
        .setMapperOptions(() -> new Mapper.Option[]{Mapper.Option.saveNullFields(true)})
        .build();

    env.execute("Cassandra Sink example");
}
Example #6
Source File: CassandraPojoSink.java From Flink-CEPplus with Apache License 2.0

@Override
public void open(Configuration configuration) {
    super.open(configuration);
    try {
        this.mappingManager = new MappingManager(session);
        this.mapper = mappingManager.mapper(clazz);
        if (options != null) {
            Mapper.Option[] optionsArray = options.getMapperOptions();
            if (optionsArray != null) {
                this.mapper.setDefaultSaveOptions(optionsArray);
            }
        }
    } catch (Exception e) {
        throw new RuntimeException("Cannot create CassandraPojoSink with input: " + clazz.getSimpleName(), e);
    }
}
Example #7
Source File: CassandraPojoOutputFormat.java From Flink-CEPplus with Apache License 2.0

/**
 * Opens a Session to Cassandra and initializes the prepared statement.
 *
 * @param taskNumber The number of the parallel instance.
 */
@Override
public void open(int taskNumber, int numTasks) {
    this.session = cluster.connect();
    MappingManager mappingManager = new MappingManager(session);
    this.mapper = mappingManager.mapper(outputClass);
    if (mapperOptions != null) {
        Mapper.Option[] optionsArray = mapperOptions.getMapperOptions();
        if (optionsArray != null) {
            mapper.setDefaultSaveOptions(optionsArray);
        }
    }
    this.callback = new FutureCallback<Void>() {
        @Override
        public void onSuccess(Void ignored) {
            onWriteSuccess();
        }

        @Override
        public void onFailure(Throwable t) {
            onWriteFailure(t);
        }
    };
}
Example #8
Source File: CassandraPojoInputFormat.java From flink with Apache License 2.0

@Override
public void open(InputSplit split) {
    this.session = cluster.connect();
    MappingManager manager = new MappingManager(session);
    Mapper<OUT> mapper = manager.mapper(inputClass);
    if (mapperOptions != null) {
        Mapper.Option[] optionsArray = mapperOptions.getMapperOptions();
        if (optionsArray != null) {
            mapper.setDefaultGetOptions(optionsArray);
        }
    }
    this.resultSet = mapper.map(session.execute(query));
}
Example #9
Source File: CassandraPojoInputFormat.java From Flink-CEPplus with Apache License 2.0

@Override
public void open(InputSplit split) {
    this.session = cluster.connect();
    MappingManager manager = new MappingManager(session);
    Mapper<OUT> mapper = manager.mapper(inputClass);
    if (mapperOptions != null) {
        Mapper.Option[] optionsArray = mapperOptions.getMapperOptions();
        if (optionsArray != null) {
            mapper.setDefaultGetOptions(optionsArray);
        }
    }
    this.resultSet = mapper.map(session.execute(query));
}
Example #10
Source File: BatchPojoExample.java From flink with Apache License 2.0

public static void main(String[] args) throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);

    List<CustomCassandraAnnotatedPojo> customCassandraAnnotatedPojos = IntStream.range(0, 20)
        .mapToObj(x -> new CustomCassandraAnnotatedPojo(UUID.randomUUID().toString(), x, 0))
        .collect(Collectors.toList());

    DataSet<CustomCassandraAnnotatedPojo> dataSet = env.fromCollection(customCassandraAnnotatedPojos);

    ClusterBuilder clusterBuilder = new ClusterBuilder() {
        private static final long serialVersionUID = -1754532803757154795L;

        @Override
        protected Cluster buildCluster(Cluster.Builder builder) {
            return builder.addContactPoints("127.0.0.1").build();
        }
    };

    dataSet.output(new CassandraPojoOutputFormat<>(clusterBuilder, CustomCassandraAnnotatedPojo.class,
        () -> new Mapper.Option[]{Mapper.Option.saveNullFields(true)}));
    env.execute("Write");

    /*
     * This is for the purpose of showing an example of creating a DataSet using CassandraPojoInputFormat.
     */
    DataSet<CustomCassandraAnnotatedPojo> inputDS = env
        .createInput(new CassandraPojoInputFormat<>(
            SELECT_QUERY,
            clusterBuilder,
            CustomCassandraAnnotatedPojo.class,
            () -> new Mapper.Option[]{Mapper.Option.consistencyLevel(ConsistencyLevel.ANY)}
        ));

    inputDS.print();
}
Example #11
Source File: CassandraFactory.java From database-transform-tool with Apache License 2.0

/**
 * Description: delete data.
 * Date: 2017-11-15 11:26:42
 *
 * @author yi.zhang
 * @param obj the mapped entity to delete
 * @return 1 on success, -1 on failure
 */
public int delete(Object obj) {
    try {
        // lazily initialize the connection if no session exists yet
        if (session == null) {
            init(servers, keyspace, username, password);
        }
        Mapper mapper = mapping.mapper(obj.getClass());
        mapper.delete(obj);
        return 1;
    } catch (Exception e) {
        e.printStackTrace();
    }
    return -1;
}
Example #12
Source File: CassandraFactory.java From database-transform-tool with Apache License 2.0

/**
 * Description: update data.
 * Date: 2017-11-15 11:26:42
 *
 * @author yi.zhang
 * @param obj the mapped entity to update
 * @return 1 on success, -1 on failure
 */
public int update(Object obj) {
    try {
        // lazily initialize the connection if no session exists yet
        if (session == null) {
            init(servers, keyspace, username, password);
        }
        Mapper mapper = mapping.mapper(obj.getClass());
        // leave null fields untouched and apply a TTL to the written columns
        mapper.save(obj, Option.saveNullFields(false), Option.ttl(EXPIRE_TIME));
        return 1;
    } catch (Exception e) {
        e.printStackTrace();
    }
    return -1;
}
Example #13
Source File: CassandraFactory.java From database-transform-tool with Apache License 2.0

/**
 * Description: save data.
 * Date: 2017-11-15 11:26:42
 *
 * @author yi.zhang
 * @param obj the mapped entity to save
 * @return 1 on success, -1 on failure
 */
public int save(Object obj) {
    try {
        // lazily initialize the connection if no session exists yet
        if (session == null) {
            init(servers, keyspace, username, password);
        }
        Mapper mapper = mapping.mapper(obj.getClass());
        // persist null fields as well, overwriting any existing column values
        mapper.save(obj, Option.saveNullFields(true));
        return 1;
    } catch (Exception e) {
        e.printStackTrace();
    }
    return -1;
}
Example #14
Source File: BatchPojoExample.java From flink-learning with Apache License 2.0

public static void main(String[] args) throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);

    List<CustomCassandraAnnotatedPojo> customCassandraAnnotatedPojos = IntStream.range(0, 20)
        .mapToObj(x -> new CustomCassandraAnnotatedPojo(UUID.randomUUID().toString(), x, 0))
        .collect(Collectors.toList());

    DataSet<CustomCassandraAnnotatedPojo> dataSet = env.fromCollection(customCassandraAnnotatedPojos);

    ClusterBuilder clusterBuilder = new ClusterBuilder() {
        private static final long serialVersionUID = -1754532803757154795L;

        @Override
        protected Cluster buildCluster(Cluster.Builder builder) {
            return builder.addContactPoints("127.0.0.1").build();
        }
    };

    dataSet.output(new CassandraPojoOutputFormat<>(clusterBuilder, CustomCassandraAnnotatedPojo.class,
        () -> new Mapper.Option[]{Mapper.Option.saveNullFields(true)}));
    env.execute("zhisheng");

    /*
     * This is for the purpose of showing an example of creating a DataSet using CassandraPojoInputFormat.
     */
    DataSet<CustomCassandraAnnotatedPojo> inputDS = env
        .createInput(new CassandraPojoInputFormat<>(
            SELECT_QUERY,
            clusterBuilder,
            CustomCassandraAnnotatedPojo.class,
            () -> new Mapper.Option[]{Mapper.Option.consistencyLevel(ConsistencyLevel.ANY)}
        ));

    inputDS.print();
}
Example #15
Source File: BatchPojoExample.java From Flink-CEPplus with Apache License 2.0

public static void main(String[] args) throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);

    List<CustomCassandraAnnotatedPojo> customCassandraAnnotatedPojos = IntStream.range(0, 20)
        .mapToObj(x -> new CustomCassandraAnnotatedPojo(UUID.randomUUID().toString(), x, 0))
        .collect(Collectors.toList());

    DataSet<CustomCassandraAnnotatedPojo> dataSet = env.fromCollection(customCassandraAnnotatedPojos);

    ClusterBuilder clusterBuilder = new ClusterBuilder() {
        private static final long serialVersionUID = -1754532803757154795L;

        @Override
        protected Cluster buildCluster(Cluster.Builder builder) {
            return builder.addContactPoints("127.0.0.1").build();
        }
    };

    dataSet.output(new CassandraPojoOutputFormat<>(clusterBuilder, CustomCassandraAnnotatedPojo.class,
        () -> new Mapper.Option[]{Mapper.Option.saveNullFields(true)}));
    env.execute("Write");

    /*
     * This is for the purpose of showing an example of creating a DataSet using CassandraPojoInputFormat.
     */
    DataSet<CustomCassandraAnnotatedPojo> inputDS = env
        .createInput(new CassandraPojoInputFormat<>(
            SELECT_QUERY,
            clusterBuilder,
            CustomCassandraAnnotatedPojo.class,
            () -> new Mapper.Option[]{Mapper.Option.consistencyLevel(ConsistencyLevel.ANY)}
        ));

    inputDS.print();
}
Example #16
Source File: CassandraConnectorITCase.java From flink with Apache License 2.0

@Test
public void testCassandraBatchPojoFormat() throws Exception {
    session.execute(CREATE_TABLE_QUERY.replace(TABLE_NAME_VARIABLE, CustomCassandraAnnotatedPojo.TABLE_NAME));

    OutputFormat<CustomCassandraAnnotatedPojo> sink = new CassandraPojoOutputFormat<>(builder, CustomCassandraAnnotatedPojo.class,
        () -> new Mapper.Option[]{Mapper.Option.saveNullFields(true)});

    List<CustomCassandraAnnotatedPojo> customCassandraAnnotatedPojos = IntStream.range(0, 20)
        .mapToObj(x -> new CustomCassandraAnnotatedPojo(UUID.randomUUID().toString(), x, 0))
        .collect(Collectors.toList());
    try {
        sink.configure(new Configuration());
        sink.open(0, 1);

        for (CustomCassandraAnnotatedPojo customCassandraAnnotatedPojo : customCassandraAnnotatedPojos) {
            sink.writeRecord(customCassandraAnnotatedPojo);
        }
    } finally {
        sink.close();
    }

    ResultSet rs = session.execute(SELECT_DATA_QUERY.replace(TABLE_NAME_VARIABLE, CustomCassandraAnnotatedPojo.TABLE_NAME));
    Assert.assertEquals(20, rs.all().size());

    InputFormat<CustomCassandraAnnotatedPojo, InputSplit> source = new CassandraPojoInputFormat<>(
        SELECT_DATA_QUERY.replace(TABLE_NAME_VARIABLE, "batches"), builder, CustomCassandraAnnotatedPojo.class);
    List<CustomCassandraAnnotatedPojo> result = new ArrayList<>();

    try {
        source.configure(new Configuration());
        source.open(null);
        while (!source.reachedEnd()) {
            CustomCassandraAnnotatedPojo temp = source.nextRecord(null);
            result.add(temp);
        }
    } finally {
        source.close();
    }

    Assert.assertEquals(20, result.size());
    result.sort(Comparator.comparingInt(CustomCassandraAnnotatedPojo::getCounter));
    customCassandraAnnotatedPojos.sort(Comparator.comparingInt(CustomCassandraAnnotatedPojo::getCounter));

    assertThat(result, samePropertyValuesAs(customCassandraAnnotatedPojos));
}
Example #17
Source File: CassandraPojoSink.java From blog_demos with Apache License 2.0

public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    // set the parallelism
    env.setParallelism(1);

    // properties used to connect to Kafka
    Properties properties = new Properties();
    // broker address
    properties.setProperty("bootstrap.servers", "192.168.50.43:9092");
    // ZooKeeper address
    properties.setProperty("zookeeper.connect", "192.168.50.43:2181");
    // consumer group id
    properties.setProperty("group.id", "flink-connector");

    // instantiate the consumer
    FlinkKafkaConsumer<String> flinkKafkaConsumer = new FlinkKafkaConsumer<>(
        "test001",
        new SimpleStringSchema(),
        properties
    );

    // start consuming from the latest offset, effectively discarding historical messages
    flinkKafkaConsumer.setStartFromLatest();

    // obtain the DataStream via addSource
    DataStream<String> dataStream = env.addSource(flinkKafkaConsumer);

    DataStream<WordCount> result = dataStream
        .flatMap(new FlatMapFunction<String, WordCount>() {
            @Override
            public void flatMap(String s, Collector<WordCount> collector) throws Exception {
                String[] words = s.toLowerCase().split("\\s");
                for (String word : words) {
                    if (!word.isEmpty()) {
                        // in the Cassandra table each word is a primary key, so it must not be empty
                        collector.collect(new WordCount(word, 1L));
                    }
                }
            }
        })
        .keyBy("word")
        .timeWindow(Time.seconds(5))
        .reduce(new ReduceFunction<WordCount>() {
            @Override
            public WordCount reduce(WordCount wordCount, WordCount t1) throws Exception {
                return new WordCount(wordCount.getWord(), wordCount.getCount() + t1.getCount());
            }
        });

    result.addSink(new PrintSinkFunction<>())
        .name("print Sink")
        .disableChaining();

    CassandraSink.addSink(result)
        .setHost("192.168.133.168")
        .setMapperOptions(() -> new Mapper.Option[] { Mapper.Option.saveNullFields(true) })
        .build()
        .name("cassandra Sink")
        .disableChaining();

    env.execute("kafka-2.4 source, cassandra-3.11.6 sink, pojo");
}
Example #18
Source File: CassandraAbstractModelDao.java From iotplatform with Apache License 2.0

protected Mapper<E> getMapper() {
    return cluster.getMapper(getColumnFamilyClass());
}
Example #19
Source File: AbstractCassandraCluster.java From iotplatform with Apache License 2.0

public <T> Mapper<T> getMapper(Class<T> clazz) {
    return mappingManager.mapper(clazz);
}
Example #20
Source File: CustomerDao.java From micro-service with MIT License

@PostConstruct
public void init() {
    session = cluster.connect("mykeyspace");
    customerMapper = new MappingManager(session).mapper(Customer.class);
    // do not overwrite existing columns with nulls on save
    customerMapper.setDefaultSaveOptions(Mapper.Option.saveNullFields(false));
}
Example #21
Source File: CassandraConnectorITCase.java From Flink-CEPplus with Apache License 2.0

@Test
public void testCassandraBatchPojoFormat() throws Exception {
    session.execute(CREATE_TABLE_QUERY.replace(TABLE_NAME_VARIABLE, CustomCassandraAnnotatedPojo.TABLE_NAME));

    OutputFormat<CustomCassandraAnnotatedPojo> sink = new CassandraPojoOutputFormat<>(builder, CustomCassandraAnnotatedPojo.class,
        () -> new Mapper.Option[]{Mapper.Option.saveNullFields(true)});

    List<CustomCassandraAnnotatedPojo> customCassandraAnnotatedPojos = IntStream.range(0, 20)
        .mapToObj(x -> new CustomCassandraAnnotatedPojo(UUID.randomUUID().toString(), x, 0))
        .collect(Collectors.toList());
    try {
        sink.configure(new Configuration());
        sink.open(0, 1);

        for (CustomCassandraAnnotatedPojo customCassandraAnnotatedPojo : customCassandraAnnotatedPojos) {
            sink.writeRecord(customCassandraAnnotatedPojo);
        }
    } finally {
        sink.close();
    }

    ResultSet rs = session.execute(SELECT_DATA_QUERY.replace(TABLE_NAME_VARIABLE, CustomCassandraAnnotatedPojo.TABLE_NAME));
    Assert.assertEquals(20, rs.all().size());

    InputFormat<CustomCassandraAnnotatedPojo, InputSplit> source = new CassandraPojoInputFormat<>(
        SELECT_DATA_QUERY.replace(TABLE_NAME_VARIABLE, "batches"), builder, CustomCassandraAnnotatedPojo.class);
    List<CustomCassandraAnnotatedPojo> result = new ArrayList<>();

    try {
        source.configure(new Configuration());
        source.open(null);
        while (!source.reachedEnd()) {
            CustomCassandraAnnotatedPojo temp = source.nextRecord(null);
            result.add(temp);
        }
    } finally {
        source.close();
    }

    Assert.assertEquals(20, result.size());
    result.sort(Comparator.comparingInt(CustomCassandraAnnotatedPojo::getCounter));
    customCassandraAnnotatedPojos.sort(Comparator.comparingInt(CustomCassandraAnnotatedPojo::getCounter));

    assertThat(result, samePropertyValuesAs(customCassandraAnnotatedPojos));
}
Example #22
Source File: MapperOptions.java From flink with Apache License 2.0

/**
 * Returns an array of {@link com.datastax.driver.mapping.Mapper.Option} that are used to configure the mapper.
 *
 * @return array of options used to configure the mapper.
 */
Mapper.Option[] getMapperOptions();
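Since getMapperOptions() is the interface's only abstract method, implementations are typically supplied as lambdas, exactly as the sink and format examples above do. A small sketch, using only options that already appear in this document:

// equivalent to an anonymous MapperOptions implementation
MapperOptions options = () -> new Mapper.Option[] {
    Mapper.Option.saveNullFields(true),
    Mapper.Option.consistencyLevel(ConsistencyLevel.ANY)
};
Mapper.Option[] resolved = options.getMapperOptions(); // what the connector reads back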