org.apache.flink.streaming.connectors.kafka.testutils.IntegerSource Java Examples
The following examples show how to use
org.apache.flink.streaming.connectors.kafka.testutils.IntegerSource.
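Before the full test examples, here is a minimal, hypothetical sketch of how IntegerSource can be plugged into a streaming job. The element count and job name are placeholders, and the assumption that IntegerSource emits a bounded sequence of numElements integers is inferred from how the tests below compare its output against getIntegersSequence(numElements); it is not taken verbatim from the original code.

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.testutils.IntegerSource;

public class IntegerSourceSketch {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // The exactly-once tests below enable checkpointing, so this sketch does the same.
        env.enableCheckpointing(500);
        env.setParallelism(1);

        // Assumed behavior: emit a bounded sequence of 1000 integers, then finish.
        DataStream<Integer> numbers = env.addSource(new IntegerSource(1000));
        numbers.print();

        env.execute("IntegerSource sketch");
    }
}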
Example #1
Source File: KafkaProducerTestBase.java, from the Flink-CEPplus project (Apache License 2.0)
/**
 * This test sets KafkaProducer so that it will automatically flush the data
 * and fails the broker to check whether flushed records since last checkpoint were not duplicated.
 */
protected void testExactlyOnce(boolean regularSink, int sinksCount) throws Exception {
    final String topic = (regularSink ? "exactlyOnceTopicRegularSink" : "exactlyTopicCustomOperator") + sinksCount;
    final int partition = 0;
    final int numElements = 1000;
    final int failAfterElements = 333;

    for (int i = 0; i < sinksCount; i++) {
        createTestTopic(topic + i, 1, 1);
    }

    TypeInformationSerializationSchema<Integer> schema =
        new TypeInformationSerializationSchema<>(BasicTypeInfo.INT_TYPE_INFO, new ExecutionConfig());
    KeyedSerializationSchema<Integer> keyedSerializationSchema = new KeyedSerializationSchemaWrapper<>(schema);

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.enableCheckpointing(500);
    env.setParallelism(1);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0));
    env.getConfig().disableSysoutLogging();

    Properties properties = new Properties();
    properties.putAll(standardProps);
    properties.putAll(secureProps);

    // process exactly failAfterElements number of elements and then shutdown Kafka broker and fail application
    List<Integer> expectedElements = getIntegersSequence(numElements);
    DataStream<Integer> inputStream = env
        .addSource(new IntegerSource(numElements))
        .map(new FailingIdentityMapper<Integer>(failAfterElements));

    for (int i = 0; i < sinksCount; i++) {
        FlinkKafkaPartitioner<Integer> partitioner = new FlinkKafkaPartitioner<Integer>() {
            @Override
            public int partition(Integer record, byte[] key, byte[] value, String targetTopic, int[] partitions) {
                return partition;
            }
        };

        if (regularSink) {
            StreamSink<Integer> kafkaSink =
                kafkaServer.getProducerSink(topic + i, keyedSerializationSchema, properties, partitioner);
            inputStream.addSink(kafkaSink.getUserFunction());
        } else {
            kafkaServer.produceIntoKafka(inputStream, topic + i, keyedSerializationSchema, properties, partitioner);
        }
    }

    FailingIdentityMapper.failedBefore = false;
    TestUtils.tryExecute(env, "Exactly once test");

    for (int i = 0; i < sinksCount; i++) {
        // assert that before failure we successfully snapshot/flushed all expected elements
        assertExactlyOnceForTopic(
            properties,
            topic + i,
            partition,
            expectedElements,
            KAFKA_READ_TIMEOUT);
        deleteTestTopic(topic + i);
    }
}
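The induced failure in the test above comes from FailingIdentityMapper, whose real implementation lives in Flink's test utilities. The following is only a conceptual sketch (the class name and fields are hypothetical, not Flink's actual code) of the idea the test relies on: forward every record unchanged and throw once a configured number of elements has passed through, so the job fails and restarts from the last checkpoint.

import org.apache.flink.api.common.functions.MapFunction;

// Hypothetical stand-in for FailingIdentityMapper; NOT the actual Flink test utility.
public class HypotheticalFailingMapper<T> implements MapFunction<T, T> {

    // Set once the artificial failure has happened, so the restarted job can run to completion.
    public static volatile boolean failedBefore = false;

    private final int failAfter;
    private int seen = 0;

    public HypotheticalFailingMapper(int failAfter) {
        this.failAfter = failAfter;
    }

    @Override
    public T map(T value) throws Exception {
        // Pass records through unchanged; fail exactly once after failAfter elements.
        if (!failedBefore && ++seen >= failAfter) {
            failedBefore = true;
            throw new Exception("Artificial failure after " + failAfter + " elements");
        }
        return value;
    }
}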
Example #2
Source File: KafkaProducerTestBase.java, from the flink project (Apache License 2.0)
/**
 * This test sets KafkaProducer so that it will automatically flush the data
 * and fails the broker to check whether flushed records since last checkpoint were not duplicated.
 */
protected void testExactlyOnce(boolean regularSink, int sinksCount) throws Exception {
    final String topic = (regularSink ? "exactlyOnceTopicRegularSink" : "exactlyTopicCustomOperator") + sinksCount;
    final int partition = 0;
    final int numElements = 1000;
    final int failAfterElements = 333;

    for (int i = 0; i < sinksCount; i++) {
        createTestTopic(topic + i, 1, 1);
    }

    TypeInformationSerializationSchema<Integer> schema =
        new TypeInformationSerializationSchema<>(BasicTypeInfo.INT_TYPE_INFO, new ExecutionConfig());
    KeyedSerializationSchema<Integer> keyedSerializationSchema = new KeyedSerializationSchemaWrapper<>(schema);

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.enableCheckpointing(500);
    env.setParallelism(1);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0));
    env.getConfig().disableSysoutLogging();

    Properties properties = new Properties();
    properties.putAll(standardProps);
    properties.putAll(secureProps);

    // process exactly failAfterElements number of elements and then shutdown Kafka broker and fail application
    List<Integer> expectedElements = getIntegersSequence(numElements);
    DataStream<Integer> inputStream = env
        .addSource(new IntegerSource(numElements))
        .map(new FailingIdentityMapper<Integer>(failAfterElements));

    for (int i = 0; i < sinksCount; i++) {
        FlinkKafkaPartitioner<Integer> partitioner = new FlinkKafkaPartitioner<Integer>() {
            @Override
            public int partition(Integer record, byte[] key, byte[] value, String targetTopic, int[] partitions) {
                return partition;
            }
        };

        if (regularSink) {
            StreamSink<Integer> kafkaSink =
                kafkaServer.getProducerSink(topic + i, keyedSerializationSchema, properties, partitioner);
            inputStream.addSink(kafkaSink.getUserFunction());
        } else {
            kafkaServer.produceIntoKafka(inputStream, topic + i, keyedSerializationSchema, properties, partitioner);
        }
    }

    FailingIdentityMapper.failedBefore = false;
    TestUtils.tryExecute(env, "Exactly once test");

    for (int i = 0; i < sinksCount; i++) {
        // assert that before failure we successfully snapshot/flushed all expected elements
        assertExactlyOnceForTopic(
            properties,
            topic + i,
            partition,
            expectedElements,
            KAFKA_READ_TIMEOUT);
        deleteTestTopic(topic + i);
    }
}
Example #3
Source File: KafkaProducerTestBase.java, from the flink project (Apache License 2.0)
/**
 * This test sets KafkaProducer so that it will automatically flush the data
 * and fails the broker to check whether flushed records since last checkpoint were not duplicated.
 */
protected void testExactlyOnce(boolean regularSink, int sinksCount) throws Exception {
    final String topic = (regularSink ? "exactlyOnceTopicRegularSink" : "exactlyTopicCustomOperator") + sinksCount;
    final int partition = 0;
    final int numElements = 1000;
    final int failAfterElements = 333;

    for (int i = 0; i < sinksCount; i++) {
        createTestTopic(topic + i, 1, 1);
    }

    TypeInformationSerializationSchema<Integer> schema =
        new TypeInformationSerializationSchema<>(BasicTypeInfo.INT_TYPE_INFO, new ExecutionConfig());

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.enableCheckpointing(500);
    env.setParallelism(1);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0));

    Properties properties = new Properties();
    properties.putAll(standardProps);
    properties.putAll(secureProps);

    // process exactly failAfterElements number of elements and then shutdown Kafka broker and fail application
    List<Integer> expectedElements = getIntegersSequence(numElements);
    DataStream<Integer> inputStream = env
        .addSource(new IntegerSource(numElements))
        .map(new FailingIdentityMapper<Integer>(failAfterElements));

    for (int i = 0; i < sinksCount; i++) {
        FlinkKafkaPartitioner<Integer> partitioner = new FlinkKafkaPartitioner<Integer>() {
            @Override
            public int partition(Integer record, byte[] key, byte[] value, String targetTopic, int[] partitions) {
                return partition;
            }
        };

        if (regularSink) {
            StreamSink<Integer> kafkaSink =
                kafkaServer.getProducerSink(topic + i, schema, properties, partitioner);
            inputStream.addSink(kafkaSink.getUserFunction());
        } else {
            kafkaServer.produceIntoKafka(inputStream, topic + i, schema, properties, partitioner);
        }
    }

    FailingIdentityMapper.failedBefore = false;
    TestUtils.tryExecute(env, "Exactly once test");

    for (int i = 0; i < sinksCount; i++) {
        // assert that before failure we successfully snapshot/flushed all expected elements
        assertExactlyOnceForTopic(
            properties,
            topic + i,
            partition,
            expectedElements,
            KAFKA_READ_TIMEOUT);
        deleteTestTopic(topic + i);
    }
}
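In all three examples, expectedElements is produced by getIntegersSequence(numElements) and compared against what actually lands in Kafka. The helper's body is not shown on this page; a hypothetical equivalent, under the assumption that it simply builds the list 0..numElements-1 (inferred from how it pairs with IntegerSource(numElements)), might look like this:

import java.util.ArrayList;
import java.util.List;

final class SequenceHelper {

    // Hypothetical stand-in for getIntegersSequence(n): the integers 0..n-1 in order.
    static List<Integer> integersSequence(int size) {
        List<Integer> result = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            result.add(i);
        }
        return result;
    }

    private SequenceHelper() {}
}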