Java Code Examples for kafka.javaapi.consumer.ConsumerConnector#createMessageStreams()
The following examples show how to use kafka.javaapi.consumer.ConsumerConnector#createMessageStreams(). Each example is taken from an open-source project; the source file, project, and license are listed above it.
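All of the examples below follow the same basic pattern of Kafka's old high-level consumer API: build a ConsumerConfig, create a ConsumerConnector, pass a map of topic names to stream counts to createMessageStreams(), and iterate the returned KafkaStream(s). The following minimal sketch shows that pattern in isolation; the ZooKeeper address, group id, and topic name are placeholder values, not taken from any of the projects listed below.

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

public class CreateMessageStreamsSketch {

    public static void main(String[] args) {
        // Placeholder connection settings -- adjust for your own environment.
        Properties props = new Properties();
        props.put("zookeeper.connect", "localhost:2181");
        props.put("group.id", "example-group");
        props.put("auto.offset.reset", "smallest");

        ConsumerConnector connector =
                Consumer.createJavaConsumerConnector(new ConsumerConfig(props));

        // The map key is the topic name; the value is how many KafkaStreams
        // the connector should create for that topic.
        Map<String, Integer> topicCountMap = Collections.singletonMap("example-topic", 1);
        Map<String, List<KafkaStream<byte[], byte[]>>> streams =
                connector.createMessageStreams(topicCountMap);

        // Iterate the single stream and print each message payload.
        ConsumerIterator<byte[], byte[]> it = streams.get("example-topic").get(0).iterator();
        while (it.hasNext()) {
            MessageAndMetadata<byte[], byte[]> record = it.next();
            System.out.println(new String(record.message()));
        }

        connector.shutdown();
    }
}

The stream count in the topic map controls how many KafkaStreams are returned per topic; the examples that consume with a thread pool request one stream per worker thread and submit each stream to its own task.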
Example 1
Source File: kafkaConsumer.java From Transwarp-Sample-Code with MIT License | 6 votes |
/**
 * Create a thread pool and run the Kafka consumers.
 */
public void go() {
    Constant constant = new Constant();
    kafkaProperties kafkaProperties = new kafkaProperties();
    ConsumerConfig config = new ConsumerConfig(kafkaProperties.properties());
    ExecutorService executorService = Executors.newFixedThreadPool(
            Integer.parseInt(constant.THREAD_POOL_SIZE));
    String topic = constant.TOPIC;
    // Task[] tasks = new Task[Integer.parseInt(constant.THREAD_NUM)];
    Map<String, Integer> topicCountMap = new HashMap<>();
    topicCountMap.put(topic, new Integer(constant.THREAD_NUM));
    ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(config);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
            consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    for (KafkaStream stream : streams) {
        executorService.submit(new Task(stream));
    }
    executorService.shutdown();
}
Example 2
Source File: KafkaChannel.java From flume-plugin with Apache License 2.0 | 6 votes |
private synchronized ConsumerAndIterator createConsumerAndIter() {
    try {
        ConsumerConfig consumerConfig = new ConsumerConfig(kafkaConf);
        ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
                consumer.createMessageStreams(topicCountMap);
        final List<KafkaStream<byte[], byte[]>> streamList = consumerMap.get(topic.get());
        KafkaStream<byte[], byte[]> stream = streamList.remove(0);
        ConsumerAndIterator ret = new ConsumerAndIterator(consumer, stream.iterator(), channelUUID);
        consumers.add(ret);
        LOGGER.info("Created new consumer to connect to Kafka");
        return ret;
    } catch (Exception e) {
        throw new FlumeException("Unable to connect to Kafka", e);
    }
}
Example 3
Source File: Kafka.java From jlogstash-input-plugin with Apache License 2.0 | 6 votes |
public void addNewConsumer(String topic, Integer threads) {
    ConsumerConnector consumer = consumerConnMap.get(topic);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = null;

    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, threads);
    consumerMap = consumer.createMessageStreams(topicCountMap);

    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    ExecutorService executor = Executors.newFixedThreadPool(threads);

    for (final KafkaStream<byte[], byte[]> stream : streams) {
        executor.submit(new Consumer(stream, this));
    }

    executorMap.put(topic, executor);
}
Example 4
Source File: PutKafkaTest.java From localization_nifi with Apache License 2.0 | 5 votes |
private ConsumerIterator<byte[], byte[]> buildConsumer(String topic) {
    Properties props = new Properties();
    props.put("zookeeper.connect", "0.0.0.0:" + kafkaLocal.getZookeeperPort());
    props.put("group.id", "test");
    props.put("consumer.timeout.ms", "5000");
    props.put("auto.offset.reset", "smallest");
    ConsumerConfig consumerConfig = new ConsumerConfig(props);
    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);
    Map<String, Integer> topicCountMap = new HashMap<>(1);
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    ConsumerIterator<byte[], byte[]> iter = streams.get(0).iterator();
    return iter;
}
Example 5
Source File: PutKafkaTest.java From nifi with Apache License 2.0 | 5 votes |
private ConsumerIterator<byte[], byte[]> buildConsumer(String topic) {
    Properties props = new Properties();
    props.put("zookeeper.connect", "0.0.0.0:" + kafkaLocal.getZookeeperPort());
    props.put("group.id", "test");
    props.put("consumer.timeout.ms", "5000");
    props.put("auto.offset.reset", "smallest");
    ConsumerConfig consumerConfig = new ConsumerConfig(props);
    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);
    Map<String, Integer> topicCountMap = new HashMap<>(1);
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    ConsumerIterator<byte[], byte[]> iter = streams.get(0).iterator();
    return iter;
}
Example 6
Source File: KafkaPublisherTest.java From nifi with Apache License 2.0 | 5 votes |
private ConsumerIterator<byte[], byte[]> buildConsumer(String topic) {
    Properties props = new Properties();
    props.put("zookeeper.connect", "localhost:" + kafkaLocal.getZookeeperPort());
    props.put("group.id", "test");
    props.put("consumer.timeout.ms", "5000");
    props.put("auto.offset.reset", "smallest");
    ConsumerConfig consumerConfig = new ConsumerConfig(props);
    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);
    Map<String, Integer> topicCountMap = new HashMap<>(1);
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    ConsumerIterator<byte[], byte[]> iter = streams.get(0).iterator();
    return iter;
}
Example 7
Source File: MessageResource.java From dropwizard-kafka-http with Apache License 2.0 | 5 votes |
@GET
@Timed
public Response consume(
        @QueryParam("topic") String topic,
        @QueryParam("timeout") Integer timeout
) {
    if (Strings.isNullOrEmpty(topic))
        return Response.status(400)
                .entity(new String[]{"Undefined topic"})
                .build();

    Properties props = (Properties) consumerCfg.clone();
    if (timeout != null) props.put("consumer.timeout.ms", "" + timeout);

    ConsumerConfig config = new ConsumerConfig(props);
    ConsumerConnector connector = Consumer.createJavaConsumerConnector(config);

    Map<String, Integer> streamCounts = Collections.singletonMap(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(streamCounts);

    KafkaStream<byte[], byte[]> stream = streams.get(topic).get(0);
    List<Message> messages = new ArrayList<>();
    try {
        for (MessageAndMetadata<byte[], byte[]> messageAndMetadata : stream)
            messages.add(new Message(messageAndMetadata));
    } catch (ConsumerTimeoutException ignore) {
    } finally {
        connector.commitOffsets();
        connector.shutdown();
    }

    return Response.ok(messages).build();
}
Example 8
Source File: IngestFromKafkaDriver.java From geowave with Apache License 2.0 | 5 votes |
public <T> void consumeFromTopic(
        final String formatPluginName,
        final GeoWaveAvroFormatPlugin<T, ?> avroFormatPlugin,
        final KafkaIngestRunData ingestRunData,
        final List<String> queue) {

    final ConsumerConnector consumer = buildKafkaConsumer();
    if (consumer == null) {
        throw new RuntimeException(
                "Kafka consumer connector is null, unable to create message streams");
    }
    try {
        LOGGER.debug(
                "Kafka consumer setup for format ["
                        + formatPluginName
                        + "] against topic ["
                        + formatPluginName
                        + "]");
        final Map<String, Integer> topicCount = new HashMap<>();
        topicCount.put(formatPluginName, 1);

        final Map<String, List<KafkaStream<byte[], byte[]>>> consumerStreams =
                consumer.createMessageStreams(topicCount);
        final List<KafkaStream<byte[], byte[]>> streams = consumerStreams.get(formatPluginName);

        queue.remove(formatPluginName);
        consumeMessages(formatPluginName, avroFormatPlugin, ingestRunData, streams.get(0));
    } finally {
        consumer.shutdown();
    }
}
Example 9
Source File: AlertKafkaPublisherTest.java From eagle with Apache License 2.0 | 5 votes |
private static void consumeWithOutput(final List<String> outputMessages) {
    Thread t = new Thread(new Runnable() {
        @Override
        public void run() {
            Properties props = new Properties();
            props.put("group.id", "B");
            props.put("zookeeper.connect", "127.0.0.1:" + TEST_KAFKA_ZOOKEEPER_PORT);
            props.put("zookeeper.session.timeout.ms", "4000");
            props.put("zookeeper.sync.time.ms", "2000");
            props.put("auto.commit.interval.ms", "1000");
            props.put("auto.offset.reset", "smallest");

            ConsumerConnector jcc = null;
            try {
                ConsumerConfig ccfg = new ConsumerConfig(props);
                jcc = Consumer.createJavaConsumerConnector(ccfg);
                Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
                topicCountMap.put(TEST_TOPIC_NAME, 1);
                Map<String, List<KafkaStream<byte[], byte[]>>> topicMap =
                        jcc.createMessageStreams(topicCountMap);
                KafkaStream<byte[], byte[]> cstrm = topicMap.get(TEST_TOPIC_NAME).get(0);
                for (MessageAndMetadata<byte[], byte[]> mm : cstrm) {
                    String message = new String(mm.message());
                    outputMessages.add(message);
                    try {
                        Thread.sleep(5000);
                    } catch (InterruptedException e) {
                    }
                }
            } finally {
                if (jcc != null) {
                    jcc.shutdown();
                }
            }
        }
    });
    t.start();
}
Example 10
Source File: HighLevelConsumerExample.java From pulsar with Apache License 2.0 | 5 votes |
private static void consumeMessage(Arguments arguments) {

    Properties properties = new Properties();
    properties.put("zookeeper.connect", arguments.serviceUrl);
    properties.put("group.id", arguments.groupName);
    properties.put("consumer.id", "cons1");
    properties.put("auto.commit.enable", Boolean.toString(!arguments.autoCommitDisable));
    properties.put("auto.commit.interval.ms", "100");
    properties.put("queued.max.message.chunks", "100");

    ConsumerConfig conSConfig = new ConsumerConfig(properties);
    ConsumerConnector connector = Consumer.createJavaConsumerConnector(conSConfig);
    Map<String, Integer> topicCountMap = Collections.singletonMap(arguments.topicName, 2);
    Map<String, List<KafkaStream<String, Tweet>>> streams = connector.createMessageStreams(topicCountMap,
            new StringDecoder(null), new Tweet.TestDecoder());

    int count = 0;
    while (count < arguments.totalMessages || arguments.totalMessages == -1) {
        for (int i = 0; i < streams.size(); i++) {
            List<KafkaStream<String, Tweet>> kafkaStreams = streams.get(arguments.topicName);
            for (KafkaStream<String, Tweet> kafkaStream : kafkaStreams) {
                for (MessageAndMetadata<String, Tweet> record : kafkaStream) {
                    log.info("Received tweet: {}-{}", record.message().userName, record.message().message);
                    count++;
                }
            }
        }
    }
    connector.shutdown();

    log.info("successfully consumed message {}", count);
}
Example 11
Source File: DemoHighLevelConsumer.java From KafkaExample with Apache License 2.0 | 5 votes |
public static void main(String[] args) {
    args = new String[] { "zookeeper0:2181/kafka", "topic1", "group2", "consumer1" };
    if (args == null || args.length != 4) {
        System.err.println("Usage:\n\tjava -jar kafka_consumer.jar ${zookeeper_list} ${topic_name} ${group_name} ${consumer_id}");
        System.exit(1);
    }
    String zk = args[0];
    String topic = args[1];
    String groupid = args[2];
    String consumerid = args[3];

    Properties props = new Properties();
    props.put("zookeeper.connect", zk);
    props.put("group.id", groupid);
    props.put("client.id", "test");
    props.put("consumer.id", consumerid);
    props.put("auto.offset.reset", "largest");
    props.put("auto.commit.enable", "false");
    props.put("auto.commit.interval.ms", "60000");

    ConsumerConfig consumerConfig = new ConsumerConfig(props);
    ConsumerConnector consumerConnector = Consumer.createJavaConsumerConnector(consumerConfig);

    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
            consumerConnector.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream1 = consumerMap.get(topic).get(0);
    ConsumerIterator<byte[], byte[]> iterator = stream1.iterator();
    while (iterator.hasNext()) {
        MessageAndMetadata<byte[], byte[]> messageAndMetadata = iterator.next();
        String message = String.format(
                "Topic:%s, GroupID:%s, Consumer ID:%s, PartitionID:%s, Offset:%s, Message Key:%s, Message Payload: %s",
                messageAndMetadata.topic(), groupid, consumerid,
                messageAndMetadata.partition(), messageAndMetadata.offset(),
                new String(messageAndMetadata.key()), new String(messageAndMetadata.message()));
        System.out.println(message);
        consumerConnector.commitOffsets();
    }
}
Example 12
Source File: KafkaDemoClient.java From iotplatform with Apache License 2.0 | 5 votes |
private static ConsumerIterator<String, String> buildConsumer(String topic) {
    Map<String, Integer> topicCountMap = new HashMap<>();
    topicCountMap.put(topic, 1);
    ConsumerConfig consumerConfig = new ConsumerConfig(consumerProperties());
    ConsumerConnector consumerConnector = Consumer.createJavaConsumerConnector(consumerConfig);
    Map<String, List<KafkaStream<String, String>>> consumers =
            consumerConnector.createMessageStreams(topicCountMap, new StringDecoder(null), new StringDecoder(null));
    KafkaStream<String, String> stream = consumers.get(topic).get(0);
    return stream.iterator();
}
Example 13
Source File: KafkaConsumer.java From blog_demos with Apache License 2.0 | 5 votes |
/**
 * Start a consumer for the given topic.
 * @param topic
 */
public void startConsume(String topic) {
    Properties props = new Properties();
    props.put("zookeeper.connect", zkConnect);
    props.put("group.id", groupId);
    props.put("zookeeper.session.timeout.ms", "40000");
    props.put("zookeeper.sync.time.ms", "200");
    props.put("auto.commit.interval.ms", "1000");

    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));

    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, new Integer(1));
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);
    final ConsumerIterator<byte[], byte[]> it = stream.iterator();

    Runnable executor = new Runnable() {
        @Override
        public void run() {
            while (it.hasNext()) {
                System.out.println("************** receive:" + new String(it.next().message()));
                try {
                    Thread.sleep(3000);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        }
    };

    new Thread(executor).start();
}
Example 14
Source File: KafkaConsumer.java From blog_demos with Apache License 2.0 | 5 votes |
/**
 * Start a consumer for the given topic.
 * @param topic
 */
public void startConsume(String topic) {
    Properties props = new Properties();
    props.put("zookeeper.connect", zkConnect);
    props.put("group.id", groupId);
    props.put("zookeeper.session.timeout.ms", "40000");
    props.put("zookeeper.sync.time.ms", "200");
    props.put("auto.commit.interval.ms", "1000");

    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));

    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, new Integer(1));
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);
    final ConsumerIterator<byte[], byte[]> it = stream.iterator();

    Runnable executor = new Runnable() {
        @Override
        public void run() {
            while (it.hasNext()) {
                System.out.println("************** receive:" + new String(it.next().message()));
                try {
                    Thread.sleep(3000);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        }
    };

    new Thread(executor).start();
}
Example 15
Source File: KafkaExample.java From pragmatic-java-engineer with GNU General Public License v3.0 | 5 votes |
public static void consumer() {
    Properties props = new Properties();
    props.put("zookeeper.connect", "zk1.dmp.com:2181,zk2.dmp.com:2181,zk3.dmp.com:2181");
    props.put("zookeeper.session.timeout.ms", "3000");
    props.put("zookeeper.sync.time.ms", "200");
    props.put("group.id", "test_group");
    props.put("auto.commit.interval.ms", "600");

    String topic = "test_topic";
    ConsumerConnector connector = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));

    Map<String, Integer> topics = new HashMap<String, Integer>();
    int partitionNum = 3; // number of partitions
    topics.put(topic, partitionNum);
    Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(topics);
    List<KafkaStream<byte[], byte[]>> partitions = streams.get(topic);
    Executor threadPool = Executors.newFixedThreadPool(partitionNum);
    for (final KafkaStream<byte[], byte[]> partition : partitions) {
        threadPool.execute(
                new Runnable() {
                    @Override
                    public void run() {
                        ConsumerIterator<byte[], byte[]> it = partition.iterator();
                        while (it.hasNext()) {
                            MessageAndMetadata<byte[], byte[]> item = it.next();
                            byte[] messageBody = item.message();
                        }
                    }
                });
    }
}
Example 16
Source File: KafkaPublisherTest.java From localization_nifi with Apache License 2.0 | 5 votes |
private ConsumerIterator<byte[], byte[]> buildConsumer(String topic) {
    Properties props = new Properties();
    props.put("zookeeper.connect", "localhost:" + kafkaLocal.getZookeeperPort());
    props.put("group.id", "test");
    props.put("consumer.timeout.ms", "5000");
    props.put("auto.offset.reset", "smallest");
    ConsumerConfig consumerConfig = new ConsumerConfig(props);
    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);
    Map<String, Integer> topicCountMap = new HashMap<>(1);
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    ConsumerIterator<byte[], byte[]> iter = streams.get(0).iterator();
    return iter;
}
Example 17
Source File: TestKafkaSinkV2.java From suro with Apache License 2.0 | 4 votes |
/** Tests backward compatibility with the old Kafka sink. */
@Test
public void testBackwardCompatability() throws Exception {
    int numPartitions = 9;

    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_BACKWARD_COMPAT,
                    "--replication-factor", "2", "--partitions", Integer.toString(numPartitions)}));
    String keyTopicMap = String.format(" \"keyTopicMap\": {\n" +
            " \"%s\": \"key\"\n" +
            " }", TOPIC_NAME_BACKWARD_COMPAT);

    String description1 = "{\n" +
            " \"type\": \"kafkaV1\",\n" +
            " \"client.id\": \"kafkasink\",\n" +
            " \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            " \"ack\": 1,\n" +
            keyTopicMap + "\n" +
            "}";
    String description2 = "{\n" +
            " \"type\": \"kafkaV2\",\n" +
            " \"client.id\": \"kafkasink\",\n" +
            " \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            " \"request.required.acks\": 1,\n" +
            keyTopicMap + "\n" +
            "}";

    // setup sinks, both old and new versions
    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSink.class, "kafkaV1"));
    jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafkaV2"));
    jsonMapper.setInjectableValues(new InjectableValues() {
        @Override
        public Object findInjectableValue(Object valueId, DeserializationContext ctxt,
                                          BeanProperty forProperty, Object beanInstance) {
            if (valueId.equals(KafkaRetentionPartitioner.class.getName())) {
                return new KafkaRetentionPartitioner();
            } else {
                return null;
            }
        }
    });
    KafkaSink sinkV1 = jsonMapper.readValue(description1, new TypeReference<Sink>(){});
    KafkaSinkV2 sinkV2 = jsonMapper.readValue(description2, new TypeReference<Sink>(){});
    sinkV1.open();
    sinkV2.open();
    List<Sink> sinks = new ArrayList<Sink>();
    sinks.add(sinkV1);
    sinks.add(sinkV2);

    // setup Kafka consumer (to read back messages)
    ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
            createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid"));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC_NAME_BACKWARD_COMPAT, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_BACKWARD_COMPAT).get(0);

    // Send 20 test messages, using the old and new Kafka sinks.
    // Retrieve the messages and ensure that they are identical and sent to the same partition.
    Random rand = new Random();
    int messageCount = 20;
    for (int i = 0; i < messageCount; ++i) {
        Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
                .put("key", new Long(rand.nextLong()))
                .put("value", "message:" + i).build();

        // send message to both sinks
        for (Sink sink : sinks) {
            sink.writeTo(new DefaultMessageContainer(
                    new Message(TOPIC_NAME_BACKWARD_COMPAT, jsonMapper.writeValueAsBytes(msgMap)),
                    jsonMapper));
        }

        // read two copies of message back from Kafka and check that partitions and data match
        MessageAndMetadata<byte[], byte[]> msgAndMeta1 = stream.iterator().next();
        MessageAndMetadata<byte[], byte[]> msgAndMeta2 = stream.iterator().next();
        System.out.println("iteration: " + i + " partition1: " + msgAndMeta1.partition());
        System.out.println("iteration: " + i + " partition2: " + msgAndMeta2.partition());
        assertEquals(msgAndMeta1.partition(), msgAndMeta2.partition());
        String msg1Str = new String(msgAndMeta1.message());
        String msg2Str = new String(msgAndMeta2.message());
        System.out.println("iteration: " + i + " message1: " + msg1Str);
        System.out.println("iteration: " + i + " message2: " + msg2Str);
        assertEquals(msg1Str, msg2Str);
    }

    // close sinks
    sinkV1.close();
    sinkV2.close();

    // close consumer
    try {
        stream.iterator().next();
        fail(); // there should be no data left to consume
    } catch (ConsumerTimeoutException e) {
        // this is expected
        consumer.shutdown();
    }
}
Example 18
Source File: ITZipkinReceiver.java From incubator-retired-htrace with Apache License 2.0 | 4 votes |
@Test
public void testKafkaTransport() throws Exception {

    String topic = "zipkin";
    // Kafka setup
    EmbeddedZookeeper zkServer = new EmbeddedZookeeper(TestZKUtils.zookeeperConnect());
    ZkClient zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);
    Properties props = TestUtils.createBrokerConfig(0, TestUtils.choosePort(), false);
    KafkaConfig config = new KafkaConfig(props);
    KafkaServer kafkaServer = TestUtils.createServer(config, new MockTime());

    Buffer<KafkaServer> servers = JavaConversions.asScalaBuffer(Collections.singletonList(kafkaServer));
    TestUtils.createTopic(zkClient, topic, 1, 1, servers, new Properties());
    zkClient.close();
    TestUtils.waitUntilMetadataIsPropagated(servers, topic, 0, 5000);

    // HTrace
    HTraceConfiguration hTraceConfiguration = HTraceConfiguration.fromKeyValuePairs(
            "sampler.classes", "AlwaysSampler",
            "span.receiver.classes", ZipkinSpanReceiver.class.getName(),
            "zipkin.kafka.metadata.broker.list", config.advertisedHostName() + ":" + config.advertisedPort(),
            "zipkin.kafka.topic", topic,
            ZipkinSpanReceiver.TRANSPORT_CLASS_KEY, KafkaTransport.class.getName()
    );

    final Tracer tracer = new Tracer.Builder("test-tracer")
            .tracerPool(new TracerPool("test-tracer-pool"))
            .conf(hTraceConfiguration)
            .build();

    String scopeName = "test-kafka-transport-scope";
    TraceScope traceScope = tracer.newScope(scopeName);
    traceScope.close();
    tracer.close();

    // Kafka consumer
    Properties consumerProps = new Properties();
    consumerProps.put("zookeeper.connect", props.getProperty("zookeeper.connect"));
    consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "testing.group");
    consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "smallest");
    ConsumerConnector connector =
            kafka.consumer.Consumer.createJavaConsumerConnector(new kafka.consumer.ConsumerConfig(consumerProps));
    Map<String, Integer> topicCountMap = new HashMap<>();
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(topicCountMap);
    ConsumerIterator<byte[], byte[]> it = streams.get(topic).get(0).iterator();

    // Test
    Assert.assertTrue("We should have one message in Kafka", it.hasNext());
    Span span = new Span();
    new TDeserializer(new TBinaryProtocol.Factory()).deserialize(span, it.next().message());
    Assert.assertEquals("The span name should match our scope description", span.getName(), scopeName);

    kafkaServer.shutdown();
}
Example 19
Source File: TestKafkaSink.java From suro with Apache License 2.0 | 4 votes |
@Test
public void testConfigBackwardCompatible() throws IOException {
    int numPartitions = 9;

    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_BACKWARD_COMPAT,
                    "--replication-factor", "2", "--partitions", Integer.toString(numPartitions)}));
    String keyTopicMap = String.format(" \"keyTopicMap\": {\n" +
            " \"%s\": \"key\"\n" +
            " }", TOPIC_NAME_BACKWARD_COMPAT);

    String description1 = "{\n" +
            " \"type\": \"Kafka\",\n" +
            " \"client.id\": \"kafkasink\",\n" +
            " \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            " \"ack\": 1,\n" +
            " \"compression.type\": \"snappy\",\n" +
            keyTopicMap + "\n" +
            "}";
    String description2 = "{\n" +
            " \"type\": \"Kafka\",\n" +
            " \"client.id\": \"kafkasink\",\n" +
            " \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            " \"request.required.acks\": 1,\n" +
            " \"compression.codec\": \"snappy\",\n" +
            keyTopicMap + "\n" +
            "}";

    // setup sinks, both old and new versions
    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSink.class, "Kafka"));
    jsonMapper.setInjectableValues(new InjectableValues() {
        @Override
        public Object findInjectableValue(Object valueId, DeserializationContext ctxt,
                                          BeanProperty forProperty, Object beanInstance) {
            if (valueId.equals(KafkaRetentionPartitioner.class.getName())) {
                return new KafkaRetentionPartitioner();
            } else {
                return null;
            }
        }
    });
    KafkaSink sink1 = jsonMapper.readValue(description1, new TypeReference<Sink>(){});
    KafkaSink sink2 = jsonMapper.readValue(description2, new TypeReference<Sink>(){});
    sink1.open();
    sink2.open();
    List<Sink> sinks = new ArrayList<Sink>();
    sinks.add(sink1);
    sinks.add(sink2);

    // setup Kafka consumer (to read back messages)
    ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
            createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid"));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC_NAME_BACKWARD_COMPAT, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_BACKWARD_COMPAT).get(0);

    // Send 20 test messages, using the old and new Kafka sinks.
    // Retrieve the messages and ensure that they are identical and sent to the same partition.
    Random rand = new Random();
    int messageCount = 20;
    for (int i = 0; i < messageCount; ++i) {
        Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
                .put("key", new Long(rand.nextLong()))
                .put("value", "message:" + i).build();

        // send message to both sinks
        for (Sink sink : sinks) {
            sink.writeTo(new DefaultMessageContainer(
                    new Message(TOPIC_NAME_BACKWARD_COMPAT, jsonMapper.writeValueAsBytes(msgMap)),
                    jsonMapper));
        }

        // read two copies of message back from Kafka and check that partitions and data match
        MessageAndMetadata<byte[], byte[]> msgAndMeta1 = stream.iterator().next();
        MessageAndMetadata<byte[], byte[]> msgAndMeta2 = stream.iterator().next();
        System.out.println("iteration: " + i + " partition1: " + msgAndMeta1.partition());
        System.out.println("iteration: " + i + " partition2: " + msgAndMeta2.partition());
        assertEquals(msgAndMeta1.partition(), msgAndMeta2.partition());
        String msg1Str = new String(msgAndMeta1.message());
        String msg2Str = new String(msgAndMeta2.message());
        System.out.println("iteration: " + i + " message1: " + msg1Str);
        System.out.println("iteration: " + i + " message2: " + msg2Str);
        assertEquals(msg1Str, msg2Str);
    }

    // close sinks
    sink1.close();
    sink2.close();

    // close consumer
    try {
        stream.iterator().next();
        fail(); // there should be no data left to consume
    } catch (ConsumerTimeoutException e) {
        // this is expected
        consumer.shutdown();
    }
}
Example 20
Source File: TestKafkaSinkV2.java From suro with Apache License 2.0 | 4 votes |
@Test
public void testMultithread() throws IOException {
    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_MULTITHREAD,
                    "--replication-factor", "2", "--partitions", "1"}));
    String description = "{\n" +
            " \"type\": \"kafka\",\n" +
            " \"client.id\": \"kafkasink\",\n" +
            " \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            " \"request.required.acks\": 1,\n" +
            " \"batchSize\": 10,\n" +
            " \"jobQueueSize\": 3\n" +
            "}";
    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafka"));
    KafkaSinkV2 sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
    sink.open();
    int msgCount = 10000;
    for (int i = 0; i < msgCount; ++i) {
        Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
                .put("key", Integer.toString(i))
                .put("value", "message:" + i).build();
        sink.writeTo(new DefaultMessageContainer(
                new Message(TOPIC_NAME_MULTITHREAD, jsonMapper.writeValueAsBytes(msgMap)),
                jsonMapper));
    }
    assertTrue(sink.getNumOfPendingMessages() > 0);
    sink.close();
    System.out.println(sink.getStat());
    assertEquals(sink.getNumOfPendingMessages(), 0);

    ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
            createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid_multhread"));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC_NAME_MULTITHREAD, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_MULTITHREAD).get(0);
    for (int i = 0; i < msgCount; ++i) {
        stream.iterator().next();
    }

    try {
        stream.iterator().next();
        fail();
    } catch (ConsumerTimeoutException e) {
        // this is expected
        consumer.shutdown();
    }
}