kafka.javaapi.consumer.ConsumerConnector Java Examples
The following examples show how to use kafka.javaapi.consumer.ConsumerConnector, the Java interface to Kafka's legacy (0.8.x) ZooKeeper-based high-level consumer.
The original project, source file, and license are noted above each example.
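Before diving into the project-specific examples, here is a minimal, self-contained sketch of the lifecycle most of them share: build a ConsumerConfig from ZooKeeper-based properties, create the connector through the static factory, open message streams, iterate, and shut down. The class name, connection address, group id, and topic name below are illustrative placeholders, not values taken from any example on this page.

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerTimeoutException;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

public class MinimalConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("zookeeper.connect", "localhost:2181"); // placeholder ZooKeeper address
        props.put("group.id", "example-group");           // placeholder consumer group
        props.put("auto.offset.reset", "smallest");       // start from the earliest available offset
        props.put("consumer.timeout.ms", "5000");         // stop iterating after 5s without messages

        // The legacy high-level consumer is obtained through a static factory,
        // not a constructor.
        ConsumerConnector connector =
                Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
        try {
            // Request one stream for the topic; the map value is the stream count.
            Map<String, List<KafkaStream<byte[], byte[]>>> streams =
                    connector.createMessageStreams(Collections.singletonMap("example-topic", 1));
            for (MessageAndMetadata<byte[], byte[]> record : streams.get("example-topic").get(0)) {
                System.out.println(new String(record.message()));
            }
        } catch (ConsumerTimeoutException expected) {
            // No messages arrived within consumer.timeout.ms; fall through to shutdown.
        } finally {
            connector.shutdown(); // releases ZooKeeper registrations and fetcher threads
        }
    }
}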
Example #1
Source File: HighlevelKafkaConsumer.java From attic-apex-malhar with Apache License 2.0
@Override
public void create() {
    super.create();
    if (standardConsumer == null) {
        standardConsumer = new HashMap<String, ConsumerConnector>();
    }

    // This is important to let Kafka know how to distribute the reads among
    // different consumers in the same consumer group.
    // Don't reuse a consumer id on recovery: ZooKeeper deletes a dead consumer's
    // entry with some delay, so reusing the id while the old entry still exists
    // triggers a rebalancing error even though the new consumer is back online.
    consumerConfig.put("consumer.id", "consumer" + System.currentTimeMillis());

    if (initialOffset.equalsIgnoreCase("earliest")) {
        consumerConfig.put("auto.offset.reset", "smallest");
    } else {
        consumerConfig.put("auto.offset.reset", "largest");
    }
}
Example #2
Source File: Kafka.java From jlogstash-input-plugin with Apache License 2.0
public void addNewConsumer(String topic, Integer threads) {
    ConsumerConnector consumer = consumerConnMap.get(topic);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, threads);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
            consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);

    ExecutorService executor = Executors.newFixedThreadPool(threads);
    for (final KafkaStream<byte[], byte[]> stream : streams) {
        executor.submit(new Consumer(stream, this));
    }
    executorMap.put(topic, executor);
}
Example #3
Source File: KafkaDistributed.java From jlogstash-input-plugin with Apache License 2.0
public void reconnConsumer(String topicName) {
    // Stop the connector for this topic
    ConsumerConnector consumerConn = consumerConnMap.get(topicName);
    consumerConn.commitOffsets(true);
    consumerConn.shutdown();
    consumerConnMap.remove(topicName);

    // Stop the stream-consuming threads for this topic
    ExecutorService es = executorMap.get(topicName);
    es.shutdownNow();
    executorMap.remove(topicName);

    // Recreate the connector and restart the consumer threads
    Properties prop = geneConsumerProp();
    ConsumerConnector newConsumerConn = kafka.consumer.Consumer
            .createJavaConsumerConnector(new ConsumerConfig(prop));
    consumerConnMap.put(topicName, newConsumerConn);
    addNewConsumer(topicName, topic.get(topicName));
}
Example #4
Source File: KafkaDistributed.java From jlogstash-input-plugin with Apache License 2.0
@Override
public void release() {
    try {
        for (ConsumerConnector consumer : consumerConnMap.values()) {
            consumer.commitOffsets(true);
            consumer.shutdown();
        }
        for (ExecutorService executor : executorMap.values()) {
            executor.shutdownNow();
        }
        if (scheduleExecutor != null) {
            scheduleExecutor.shutdownNow();
        }
        this.zkDistributed.realse(); // sic: method name as defined in ZkDistributed
    } catch (Exception e) {
        logger.error(ExceptionUtil.getErrorMessage(e));
    }
}
Example #5
Source File: KafkaDistributed.java From jlogstash-input-plugin with Apache License 2.0
public void addNewConsumer(String topic, Integer threads) {
    ConsumerConnector consumer = consumerConnMap.get(topic);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, threads);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
            consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);

    ExecutorService executor = Executors.newFixedThreadPool(threads);
    for (final KafkaStream<byte[], byte[]> stream : streams) {
        executor.submit(new Consumer(stream, this));
    }
    executorMap.put(topic, executor);
}
Example #6
Source File: KafkaDistributed.java From jlogstash-input-plugin with Apache License 2.0
@SuppressWarnings("unchecked")
public void prepare() {
    Properties props = geneConsumerProp();
    for (String topicName : topic.keySet()) {
        ConsumerConnector consumer = kafka.consumer.Consumer
                .createJavaConsumerConnector(new ConsumerConfig(props));
        consumerConnMap.put(topicName, consumer);
    }
    if (distributed != null) {
        try {
            logger.warn("zkDistributed is starting...");
            zkDistributed = ZkDistributed.getSingleZkDistributed(distributed);
            zkDistributed.zkRegistration();
        } catch (Exception e) {
            logger.error("zkRegistration fail:{}", ExceptionUtil.getErrorMessage(e));
        }
    }
}
Example #7
Source File: Kafka.java From jlogstash-input-plugin with Apache License 2.0
public void reconnConsumer(String topicName) {
    // Stop the connector for this topic
    ConsumerConnector consumerConn = consumerConnMap.get(topicName);
    consumerConn.commitOffsets(true);
    consumerConn.shutdown();
    consumerConnMap.remove(topicName);

    // Stop the stream-consuming threads for this topic
    ExecutorService es = executorMap.get(topicName);
    es.shutdownNow();
    executorMap.remove(topicName);

    // Recreate the connector and restart the consumer threads
    Properties prop = geneConsumerProp();
    ConsumerConnector newConsumerConn = kafka.consumer.Consumer
            .createJavaConsumerConnector(new ConsumerConfig(prop));
    consumerConnMap.put(topicName, newConsumerConn);
    addNewConsumer(topicName, topic.get(topicName));
}
Example #8
Source File: Kafka08Fetcher.java From indexr with Apache License 2.0
@Override
public synchronized void close() throws IOException {
    logger.debug("Stop kafka fetcher. [topic: {}]", topics);

    ConsumerConnector connector = this.connector;
    this.connector = null;
    if (connector != null) {
        connector.commitOffsets();
        connector.shutdown();
    }

    IOUtil.closeQuietly(eventItr);

    // Some events may still exist in the buffer; try to save them.
    List<byte[]> remaining = new ArrayList<>();
    try {
        while (eventItr.hasNext()) {
            remaining.add(eventItr.next());
        }
    } catch (Exception e) {
        // Ignore
    }
    eventItr = null;

    if (!remaining.isEmpty()) {
        this.remaining = remaining;
    }
}
Example #9
Source File: KafkaChannel.java From flume-plugin with Apache License 2.0
private synchronized ConsumerAndIterator createConsumerAndIter() {
    try {
        ConsumerConfig consumerConfig = new ConsumerConfig(kafkaConf);
        ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
                consumer.createMessageStreams(topicCountMap);
        final List<KafkaStream<byte[], byte[]>> streamList = consumerMap.get(topic.get());
        KafkaStream<byte[], byte[]> stream = streamList.remove(0);
        ConsumerAndIterator ret =
                new ConsumerAndIterator(consumer, stream.iterator(), channelUUID);
        consumers.add(ret);
        LOGGER.info("Created new consumer to connect to Kafka");
        return ret;
    } catch (Exception e) {
        throw new FlumeException("Unable to connect to Kafka", e);
    }
}
Example #10
Source File: kafkaConsumer.java From Transwarp-Sample-Code with MIT License
/**
 * Create a thread pool and run the Kafka consumer tasks.
 */
public void go() {
    Constant constant = new Constant();
    kafkaProperties kafkaProperties = new kafkaProperties();
    ConsumerConfig config = new ConsumerConfig(kafkaProperties.properties());

    ExecutorService executorService =
            Executors.newFixedThreadPool(Integer.parseInt(constant.THREAD_POOL_SIZE));
    String topic = constant.TOPIC;
    Map<String, Integer> topicCountMap = new HashMap<>();
    topicCountMap.put(topic, Integer.parseInt(constant.THREAD_NUM));

    ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(config);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
            consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    for (KafkaStream stream : streams) {
        executorService.submit(new Task(stream));
    }
    executorService.shutdown();
}
Example #11
Source File: KafkaOffsetGetter.java From Kafka-Insight with Apache License 2.0
/**
 * Continuously reads the internal consumer-offsets topic and caches each
 * group/topic/partition commit in kafkaConsumerOffsets.
 *
 * @see Thread#run()
 */
@Override
public void run() {
    ConsumerConnector consumerConnector = KafkaUtils.createConsumerConnector(zkAddr, group);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(CONSUMER_OFFSET_TOPIC, 1);
    KafkaStream<byte[], byte[]> offsetMsgStream =
            consumerConnector.createMessageStreams(topicCountMap).get(CONSUMER_OFFSET_TOPIC).get(0);

    ConsumerIterator<byte[], byte[]> it = offsetMsgStream.iterator();
    while (true) {
        MessageAndMetadata<byte[], byte[]> offsetMsg = it.next();
        // Key schema versions 0 and 1 carry offset commits; version 2+ is group metadata.
        if (ByteBuffer.wrap(offsetMsg.key()).getShort() < 2) {
            try {
                GroupTopicPartition commitKey = readMessageKey(ByteBuffer.wrap(offsetMsg.key()));
                if (offsetMsg.message() == null) {
                    continue;
                }
                kafka.common.OffsetAndMetadata commitValue =
                        readMessageValue(ByteBuffer.wrap(offsetMsg.message()));
                kafkaConsumerOffsets.put(commitKey, commitValue);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }
}
Example #12
Source File: MetricsReportingSteps.java From metrics-kafka with Apache License 2.0
@Then("Kafka consumer should be able to read this data.")
public void consumerReadsMetrics() throws IOException {
    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(createConsumerConfig());
    String message = readMessage(consumer);
    assertNotNull(message);

    ObjectMapper objectMapper = new ObjectMapper();
    KafkaMetricsReport report = objectMapper.readValue(message, KafkaMetricsReport.class);
    assertNotNull(report);
}
Example #13
Source File: HighlevelKafkaConsumer.java From attic-apex-malhar with Apache License 2.0
@Override
protected void commitOffset() {
    // Commit the offsets at checkpoint so that the high-level consumer doesn't
    // have to receive too many duplicate messages
    if (standardConsumer != null && standardConsumer.values() != null) {
        for (ConsumerConnector consumerConnector : standardConsumer.values()) {
            consumerConnector.commitOffsets();
        }
    }
}
Example #14
Source File: HighlevelKafkaConsumer.java From attic-apex-malhar with Apache License 2.0
@Override
public void close() {
    if (standardConsumer != null && standardConsumer.values() != null) {
        for (ConsumerConnector consumerConnector : standardConsumer.values()) {
            consumerConnector.shutdown();
        }
    }
    if (consumerThreadExecutor != null) {
        consumerThreadExecutor.shutdown();
    }
}
Example #15
Source File: PutKafkaTest.java From localization_nifi with Apache License 2.0
private ConsumerIterator<byte[], byte[]> buildConsumer(String topic) {
    Properties props = new Properties();
    props.put("zookeeper.connect", "0.0.0.0:" + kafkaLocal.getZookeeperPort());
    props.put("group.id", "test");
    props.put("consumer.timeout.ms", "5000");
    props.put("auto.offset.reset", "smallest");
    ConsumerConfig consumerConfig = new ConsumerConfig(props);
    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);

    Map<String, Integer> topicCountMap = new HashMap<>(1);
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
            consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    return streams.get(0).iterator();
}
Example #16
Source File: AlertKafkaPublisherTest.java From eagle with Apache License 2.0
private static void consumeWithOutput(final List<String> outputMessages) {
    Thread t = new Thread(new Runnable() {
        @Override
        public void run() {
            Properties props = new Properties();
            props.put("group.id", "B");
            props.put("zookeeper.connect", "127.0.0.1:" + TEST_KAFKA_ZOOKEEPER_PORT);
            props.put("zookeeper.session.timeout.ms", "4000");
            props.put("zookeeper.sync.time.ms", "2000");
            props.put("auto.commit.interval.ms", "1000");
            props.put("auto.offset.reset", "smallest");

            ConsumerConnector jcc = null;
            try {
                ConsumerConfig ccfg = new ConsumerConfig(props);
                jcc = Consumer.createJavaConsumerConnector(ccfg);

                Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
                topicCountMap.put(TEST_TOPIC_NAME, 1);
                Map<String, List<KafkaStream<byte[], byte[]>>> topicMap =
                        jcc.createMessageStreams(topicCountMap);
                KafkaStream<byte[], byte[]> cstrm = topicMap.get(TEST_TOPIC_NAME).get(0);
                for (MessageAndMetadata<byte[], byte[]> mm : cstrm) {
                    String message = new String(mm.message());
                    outputMessages.add(message);
                    try {
                        Thread.sleep(5000);
                    } catch (InterruptedException e) {
                    }
                }
            } finally {
                if (jcc != null) {
                    jcc.shutdown();
                }
            }
        }
    });
    t.start();
}
Example #17
Source File: MetricsReportingSteps.java From metrics-kafka with Apache License 2.0
public String readMessage(ConsumerConnector consumer) {
    KafkaStream<String, String> messageStream =
            consumer.createMessageStreamsByFilter(new Whitelist(topic), 1,
                    new StringDecoder(null), new StringDecoder(null)).get(0);
    return messageStream.iterator().next().message();
}
Example #18
Source File: KafkaSourceUtil.java From flume-ng-extends-source with MIT License
public static ConsumerConnector getConsumer(Properties kafkaProps) {
    ConsumerConfig consumerConfig = new ConsumerConfig(kafkaProps);
    return Consumer.createJavaConsumerConnector(consumerConfig);
}
Example #19
Source File: HighLevelConsumerExample.java From pulsar with Apache License 2.0
private static void consumeMessage(Arguments arguments) {
    Properties properties = new Properties();
    properties.put("zookeeper.connect", arguments.serviceUrl);
    properties.put("group.id", arguments.groupName);
    properties.put("consumer.id", "cons1");
    properties.put("auto.commit.enable", Boolean.toString(!arguments.autoCommitDisable));
    properties.put("auto.commit.interval.ms", "100");
    properties.put("queued.max.message.chunks", "100");

    ConsumerConfig conSConfig = new ConsumerConfig(properties);
    ConsumerConnector connector = Consumer.createJavaConsumerConnector(conSConfig);
    Map<String, Integer> topicCountMap = Collections.singletonMap(arguments.topicName, 2);
    Map<String, List<KafkaStream<String, Tweet>>> streams =
            connector.createMessageStreams(topicCountMap, new StringDecoder(null),
                    new Tweet.TestDecoder());

    int count = 0;
    while (count < arguments.totalMessages || arguments.totalMessages == -1) {
        for (int i = 0; i < streams.size(); i++) {
            List<KafkaStream<String, Tweet>> kafkaStreams = streams.get(arguments.topicName);
            for (KafkaStream<String, Tweet> kafkaStream : kafkaStreams) {
                for (MessageAndMetadata<String, Tweet> record : kafkaStream) {
                    log.info("Received tweet: {}-{}", record.message().userName,
                            record.message().message);
                    count++;
                }
            }
        }
    }
    connector.shutdown();
    log.info("successfully consumed message {}", count);
}
Example #20
Source File: KafkaClientTest.java From Krackle with Apache License 2.0
private ConsumerConnector getStdConsumer() {
    Properties props = new Properties();
    props.put("zookeeper.connect", "localhost:21818");
    props.put("group.id", "test");
    ConsumerConfig conf = new ConsumerConfig(props);
    return kafka.consumer.Consumer.createJavaConsumerConnector(conf);
}
Example #21
Source File: IngestFromKafkaDriver.java From geowave with Apache License 2.0
public <T> void consumeFromTopic(
        final String formatPluginName,
        final GeoWaveAvroFormatPlugin<T, ?> avroFormatPlugin,
        final KafkaIngestRunData ingestRunData,
        final List<String> queue) {
    final ConsumerConnector consumer = buildKafkaConsumer();
    if (consumer == null) {
        throw new RuntimeException(
                "Kafka consumer connector is null, unable to create message streams");
    }
    try {
        LOGGER.debug("Kafka consumer setup for format [" + formatPluginName
                + "] against topic [" + formatPluginName + "]");
        final Map<String, Integer> topicCount = new HashMap<>();
        topicCount.put(formatPluginName, 1);
        final Map<String, List<KafkaStream<byte[], byte[]>>> consumerStreams =
                consumer.createMessageStreams(topicCount);
        final List<KafkaStream<byte[], byte[]>> streams = consumerStreams.get(formatPluginName);
        queue.remove(formatPluginName);
        consumeMessages(formatPluginName, avroFormatPlugin, ingestRunData, streams.get(0));
    } finally {
        consumer.shutdown();
    }
}
Example #22
Source File: MessageResource.java From dropwizard-kafka-http with Apache License 2.0
@GET
@Timed
public Response consume(
        @QueryParam("topic") String topic,
        @QueryParam("timeout") Integer timeout
) {
    if (Strings.isNullOrEmpty(topic))
        return Response.status(400)
                .entity(new String[]{"Undefined topic"})
                .build();

    Properties props = (Properties) consumerCfg.clone();
    if (timeout != null) props.put("consumer.timeout.ms", "" + timeout);

    ConsumerConfig config = new ConsumerConfig(props);
    ConsumerConnector connector = Consumer.createJavaConsumerConnector(config);

    Map<String, Integer> streamCounts = Collections.singletonMap(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> streams =
            connector.createMessageStreams(streamCounts);
    KafkaStream<byte[], byte[]> stream = streams.get(topic).get(0);

    List<Message> messages = new ArrayList<>();
    try {
        for (MessageAndMetadata<byte[], byte[]> messageAndMetadata : stream)
            messages.add(new Message(messageAndMetadata));
    } catch (ConsumerTimeoutException ignore) {
    } finally {
        connector.commitOffsets();
        connector.shutdown();
    }

    return Response.ok(messages).build();
}
Example #23
Source File: KafkaPublisherTest.java From nifi with Apache License 2.0
private ConsumerIterator<byte[], byte[]> buildConsumer(String topic) {
    Properties props = new Properties();
    props.put("zookeeper.connect", "localhost:" + kafkaLocal.getZookeeperPort());
    props.put("group.id", "test");
    props.put("consumer.timeout.ms", "5000");
    props.put("auto.offset.reset", "smallest");
    ConsumerConfig consumerConfig = new ConsumerConfig(props);
    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);

    Map<String, Integer> topicCountMap = new HashMap<>(1);
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
            consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    return streams.get(0).iterator();
}
Example #24
Source File: Kafka.java From jlogstash-input-plugin with Apache License 2.0
@Override
public void release() {
    for (ConsumerConnector consumer : consumerConnMap.values()) {
        consumer.commitOffsets(true);
        consumer.shutdown();
    }
    for (ExecutorService executor : executorMap.values()) {
        executor.shutdownNow();
    }
    scheduleExecutor.shutdownNow();
}
Example #25
Source File: PutKafkaTest.java From nifi with Apache License 2.0
private ConsumerIterator<byte[], byte[]> buildConsumer(String topic) {
    Properties props = new Properties();
    props.put("zookeeper.connect", "0.0.0.0:" + kafkaLocal.getZookeeperPort());
    props.put("group.id", "test");
    props.put("consumer.timeout.ms", "5000");
    props.put("auto.offset.reset", "smallest");
    ConsumerConfig consumerConfig = new ConsumerConfig(props);
    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);

    Map<String, Integer> topicCountMap = new HashMap<>(1);
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
            consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    return streams.get(0).iterator();
}
Example #26
Source File: DemoHighLevelConsumer.java From KafkaExample with Apache License 2.0
public static void main(String[] args) {
    // Hard-coded arguments for demo purposes; remove this line to use the real CLI arguments.
    args = new String[] { "zookeeper0:2181/kafka", "topic1", "group2", "consumer1" };
    if (args == null || args.length != 4) {
        System.err.println("Usage:\n\tjava -jar kafka_consumer.jar ${zookeeper_list} ${topic_name} ${group_name} ${consumer_id}");
        System.exit(1);
    }
    String zk = args[0];
    String topic = args[1];
    String groupid = args[2];
    String consumerid = args[3];

    Properties props = new Properties();
    props.put("zookeeper.connect", zk);
    props.put("group.id", groupid);
    props.put("client.id", "test");
    props.put("consumer.id", consumerid);
    props.put("auto.offset.reset", "largest");
    props.put("auto.commit.enable", "false");
    props.put("auto.commit.interval.ms", "60000");

    ConsumerConfig consumerConfig = new ConsumerConfig(props);
    ConsumerConnector consumerConnector = Consumer.createJavaConsumerConnector(consumerConfig);

    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
            consumerConnector.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream1 = consumerMap.get(topic).get(0);
    ConsumerIterator<byte[], byte[]> iterator = stream1.iterator();
    while (iterator.hasNext()) {
        MessageAndMetadata<byte[], byte[]> messageAndMetadata = iterator.next();
        String message = String.format(
                "Topic:%s, GroupID:%s, Consumer ID:%s, PartitionID:%s, Offset:%s, Message Key:%s, Message Payload: %s",
                messageAndMetadata.topic(), groupid, consumerid, messageAndMetadata.partition(),
                messageAndMetadata.offset(), new String(messageAndMetadata.key()),
                new String(messageAndMetadata.message()));
        System.out.println(message);
        consumerConnector.commitOffsets();
    }
}
Example #27
Source File: Kafka08Fetcher.java From indexr with Apache License 2.0
@Override
public void commit() {
    ConsumerConnector connector = this.connector;
    if (connector != null) {
        connector.commitOffsets();
    }
}
Example #28
Source File: KafkaDemoClient.java From iotplatform with Apache License 2.0
private static ConsumerIterator<String, String> buildConsumer(String topic) {
    Map<String, Integer> topicCountMap = new HashMap<>();
    topicCountMap.put(topic, 1);
    ConsumerConfig consumerConfig = new ConsumerConfig(consumerProperties());
    ConsumerConnector consumerConnector = Consumer.createJavaConsumerConnector(consumerConfig);
    Map<String, List<KafkaStream<String, String>>> consumers =
            consumerConnector.createMessageStreams(topicCountMap,
                    new StringDecoder(null), new StringDecoder(null));
    KafkaStream<String, String> stream = consumers.get(topic).get(0);
    return stream.iterator();
}
Example #29
Source File: KafkaConsumer.java From blog_demos with Apache License 2.0
/**
 * Start a consumer for the given topic.
 * @param topic
 */
public void startConsume(String topic) {
    Properties props = new Properties();
    props.put("zookeeper.connect", zkConnect);
    props.put("group.id", groupId);
    props.put("zookeeper.session.timeout.ms", "40000");
    props.put("zookeeper.sync.time.ms", "200");
    props.put("auto.commit.interval.ms", "1000");

    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));

    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, new Integer(1));
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
            consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);
    final ConsumerIterator<byte[], byte[]> it = stream.iterator();

    Runnable executor = new Runnable() {
        @Override
        public void run() {
            while (it.hasNext()) {
                System.out.println("************** receive:" + new String(it.next().message()));
                try {
                    Thread.sleep(3000);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        }
    };
    new Thread(executor).start();
}