Java Code Examples for kafka.consumer.ConsumerIterator#hasNext()
The following examples show how to use kafka.consumer.ConsumerIterator#hasNext().
You can go to the original project or source file by following the reference above each example.
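All of the examples below follow the same basic pattern: obtain a KafkaStream from the old high-level consumer, take its ConsumerIterator, and loop on hasNext(). The sketch below distills that pattern; the ZooKeeper address, group id, and topic name are placeholder values, not taken from any of the projects that follow. Note that hasNext() blocks until a message arrives unless consumer.timeout.ms is configured, in which case it throws ConsumerTimeoutException when the timeout expires.

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

public class MinimalHasNextExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("zookeeper.connect", "localhost:2181"); // placeholder ZooKeeper address
        props.put("group.id", "example-group");           // placeholder consumer group

        ConsumerConnector connector = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
        Map<String, List<KafkaStream<byte[], byte[]>>> streams =
                connector.createMessageStreams(Collections.singletonMap("my-topic", 1));

        ConsumerIterator<byte[], byte[]> it = streams.get("my-topic").get(0).iterator();
        // hasNext() blocks until a message is available, so this loop
        // normally runs until shutdown() is called from another thread.
        while (it.hasNext()) {
            System.out.println(new String(it.next().message()));
        }
        connector.shutdown();
    }
}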
Example 1
Source File: JavaKafkaConsumerHighAPIESImpl.java, from dk-fitting (Apache License 2.0)
public void run() {
    // 1. Get the data iterator
    ConsumerIterator<String, String> iter = this.stream.iterator();
    // 2. Iterate over the messages
    while (iter.hasNext()) {
        // 2.1 Get the message value
        MessageAndMetadata value = iter.next();
        // 2.2 Output
        // logger.info(this.threadNumber + ":" + value.offset() + ":" + value.key() + ":" + value.message());
        // System.out.println(this.threadNumber + ":" + value.offset() + ":" + value.key() + ":" + value.message());
        try {
            ElasticsearchUtils.sendToES(esIps, esPort, esClusterName,
                    indexName, typeName,
                    value.message().toString(),
                    providerProp.getProperty("consumer.es.kafkaMessage.separator"),
                    Boolean.parseBoolean(providerProp.getProperty("consumer.es.kafkaMessage.isJsonMessage")));
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    // 3. Mark the current thread as done
    logger.info("Shutdown Thread:" + this.threadNumber);
}
Example 2
Source File: Consumer.java, from cep (GNU Affero General Public License v3.0)
/**
 * Starts the consumer thread.
 */
@Override
public void run() {
    log.debug("Starting consumer for topic {}", topic);
    ConsumerIterator<byte[], byte[]> it = stream.iterator();

    // For each message present on the partition...
    while (it.hasNext()) {
        Map<String, Object> event = null;

        // Parse it with the parser associated with the topic
        try {
            event = parser.parse(new String(it.next().message(), "UTF-8"));
        } catch (UnsupportedEncodingException e) {
            e.printStackTrace();
        }

        // Send it to the source
        if (event != null) {
            source.send(topic.getName(), event);
        }
    }

    log.debug("Finished consumer for topic {}", topic);
}
Example 3
Source File: KafkaConsumer.java, from sqoop-on-spark (Apache License 2.0)
public MessageAndMetadata getNextMessage(String topic) {
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    // it has only a single stream, because there is only one consumer
    KafkaStream stream = streams.get(0);
    final ConsumerIterator<byte[], byte[]> it = stream.iterator();
    try {
        if (it.hasNext()) {
            return it.next();
        } else {
            return null;
        }
    } catch (ConsumerTimeoutException e) {
        logger.error("0 messages available to fetch for the topic " + topic);
        return null;
    }
}
Example 4
Source File: KafkaWorker.java, from elasticsearch-river-kafka (Apache License 2.0)
/**
 * Consumes the messages from the partition via the specified stream.
 */
private void consumeMessagesAndAddToBulkProcessor(final KafkaStream stream) {
    try {
        // by default it waits forever for a message, but there is a timeout configured
        final ConsumerIterator<byte[], byte[]> consumerIterator = stream.iterator();

        // Consume all the messages of the stream (partition)
        while (consumerIterator.hasNext() && consume) {
            final MessageAndMetadata messageAndMetadata = consumerIterator.next();
            logMessage(messageAndMetadata);

            elasticsearchProducer.addMessagesToBulkProcessor(messageAndMetadata);

            // StatsD reporting
            stats.messagesReceived.incrementAndGet();
            stats.lastCommitOffsetByPartitionId.put(messageAndMetadata.partition(), messageAndMetadata.offset());
        }
    } catch (ConsumerTimeoutException ex) {
        logger.debug("Nothing to be consumed for now. Consume flag is: {}", consume);
    }
}
Example 5
Source File: KafkaDistributed.java, from jlogstash-input-plugin (Apache License 2.0)
public void run() {
    try {
        while (true) {
            ConsumerIterator<byte[], byte[]> it = m_stream.iterator();
            while (it.hasNext()) {
                String m = null;
                try {
                    m = new String(it.next().message(), this.kafkaInput.encoding);
                    Map<String, Object> event = this.decoder.decode(m);
                    if (zkDistributed == null) {
                        this.kafkaInput.process(event);
                    } else {
                        zkDistributed.route(event);
                    }
                } catch (Exception e) {
                    logger.error("process event:{} failed:{}", m, ExceptionUtil.getErrorMessage(e));
                }
            }
        }
    } catch (Exception t) {
        logger.error("kafka Consumer fetch is error:{}", ExceptionUtil.getErrorMessage(t));
    }
}
Example 6
Source File: KafkaMqCollect.java, from light_drtc (Apache License 2.0)
public void collectMq() {
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(Constants.kfTopic, new Integer(1));
    StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
    StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());
    Map<String, List<KafkaStream<String, String>>> consumerMap =
            consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);
    KafkaStream<String, String> stream = consumerMap.get(Constants.kfTopic).get(0);
    ConsumerIterator<String, String> it = stream.iterator();
    MessageAndMetadata<String, String> msgMeta;
    while (it.hasNext()) {
        msgMeta = it.next();
        super.mqTimer.parseMqText(msgMeta.key(), msgMeta.message());
        // System.out.println(msgMeta.key() + "\t" + msgMeta.message());
    }
}
Example 7
Source File: KafkaConsumer.java, from flume-ng-kafka-sink (Apache License 2.0)
public MessageAndMetadata getNextMessage(String topic) {
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    // it has only a single stream, because there is only one consumer
    KafkaStream stream = streams.get(0);
    final ConsumerIterator<byte[], byte[]> it = stream.iterator();
    int counter = 0;
    while (!it.hasNext()) {
        // Wait time >= 10s, so return null and exit
        if (counter == 5) {
            logger.error("0 messages available to fetch for the topic " + topic);
            return null;
        }
        // wait till a message is published. this is a blocking call.
        try {
            Thread.sleep(2 * 1000);
        } catch (InterruptedException e) {
            // ignore
        }
        counter++;
    }
    return it.next();
}
Example 8
Source File: ConsumerWorker.java, from yuzhouwan (Apache License 2.0)
@Override
public void run() {
    ConsumerIterator<byte[], byte[]> iter = kafkaStream.iterator();
    MessageAndMetadata<byte[], byte[]> msg;
    int total = 0, fail = 0, success = 0;
    long start = System.currentTimeMillis();
    while (iter.hasNext()) {
        try {
            msg = iter.next();
            _log.info("Thread {}: {}", threadNum, new String(msg.message(), StandardCharsets.UTF_8));
            _log.info("partition: {}, offset: {}", msg.partition(), msg.offset());
            success++;
        } catch (Exception e) {
            _log.error("", e);
            fail++;
        }
        _log.info("Count [fail/success/total]: [{}/{}/{}], Time: {}s",
                fail, success, ++total, (System.currentTimeMillis() - start) / 1000);
    }
}
Example 9
Source File: JavaKafkaConsumerHighAPIHbaseImpl.java, from dk-fitting (Apache License 2.0)
public void run() {
    // 1. Get the data iterator
    ConsumerIterator<String, String> iter = this.stream.iterator();
    // 2. Iterate over the messages
    while (iter.hasNext()) {
        // 2.1 Get the message value
        MessageAndMetadata value = iter.next();
        // 2.2 Output
        // logger.info(this.threadNumber + ":" + value.offset() + ":" + value.key() + ":" + value.message());
        // System.out.println(this.threadNumber + ":" + value.offset() + ":" + value.key() + ":" + value.message());
        try {
            HbaseUtils.insertData(providerProp.getProperty("consumer.hbase.tablename"),
                    providerProp.getProperty("consumer.hbase.columnFamilyName"),
                    value.message().toString());
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    // 3. Mark the current thread as done
    logger.info("Shutdown Thread:" + this.threadNumber);
}
Example 10
Source File: KafkaConsumer.java, from blog_demos (Apache License 2.0)
/**
 * Start a consumer
 * @param topic
 */
public void startConsume(String topic) {
    Properties props = new Properties();
    props.put("zookeeper.connect", zkConnect);
    props.put("group.id", groupId);
    props.put("zookeeper.session.timeout.ms", "40000");
    props.put("zookeeper.sync.time.ms", "200");
    props.put("auto.commit.interval.ms", "1000");

    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, new Integer(1));
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);
    final ConsumerIterator<byte[], byte[]> it = stream.iterator();

    Runnable executor = new Runnable() {
        @Override
        public void run() {
            while (it.hasNext()) {
                System.out.println("************** receive:" + new String(it.next().message()));
                try {
                    Thread.sleep(3000);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        }
    };
    new Thread(executor).start();
}
Example 11
Source File: KafkaDemoClient.java, from iotplatform (Apache License 2.0)
private static void startConsumer() throws InterruptedException {
    ConsumerIterator<String, String> it = buildConsumer(CONSUMER_TOPIC);
    do {
        if (it.hasNext()) {
            MessageAndMetadata<String, String> messageAndMetadata = it.next();
            System.out.println(String.format("Kafka message [%s]", messageAndMetadata.message()));
        }
        Thread.sleep(100);
    } while (true);
}
Example 12
Source File: ReleaseDecryptConsumerServiceImpl.java, from OpenIoE (Apache License 2.0)
@Override
public void run() {
    Map<String, Integer> topicMap = new HashMap<String, Integer>();
    topicMap.put(CryptoIntegrationConstants.TOPIC_CRYPTO_RELEASE_DECRYPT_CC, new Integer(1));
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumerConnector.createMessageStreams(topicMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(CryptoIntegrationConstants.TOPIC_CRYPTO_RELEASE_DECRYPT_CC).get(0);
    ConsumerIterator<byte[], byte[]> it = stream.iterator();
    while (it.hasNext()) {
        String sessionId = new String(it.next().message());
        cryptoService.releaseDecryptCCVolatile(sessionId);
    }
}
Example 13
Source File: KafkaMessageProcessorIT.java, from mod-kafka (Apache License 2.0)
private void consumeMessages() {
    final Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(KafkaProperties.DEFAULT_TOPIC, 1);
    final StringDecoder decoder = new StringDecoder(new VerifiableProperties());
    final Map<String, List<KafkaStream<String, String>>> consumerMap =
            consumer.createMessageStreams(topicCountMap, decoder, decoder);
    final KafkaStream<String, String> stream = consumerMap.get(KafkaProperties.DEFAULT_TOPIC).get(0);
    final ConsumerIterator<String, String> iterator = stream.iterator();

    Thread kafkaMessageReceiverThread = new Thread(
            new Runnable() {
                @Override
                public void run() {
                    while (iterator.hasNext()) {
                        String msg = iterator.next().message();
                        msg = msg == null ? "<null>" : msg;
                        System.out.println("got message: " + msg);
                        messagesReceived.add(msg);
                    }
                }
            },
            "kafkaMessageReceiverThread"
    );
    kafkaMessageReceiverThread.start();
}
Example 14
Source File: KafkaTestConsumer.java, from attic-apex-malhar (Apache License 2.0)
@Override
public void run() {
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, new Integer(1));
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);
    ConsumerIterator<byte[], byte[]> it = stream.iterator();
    logger.debug("Inside consumer::run receiveCount= {}", receiveCount);
    while (it.hasNext() && isAlive) {
        Message msg = new Message(it.next().message());
        if (latch != null) {
            latch.countDown();
        }
        if (getMessage(msg).equals(KafkaOperatorTestBase.END_TUPLE)) {
            break;
        }
        holdingBuffer.add(msg);
        receiveCount++;
        logger.debug("Consuming {}, receiveCount= {}", getMessage(msg), receiveCount);
        try {
            Thread.sleep(50);
        } catch (InterruptedException e) {
            break;
        }
    }
    logger.debug("DONE consuming");
}
Example 15
Source File: DemoHighLevelConsumer.java, from KafkaExample (Apache License 2.0)
public static void main(String[] args) {
    // Hard-coded demo arguments; remove this line to use real command-line arguments
    args = new String[] { "zookeeper0:2181/kafka", "topic1", "group2", "consumer1" };
    if (args == null || args.length != 4) {
        System.err.println("Usage:\n\tjava -jar kafka_consumer.jar ${zookeeper_list} ${topic_name} ${group_name} ${consumer_id}");
        System.exit(1);
    }
    String zk = args[0];
    String topic = args[1];
    String groupid = args[2];
    String consumerid = args[3];

    Properties props = new Properties();
    props.put("zookeeper.connect", zk);
    props.put("group.id", groupid);
    props.put("client.id", "test");
    props.put("consumer.id", consumerid);
    props.put("auto.offset.reset", "largest");
    props.put("auto.commit.enable", "false");
    props.put("auto.commit.interval.ms", "60000");

    ConsumerConfig consumerConfig = new ConsumerConfig(props);
    ConsumerConnector consumerConnector = Consumer.createJavaConsumerConnector(consumerConfig);

    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumerConnector.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream1 = consumerMap.get(topic).get(0);
    ConsumerIterator<byte[], byte[]> iterator = stream1.iterator();
    while (iterator.hasNext()) {
        MessageAndMetadata<byte[], byte[]> messageAndMetadata = iterator.next();
        String message = String.format(
                "Topic:%s, GroupID:%s, Consumer ID:%s, PartitionID:%s, Offset:%s, Message Key:%s, Message Payload: %s",
                messageAndMetadata.topic(), groupid, consumerid,
                messageAndMetadata.partition(), messageAndMetadata.offset(),
                new String(messageAndMetadata.key()), new String(messageAndMetadata.message()));
        System.out.println(message);
        consumerConnector.commitOffsets();
    }
}
Example 16
Source File: ConsumerTest.java, from product-cep (Apache License 2.0)
public void run() {
    try {
        log.info("start consuming");
        ConsumerIterator<byte[], byte[]> iterator = kafkaStream.iterator();
        Pattern eventPattern = Pattern.compile("(timestamp\":(\\d+))");
        while (iterator.hasNext()) {
            String message = new String(iterator.next().message());
            receivedTime = System.currentTimeMillis();
            // Timestamp pattern match for json format event
            Matcher eventPatternMatcher = eventPattern.matcher(message);
            if (eventPatternMatcher.find()) {
                sentTime = Long.parseLong(eventPatternMatcher.group(2));
            } else {
                log.error("unable to extract timestamp from received event");
            }
            latency.addAndGet(receivedTime - sentTime);
            eventCount.incrementAndGet();
            if (eventCount.get() % elapsedCount == 0) {
                long currentTime = System.currentTimeMillis();
                long elapsedTime = currentTime - lastTime.getAndSet(currentTime);
                double throughputPerSecond = (((double) elapsedCount) / elapsedTime) * 1000;
                log.info("Received " + elapsedCount + " sensor events in " + elapsedTime
                        + " milliseconds with total throughput of " + decimalFormat.format(throughputPerSecond)
                        + " events per second. Average latency is "
                        + (double) latency.get() / elapsedCount + " milliseconds per event.");
                latency.set(0);
            }
        }
        log.info("Received Total of " + eventCount.get() + " sensor events");
    } catch (Throwable t) {
        log.error("Error when receiving messages", t);
    }
}
Example 17
Source File: KafkaMessageReceiverPool.java, from message-queue-client-framework (Apache License 2.0)
@Override
public void run() {
    logger.info(Thread.currentThread().getName() + " clientId: " + stream.clientId() + " start.");
    ConsumerIterator<K, V> it = stream.iterator();
    while (it.hasNext()) {
        MessageAndMetadata<K, V> messageAndMetadata = it.next();
        try {
            this.adapter.messageAdapter(messageAndMetadata);
        } catch (MQException e) {
            if (receiverRetry != null)
                receiverRetry.receiveMessageRetry(messageAndMetadata);
            logger.error("Receive message failed."
                    + " topic: " + messageAndMetadata.topic()
                    + " offset: " + messageAndMetadata.offset()
                    + " partition: " + messageAndMetadata.partition(), e);
        } finally {
            /* commitOffsets */
            if (!getAutoCommit()) {
                consumer.commitOffsets(Collections.singletonMap(
                        TopicAndPartition.apply(messageAndMetadata.topic(), messageAndMetadata.partition()),
                        OffsetAndMetadata.apply(messageAndMetadata.offset() + 1)), true);
            }
        }
    }
    logger.info(Thread.currentThread().getName() + " clientId: " + stream.clientId() + " end.");
}
Example 18
Source File: JavaKafkaConsumerHighAPIHdfsImpl.java, from dk-fitting (Apache License 2.0)
public void run() {
    // 1. Get the data iterator
    ConsumerIterator<String, String> iter = this.stream.iterator();
    // 2. Iterate over the messages
    while (iter.hasNext()) {
        // 2.1 Get the message value
        MessageAndMetadata value = iter.next();
        count++;
        // 2.2 Output
        // logger.info(count + ":" + this.threadNumber + ":" + value.offset() + ":" + value.key() + ":" + value.message());
        // System.out.println(count + ":" + this.threadNumber + ":" + value.offset() + ":" + value.key() + ":" + value.message());
        try {
            String hdfs_xml = providerProp.getProperty("consumer.hdfs.hdfs.path");
            String core_xml = providerProp.getProperty("consumer.hdfs.core.path");
            String krb5_conf = providerProp.getProperty("consumer.hdfs.krb5.path");
            String principal = providerProp.getProperty("consumer.hdfs.principal.path");
            String keytab = providerProp.getProperty("consumer.hdfs.keytab.pat");
            HDFSUtils.sendToHDFS(hdfs_xml, core_xml, krb5_conf, principal, keytab,
                    hdfsPath + "/" + this.threadNumber, value.message().toString() + "\n");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    // 3. Mark the current thread as done
    System.out.println("Shutdown Thread:" + this.threadNumber);
}
Example 19
Source File: ThrottlingManagerEstimatorConsumerFactory.java, from warp10-platform (Apache License 2.0)
@Override
public Runnable getConsumer(final KafkaSynchronizedConsumerPool pool, final KafkaStream<byte[], byte[]> stream) {
    return new Runnable() {
        @Override
        public void run() {
            ConsumerIterator<byte[], byte[]> iter = stream.iterator();

            // Iterate on the messages
            TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());

            KafkaOffsetCounters counters = pool.getCounters();

            try {
                while (iter.hasNext()) {
                    //
                    // Since the call to 'next' may block, we need to first
                    // check that there is a message available
                    //
                    boolean nonEmpty = iter.nonEmpty();

                    if (nonEmpty) {
                        MessageAndMetadata<byte[], byte[]> msg = iter.next();
                        counters.count(msg.partition(), msg.offset());

                        byte[] data = msg.message();

                        Sensision.update(SensisionConstants.CLASS_WARP_INGRESS_KAFKA_THROTTLING_IN_MESSAGES, Sensision.EMPTY_LABELS, 1);
                        Sensision.update(SensisionConstants.CLASS_WARP_INGRESS_KAFKA_THROTTLING_IN_BYTES, Sensision.EMPTY_LABELS, data.length);

                        if (null != macKey) {
                            data = CryptoUtils.removeMAC(macKey, data);
                        }

                        // Skip data whose MAC was not verified successfully
                        if (null == data) {
                            Sensision.update(SensisionConstants.CLASS_WARP_INGRESS_KAFKA_THROTTLING_IN_INVALIDMACS, Sensision.EMPTY_LABELS, 1);
                            continue;
                        }

                        //
                        // Update throttling manager
                        //
                        try {
                            ThrottlingManager.fuse(HyperLogLogPlus.fromBytes(data));
                            Sensision.update(SensisionConstants.CLASS_WARP_INGRESS_THROTLLING_FUSIONS, Sensision.EMPTY_LABELS, 1);
                        } catch (Exception e) {
                            Sensision.update(SensisionConstants.CLASS_WARP_INGRESS_THROTLLING_FUSIONS_FAILED, Sensision.EMPTY_LABELS, 1);
                        }
                    }
                }
            } catch (Throwable t) {
                t.printStackTrace(System.err);
            } finally {
                // Set abort to true in case we exit the 'run' method
                pool.getAbort().set(true);
            }
        }
    };
}
Example 20
Source File: KafkaConsumerRunnableBasic.java, from monasca-persister (Apache License 2.0)
public void run() {
    logger.info("[{}]: run", this.threadId);
    active = true;

    final ConsumerIterator<byte[], byte[]> it = kafkaChannel.getKafkaStream().iterator();
    logger.debug("[{}]: KafkaChannel has stream iterator", this.threadId);

    while (!this.stop) {
        try {
            try {
                if (isInterrupted()) {
                    logger.debug("[{}]: is interrupted", this.threadId);
                    break;
                }
                if (it.hasNext()) {
                    if (isInterrupted()) {
                        logger.debug("[{}]: is interrupted", this.threadId);
                        break;
                    }
                    if (this.stop) {
                        logger.debug("[{}]: is stopped", this.threadId);
                        break;
                    }
                    final String msg = new String(it.next().message());
                    if (logger.isDebugEnabled()) {
                        logger.debug("[{}]: {}", this.threadId, msg);
                    }
                    publishEvent(msg);
                }
            } catch (kafka.consumer.ConsumerTimeoutException cte) {
                if (isInterrupted()) {
                    logger.debug("[{}]: is interrupted", this.threadId);
                    break;
                }
                if (this.stop) {
                    logger.debug("[{}]: is stopped", this.threadId);
                    break;
                }
                publishHeartbeat();
            }
        } catch (Throwable e) {
            logger.error("[{}]: caught fatal exception while publishing msg. Shutting entire persister down "
                    + "now!", this.threadId, e);
            logger.error("[{}]: calling shutdown on executor service", this.threadId);
            this.executorService.shutdownNow();
            logger.error("[{}]: shutting down system. calling system.exit(1)", this.threadId);
            System.exit(1);
        }
    }

    logger.info("[{}]: calling stop on kafka channel", this.threadId);
    active = false;
    this.kafkaChannel.stop();
    logger.debug("[{}]: exiting main run loop", this.threadId);
}