kafka.message.MessageAndMetadata Java Examples
The following examples show how to use
kafka.message.MessageAndMetadata.
The original project and source file are noted above each example.
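Most of the examples share the same setup: build a high-level consumer from a ConsumerConfig, open a KafkaStream for a topic, and iterate the MessageAndMetadata records the stream yields. The minimal sketch below illustrates that pattern in isolation; it is not taken from any of the projects listed here, and the topic name, ZooKeeper address and group id are placeholders.

import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

public class MessageAndMetadataExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("zookeeper.connect", "localhost:2181"); // placeholder address
        props.put("group.id", "example-group");           // placeholder group id
        ConsumerConnector consumer =
                Consumer.createJavaConsumerConnector(new ConsumerConfig(props));

        // One stream for the topic; the stream yields MessageAndMetadata records.
        Map<String, List<KafkaStream<byte[], byte[]>>> streams =
                consumer.createMessageStreams(Collections.singletonMap("example-topic", 1));
        ConsumerIterator<byte[], byte[]> it = streams.get("example-topic").get(0).iterator();

        while (it.hasNext()) {
            MessageAndMetadata<byte[], byte[]> msg = it.next();
            System.out.printf("topic=%s partition=%d offset=%d payload=%s%n",
                    msg.topic(), msg.partition(), msg.offset(),
                    new String(msg.message(), StandardCharsets.UTF_8));
        }
        consumer.shutdown();
    }
}

Each MessageAndMetadata record carries the message payload together with its topic, partition, offset and key, which is what the examples below read off the record.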
Example #1
Source File: ConsumerWorker.java From yuzhouwan with Apache License 2.0
@Override
public void run() {
    ConsumerIterator<byte[], byte[]> iter = kafkaStream.iterator();
    MessageAndMetadata<byte[], byte[]> msg;
    int total = 0, fail = 0, success = 0;
    long start = System.currentTimeMillis();
    while (iter.hasNext()) {
        try {
            msg = iter.next();
            _log.info("Thread {}: {}", threadNum, new String(msg.message(), StandardCharsets.UTF_8));
            _log.info("partition: {}, offset: {}", msg.partition(), msg.offset());
            success++;
        } catch (Exception e) {
            _log.error("", e);
            fail++;
        }
        _log.info("Count [fail/success/total]: [{}/{}/{}], Time: {}s", fail, success, ++total,
                (System.currentTimeMillis() - start) / 1000);
    }
}
Example #2
Source File: IndexingMangerUtil.java From linden with Apache License 2.0
public static IndexingManager initIndexingManger(LindenConfig config, ShardingStrategy shardingStrategy,
                                                 LindenCore lindenCore) throws IOException {
    IndexingManager indexingManager = null;
    LindenPluginManager pluginManager = config.getPluginManager();
    LindenGateway gateway = pluginManager.getInstance(LindenConfigBuilder.GATEWAY, LindenGateway.class);
    if (gateway != null) {
        DataProvider dataProvider = gateway.buildDataProvider();
        if (dataProvider != null) {
            if (dataProvider.getType() == String.class) {
                indexingManager = new StringIndexingManager(config, shardingStrategy, lindenCore, dataProvider);
            } else if (dataProvider.getType() == MessageAndMetadata.class) {
                indexingManager = new KafkaIndexingManager(config, shardingStrategy, lindenCore, dataProvider);
            } else {
                throw new IOException("Unsupported data provider type");
            }
            indexingManager.start();
        }
    }
    return indexingManager;
}
Example #3
Source File: KafkaIndexingManager.java From linden with Apache License 2.0
public KafkaIndexingManager(final LindenConfig lindenConfig, ShardingStrategy shardingStrategy,
                            LindenCore lindenCore, DataProvider<MessageAndMetadata<byte[], byte[]>> provider) {
    super(provider, lindenConfig, lindenCore,
        new Function<MessageAndMetadata<byte[], byte[]>, LindenIndexRequest>() {
            @Override
            public LindenIndexRequest apply(MessageAndMetadata<byte[], byte[]> messageAndMetadata) {
                LindenIndexRequest indexRequest = null;
                long offset = messageAndMetadata.offset();
                long partition = messageAndMetadata.partition();
                String message = new String(messageAndMetadata.message());
                try {
                    indexRequest = LindenIndexRequestParser.parse(lindenConfig.getSchema(), message);
                    LOGGER.info("Parse index request : id={}, route={}, type={}, content({}/{})={}",
                        indexRequest.getId(), indexRequest.getRouteParam(), indexRequest.getType(),
                        partition, offset, message);
                } catch (IOException e) {
                    LOGGER.error("Parse index request failed : {} - {}", message,
                        Throwables.getStackTraceAsString(e));
                }
                return indexRequest;
            }
        }, shardingStrategy);
}
Example #4
Source File: KafkaDataProvider.java From linden with Apache License 2.0
public KafkaDataProvider(String zookeeper, String topic, String groupId) {
    super(MessageAndMetadata.class);
    Properties props = new Properties();
    props.put("zookeeper.connect", zookeeper);
    props.put("group.id", groupId);
    props.put("zookeeper.session.timeout.ms", "30000");
    props.put("auto.commit.interval.ms", "1000");
    props.put("fetch.message.max.bytes", "4194304");
    consumer = kafka.consumer.Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);
    iter = stream.iterator();
}
Example #5
Source File: JavaKafkaConsumerHighAPIHbaseImpl.java From dk-fitting with Apache License 2.0
public void run() {
    // 1. Get the data iterator
    ConsumerIterator<String, String> iter = this.stream.iterator();
    // 2. Iterate over the data
    while (iter.hasNext()) {
        // 2.1 Get the next message
        MessageAndMetadata value = iter.next();
        // 2.2 Output
        // logger.info(this.threadNumber + ":" + value.offset() + ":" + value.key() + ":" + value.message());
        // System.out.println(this.threadNumber + ":" + value.offset() + ":" + value.key() + ":" + value.message());
        try {
            HbaseUtils.insertData(providerProp.getProperty("consumer.hbase.tablename"),
                providerProp.getProperty("consumer.hbase.columnFamilyName"),
                value.message().toString());
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    // 3. This thread has finished
    logger.info("Shutdown Thread:" + this.threadNumber);
}
Example #6
Source File: Processer.java From blog_demos with Apache License 2.0
public void run() {
    // 1. Get the data iterator
    ConsumerIterator<String, String> iter = this.stream.iterator();
    logger.info("server [{}] start run", TOMCAT_ID);
    // 2. Iterate over the data
    while (iter.hasNext()) {
        // 2.1 Get the next message
        MessageAndMetadata value = iter.next();
        // 2.2 Output
        logger.info("server [{}], threadNumber [{}], offset [{}], key [{}], message[{}]",
            TOMCAT_ID, threadNumber, value.offset(), value.key(), value.message());
    }
    // 3. This thread has finished
    logger.info("Shutdown Thread:" + this.threadNumber);
}
Example #7
Source File: StreamingEngine.java From spark-streaming-direct-kafka with Apache License 2.0
public void start() {
    SparkConf sparkConf = getSparkConf();
    streamingContext = new JavaStreamingContext(sparkConf,
        Durations.seconds(Long.parseLong(config.getStreamingBatchIntervalInSec())));
    JavaInputDStream<MessageAndMetadata<String, byte[]>> dStream = buildInputDStream(streamingContext);
    JavaPairDStream<String, byte[]> pairDStream = dStream.mapToPair(km -> new Tuple2<>(km.key(), km.message()));
    pairDStream.foreachRDD(new ProcessStreamingData<>(config)); // process data
    dStream.foreachRDD(new UpdateOffsetsFn<>(config.getKafkaGroupId(), config.getZkOffsetManager()));
    streamingContext.start();
}
Example #8
Source File: KafkaOffsetGetter.java From Kafka-Insight with Apache License 2.0
/**
 * When an object implementing interface <code>Runnable</code> is used
 * to create a thread, starting the thread causes the object's
 * <code>run</code> method to be called in that separately executing
 * thread.
 * <p>
 * The general contract of the method <code>run</code> is that it may
 * take any action whatsoever.
 *
 * @see Thread#run()
 */
@Override
public void run() {
    ConsumerConnector consumerConnector = KafkaUtils.createConsumerConnector(zkAddr, group);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(CONSUMER_OFFSET_TOPIC, new Integer(1));
    KafkaStream<byte[], byte[]> offsetMsgStream = consumerConnector.createMessageStreams(topicCountMap)
        .get(CONSUMER_OFFSET_TOPIC).get(0);
    ConsumerIterator<byte[], byte[]> it = offsetMsgStream.iterator();
    while (true) {
        MessageAndMetadata<byte[], byte[]> offsetMsg = it.next();
        if (ByteBuffer.wrap(offsetMsg.key()).getShort() < 2) {
            try {
                GroupTopicPartition commitKey = readMessageKey(ByteBuffer.wrap(offsetMsg.key()));
                if (offsetMsg.message() == null) {
                    continue;
                }
                kafka.common.OffsetAndMetadata commitValue = readMessageValue(ByteBuffer.wrap(offsetMsg.message()));
                kafkaConsumerOffsets.put(commitKey, commitValue);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }
}
Example #9
Source File: KafkaMqCollect.java From light_drtc with Apache License 2.0
public void collectMq() {
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(Constants.kfTopic, new Integer(1));
    StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
    StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());
    Map<String, List<KafkaStream<String, String>>> consumerMap =
        consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);
    KafkaStream<String, String> stream = consumerMap.get(Constants.kfTopic).get(0);
    ConsumerIterator<String, String> it = stream.iterator();
    MessageAndMetadata<String, String> msgMeta;
    while (it.hasNext()) {
        msgMeta = it.next();
        super.mqTimer.parseMqText(msgMeta.key(), msgMeta.message());
        //System.out.println(msgMeta.key() + "\t" + msgMeta.message());
    }
}
Example #10
Source File: DeleteDocumentProducer.java From elasticsearch-river-kafka with Apache License 2.0
/**
 * For the given messages creates delete document requests and adds them to the bulk processor queue, for
 * processing later when the size of bulk actions is reached.
 *
 * @param messageAndMetadata given message
 */
public void addMessagesToBulkProcessor(final MessageAndMetadata messageAndMetadata) {
    final byte[] messageBytes = (byte[]) messageAndMetadata.message();
    if (messageBytes == null || messageBytes.length == 0) return;
    try {
        final Map<String, Object> messageMap = reader.readValue(messageBytes);
        if (messageMap.containsKey("id")) {
            String id = (String) messageMap.get("id");
            final DeleteRequest request = Requests.deleteRequest(riverConfig.getIndexName()).
                type(riverConfig.getTypeName()).
                id(id);
            bulkProcessor.add(request);
        } else {
            throw new IllegalArgumentException("No id provided in a message to delete a document from EL.");
        }
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
Example #11
Source File: KafkaSubscriber.java From quarks with Apache License 2.0
void accept(MessageAndMetadata<byte[], byte[]> rec) {
    try {
        trace.trace("{} received rec for topic:{} partition:{} offset:{}",
            id(), rec.topic(), rec.partition(), rec.offset());
        T tuple;
        if (stringToTupleFn != null)
            tuple = stringToTupleFn.apply(new StringConsumerRecord(rec));
        else
            tuple = byteToTupleFn.apply(new ByteConsumerRecord(rec));
        eventSubmitter.accept(tuple);
    } catch (Exception e) {
        String tp = String.format("[%s,%d]", rec.topic(), rec.partition());
        trace.error("{} failure processing record from {}", id(), tp, e);
    }
}
Example #12
Source File: KafkaWorker.java From elasticsearch-river-kafka with Apache License 2.0
/**
 * Consumes the messages from the partition via specified stream.
 */
private void consumeMessagesAndAddToBulkProcessor(final KafkaStream stream) {
    try {
        // by default it waits forever for a message, but there is a timeout configured
        final ConsumerIterator<byte[], byte[]> consumerIterator = stream.iterator();
        // Consume all the messages of the stream (partition)
        while (consumerIterator.hasNext() && consume) {
            final MessageAndMetadata messageAndMetadata = consumerIterator.next();
            logMessage(messageAndMetadata);
            elasticsearchProducer.addMessagesToBulkProcessor(messageAndMetadata);
            // StatsD reporting
            stats.messagesReceived.incrementAndGet();
            stats.lastCommitOffsetByPartitionId.put(messageAndMetadata.partition(), messageAndMetadata.offset());
        }
    } catch (ConsumerTimeoutException ex) {
        logger.debug("Nothing to be consumed for now. Consume flag is: {}", consume);
    }
}
Example #13
Source File: KafkaSourceTest.java From flume-ng-kafka-source with Apache License 2.0
@SuppressWarnings("unchecked") @Before public void setup() throws Exception { mockIt = mock(ConsumerIterator.class); mockMessageAndMetadata = mock(MessageAndMetadata.class); mockChannelProcessor = mock(ChannelProcessor.class); mockBuffer = mock(ByteBuffer.class); mockMessage = mock(Message.class); mockKafkaSource = new KafkaSource(); when(mockMessage.payload()).thenReturn(mockBuffer); when(mockMessageAndMetadata.message()).thenReturn(mockMessage); Field field = AbstractSource.class.getDeclaredField("channelProcessor"); field.setAccessible(true); field.set(mockKafkaSource, mockChannelProcessor); field = KafkaSource.class.getDeclaredField("it"); field.setAccessible(true); field.set(mockKafkaSource, mockIt); }
Example #14
Source File: KafkaPersistReaderTask.java From streams with Apache License 2.0
@Override
public void run() {
    MessageAndMetadata<String, String> item;
    while (true) {
        for (MessageAndMetadata<String, String> aStream : stream) {
            item = aStream;
            reader.persistQueue.add(new StreamsDatum(item.message()));
        }
        try {
            Thread.sleep(new Random().nextInt(100));
        } catch (InterruptedException interrupt) {
            LOGGER.trace("Interrupt", interrupt);
        }
    }
}
Example #15
Source File: JavaKafkaConsumerHighAPIESImpl.java From dk-fitting with Apache License 2.0
public void run() {
    // 1. Get the data iterator
    ConsumerIterator<String, String> iter = this.stream.iterator();
    // 2. Iterate over the data
    while (iter.hasNext()) {
        // 2.1 Get the next message
        MessageAndMetadata value = iter.next();
        // 2.2 Output
        // logger.info(this.threadNumber + ":" + value.offset() + ":" + value.key() + ":" + value.message());
        // System.out.println(this.threadNumber + ":" + value.offset() + ":" + value.key() + ":" + value.message());
        try {
            ElasticsearchUtils.sendToES(esIps, esPort, esClusterName,
                indexName, typeName,
                value.message().toString(),
                providerProp.getProperty("consumer.es.kafkaMessage.separator"),
                Boolean.parseBoolean(providerProp.getProperty("consumer.es.kafkaMessage.isJsonMessage")));
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    // 3. This thread has finished
    logger.info("Shutdown Thread:" + this.threadNumber);
}
Example #16
Source File: KafkaConsumer08.java From datacollector with Apache License 2.0
@Override
public MessageAndOffset read() throws StageException {
    try {
        // hasNext() blocks indefinitely if consumer.timeout.ms is set to -1.
        // But if consumer.timeout.ms is set to a value, like 6000, a ConsumerTimeoutException is thrown
        // if no message is written to the kafka topic in that time.
        if (consumerIterator.hasNext()) {
            MessageAndMetadata<byte[], byte[]> messageAndMetadata = consumerIterator.next();
            byte[] message = messageAndMetadata.message();
            long offset = messageAndMetadata.offset();
            int partition = messageAndMetadata.partition();
            return new MessageAndOffset(messageAndMetadata.key(), message, offset, partition);
        }
        return null;
    } catch (ConsumerTimeoutException e) {
        /* For the high-level consumer the fetching logic is handled by a background fetcher thread and is
           hidden from the user. In either case of 1) broker down or 2) no message available, the fetcher
           thread keeps retrying while the user thread waits on the fetcher thread to put some data into
           the buffer, until timeout. In short, the high-level consumer design is to not let users worry
           about connect / reconnect issues. */
        return null;
    }
}
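For context on the comments above: whether hasNext() blocks or throws ConsumerTimeoutException is governed by the consumer.timeout.ms property of the old high-level consumer. A minimal configuration fragment that enables the timeout might look like the following sketch; the connection values are placeholders, not taken from the datacollector project.

Properties props = new Properties();
props.put("zookeeper.connect", "localhost:2181"); // placeholder address
props.put("group.id", "example-group");           // placeholder group id
// -1 (the default) makes ConsumerIterator.hasNext() block until a message arrives;
// a positive value makes it throw ConsumerTimeoutException after that many milliseconds.
props.put("consumer.timeout.ms", "6000");
ConsumerConfig config = new ConsumerConfig(props);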
Example #17
Source File: KafkaSinkTest.java From flume-ng-kafka-sink with Apache License 2.0
@Test
public void testPreprocessorForCustomKey() {
    Context context = prepareDefaultContext();
    // configure the static topic
    context.put(Constants.TOPIC, TestConstants.STATIC_TOPIC);
    // configure the preprocessor
    context.put(Constants.PREPROCESSOR, "com.thilinamb.flume.sink.preprocessor.ModifyKeyPreprocessor");
    String msg = "custom-key-test";
    try {
        Sink.Status status = prepareAndSend(context, msg);
        if (status == Sink.Status.BACKOFF) {
            fail("Error Occurred");
        }
    } catch (EventDeliveryException ex) {
        // ignore
    }
    MessageAndMetadata message = testUtil.getNextMessageFromConsumer(TestConstants.STATIC_TOPIC);
    String msgBody = new String((byte[]) message.message());
    // check the message body and the key. Only the key should be changed. The topic has already been
    // verified by consuming from the correct topic.
    assertEquals(msg, msgBody);
    assertEquals(TestConstants.CUSTOM_KEY, new String((byte[]) message.key()));
}
Example #18
Source File: KafkaSinkTest.java From flume-ng-kafka-sink with Apache License 2.0
@Test
public void testPreprocessorForCustomTopic() {
    Context context = prepareDefaultContext();
    // configure the static topic
    context.put(Constants.TOPIC, TestConstants.STATIC_TOPIC);
    // configure the preprocessor
    context.put(Constants.PREPROCESSOR, "com.thilinamb.flume.sink.preprocessor.ModifyTopicPreprocessor");
    String msg = "custom-topic-test";
    try {
        Sink.Status status = prepareAndSend(context, msg);
        if (status == Sink.Status.BACKOFF) {
            fail("Error Occurred");
        }
    } catch (EventDeliveryException ex) {
        // ignore
    }
    // when the message is modified by the preprocessor, it should be published to the custom topic.
    MessageAndMetadata message = testUtil.getNextMessageFromConsumer(TestConstants.CUSTOM_TOPIC);
    String msgBody = new String((byte[]) message.message());
    // check the message body. The topic has already been verified by consuming the message from the custom topic.
    assertEquals(msg, msgBody);
}
Example #19
Source File: KafkaMessageReceiverRetryTest.java From message-queue-client-framework with Apache License 2.0
@Test
public void _test() throws Exception {
    KafkaMessageReceiverRetry kafkaMessageReceiverRetry = new KafkaMessageReceiverRetry("test", 10,
        new KafkaMessageAdapter<Object, Object>() {
            @Override
            public void messageAdapter(ConsumerRecord<?, ?> consumerRecord) throws MQException {
                throw new MQException(consumerRecord.value().toString());
            }

            @Override
            public void messageAdapter(MessageAndMetadata<?, ?> messageAndMetadata) throws MQException {
                throw new MQException(messageAndMetadata.topic());
            }
        });
    kafkaMessageReceiverRetry.receiveMessageCount(null);
    kafkaMessageReceiverRetry.receiveMessageRetry(new ConsumerRecord<Object, Object>("test1", 0, 123, "key", "val"));
    kafkaMessageReceiverRetry.destroy();
    Thread.sleep(2000);
}
Example #20
Source File: KafkaSinkTest.java From flume-ng-kafka-sink with Apache License 2.0
@Test
public void testPreprocessorForCustomMessageBody() {
    Context context = prepareDefaultContext();
    // configure the static topic
    context.put(Constants.TOPIC, TestConstants.STATIC_TOPIC);
    // configure the preprocessor
    context.put(Constants.PREPROCESSOR, "com.thilinamb.flume.sink.preprocessor.ModifyMessageBodyPreprocessor");
    String msg = "original-message-body";
    try {
        Sink.Status status = prepareAndSend(context, msg);
        if (status == Sink.Status.BACKOFF) {
            fail("Error Occurred");
        }
    } catch (EventDeliveryException ex) {
        // ignore
    }
    // when the message body is modified by the preprocessor, the modified body should be published
    // to the static topic.
    MessageAndMetadata message = testUtil.getNextMessageFromConsumer(TestConstants.STATIC_TOPIC);
    String msgBody = new String((byte[]) message.message());
    // check the message body.
    assertEquals(TestConstants.CUSTOM_MSG_BODY, msgBody);
}
Example #21
Source File: KafkaMessageReceiverRetry.java From message-queue-client-framework with Apache License 2.0
/**
 * Receive message error.
 *
 * @param record  the record
 * @param retries the retries
 * @param e       the e
 */
public void receiveMessageError(T record, int retries, MQException e) {
    if (record instanceof ConsumerRecord) {
        ConsumerRecord<?, ?> consumerRecord = (ConsumerRecord) record;
        logger.error("Receive message failed."
                + " retries: " + retries
                + " topic: " + consumerRecord.topic()
                + " offset: " + consumerRecord.offset()
                + " partition: " + consumerRecord.partition(), e);
    } else if (record instanceof MessageAndMetadata) {
        MessageAndMetadata<?, ?> messageAndMetadata = (MessageAndMetadata) record;
        logger.error("Receive message failed."
                + " retries: " + retries
                + " topic: " + messageAndMetadata.topic()
                + " offset: " + messageAndMetadata.offset()
                + " partition: " + messageAndMetadata.partition(), e);
    }
}
Example #22
Source File: KafkaConsumer.java From flume-ng-kafka-sink with Apache License 2.0
public MessageAndMetadata getNextMessage(String topic) {
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    KafkaStream stream = streams.get(0); // it has only a single stream, because there is only one consumer
    final ConsumerIterator<byte[], byte[]> it = stream.iterator();
    int counter = 0;
    while (!it.hasNext()) {
        // Wait time >= 10s, so return null and exit
        if (counter == 5) {
            logger.error("0 messages available to fetch for the topic " + topic);
            return null;
        }
        // wait till a message is published. this is a blocking call.
        try {
            Thread.sleep(2 * 1000);
        } catch (InterruptedException e) {
            // ignore
        }
        counter++;
    }
    return it.next();
}
Example #23
Source File: LegacyKafkaMessageIterator.java From secor with Apache License 2.0
@Override
public Message next() {
    MessageAndMetadata<byte[], byte[]> kafkaMessage;
    try {
        kafkaMessage = mIterator.next();
    } catch (ConsumerTimeoutException e) {
        throw new LegacyConsumerTimeoutException(e);
    }
    long timestamp = 0L;
    if (mConfig.useKafkaTimestamp()) {
        timestamp = mKafkaMessageTimestampFactory.getKafkaMessageTimestamp().getTimestamp(kafkaMessage);
    }
    return new Message(kafkaMessage.topic(), kafkaMessage.partition(), kafkaMessage.offset(),
        kafkaMessage.key(), kafkaMessage.message(), timestamp, null);
}
Example #24
Source File: KafkaMessageReceiverRetry.java From message-queue-client-framework with Apache License 2.0
/**
 * Receive message retry.
 *
 * @param record the record
 */
public void receiveMessageRetry(T record) {
    try {
        if (record instanceof MessageAndMetadata) {
            MessageAndMetadata messageAndMetadata = (MessageAndMetadata) record;
            if (0 < errorMessageCount(messageAndMetadata.topic(), messageAndMetadata.partition(),
                    messageAndMetadata.offset())) {
                errorMessageQueue.offer(record, errorTimeout, TimeUnit.MILLISECONDS);
            }
        } else if (record instanceof ConsumerRecord) {
            ConsumerRecord consumerRecord = (ConsumerRecord) record;
            if (0 < errorMessageCount(consumerRecord.topic(), consumerRecord.partition(),
                    consumerRecord.offset())) {
                errorMessageQueue.offer(record, errorTimeout, TimeUnit.MILLISECONDS);
            }
        }
    } catch (InterruptedException e) {
        logger.error("BlockingQueue offer failed.", e);
    }
}
Example #25
Source File: KafkaConsumer.java From sqoop-on-spark with Apache License 2.0
public MessageAndMetadata getNextMessage(String topic) {
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    // it has only a single stream, because there is only one consumer
    KafkaStream stream = streams.get(0);
    final ConsumerIterator<byte[], byte[]> it = stream.iterator();
    int counter = 0;
    try {
        if (it.hasNext()) {
            return it.next();
        } else {
            return null;
        }
    } catch (ConsumerTimeoutException e) {
        logger.error("0 messages available to fetch for the topic " + topic);
        return null;
    }
}
Example #26
Source File: KafkaMessageReceiverPool.java From message-queue-client-framework with Apache License 2.0
@Override
public void run() {
    logger.info(Thread.currentThread().getName() + " clientId: " + stream.clientId() + " start.");
    ConsumerIterator<K, V> it = stream.iterator();
    while (it.hasNext()) {
        MessageAndMetadata<K, V> messageAndMetadata = it.next();
        try {
            this.adapter.messageAdapter(messageAndMetadata);
        } catch (MQException e) {
            if (receiverRetry != null)
                receiverRetry.receiveMessageRetry(messageAndMetadata);
            logger.error("Receive message failed."
                    + " topic: " + messageAndMetadata.topic()
                    + " offset: " + messageAndMetadata.offset()
                    + " partition: " + messageAndMetadata.partition(), e);
        } finally {
            /* commitOffsets */
            if (!getAutoCommit()) {
                consumer.commitOffsets(Collections.singletonMap(
                    TopicAndPartition.apply(messageAndMetadata.topic(), messageAndMetadata.partition()),
                    OffsetAndMetadata.apply(messageAndMetadata.offset() + 1)), true);
            }
        }
    }
    logger.info(Thread.currentThread().getName() + " clientId: " + stream.clientId() + " end.");
}
Example #27
Source File: KafkaKeyValueProducerPusherTest.java From incubator-gobblin with Apache License 2.0
@Test
public void test() throws IOException {
    // Test that the scoped config overrides the generic config
    Pusher pusher = new KafkaKeyValueProducerPusher<byte[], byte[]>("127.0.0.1:dummy", TOPIC,
        Optional.of(ConfigFactory.parseMap(ImmutableMap.of(
            ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
            "127.0.0.1:" + this.kafkaTestHelper.getKafkaServerPort()))));

    String msg1 = "msg1";
    String msg2 = "msg2";

    pusher.pushMessages(Lists.newArrayList(Pair.of("key1", msg1.getBytes()), Pair.of("key2", msg2.getBytes())));

    try {
        Thread.sleep(1000);
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
    }

    ConsumerIterator<byte[], byte[]> iterator = this.kafkaTestHelper.getIteratorForTopic(TOPIC);

    assert (iterator.hasNext());
    MessageAndMetadata<byte[], byte[]> messageAndMetadata = iterator.next();
    Assert.assertEquals(new String(messageAndMetadata.key()), "key1");
    Assert.assertEquals(new String(messageAndMetadata.message()), msg1);

    assert (iterator.hasNext());
    messageAndMetadata = iterator.next();
    Assert.assertEquals(new String(messageAndMetadata.key()), "key2");
    Assert.assertEquals(new String(messageAndMetadata.message()), msg2);

    pusher.close();
}
Example #28
Source File: OldApiTopicConsumer.java From azeroth with Apache License 2.0
/**
 * Submit the message to the processing thread queue.
 *
 * @param message
 */
private void submitMessageToProcess(final String topicName,
                                    final MessageAndMetadata<String, Object> messageAndMeta,
                                    final DefaultMessage message) {
    defaultProcessExecutor.submit(new Runnable() {
        @Override
        public void run() {
            try {
                long start = logger.isDebugEnabled() ? System.currentTimeMillis() : 0;
                messageHandler.p2Process(message);
                if (logger.isDebugEnabled()) {
                    long useTime = System.currentTimeMillis() - start;
                    if (useTime > 1000)
                        logger.debug("received_topic_useTime [{}]process topic:{} use time {} ms",
                            processorName, topicName, useTime);
                }
                consumerContext.saveOffsetsAfterProcessed(messageAndMeta.topic(), messageAndMeta.partition(),
                    messageAndMeta.offset());
            } catch (Exception e) {
                boolean processed = messageHandler.onProcessError(message);
                if (processed == false) {
                    errorMessageProcessor.submit(message, messageHandler);
                }
                logger.error("received_topic_process_error [" + processorName + "]processMessage error,topic:"
                    + topicName, e);
            }
        }
    });
}
Example #29
Source File: AlertKafkaPublisherTest.java From eagle with Apache License 2.0
private static void consumeWithOutput(final List<String> outputMessages) {
    Thread t = new Thread(new Runnable() {
        @Override
        public void run() {
            Properties props = new Properties();
            props.put("group.id", "B");
            props.put("zookeeper.connect", "127.0.0.1:" + TEST_KAFKA_ZOOKEEPER_PORT);
            props.put("zookeeper.session.timeout.ms", "4000");
            props.put("zookeeper.sync.time.ms", "2000");
            props.put("auto.commit.interval.ms", "1000");
            props.put("auto.offset.reset", "smallest");

            ConsumerConnector jcc = null;
            try {
                ConsumerConfig ccfg = new ConsumerConfig(props);
                jcc = Consumer.createJavaConsumerConnector(ccfg);

                Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
                topicCountMap.put(TEST_TOPIC_NAME, 1);
                Map<String, List<KafkaStream<byte[], byte[]>>> topicMap = jcc.createMessageStreams(topicCountMap);
                KafkaStream<byte[], byte[]> cstrm = topicMap.get(TEST_TOPIC_NAME).get(0);
                for (MessageAndMetadata<byte[], byte[]> mm : cstrm) {
                    String message = new String(mm.message());
                    outputMessages.add(message);
                    try {
                        Thread.sleep(5000);
                    } catch (InterruptedException e) {
                    }
                }
            } finally {
                if (jcc != null) {
                    jcc.shutdown();
                }
            }
        }
    });
    t.start();
}
Example #30
Source File: KafkaConsumerThread.java From incubator-iotdb with Apache License 2.0
public void run() {
    for (MessageAndMetadata<String, String> consumerIterator : stream) {
        String uploadMessage = consumerIterator.message();
        logger.info(String.format("%s from partition[%d]: %s", Thread.currentThread().getName(),
            consumerIterator.partition(), uploadMessage));
        writeData(uploadMessage);
    }
}