Java Code Examples for kafka.javaapi.consumer.SimpleConsumer#close()
The following examples show how to use kafka.javaapi.consumer.SimpleConsumer#close().
You can go to the original project or source file by following the links above each example.
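All of the snippets below follow the same basic life cycle: construct a SimpleConsumer against a broker, issue one or more requests, and call close() to release the underlying socket, usually in a finally block so the connection is not leaked on error. Here is a minimal sketch of that pattern; the host, port, timeout, buffer size, and client id are placeholder values, not taken from any example below.

import kafka.javaapi.consumer.SimpleConsumer;

public class CloseExample {
    public static void main(String[] args) {
        // Placeholder connection settings; adjust for your broker.
        SimpleConsumer consumer = new SimpleConsumer(
                "localhost", 9092, // broker host and port
                100000,            // socket timeout (ms)
                64 * 1024,         // receive buffer size (bytes)
                "exampleClient");  // client id
        try {
            // ... issue metadata, offset, or fetch requests here ...
        } finally {
            // Release the underlying network connection; the consumer
            // cannot be reused after close().
            consumer.close();
        }
    }
}

Once close() has been called the consumer cannot be reused, which is why the examples below either create a fresh consumer per request or cache consumers and close them in bulk on shutdown.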
Example 1
Source File: KafkaLatestOffsetFetcher.java From eagle with Apache License 2.0
public Map<Integer, Long> fetch(String topic, int partitionCount) {
    Map<Integer, PartitionMetadata> metadatas = fetchPartitionMetadata(brokerList, port, topic, partitionCount);
    Map<Integer, Long> ret = new HashMap<>();
    for (int partition = 0; partition < partitionCount; partition++) {
        PartitionMetadata metadata = metadatas.get(partition);
        if (metadata == null || metadata.leader() == null) {
            ret.put(partition, -1L);
            //throw new RuntimeException("Can't find Leader for Topic and Partition. Exiting");
            // Skip to the next partition; dereferencing metadata.leader() below
            // would throw a NullPointerException.
            continue;
        }
        String leadBroker = metadata.leader().host();
        String clientName = "Client_" + topic + "_" + partition;
        SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);
        long latestOffset = getLatestOffset(consumer, topic, partition, clientName);
        consumer.close();
        ret.put(partition, latestOffset);
    }
    return ret;
}
Example 2
Source File: LegacyKafkaClient.java From secor with Apache License 2.0
@Override
public int getNumPartitions(String topic) {
    SimpleConsumer consumer = null;
    try {
        consumer = createConsumer(
            mConfig.getKafkaSeedBrokerHost(),
            mConfig.getKafkaSeedBrokerPort(),
            "partitionLookup");
        List<String> topics = new ArrayList<String>();
        topics.add(topic);
        TopicMetadataRequest request = new TopicMetadataRequest(topics);
        TopicMetadataResponse response = consumer.send(request);
        if (response.topicsMetadata().size() != 1) {
            throw new RuntimeException("Expected one metadata for topic " + topic
                + " found " + response.topicsMetadata().size());
        }
        TopicMetadata topicMetadata = response.topicsMetadata().get(0);
        return topicMetadata.partitionsMetadata().size();
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
}
Example 3
Source File: Kafka08ConsumerClient.java From incubator-gobblin with Apache License 2.0
private List<TopicMetadata> fetchTopicMetadataFromBroker(String broker, String... selectedTopics) {
    log.info(String.format("Fetching topic metadata from broker %s", broker));
    SimpleConsumer consumer = null;
    try {
        consumer = getSimpleConsumer(broker);
        for (int i = 0; i < this.fetchTopicRetries; i++) {
            try {
                return consumer.send(new TopicMetadataRequest(Arrays.asList(selectedTopics))).topicsMetadata();
            } catch (Exception e) {
                log.warn(String.format("Fetching topic metadata from broker %s has failed %d times.", broker, i + 1), e);
                try {
                    Thread.sleep((long) ((i + Math.random()) * 1000));
                } catch (InterruptedException e2) {
                    log.warn("Caught InterruptedException: " + e2);
                }
            }
        }
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
    return null;
}
Example 4
Source File: KafkaWrapper.java From incubator-gobblin with Apache License 2.0
@Override
public void close() throws IOException {
    int numOfConsumersNotClosed = 0;
    for (SimpleConsumer consumer : this.activeConsumers.values()) {
        if (consumer != null) {
            try {
                consumer.close();
            } catch (Exception e) {
                // Pass the exception to the logger so the failure cause is not swallowed.
                LOG.warn(String.format("Failed to close Kafka Consumer %s:%d", consumer.host(), consumer.port()), e);
                numOfConsumersNotClosed++;
            }
        }
    }
    this.activeConsumers.clear();
    if (numOfConsumersNotClosed > 0) {
        throw new IOException(numOfConsumersNotClosed + " consumer(s) failed to close.");
    }
}
Example 5
Source File: KafkaWrapper.java From incubator-gobblin with Apache License 2.0
private List<TopicMetadata> fetchTopicMetadataFromBroker(String broker, String... selectedTopics) {
    LOG.info(String.format("Fetching topic metadata from broker %s", broker));
    SimpleConsumer consumer = null;
    try {
        consumer = getSimpleConsumer(broker);
        for (int i = 0; i < this.fetchTopicRetries; i++) {
            try {
                return consumer.send(new TopicMetadataRequest(Arrays.asList(selectedTopics))).topicsMetadata();
            } catch (Exception e) {
                LOG.warn(String.format("Fetching topic metadata from broker %s has failed %d times.", broker, i + 1), e);
                try {
                    Thread.sleep((long) ((i + Math.random()) * 1000));
                } catch (InterruptedException e2) {
                    LOG.warn("Caught InterruptedException: " + e2);
                }
            }
        }
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
    return null;
}
Example 6
Source File: KafkaRequest.java From HiveKa with Apache License 2.0
@Override
public long getLastOffset(long time) {
    SimpleConsumer consumer = new SimpleConsumer(uri.getHost(), uri.getPort(), 60000,
        1024 * 1024, "hadoop-etl");
    Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetInfo =
        new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    offsetInfo.put(new TopicAndPartition(topic, partition),
        new PartitionOffsetRequestInfo(time, 1));
    OffsetResponse response = consumer.getOffsetsBefore(new OffsetRequest(offsetInfo,
        kafka.api.OffsetRequest.CurrentVersion(), "hadoop-etl"));
    long[] endOffset = response.offsets(topic, partition);
    consumer.close();
    if (endOffset.length == 0) {
        log.info("The exception below is thrown because the offset request returned zero offsets for topic: "
            + topic + " and partition " + partition);
    }
    this.latestOffset = endOffset[0];
    return endOffset[0];
}
Example 7
Source File: KafkaRequest.java From HiveKa with Apache License 2.0
@Override
public long getEarliestOffset() {
    if (this.earliestOffset == -2 && uri != null) {
        // TODO: Make the hardcoded parameters configurable
        SimpleConsumer consumer = new SimpleConsumer(uri.getHost(), uri.getPort(), 60000,
            1024 * 1024, "hadoop-etl");
        Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetInfo =
            new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        offsetInfo.put(new TopicAndPartition(topic, partition),
            new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.EarliestTime(), 1));
        OffsetResponse response = consumer.getOffsetsBefore(new OffsetRequest(offsetInfo,
            kafka.api.OffsetRequest.CurrentVersion(), "hadoop-etl"));
        long[] endOffset = response.offsets(topic, partition);
        consumer.close();
        this.earliestOffset = endOffset[0];
        return endOffset[0];
    } else {
        return this.earliestOffset;
    }
}
Example 8
Source File: KafkaComponent.java From metron with Apache License 2.0
public List<byte[]> readMessages(String topic) {
    SimpleConsumer consumer = new SimpleConsumer("localhost", 6667, 100000, 64 * 1024, "consumer");
    FetchRequest req = new FetchRequestBuilder()
        .clientId("consumer")
        .addFetch(topic, 0, 0, 100000)
        .build();
    FetchResponse fetchResponse = consumer.fetch(req);
    Iterator<MessageAndOffset> results = fetchResponse.messageSet(topic, 0).iterator();
    List<byte[]> messages = new ArrayList<>();
    while (results.hasNext()) {
        ByteBuffer payload = results.next().message().payload();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        messages.add(bytes);
    }
    consumer.close();
    return messages;
}
Example 9
Source File: MetricSystemTest.java From eagle with Apache License 2.0
@Test
public void testMerticSystemWithKafkaSink() throws IOException {
    JVMMetricSource jvmMetricSource = mockMetricRegistry();

    // set up kafka
    KafkaEmbedded kafkaEmbedded = new KafkaEmbedded();
    makeSureTopic(kafkaEmbedded.getZkConnectionString());

    // set up the metric system
    File file = genKafkaSinkConfig(kafkaEmbedded.getBrokerConnectionString());
    Config config = ConfigFactory.parseFile(file);
    MetricSystem system = MetricSystem.load(config);
    system.register(jvmMetricSource);
    system.start();
    system.report();

    SimpleConsumer consumer = assertMsgFromKafka(kafkaEmbedded);
    system.stop();
    consumer.close();
    kafkaEmbedded.shutdown();
}
Example 10
Source File: LegacyKafkaClient.java From secor with Apache License 2.0
@Override
public Message getLastMessage(TopicPartition topicPartition) throws TException {
    SimpleConsumer consumer = null;
    try {
        consumer = createConsumer(topicPartition);
        if (consumer == null) {
            return null;
        }
        long lastOffset = findLastOffset(topicPartition, consumer);
        if (lastOffset < 1) {
            return null;
        }
        return getMessage(topicPartition, lastOffset, consumer);
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
}
Example 11
Source File: KafkaInputFormat.java From HiveKa with Apache License 2.0
/**
 * Gets the metadata from Kafka, trying each broker in random order
 * until one responds.
 *
 * @param conf the job configuration containing the kafka.brokers list
 * @return the topic metadata reported by the first reachable broker
 */
public List<TopicMetadata> getKafkaMetadata(JobConf conf) {
    ArrayList<String> metaRequestTopics = new ArrayList<String>();
    String brokerString = getKafkaBrokers(conf);
    if (brokerString.isEmpty())
        throw new InvalidParameterException("kafka.brokers must contain at least one node");
    List<String> brokers = Arrays.asList(brokerString.split("\\s*,\\s*"));
    Collections.shuffle(brokers);
    boolean fetchMetaDataSucceeded = false;
    int i = 0;
    List<TopicMetadata> topicMetadataList = null;
    Exception savedException = null;
    while (i < brokers.size() && !fetchMetaDataSucceeded) {
        log.info("Trying to connect to broker: " + brokers.get(i));
        SimpleConsumer consumer = createConsumer(conf, brokers.get(i));
        log.info(String.format("Fetching metadata from broker %s with client id %s for %d topic(s) %s",
            brokers.get(i), consumer.clientId(), metaRequestTopics.size(), metaRequestTopics));
        try {
            topicMetadataList = consumer.send(new TopicMetadataRequest(metaRequestTopics)).topicsMetadata();
            fetchMetaDataSucceeded = true;
        } catch (Exception e) {
            savedException = e;
            log.warn(String.format("Fetching topic metadata with client id %s for topics [%s] from broker [%s] failed",
                consumer.clientId(), metaRequestTopics, brokers.get(i)), e);
        } finally {
            consumer.close();
            i++;
        }
    }
    if (!fetchMetaDataSucceeded) {
        throw new RuntimeException("Failed to obtain metadata!", savedException);
    }
    return topicMetadataList;
}
Example 12
Source File: ZkConsumerCommand.java From jeesuite-libs with Apache License 2.0
/**
 * Gets the log size (latest offset) of the specified topic and partition.
 *
 * @param stat the topic/partition info, updated in place
 */
public void getTopicPartitionLogSize(TopicPartitionInfo stat) {
    BrokerEndPoint leader = findLeader(stat.getTopic(), stat.getPartition()).leader();
    SimpleConsumer consumer = getConsumerClient(leader.host(), leader.port());
    try {
        long logsize = getLastOffset(consumer, stat.getTopic(), stat.getPartition(),
            kafka.api.OffsetRequest.LatestTime());
        stat.setLogSize(logsize);
    } finally {
        consumer.close();
    }
}
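The getLastOffset(...) helper called above (and in Examples 6 and 15) is not shown in these snippets. A plausible sketch, assuming the standard kafka.javaapi offset-request pattern; the body is an assumption for illustration, not the project's actual code:

// Hypothetical sketch of a getLastOffset helper, following the common
// SimpleConsumer offset-request pattern; not taken from the project above.
public static long getLastOffset(SimpleConsumer consumer, String topic, int partition, long whichTime) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
        new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
        requestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
    kafka.javaapi.OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
        return -1L; // let the caller decide how to handle the error
    }
    long[] offsets = response.offsets(topic, partition);
    return offsets[0];
}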
Example 13
Source File: SimpleKafkaConsumer.java From twill with Apache License 2.0
/**
 * Creates a RemovalListener that will close the SimpleConsumer on cache removal.
 */
private RemovalListener<BrokerInfo, SimpleConsumer> createRemovalListener() {
    return new RemovalListener<BrokerInfo, SimpleConsumer>() {
        @Override
        public void onRemoval(RemovalNotification<BrokerInfo, SimpleConsumer> notification) {
            SimpleConsumer consumer = notification.getValue();
            if (consumer != null) {
                consumer.close();
            }
        }
    };
}
Example 14
Source File: OffsetMonitor.java From uReplicator with Apache License 2.0
public void stop() throws InterruptedException {
    refreshExecutor.awaitTermination(1000, TimeUnit.MILLISECONDS);
    cronExecutor.awaitTermination(1000, TimeUnit.MILLISECONDS);
    for (SimpleConsumer consumer : brokerConsumer.values()) {
        consumer.close();
    }
    logger.info("OffsetMonitor closed");
}
Example 15
Source File: ZkConsumerCommand.java From azeroth with Apache License 2.0
/**
 * Gets the log size (latest offset) of the specified topic and partition.
 *
 * @param stat the topic/partition info, updated in place
 */
public void getTopicPartitionLogSize(TopicPartitionInfo stat) {
    BrokerEndPoint leader = findLeader(stat.getTopic(), stat.getPartition()).leader();
    SimpleConsumer consumer = getConsumerClient(leader.host(), leader.port());
    try {
        long logsize = getLastOffset(consumer, stat.getTopic(), stat.getPartition(),
            kafka.api.OffsetRequest.LatestTime());
        stat.setLogSize(logsize);
    } finally {
        consumer.close();
    }
}
Example 16
Source File: KafkaSimpleConsumer.java From julongchain with Apache License 2.0
/**
 * Closes the consumer and releases its resources.
 *
 * @param consumer the consumer to close; may be null
 */
private static void closeSimpleConsumer(SimpleConsumer consumer) {
    if (consumer != null) {
        try {
            consumer.close();
        } catch (Exception e) {
            // nothing to do; failures on close are ignored
        }
    }
}
Example 17
Source File: LegacyKafkaClient.java From secor with Apache License 2.0
private Message getMessage(TopicPartition topicPartition, long offset, SimpleConsumer consumer) {
    LOG.debug("fetching message topic {} partition {} offset {}",
        topicPartition.getTopic(), topicPartition.getPartition(), offset);
    final int MAX_MESSAGE_SIZE_BYTES = mConfig.getMaxMessageSizeBytes();
    final String clientName = getClientName(topicPartition);
    kafka.api.FetchRequest request = new FetchRequestBuilder().clientId(clientName)
        .addFetch(topicPartition.getTopic(), topicPartition.getPartition(), offset,
            MAX_MESSAGE_SIZE_BYTES)
        .build();
    FetchResponse response = consumer.fetch(request);
    if (response.hasError()) {
        consumer.close();
        int errorCode = response.errorCode(topicPartition.getTopic(), topicPartition.getPartition());
        if (errorCode == Errors.OFFSET_OUT_OF_RANGE.code()) {
            throw new MessageDoesNotExistException();
        } else {
            throw new RuntimeException("Error fetching offset data. Reason: " + errorCode);
        }
    }
    MessageAndOffset messageAndOffset = response.messageSet(
        topicPartition.getTopic(), topicPartition.getPartition()).iterator().next();
    byte[] keyBytes = null;
    if (messageAndOffset.message().hasKey()) {
        ByteBuffer key = messageAndOffset.message().key();
        keyBytes = new byte[key.limit()];
        key.get(keyBytes);
    }
    byte[] payloadBytes = null;
    if (!messageAndOffset.message().isNull()) {
        ByteBuffer payload = messageAndOffset.message().payload();
        payloadBytes = new byte[payload.limit()];
        payload.get(payloadBytes);
    }
    long timestamp = (mConfig.useKafkaTimestamp())
        ? mKafkaMessageTimestampFactory.getKafkaMessageTimestamp().getTimestamp(messageAndOffset)
        : 0L;
    return new Message(topicPartition.getTopic(), topicPartition.getPartition(),
        messageAndOffset.offset(), keyBytes, payloadBytes, timestamp, null);
}
Example 18
Source File: StaticPartitionConnections.java From storm-kafka-0.8-plus with Apache License 2.0
public void close() {
    for (SimpleConsumer consumer : _kafka.values()) {
        consumer.close();
    }
}
Example 19
Source File: LowLevelConsumerExample.java From pulsar with Apache License 2.0
private static void consumeMessage(Arguments arguments) {
    Properties properties = new Properties();
    properties.put(SimpleConsumer.HTTP_SERVICE_URL, arguments.httpServiceUrl);
    SimpleConsumer consumer = new SimpleConsumer(arguments.serviceUrl, 0, 0, 0, "clientId", properties);

    long readOffset = kafka.api.OffsetRequest.EarliestTime();
    kafka.api.FetchRequest fReq = new FetchRequestBuilder().clientId("c1")
        .addFetch(arguments.topicName, arguments.partitionIndex, readOffset, 100000).build();
    FetchResponse fetchResponse = consumer.fetch(fReq);

    TestDecoder decoder = new TestDecoder();
    int count = 0;

    while (count < arguments.totalMessages || arguments.totalMessages == -1) {
        // 1. Read from topic without subscription/consumer-group name.
        for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(arguments.topicName,
                arguments.partitionIndex)) {
            MessageId msgIdOffset = (messageAndOffset instanceof PulsarMsgAndOffset)
                ? ((PulsarMsgAndOffset) messageAndOffset).getFullOffset()
                : null;
            long currentOffset = messageAndOffset.offset();
            if (currentOffset < readOffset) {
                continue;
            }

            ByteBuffer payload = messageAndOffset.message().payload();
            byte[] bytes = new byte[payload.limit()];
            payload.get(bytes);
            Tweet tweet = decoder.fromBytes(bytes);
            log.info("Received tweet: {}-{}", tweet.userName, tweet.message);
            count++;

            TopicAndPartition topicPartition = new TopicAndPartition(arguments.topicName,
                arguments.partitionIndex);
            OffsetMetadataAndError offsetError = new OffsetMetadataAndError(msgIdOffset, null, (short) 0);
            Map<TopicAndPartition, OffsetMetadataAndError> requestInfo =
                Collections.singletonMap(topicPartition, offsetError);
            // 2. Commit offset for a given topic and subscription-name/consumer-name.
            OffsetCommitRequest offsetReq = new OffsetCommitRequest(arguments.groupName, requestInfo,
                (short) -1, 0, "c1");
            consumer.commitOffsets(offsetReq);
        }
    }

    consumer.close();
}
Example 20
Source File: MessageService.java From kafka-monitor with Apache License 2.0
public List<Message> getMesage(String topicName, int partitionID, int offset, int count) {
    Topic topic = kafkaService.getTopic(topicName);
    Partition partition = topic.getPartition(partitionID);
    Broker broker = kafkaService.getBrokerById(partition.getLeader().getId());
    SimpleConsumer consumer = new SimpleConsumer(broker.getHost(), broker.getPort(), 10000, 10000, "");

    FetchRequestBuilder requestBuilder = new FetchRequestBuilder()
        .clientId("kafkaMonitor")
        .maxWait(5000)
        .minBytes(1);

    List<Message> messageList = new ArrayList<>(count);
    long currentOffset = offset;
    while (messageList.size() < count) {
        kafka.api.FetchRequest request =
            requestBuilder.addFetch(topicName, partitionID, currentOffset, 1024 * 1024).build();
        kafka.javaapi.FetchResponse response = consumer.fetch(request);
        ByteBufferMessageSet messageSet = response.messageSet(topicName, partitionID);
        if (messageSet.validBytes() <= 0) break;

        int oldSize = messageList.size();
        StreamSupport.stream(messageSet.spliterator(), false)
            .limit(count - messageList.size())
            .map(MessageAndOffset::message)
            .map((msg) -> {
                Message mmsg = new Message();
                if (msg.hasKey()) {
                    mmsg.setKey(readString(msg.key()));
                }
                if (!msg.isNull()) {
                    mmsg.setMessage(readString(msg.payload()));
                }
                mmsg.setValid(msg.isValid());
                mmsg.setCompressionCodec(msg.compressionCodec().name());
                mmsg.setChecksum(msg.checksum());
                return mmsg;
            }).forEach(messageList::add);
        currentOffset += messageList.size() - oldSize;
    }

    consumer.close();
    return messageList;
}
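The readString(...) helper used above is not part of the snippet. A plausible sketch, assuming it simply decodes the buffer contents (the UTF-8 charset choice is an assumption):

// Hypothetical sketch of the readString helper: copies the remaining
// bytes of a ByteBuffer and decodes them as UTF-8.
private static String readString(ByteBuffer buffer) {
    byte[] bytes = new byte[buffer.limit()];
    buffer.get(bytes);
    return new String(bytes, java.nio.charset.StandardCharsets.UTF_8);
}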