org.apache.kafka.common.errors.WakeupException Java Examples
The following examples show how to use org.apache.kafka.common.errors.WakeupException. Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
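Nearly all of the snippets below follow the same idea: one thread blocks in KafkaConsumer.poll(), and another thread calls consumer.wakeup() to break out of that blocking call, which surfaces as a WakeupException in the polling thread. As a point of reference before the project-specific examples, here is a minimal, self-contained sketch of that pattern; the topic name, generic types, and poll timeout are placeholders rather than values taken from any of the projects below.

// Minimal sketch of the standard WakeupException shutdown pattern (placeholder topic/config).
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;

public class ShutdownAwareConsumer implements Runnable {

    private final AtomicBoolean closed = new AtomicBoolean(false);
    private final KafkaConsumer<String, String> consumer;

    public ShutdownAwareConsumer(Properties props) {
        this.consumer = new KafkaConsumer<>(props);
    }

    @Override
    public void run() {
        try {
            consumer.subscribe(Collections.singletonList("example-topic"));
            while (!closed.get()) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset=%d value=%s%n", record.offset(), record.value());
                }
            }
        } catch (WakeupException e) {
            // Expected during shutdown; rethrow only if no shutdown was actually requested.
            if (!closed.get()) {
                throw e;
            }
        } finally {
            consumer.close();
        }
    }

    // Called from another thread. wakeup() is documented as the one KafkaConsumer method
    // that is safe to call concurrently; it aborts a blocking poll() with a WakeupException.
    public void shutdown() {
        closed.set(true);
        consumer.wakeup();
    }
}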
Example #1
Source File: AGenericConsumerHandler2.java (from SO, BSD 2-Clause "Simplified" License)

/**
 * consumer handle process
 */
public void process() {
    try {
        subscribe(getTopicList());
        while (!closed.get()) {
            ConsumerRecords<K, V> records = getMessage();
            log.debug("records count: {}", records.count());
            if (records == null || records.isEmpty()) continue;
            handle(records);
        }
    } catch (WakeupException e) {
        log.error(e.getMessage());
        // Ignore exception if closing
        if (!closed.get()) throw e;
    } finally {
        this.close();
    }
}
Example #2
Source File: CountryPopulationConsumer.java (from tutorials, MIT License)

private void consume(Runnable beforePollingTask) {
    try {
        beforePollingTask.run();
        while (true) {
            ConsumerRecords<String, Integer> records = consumer.poll(Duration.ofMillis(1000));
            StreamSupport.stream(records.spliterator(), false)
                .map(record -> new CountryPopulation(record.key(), record.value()))
                .forEach(countryPopulationConsumer);
            consumer.commitSync();
        }
    } catch (WakeupException e) {
        logger.info("Shutting down...");
    } catch (RuntimeException ex) {
        exceptionConsumer.accept(ex);
    } finally {
        consumer.close();
    }
}
Example #3
Source File: PeriodicNotificationConsumer.java (from rya, Apache License 2.0)

@Override
public void run() {
    try {
        LOG.info("Configuring KafkaConsumer on thread: {} to subscribe to topic: {}", threadNumber, topic);
        consumer.subscribe(Arrays.asList(topic));
        while (!closed.get()) {
            final ConsumerRecords<String, CommandNotification> records = consumer.poll(10000);
            // Handle new records
            for (final ConsumerRecord<String, CommandNotification> record : records) {
                final CommandNotification notification = record.value();
                LOG.info("Thread {} is adding notification: {}", threadNumber, notification);
                coord.processNextCommandNotification(notification);
            }
        }
        LOG.info("Finished polling.");
    } catch (final WakeupException e) {
        // Ignore exception if closing
        if (!closed.get()) {
            throw e;
        }
    } finally {
        consumer.close();
    }
}
Example #4
Source File: StormKafkaSpout.java (from metron, Apache License 2.0)

@Override
public void close() {
    try {
        if (!isShutdown.get()) {
            super.close();
            isShutdown.set(true);
        }
    } catch (WakeupException we) {
        // see https://issues.apache.org/jira/browse/STORM-2184
        LOG.warn("You can generally ignore these, as per https://issues.apache.org/jira/browse/STORM-2184 -- {}", we.getMessage(), we);
    } catch (IllegalStateException ise) {
        if (ise.getMessage().contains("This consumer has already been closed")) {
            LOG.warn(ise.getMessage());
        } else {
            throw ise;
        }
    }
}
Example #5
Source File: AGenericConsumerHandler.java (from SO, BSD 2-Clause "Simplified" License)

/**
 * Runnable interface implement.<BR/>
 */
@Override
public void run() {
    log.debug("The handler:{} thread started.", id);
    try {
        subscribe(getTopicList());
        while (!closed.get()) {
            ConsumerRecords<String, String> records = getMessage();
            log.debug("records count: {}", records.count());
            if (records == null || records.isEmpty()) continue;
            handle(records);
        }
    } catch (WakeupException e) {
        log.error(e.getMessage());
        // Ignore exception if closing
        if (!closed.get()) throw e;
    } finally {
        this.close();
    }
    log.debug("The handler:{} thread ended.", id);
}
Example #6
Source File: ConsumerLoopGeneric.java (from OpenIoE, Apache License 2.0)

@Override
public void run() {
    try {
        this.consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList(topic + "." + version));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Long.MAX_VALUE);
            for (ConsumerRecord<String, String> record : records) {
                String json = new String(record.value());
                process(json);
            }
        }
    } catch (WakeupException e) {
        System.out.println("Consumer " + props.getProperty("client.id") + " from group " + props.getProperty("group.id")
            + " on topic " + topic + "." + version + " was waken up.");
    } finally {
        System.out.println("Consumer " + props.getProperty("client.id") + " from group " + props.getProperty("group.id")
            + " on topic " + topic + "." + version + " is being terminated.");
        consumer.close();
    }
}
Example #7
Source File: MessageConsumer.java (from beast, Apache License 2.0)

public Status consume() throws WakeupException {
    if (isClosed()) {
        return new FailureStatus(new RuntimeException("Message consumer was closed"));
    }
    Instant startTime = Instant.now();
    ConsumerRecords<byte[], byte[]> messages = kafkaConsumer.poll(timeoutMillis);
    statsClient.count("kafka.consumer.poll.messages", messages.count());
    statsClient.timeIt("kafka.consumer.consumption.time", startTime);
    if (messages.isEmpty()) {
        return SUCCESS_STATUS;
    }
    Instant pollTime = Instant.now();
    log.info("Pulled {} messages", messages.count());
    Status status = pushToSink(messages, pollTime);
    return status;
}
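Unlike most of the other examples, consume() here declares throws WakeupException and leaves shutdown handling to its caller. A sketch of what such a caller could look like is below; the MessageConsumer interface is a simplified stand-in rather than the beast project's actual class, and the loop mirrors what the ConsumerWorkerTest example further down this page verifies.

// Simplified caller for a consume() method that propagates WakeupException.
// The interface below is an assumed, minimal shape; it is not the real beast API.
import org.apache.kafka.common.errors.WakeupException;

interface MessageConsumer {
    void consume() throws WakeupException;
    void close();
}

public class ConsumerWorkerSketch implements Runnable {

    private final MessageConsumer messageConsumer;

    public ConsumerWorkerSketch(MessageConsumer messageConsumer) {
        this.messageConsumer = messageConsumer;
    }

    @Override
    public void run() {
        try {
            while (true) {
                messageConsumer.consume();
            }
        } catch (WakeupException e) {
            // wakeup() was invoked on the underlying KafkaConsumer: treat it as the stop signal.
        } finally {
            messageConsumer.close();
        }
    }
}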
Example #8
Source File: KafkaAdaptorConsumer.java (from pulsar-java-tutorial, Apache License 2.0)

@Override
public void run() {
    try {
        consumer.subscribe(topics);
        log.info("Consumer successfully subscribed to topics {}", topics);
        ConsumerRecords<Integer, String> records = consumer.poll(Long.MAX_VALUE);
        records.forEach(record -> {
            log.info("Received record with a key of {} and a value of {}", record.key(), record.value());
        });
    } catch (WakeupException e) {
        // Ignore
    } finally {
        consumer.commitSync();
        log.info("Consumer for topics {} temporarily closed", topics);
        this.run();
    }
}
Example #9
Source File: KafkaConsumerLoop.java (from pitchfork, Apache License 2.0)

@Override
public void run() {
    try {
        kafkaConsumer.subscribe(sourceTopics);
        while (true) {
            var records = kafkaConsumer.poll(ofMillis(pollDurationMs));
            for (ConsumerRecord<String, byte[]> record : records) {
                List<Span> spans = decoder.decodeList(record.value());
                spans.stream()
                    .filter(validator::isSpanValid)
                    .peek(span -> spansCounter.increment())
                    .forEach(fork::processSpan);
            }
        }
    } catch (WakeupException exception) {
        // ignore for shutdown
    } finally {
        kafkaConsumer.close();
    }
}
Example #10
Source File: MirusSourceTask.java (from mirus, BSD 3-Clause "New" or "Revised" License)

@Override
public List<SourceRecord> poll() {
    try {
        logger.trace("Calling poll");
        ConsumerRecords<byte[], byte[]> result = consumer.poll(consumerPollTimeoutMillis);
        logger.trace("Got {} records", result.count());
        if (!result.isEmpty()) {
            return sourceRecords(result);
        } else {
            return Collections.emptyList();
        }
    } catch (WakeupException e) {
        // Ignore exception iff shutting down thread.
        if (!shutDown.get()) throw e;
    }
    shutDownTask();
    return Collections.emptyList();
}
Example #11
Source File: SimulateResultService.java (from SkaETL, Apache License 2.0)

public List<SimulateData> readOutPut(String bootStrapServers, String maxRecords, String windowTime) {
    KafkaConsumer kafkaConsumer = kafkaUtils.kafkaConsumer("latest", bootStrapServers, "simulate");
    log.info("Subscribe Topic for {}", SIMULATE_OUTPUT);
    kafkaConsumer.subscribe(Arrays.asList(SIMULATE_OUTPUT), new Rebalancer());
    List<SimulateData> res = new ArrayList<>();
    long start = System.currentTimeMillis();
    try {
        while (checkWindow(start, Long.valueOf(windowTime), res.size(), Long.valueOf(maxRecords))) {
            ConsumerRecords<String, SimulateData> records = kafkaConsumer.poll(100);
            for (ConsumerRecord<String, SimulateData> record : records) {
                res.add(record.value());
            }
            log.info("Number item for read OutPut {}", res.size());
            kafkaConsumer.commitSync();
        }
    } catch (WakeupException e) {
        // Ignore exception if closing
        throw e;
    } catch (RuntimeException re) {
        log.error("RuntimeException {}", re);
    } finally {
        kafkaConsumer.close();
    }
    return res;
}
Example #12
Source File: KafkaService.java (from SkaETL, Apache License 2.0)

public ConsumerRecords<String, String> extractDataFromKafka(String topic, long duration, TimeUnit timeUnit) {
    long pollingTime = timeUnit.toMillis(duration);
    log.info("Capture data during {} ms on topic {}", pollingTime, topic);
    kafkaConsumer.subscribe(Arrays.asList(topic));
    try {
        return kafkaConsumer.poll(pollingTime);
    } catch (WakeupException e) {
        // Ignore exception if closing
        if (!closed.get()) throw e;
    } catch (RuntimeException re) {
        log.error("RuntimeException {}", re);
    } finally {
        if (closed.get()) {
            kafkaConsumer.close();
        }
        return null;
    }
}
Example #13
Source File: ConsumeKafkaRecord_1_0.java (from nifi, Apache License 2.0)

@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    final ConsumerPool pool = getConsumerPool(context);
    if (pool == null) {
        context.yield();
        return;
    }

    try (final ConsumerLease lease = pool.obtainConsumer(session, context)) {
        if (lease == null) {
            context.yield();
            return;
        }
        activeLeases.add(lease);
        try {
            while (this.isScheduled() && lease.continuePolling()) {
                lease.poll();
            }
            if (this.isScheduled() && !lease.commit()) {
                context.yield();
            }
        } catch (final WakeupException we) {
            getLogger().warn("Was interrupted while trying to communicate with Kafka with lease {}. "
                + "Will roll back session and discard any partially received data.", new Object[] {lease});
        } catch (final KafkaException kex) {
            getLogger().error("Exception while interacting with Kafka so will close the lease {} due to {}",
                new Object[]{lease, kex}, kex);
        } catch (final Throwable t) {
            getLogger().error("Exception while processing data from kafka so will close the lease {} due to {}",
                new Object[]{lease, t}, t);
        } finally {
            activeLeases.remove(lease);
        }
    }
}
Example #14
Source File: KafkaChannelConsumer.java (from flowing-retail-old, Apache License 2.0)

public void run() {
    Properties configProperties = new Properties();
    configProperties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    configProperties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
    configProperties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
    configProperties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    configProperties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
    // configProperties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
    // configProperties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "6000");
    // configProperties.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "500");
    // configProperties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); // Figure out where to start processing messages from

    kafkaConsumer = new KafkaConsumer<String, String>(configProperties);
    kafkaConsumer.subscribe(Arrays.asList(topicName));
    System.out.println("[" + groupId + "] Started consumer and subscribed to topic " + topicName);

    // Start processing messages
    try {
        while (true) {
            ConsumerRecords<String, String> records = kafkaConsumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                eventHandler.handleEvent(record.value());
            }
        }
    } catch (WakeupException ex) {
        System.out.println("Exception caught " + ex.getMessage());
    } finally {
        kafkaConsumer.close();
        System.out.println("After closing KafkaConsumer");
    }
}
Example #15
Source File: KafkaEventListener.java (from concursus, MIT License)

@Override
public void run() {
    try {
        kafkaConsumer.subscribe(topics);
        while (!closed.get()) {
            kafkaConsumer.poll(timeout).forEach(this::processRecord);
        }
    } catch (WakeupException e) {
        if (!closed.get()) throw e;
    } finally {
        kafkaConsumer.close();
    }
}
Example #16
Source File: ConsumerThread.java (from kafka-workers, Apache License 2.0)

private void commitSync() {
    Map<TopicPartition, OffsetAndMetadata> offsets = offsetsState.getOffsetsToCommit();
    logger.debug("committing offsets sync: {}", offsets);
    if (!offsets.isEmpty()) {
        try {
            consumer.commitSync(offsets);
        } catch (WakeupException e) {
            // this has to be repeated if consumer.wakeup() during thread shutdown hasn't woken up
            // any pending poll operation
            consumer.commitSync(offsets);
        }
    }
}
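The retry in the catch block above covers a subtle timing case: if consumer.wakeup() is called while no poll() is pending, the wakeup flag is raised on the consumer's next blocking call instead, which during shutdown can be this final synchronous commit. A rough sketch of the shutdown side that produces this situation is shown below; the class and field names are illustrative and not taken from kafka-workers.

// Illustrative shutdown helper: wakeup() aborts a blocking poll(), but if the consumer
// thread is between polls, the next blocking call (possibly the final commitSync())
// throws WakeupException instead, hence the single retry in the snippet above.
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.kafka.clients.consumer.KafkaConsumer;

public class ConsumerShutdownSketch {

    private final AtomicBoolean shutdownRequested = new AtomicBoolean(false);
    private final KafkaConsumer<String, String> consumer;
    private final Thread consumerThread;

    public ConsumerShutdownSketch(KafkaConsumer<String, String> consumer, Thread consumerThread) {
        this.consumer = consumer;
        this.consumerThread = consumerThread;
    }

    public void shutdown() throws InterruptedException {
        shutdownRequested.set(true); // assumed to be checked by the poll loop before each poll()
        consumer.wakeup();           // thread-safe; raises WakeupException in the polling thread
        consumerThread.join();       // wait for the loop to commit offsets and close the consumer
    }
}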
Example #17
Source File: KafkaStreamer.java (from ignite, Apache License 2.0)

/** {@inheritDoc} */
@Override
public Void call() {
    consumer.subscribe(topics);

    try {
        while (!stopped) {
            for (ConsumerRecord record : consumer.poll(timeout)) {
                try {
                    addMessage(record);
                } catch (Exception e) {
                    U.error(log, "Record is ignored due to an error [record = " + record + ']', e);
                }
            }
        }
    } catch (WakeupException we) {
        if (log.isInfoEnabled())
            log.info("Consumer is being stopped.");
    } catch (KafkaException ke) {
        log.error("Kafka error", ke);
    } finally {
        consumer.close();
    }

    return null;
}
Example #18
Source File: StormKafkaSpout.java (from metron, Apache License 2.0)

@Override
public void deactivate() {
    try {
        super.deactivate();
    } catch (WakeupException we) {
        // see https://issues.apache.org/jira/browse/STORM-2184
        LOG.warn("You can generally ignore these, as per https://issues.apache.org/jira/browse/STORM-2184 -- {}", we.getMessage(), we);
    } finally {
        isShutdown.set(true);
    }
}
Example #19
Source File: ConsumerWorkerTest.java (from beast, Apache License 2.0)

@Test
public void shouldStopConsumptionWhenWakeupExceptionIsThrown() throws InterruptedException {
    Worker worker = new ConsumerWorker("consumer", consumer, new WorkerState());
    doThrow(new WakeupException()).when(consumer).consume();

    new Thread(worker).start();

    Thread.sleep(100);
    verify(consumer).consume();
    verify(consumer).close();
}
Example #20
Source File: KafkaConsumerThreadTest.java (from flink, Apache License 2.0)

@Override
public long position(TopicPartition topicPartition) {
    if (!earlyWakeup) {
        return mockConsumerAssignmentAndPosition.get(topicPartition);
    } else {
        throw new WakeupException();
    }
}
Example #21
Source File: DefaultKafkaListener.java (from kafka-message-tool, MIT License)

private void fetch() {
    if (!canUseTopicConfigForListener()) {
        Logger.error("Could not start consumer. Topic config is invalid.");
        return;
    }

    final KafkaTopicConfig topicConfig = listenerConfig.getRelatedConfig();
    try {
        receivedMessagesCount = 0;
        receivedMessageLimit = Integer.parseInt(listenerConfig.getReceivedMsgLimitCount());
        tryFetch(topicConfig);
    } catch (WakeupException ignored) {
        Logger.trace("Closing consumer due to wakeup()");
        closeConsumer();
    } catch (Throwable t) {
        Logger.error("Exception for fetch()", t);
    } finally {
        if (isRunning.get()) {
            Logger.info(String.format("Consumer stopped (topic:%s, consumer group:%s)",
                topicConfig.getTopicName(), listenerConfig.getConsumerGroup()));
        }
        shouldBeRunning.set(false);
        isRunning.set(false);
    }
}
Example #22
Source File: ConsumeKafka_1_0.java (from nifi, Apache License 2.0)

@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    final ConsumerPool pool = getConsumerPool(context);
    if (pool == null) {
        context.yield();
        return;
    }

    try (final ConsumerLease lease = pool.obtainConsumer(session, context)) {
        if (lease == null) {
            context.yield();
            return;
        }
        activeLeases.add(lease);
        try {
            while (this.isScheduled() && lease.continuePolling()) {
                lease.poll();
            }
            if (this.isScheduled() && !lease.commit()) {
                context.yield();
            }
        } catch (final WakeupException we) {
            getLogger().warn("Was interrupted while trying to communicate with Kafka with lease {}. "
                + "Will roll back session and discard any partially received data.", new Object[] {lease});
        } catch (final KafkaException kex) {
            getLogger().error("Exception while interacting with Kafka so will close the lease {} due to {}",
                new Object[]{lease, kex}, kex);
        } catch (final Throwable t) {
            getLogger().error("Exception while processing data from kafka so will close the lease {} due to {}",
                new Object[]{lease, t}, t);
        } finally {
            activeLeases.remove(lease);
        }
    }
}
Example #23
Source File: ConsumeKafka_0_10.java (from nifi, Apache License 2.0)

@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    final ConsumerPool pool = getConsumerPool(context);
    if (pool == null) {
        context.yield();
        return;
    }

    try (final ConsumerLease lease = pool.obtainConsumer(session, context)) {
        if (lease == null) {
            context.yield();
            return;
        }
        activeLeases.add(lease);
        try {
            while (this.isScheduled() && lease.continuePolling()) {
                lease.poll();
            }
            if (this.isScheduled() && !lease.commit()) {
                context.yield();
            }
        } catch (final WakeupException we) {
            getLogger().warn("Was interrupted while trying to communicate with Kafka with lease {}. "
                + "Will roll back session and discard any partially received data.", new Object[] {lease});
        } catch (final KafkaException kex) {
            getLogger().error("Exception while interacting with Kafka so will close the lease {} due to {}",
                new Object[]{lease, kex}, kex);
        } catch (final Throwable t) {
            getLogger().error("Exception while processing data from kafka so will close the lease {} due to {}",
                new Object[]{lease, t}, t);
        } finally {
            activeLeases.remove(lease);
        }
    }
}
Example #24
Source File: ConsumeKafkaRecord_0_10.java (from nifi, Apache License 2.0)

@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    final ConsumerPool pool = getConsumerPool(context);
    if (pool == null) {
        context.yield();
        return;
    }

    try (final ConsumerLease lease = pool.obtainConsumer(session, context)) {
        if (lease == null) {
            context.yield();
            return;
        }
        activeLeases.add(lease);
        try {
            while (this.isScheduled() && lease.continuePolling()) {
                lease.poll();
            }
            if (this.isScheduled() && !lease.commit()) {
                context.yield();
            }
        } catch (final WakeupException we) {
            getLogger().warn("Was interrupted while trying to communicate with Kafka with lease {}. "
                + "Will roll back session and discard any partially received data.", new Object[] {lease});
        } catch (final KafkaException kex) {
            getLogger().error("Exception while interacting with Kafka so will close the lease {} due to {}",
                new Object[]{lease, kex}, kex);
        } catch (final Throwable t) {
            getLogger().error("Exception while processing data from kafka so will close the lease {} due to {}",
                new Object[]{lease, t}, t);
        } finally {
            activeLeases.remove(lease);
        }
    }
}
Example #25
Source File: ConsumeKafka.java (from nifi, Apache License 2.0)

@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    lastTriggeredTimestamp = System.currentTimeMillis();

    final ConsumerPool pool = getConsumerPool(context);
    if (pool == null) {
        context.yield();
        return;
    }

    try (final ConsumerLease lease = pool.obtainConsumer(session)) {
        if (lease == null) {
            context.yield();
            return;
        }
        activeLeases.add(lease);
        try {
            while (this.isScheduled() && lease.continuePolling()) {
                lease.poll();
            }
            if (this.isScheduled() && !lease.commit()) {
                context.yield();
            }
        } catch (final WakeupException we) {
            getLogger().warn("Was interrupted while trying to communicate with Kafka with lease {}. "
                + "Will roll back session and discard any partially received data.", new Object[] {lease});
        } catch (final KafkaException kex) {
            getLogger().error("Exception while interacting with Kafka so will close the lease {} due to {}",
                new Object[] {lease, kex}, kex);
        } catch (final Throwable t) {
            getLogger().error("Exception while processing data from kafka so will close the lease {} due to {}",
                new Object[] {lease, t}, t);
        } finally {
            activeLeases.remove(lease);
        }
    }
}
Example #26
Source File: ConsumeKafkaRecord_0_11.java (from nifi, Apache License 2.0)

@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    final ConsumerPool pool = getConsumerPool(context);
    if (pool == null) {
        context.yield();
        return;
    }

    try (final ConsumerLease lease = pool.obtainConsumer(session, context)) {
        if (lease == null) {
            context.yield();
            return;
        }
        activeLeases.add(lease);
        try {
            while (this.isScheduled() && lease.continuePolling()) {
                lease.poll();
            }
            if (this.isScheduled() && !lease.commit()) {
                context.yield();
            }
        } catch (final WakeupException we) {
            getLogger().warn("Was interrupted while trying to communicate with Kafka with lease {}. "
                + "Will roll back session and discard any partially received data.", new Object[] {lease});
        } catch (final KafkaException kex) {
            getLogger().error("Exception while interacting with Kafka so will close the lease {} due to {}",
                new Object[]{lease, kex}, kex);
        } catch (final Throwable t) {
            getLogger().error("Exception while processing data from kafka so will close the lease {} due to {}",
                new Object[]{lease, t}, t);
        } finally {
            activeLeases.remove(lease);
        }
    }
}
Example #27
Source File: ConsumeKafka_0_11.java (from nifi, Apache License 2.0)

@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    final ConsumerPool pool = getConsumerPool(context);
    if (pool == null) {
        context.yield();
        return;
    }

    try (final ConsumerLease lease = pool.obtainConsumer(session, context)) {
        if (lease == null) {
            context.yield();
            return;
        }
        activeLeases.add(lease);
        try {
            while (this.isScheduled() && lease.continuePolling()) {
                lease.poll();
            }
            if (this.isScheduled() && !lease.commit()) {
                context.yield();
            }
        } catch (final WakeupException we) {
            getLogger().warn("Was interrupted while trying to communicate with Kafka with lease {}. "
                + "Will roll back session and discard any partially received data.", new Object[] {lease});
        } catch (final KafkaException kex) {
            getLogger().error("Exception while interacting with Kafka so will close the lease {} due to {}",
                new Object[]{lease, kex}, kex);
        } catch (final Throwable t) {
            getLogger().error("Exception while processing data from kafka so will close the lease {} due to {}",
                new Object[]{lease, t}, t);
        } finally {
            activeLeases.remove(lease);
        }
    }
}
Example #28
Source File: ConsumeKafka_2_0.java (from nifi, Apache License 2.0)

@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    final ConsumerPool pool = getConsumerPool(context);
    if (pool == null) {
        context.yield();
        return;
    }

    try (final ConsumerLease lease = pool.obtainConsumer(session, context)) {
        if (lease == null) {
            context.yield();
            return;
        }
        activeLeases.add(lease);
        try {
            while (this.isScheduled() && lease.continuePolling()) {
                lease.poll();
            }
            if (this.isScheduled() && !lease.commit()) {
                context.yield();
            }
        } catch (final WakeupException we) {
            getLogger().warn("Was interrupted while trying to communicate with Kafka with lease {}. "
                + "Will roll back session and discard any partially received data.", new Object[] {lease});
        } catch (final KafkaException kex) {
            getLogger().error("Exception while interacting with Kafka so will close the lease {} due to {}",
                new Object[]{lease, kex}, kex);
        } catch (final Throwable t) {
            getLogger().error("Exception while processing data from kafka so will close the lease {} due to {}",
                new Object[]{lease, t}, t);
        } finally {
            activeLeases.remove(lease);
        }
    }
}