Python confluent_kafka.KafkaError._PARTITION_EOF Examples
The following are 13 code examples of confluent_kafka.KafkaError._PARTITION_EOF.
You can vote up the ones you like or vote down the ones you don't like, and go to the
original project or source file by following the links above each example. You may also
want to check out all available functions/classes of the module confluent_kafka.KafkaError,
or try the search function.
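
Before the examples, here is a minimal sketch of the pattern nearly all of them follow: poll in a loop, treat _PARTITION_EOF as an informational event to skip, and raise on any other error. The broker address, group id, and topic name below are placeholders, not taken from any of the projects listed here.

from confluent_kafka import Consumer, KafkaError, KafkaException

consumer = Consumer({
    'bootstrap.servers': 'localhost:9092',  # placeholder broker address
    'group.id': 'example-group',            # placeholder consumer group
})
consumer.subscribe(['example-topic'])       # placeholder topic

try:
    while True:
        msg = consumer.poll(timeout=1.0)
        if msg is None:
            continue  # no message arrived within the timeout
        if msg.error():
            if msg.error().code() == KafkaError._PARTITION_EOF:
                continue  # reached the end of a partition; not a real error
            raise KafkaException(msg.error())
        print(msg.value().decode('utf-8'))
finally:
    consumer.close()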
Example #1
Source File: worker.py From incubator-spot with Apache License 2.0 | 6 votes |
def start(self):
    self._logger.info("Listening topic:{0}".format(self.kafka_consumer.Topic))
    consumer = self.kafka_consumer.start()
    try:
        while True:
            message = consumer.poll(timeout=1.0)
            if message is None:
                continue
            if not message.error():
                self._new_file(message.value().decode('utf-8'))
            elif message.error().code() == KafkaError._PARTITION_EOF:
                # End of partition: benign, keep polling.
                continue
            else:
                raise KafkaException(message.error())
    except KeyboardInterrupt:
        sys.stderr.write('%% Aborted by user\n')

    consumer.close()
Example #2
Source File: _stream_thread.py From winton-kafka-streams with Apache License 2.0 | 6 votes |
def poll_requests(self, poll_timeout):
    """Get the next batch of records."""
    # The current python kafka client gives us messages one by one,
    # but for better throughput we want to process many records at once.
    # Keep polling until we get no more records out.
    records = []
    record = self.consumer.poll(poll_timeout)
    while record is not None:
        if not record.error():
            self.log.debug('Received message at offset: %d', record.offset())
            records.append(record)
            record = self.consumer.poll(0.)
        elif record.error().code() == KafkaError._PARTITION_EOF:
            # End of partition: see whether another record is queued.
            record = self.consumer.poll(0.)
        else:
            self.log.error('Record error received: %s', record.error())
            break  # stop batching rather than spin on the same errored record
    return records
Example #3
Source File: KafkaConsumer.py From ChaosTestingCode with MIT License | 6 votes |
def start_consuming(self):
    self.subscribe()
    try:
        while not self.terminate:
            msg = self.consumer.poll(2.0)
            if msg is None:
                continue
            if msg.error():
                if msg.error().code() == KafkaError._PARTITION_EOF:
                    continue
                else:
                    console_out(msg.error(), self.actor)
                    break
            self.msg_monitor.append(msg.value(), self.consumer_id, self.actor)

        console_out("Consumption terminated", self.actor)
        self.consumer.close()
    except Exception as e:
        console_out("Consumption terminated due to error", self.actor)
        template = "An exception of type {0} occurred. Arguments:{1!r}"
        message = template.format(type(e).__name__, e.args)
        console_out(message, self.actor)
Example #4
Source File: consumer.py From kafka-compose with MIT License | 6 votes |
def main():
    batch = set()
    try:
        while True:
            msg = consumer.poll(timeout=0.1)
            if msg is None:
                print('No message')
            elif not msg.error():
                print('Received message: {}'.format(msg.value()))
                batch = process_message(msg, config['CONSUMER_BATCH_SIZE'])
            elif msg.error().code() == KafkaError._PARTITION_EOF:
                print('End of partition reached {}, {}'.format(msg.topic(), msg.partition()))
            else:
                print('Error occurred: {}'.format(msg.error().str()))
    except KeyboardInterrupt:
        pass
    except SerializerError as e:
        print('Message deserialization failed for {msg}: {e}'.format(msg=msg, e=e))
    finally:
        persist_messages(batch)
        consumer.close()
Example #5
Source File: kafka.py From tributary with Apache License 2.0 | 5 votes |
def __init__(self, servers, group, topics, json=False, wrap=False, interval=1):
    c = Consumer({
        'bootstrap.servers': servers,
        'group.id': group,
        'default.topic.config': {
            'auto.offset.reset': 'smallest'
        }
    })

    if not isinstance(topics, list):
        topics = [topics]
    c.subscribe(topics)

    async def _listen(consumer=c, json=json, wrap=wrap, interval=interval):
        while True:
            msg = consumer.poll(interval)

            if msg is None:
                continue
            if msg.error():
                if msg.error().code() == KafkaError._PARTITION_EOF:
                    continue
                else:
                    print(msg.error())
                    break

            msg = msg.value().decode('utf-8')

            if not msg:
                break

            if json:
                msg = JSON.loads(msg)

            if wrap:
                msg = [msg]

            yield msg

    super().__init__(foo=_listen)
    self._name = 'Kafka'
Example #6
Source File: kafka_streaming_client.py From agogosml with MIT License | 5 votes |
def handle_kafka_error(self, msg):  # pragma: no cover
    """Handle an error in kafka."""
    if msg.error().code() == KafkaError._PARTITION_EOF:
        # End of partition event
        self.logger.info('%% %s [%d] reached end at offset %d\n',
                         msg.topic(), msg.partition(), msg.offset())
    else:
        # Error
        raise KafkaException(msg.error())
Example #7
Source File: test_kafka_msg_handler.py From koku with GNU Affero General Public License v3.0 | 5 votes |
def test_listen_for_msg_loop(self, mock_consumer, mock_listen):
    """Test that the message loop only calls listen for messages on valid messages."""
    msg_list = [
        None,
        MockMessage(offset=1),
        MockMessage(offset=2, error=MockError(KafkaError._PARTITION_EOF)),
        MockMessage(offset=3, error=MockError(KafkaError._MSG_TIMED_OUT)),
    ]
    mock_consumer.return_value = MockKafkaConsumer(msg_list)
    with patch("itertools.count", side_effect=[[0, 1, 2, 3]]):  # mocking the infinite loop
        with self.assertLogs(logger="masu.external.kafka_msg_handler", level=logging.WARNING):
            msg_handler.listen_for_messages_loop()
    mock_listen.assert_called_once()
Example #8
Source File: ws.py From kryptoflow with GNU General Public License v3.0 | 5 votes |
def on_next(self, value):
    if not value.error():
        print(value)
        self.ws.emit('price_event', value)  # , namespace='live')
    elif value.error().code() != KafkaError._PARTITION_EOF:
        running = False
Example #9
Source File: kafka_source_extractor.py From amundsendatabuilder with Apache License 2.0 | 5 votes |
def consume(self):
    # type: () -> Any
    """
    Consume messages from a given list of topics
    :return:
    """
    records = []
    start = datetime.now()
    try:
        while True:
            msg = self.consumer.poll(timeout=self.consumer_poll_timeout)
            end = datetime.now()

            # Stop once the consumer exceeds the total consume timeout
            if (end - start) > timedelta(seconds=self.consumer_total_timeout):
                break

            if msg is None:
                continue
            if msg.error():
                # Hit the EOF of a partition
                if msg.error().code() == KafkaError._PARTITION_EOF:
                    continue
                else:
                    raise KafkaException(msg.error())
            else:
                records.append(msg.value())
    except Exception as e:
        LOGGER.exception(e)
    finally:
        return records
Example #10
Source File: test_transactions.py From confluent-kafka-python with Apache License 2.0 | 5 votes |
def read_all_msgs(consumer):
    """
    Consumes all messages in the consumer assignment.

    This method assumes the consumer has not already read all of the
    messages available in a partition.

    :param consumer:
    :returns: total messages read
    :rtype: int
    """
    msg_cnt = 0
    eof = {}
    print("=== Draining {} ===".format(consumer.assignment()))
    while True:
        msg = consumer.poll(timeout=1.0)
        if msg is None:
            continue

        topic, partition = msg.topic(), msg.partition()
        if msg.error():
            if msg.error().code() == KafkaError._PARTITION_EOF:
                eof[(topic, partition)] = True
                if len(eof) == len(consumer.assignment()):
                    break
            continue

        eof.pop((topic, partition), None)
        msg_cnt += 1

    return msg_cnt
Example #11
Source File: confluent.py From kafka-influxdb with Apache License 2.0 | 5 votes |
def _handle_error(msg):
    if not msg.error():
        return
    # Error or event
    if msg.error().code() == KafkaError._PARTITION_EOF:
        # End of partition event
        logging.info('%s [%d] reached end at offset %d with key %s\n',
                     msg.topic(), msg.partition(), msg.offset(), str(msg.key()))
    else:
        raise EncoderError(msg.error())
Example #12
Source File: input.py From tributary with Apache License 2.0 | 4 votes |
def kafka(callback, servers, group, topics, json=False, wrap=False, interval=1):
    '''Connect to kafka server and pipe results through the callback

    Args:
        callback (callable): function to call on websocket data
        servers (list): kafka bootstrap servers
        group (str): kafka group id
        topics (list): list of kafka topics to connect to
        json (bool): load websocket data as json
        wrap (bool): wrap result in a list
        interval (int): socketio wait interval
    '''
    c = Consumer({
        'bootstrap.servers': servers,
        'group.id': group,
        'default.topic.config': {
            'auto.offset.reset': 'smallest'
        }
    })

    if not isinstance(topics, list):
        topics = [topics]
    c.subscribe(topics)

    def _listen(consumer, json, wrap, interval):
        while True:
            msg = consumer.poll(interval)

            if msg is None:
                continue
            if msg.error():
                if msg.error().code() == KafkaError._PARTITION_EOF:
                    continue
                else:
                    break

            msg = msg.value().decode('utf-8')

            if not msg:
                break

            if json:
                msg = load_json(msg)

            if wrap:
                msg = [msg]

            callback(msg)
Example #13
Source File: consumer.py From openwhisk-package-kafka with Apache License 2.0 | 4 votes |
def __pollForMessages(self):
    messages = []
    totalPayloadSize = 0
    batchMessages = True

    if self.__shouldRun():
        while batchMessages and (self.secondsSinceLastPoll() < 2):
            if self.queuedMessage is not None:
                logging.debug('[{}] Handling message left over from last batch.'.format(self.trigger))
                message = self.queuedMessage
                self.queuedMessage = None
            else:
                message = self.consumer.poll(1.0)

            if self.secondsSinceLastPoll() < 0:
                logging.info('[{}] Completed first poll'.format(self.trigger))

            if message is not None:
                if not message.error():
                    logging.debug("Consumed message: {}".format(str(message)))
                    messageSize = self.__sizeMessage(message)

                    if totalPayloadSize + messageSize > payload_limit:
                        if len(messages) == 0:
                            logging.error('[{}] Single message at offset {} exceeds payload size limit. Skipping this message!'.format(self.trigger, message.offset()))
                            # synchronous commit ('async' was renamed 'asynchronous' in newer confluent-kafka releases)
                            self.consumer.commit(message=message, asynchronous=False)
                        else:
                            logging.debug('[{}] Message at offset {} would cause payload to exceed the size limit. Queueing up for the next round...'.format(self.trigger, message.offset()))
                            self.queuedMessage = message

                        # in any case, we need to stop batching now
                        batchMessages = False
                    else:
                        totalPayloadSize += messageSize
                        messages.append(message)
                elif message.error().code() != KafkaError._PARTITION_EOF:
                    logging.error('[{}] Error polling: {}'.format(self.trigger, message.error()))
                    batchMessages = False
                else:
                    logging.debug('[{}] No more messages. Stopping batch op.'.format(self.trigger))
                    batchMessages = False
            else:
                logging.debug('[{}] message was None. Stopping batch op.'.format(self.trigger))
                batchMessages = False

    logging.debug('[{}] Completed poll'.format(self.trigger))

    if len(messages) > 0:
        logging.info("[{}] Found {} messages with a total size of {} bytes".format(self.trigger, len(messages), totalPayloadSize))

    self.updateLastPoll()
    return messages

# decide whether or not to disable a trigger based on the status code returned
# from firing the trigger. Specifically, disable on all 4xx status codes
# except 408 (gateway timeout), 409 (document update conflict), and 429 (throttle)
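
A closing caveat that applies to every example above: since librdkafka v1.0 (and the confluent-kafka Python client built on it), end-of-partition events are disabled by default, so the _PARTITION_EOF branches shown here only ever run if the consumer opts in. A minimal sketch, with placeholder broker address and group id:

from confluent_kafka import Consumer

consumer = Consumer({
    'bootstrap.servers': 'localhost:9092',  # placeholder broker address
    'group.id': 'example-group',            # placeholder consumer group
    'enable.partition.eof': True,           # without this, newer clients never report _PARTITION_EOF
})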