Java Code Examples for org.apache.kafka.clients.consumer.ConsumerRebalanceListener#onPartitionsAssigned()
The following examples show how to use org.apache.kafka.clients.consumer.ConsumerRebalanceListener#onPartitionsAssigned().
You can go to the original project or source file by following the links above each example.
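Before the project-specific tests below, here is a minimal, self-contained sketch of the callback itself; it is not taken from any of the example projects. onPartitionsAssigned() is invoked on the polling thread after a rebalance completes, with the set of partitions the consumer now owns. The broker address, group id, and topic name ("localhost:9092", "example-group", "example-topic") are placeholders chosen for illustration.

import java.time.Duration;
import java.util.Collection;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class RebalanceAwareConsumer {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092");  // placeholder broker address
    props.put("group.id", "example-group");            // placeholder consumer group
    props.put("key.deserializer",
        "org.apache.kafka.common.serialization.StringDeserializer");
    props.put("value.deserializer",
        "org.apache.kafka.common.serialization.StringDeserializer");

    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    // Register a listener so the application learns which partitions it owns
    // after each rebalance; "example-topic" is a placeholder topic name.
    consumer.subscribe(Collections.singleton("example-topic"), new ConsumerRebalanceListener() {
      @Override
      public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // Commit processed offsets before ownership changes (a common pattern).
        consumer.commitSync();
      }

      @Override
      public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        // Called after the rebalance completes, on the polling thread,
        // with the partitions now assigned to this consumer.
        System.out.println("Assigned: " + partitions);
      }
    });

    while (true) {
      consumer.poll(Duration.ofMillis(500)).forEach(
          record -> System.out.println(record.key() + " -> " + record.value()));
    }
  }
}

The tests in the examples below exercise the same hook in a different way: they capture the listener that a Kafka spout passes to subscribe() and invoke onPartitionsAssigned() directly to simulate an assignment.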
Example 1
Source File: KafkaSpoutTest.java From incubator-heron with Apache License 2.0 | 5 votes |
@Test
public void nextTuple() {
  when(kafkaConsumerFactory.create()).thenReturn(consumer);
  ConsumerRecords<String, byte[]> consumerRecords = new ConsumerRecords<>(
      Collections.singletonMap(new TopicPartition(DUMMY_TOPIC_NAME, 0),
          Collections.singletonList(
              new ConsumerRecord<>(DUMMY_TOPIC_NAME, 0, 0, "key", new byte[]{0xF}))));
  when(consumer.poll(any(Duration.class))).thenReturn(consumerRecords);
  doReturn(Collections.singletonMap(
      new MetricName("name", "group", "description", Collections.singletonMap("name", "value")),
      metric)).when(consumer).metrics();
  when(metric.metricValue()).thenReturn("sample value");

  kafkaSpout.open(Collections.singletonMap(Config.TOPOLOGY_RELIABILITY_MODE, ATMOST_ONCE.name()),
      topologyContext, collector);
  // capture the rebalance listener the spout registered via subscribe()
  // and invoke it directly to simulate a partition assignment
  verify(consumer).subscribe(eq(Collections.singleton(DUMMY_TOPIC_NAME)),
      consumerRebalanceListenerArgumentCaptor.capture());
  ConsumerRebalanceListener consumerRebalanceListener =
      consumerRebalanceListenerArgumentCaptor.getValue();
  TopicPartition topicPartition = new TopicPartition(DUMMY_TOPIC_NAME, 0);
  consumerRebalanceListener.onPartitionsAssigned(Collections.singleton(topicPartition));

  // after the first nextTuple(), verify the async commit and the metric registration
  kafkaSpout.nextTuple();
  verify(consumer).commitAsync();
  verify(topologyContext).registerMetric(eq("name-group-name-value"),
      kafkaMetricDecoratorArgumentCaptor.capture(), eq(60));
  assertEquals("sample value", kafkaMetricDecoratorArgumentCaptor.getValue().getValueAndReset());

  // the second nextTuple() emits the buffered record to the default stream
  kafkaSpout.nextTuple();
  verify(collector).emit(eq("default"), listArgumentCaptor.capture());
  assertEquals("key", listArgumentCaptor.getValue().get(0));
  assertArrayEquals(new byte[]{0xF}, (byte[]) listArgumentCaptor.getValue().get(1));
}
Example 2
Source File: KafkaSpoutTest.java From incubator-heron with Apache License 2.0 | 5 votes |
@Test
public void ack() {
  when(kafkaConsumerFactory.create()).thenReturn(consumer);
  TopicPartition topicPartition = new TopicPartition(DUMMY_TOPIC_NAME, 0);
  List<ConsumerRecord<String, byte[]>> recordList = new ArrayList<>();
  byte[] randomBytes = new byte[1];
  for (int i = 0; i < 5; i++) {
    RANDOM.nextBytes(randomBytes);
    recordList.add(new ConsumerRecord<>(DUMMY_TOPIC_NAME, 0, i, "key",
        Arrays.copyOf(randomBytes, randomBytes.length)));
  }
  ConsumerRecords<String, byte[]> consumerRecords =
      new ConsumerRecords<>(Collections.singletonMap(topicPartition, recordList));
  when(consumer.poll(any(Duration.class))).thenReturn(consumerRecords);

  kafkaSpout.open(Collections.singletonMap(Config.TOPOLOGY_RELIABILITY_MODE, ATLEAST_ONCE.name()),
      topologyContext, collector);
  verify(consumer).subscribe(eq(Collections.singleton(DUMMY_TOPIC_NAME)),
      consumerRebalanceListenerArgumentCaptor.capture());
  ConsumerRebalanceListener consumerRebalanceListener =
      consumerRebalanceListenerArgumentCaptor.getValue();
  consumerRebalanceListener.onPartitionsAssigned(Collections.singleton(topicPartition));

  // poll the topic
  kafkaSpout.nextTuple();
  // emit all of the five records
  for (int i = 0; i < 5; i++) {
    kafkaSpout.nextTuple();
  }
  // acks came in out of order and the third record is not acknowledged
  kafkaSpout.ack(new KafkaSpout.ConsumerRecordMessageId(topicPartition, 4));
  kafkaSpout.ack(new KafkaSpout.ConsumerRecordMessageId(topicPartition, 0));
  kafkaSpout.ack(new KafkaSpout.ConsumerRecordMessageId(topicPartition, 1));
  kafkaSpout.ack(new KafkaSpout.ConsumerRecordMessageId(topicPartition, 3));
  // commit and poll
  kafkaSpout.nextTuple();
  verify(consumer).commitAsync(
      Collections.singletonMap(topicPartition, new OffsetAndMetadata(2)), null);
}
Example 3
Source File: KafkaSpoutTest.java From incubator-heron with Apache License 2.0 | 5 votes |
@Test
public void fail() {
  when(kafkaConsumerFactory.create()).thenReturn(consumer);
  TopicPartition topicPartition = new TopicPartition(DUMMY_TOPIC_NAME, 0);
  List<ConsumerRecord<String, byte[]>> recordList = new ArrayList<>();
  byte[] randomBytes = new byte[1];
  for (int i = 0; i < 5; i++) {
    RANDOM.nextBytes(randomBytes);
    recordList.add(new ConsumerRecord<>(DUMMY_TOPIC_NAME, 0, i, "key",
        Arrays.copyOf(randomBytes, randomBytes.length)));
  }
  ConsumerRecords<String, byte[]> consumerRecords =
      new ConsumerRecords<>(Collections.singletonMap(topicPartition, recordList));
  when(consumer.poll(any(Duration.class))).thenReturn(consumerRecords);

  kafkaSpout.open(Collections.singletonMap(Config.TOPOLOGY_RELIABILITY_MODE, ATLEAST_ONCE.name()),
      topologyContext, collector);
  verify(consumer).subscribe(eq(Collections.singleton(DUMMY_TOPIC_NAME)),
      consumerRebalanceListenerArgumentCaptor.capture());
  ConsumerRebalanceListener consumerRebalanceListener =
      consumerRebalanceListenerArgumentCaptor.getValue();
  consumerRebalanceListener.onPartitionsAssigned(Collections.singleton(topicPartition));

  // poll the topic
  kafkaSpout.nextTuple();
  // emit all of the five records
  for (int i = 0; i < 5; i++) {
    kafkaSpout.nextTuple();
  }
  // acks came in out of order; the second and third records fail
  kafkaSpout.ack(new KafkaSpout.ConsumerRecordMessageId(topicPartition, 4));
  kafkaSpout.ack(new KafkaSpout.ConsumerRecordMessageId(topicPartition, 0));
  kafkaSpout.fail(new KafkaSpout.ConsumerRecordMessageId(topicPartition, 1));
  kafkaSpout.ack(new KafkaSpout.ConsumerRecordMessageId(topicPartition, 3));
  kafkaSpout.fail(new KafkaSpout.ConsumerRecordMessageId(topicPartition, 2));
  // commit and poll
  kafkaSpout.nextTuple();
  verify(consumer).seek(topicPartition, 1);
  verify(consumer).commitAsync(
      Collections.singletonMap(topicPartition, new OffsetAndMetadata(1)), null);
}
Example 4
Source File: KafkaSpoutTest.java From incubator-heron with Apache License 2.0 | 5 votes |
@Test
public void activate() {
  when(kafkaConsumerFactory.create()).thenReturn(consumer);
  kafkaSpout.open(Collections.singletonMap(Config.TOPOLOGY_RELIABILITY_MODE, ATMOST_ONCE.name()),
      topologyContext, collector);
  verify(consumer).subscribe(eq(Collections.singleton(DUMMY_TOPIC_NAME)),
      consumerRebalanceListenerArgumentCaptor.capture());
  ConsumerRebalanceListener consumerRebalanceListener =
      consumerRebalanceListenerArgumentCaptor.getValue();
  TopicPartition topicPartition = new TopicPartition(DUMMY_TOPIC_NAME, 0);
  consumerRebalanceListener.onPartitionsAssigned(Collections.singleton(topicPartition));

  kafkaSpout.activate();
  verify(consumer).resume(Collections.singleton(topicPartition));
}
Example 5
Source File: KafkaSpoutTest.java From incubator-heron with Apache License 2.0 | 5 votes |
@Test
public void deactivate() {
  when(kafkaConsumerFactory.create()).thenReturn(consumer);
  kafkaSpout.open(Collections.singletonMap(Config.TOPOLOGY_RELIABILITY_MODE, ATMOST_ONCE.name()),
      topologyContext, collector);
  verify(consumer).subscribe(eq(Collections.singleton(DUMMY_TOPIC_NAME)),
      consumerRebalanceListenerArgumentCaptor.capture());
  ConsumerRebalanceListener consumerRebalanceListener =
      consumerRebalanceListenerArgumentCaptor.getValue();
  TopicPartition topicPartition = new TopicPartition(DUMMY_TOPIC_NAME, 0);
  consumerRebalanceListener.onPartitionsAssigned(Collections.singleton(topicPartition));

  kafkaSpout.deactivate();
  verify(consumer).pause(Collections.singleton(topicPartition));
}