org.apache.kafka.clients.consumer.MockConsumer Java Examples
The following examples show how to use org.apache.kafka.clients.consumer.MockConsumer. Each example is taken from an open-source project; the source file, project, and license are noted above the code.
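Before the project examples, here is a minimal, self-contained sketch of the workflow most of them share: create a MockConsumer, assign a partition, seed beginning offsets, hand-feed records, and poll. It assumes a reasonably recent kafka-clients version (2.0+ for poll(Duration)); the class name MockConsumerSketch and the topic demo_topic are illustrative and do not come from any project below.

import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

public class MockConsumerSketch {
  public static void main(String[] args) {
    // MockConsumer implements the Consumer interface entirely in memory; no broker is needed.
    MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);

    // Assign a partition directly; with no broker there is no real group rebalance.
    TopicPartition tp = new TopicPartition("demo_topic", 0);
    consumer.assign(Collections.singletonList(tp));

    // Seed beginning offsets so the EARLIEST reset strategy has a position to seek to.
    Map<TopicPartition, Long> beginningOffsets = new HashMap<>();
    beginningOffsets.put(tp, 0L);
    consumer.updateBeginningOffsets(beginningOffsets);

    // Hand-feed a record; it is delivered by the next poll().
    consumer.addRecord(new ConsumerRecord<>("demo_topic", 0, 0L, "key", "value"));

    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
    records.forEach(r -> System.out.println(r.key() + " -> " + r.value()));
  }
}

The examples below combine the same ingredients in different test harnesses; the Vert.x tests additionally use schedulePollTask and rebalance to simulate poll-time work and partition assignment events.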
Example #1
Source File: MockKafkaClientFactory.java From kafka-pubsub-emulator with Apache License 2.0
@Override
public Consumer<String, ByteBuffer> createConsumer(String subscription) {
  MockConsumer<String, ByteBuffer> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  if (!createdConsumers.containsKey(subscription)) {
    createdConsumers.put(subscription, new ArrayList<>());
  }
  createdConsumers.get(subscription).add(consumer);
  MockConsumerConfiguration configuration = consumerConfigurations.get(subscription);
  if (configuration != null) {
    consumer.updatePartitions(configuration.topic, configuration.partitionInfoList);
    consumer.updateBeginningOffsets(configuration.startOffsets);
    consumer.updateEndOffsets(configuration.endOffsets);
  }
  return consumer;
}
Example #2
Source File: ConsumerMockTestBase.java From vertx-kafka-client with Apache License 2.0
@Test
public void testConsumeWithHeader(TestContext ctx) {
  MockConsumer<String, String> mock = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  KafkaReadStream<String, String> consumer = createConsumer(vertx, mock);
  Async doneLatch = ctx.async();
  consumer.handler(record -> {
    ctx.assertEquals("the_topic", record.topic());
    ctx.assertEquals(0, record.partition());
    ctx.assertEquals("abc", record.key());
    ctx.assertEquals("def", record.value());
    Header[] headers = record.headers().toArray();
    ctx.assertEquals(1, headers.length);
    Header header = headers[0];
    ctx.assertEquals("header_key", header.key());
    ctx.assertEquals("header_value", new String(header.value()));
    consumer.close(v -> doneLatch.complete());
  });
  consumer.subscribe(Collections.singleton("the_topic"), v -> {
    mock.schedulePollTask(() -> {
      mock.rebalance(Collections.singletonList(new TopicPartition("the_topic", 0)));
      mock.addRecord(new ConsumerRecord<>("the_topic", 0, 0L, 0L, TimestampType.NO_TIMESTAMP_TYPE, 0L, 0, 0, "abc", "def",
          new RecordHeaders(Collections.singletonList(new RecordHeader("header_key", "header_value".getBytes())))));
      mock.seek(new TopicPartition("the_topic", 0), 0L);
    });
  });
}
Example #3
Source File: ConsumerMockTestBase.java From vertx-kafka-client with Apache License 2.0
@Test
public void testConsume(TestContext ctx) throws Exception {
  MockConsumer<String, String> mock = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  KafkaReadStream<String, String> consumer = createConsumer(vertx, mock);
  Async doneLatch = ctx.async();
  consumer.handler(record -> {
    ctx.assertEquals("the_topic", record.topic());
    ctx.assertEquals(0, record.partition());
    ctx.assertEquals("abc", record.key());
    ctx.assertEquals("def", record.value());
    consumer.close(v -> doneLatch.complete());
  });
  consumer.subscribe(Collections.singleton("the_topic"), v -> {
    mock.schedulePollTask(() -> {
      mock.rebalance(Collections.singletonList(new TopicPartition("the_topic", 0)));
      mock.addRecord(new ConsumerRecord<>("the_topic", 0, 0L, "abc", "def"));
      mock.seek(new TopicPartition("the_topic", 0), 0L);
    });
  });
}
Example #4
Source File: SourcePartitionValidatorTest.java From mirus with BSD 3-Clause "New" or "Revised" License
@Test
public void shouldApplyTopicRenameWhenCheckingHealth() {
  MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  List<PartitionInfo> partitionInfoList =
      Arrays.asList(
          new PartitionInfo("replace1", 0, null, null, null),
          new PartitionInfo("replace1", 1, null, null, null));
  consumer.updatePartitions("replace1", partitionInfoList);
  SourcePartitionValidator sourcePartitionHealthChecker =
      new SourcePartitionValidator(
          consumer,
          SourcePartitionValidator.MatchingStrategy.TOPIC,
          t -> t.equals("topic1") ? "replace1" : t);
  assertThat(sourcePartitionHealthChecker.isHealthy(new TopicPartition("topic1", 0)), is(true));
  assertThat(sourcePartitionHealthChecker.isHealthy(new TopicPartition("topic1", 2)), is(true));
  assertThat(sourcePartitionHealthChecker.isHealthy(new TopicPartition("topic2", 0)), is(false));
}
Example #5
Source File: SubscriptionManagerTest.java From kafka-pubsub-emulator with Apache License 2.0
private void generateTestRecordsForConsumers(
    int partitions, int recordsPerPartition, List<Header> headers) {
  List<MockConsumer<String, ByteBuffer>> consumers =
      kafkaClientFactory.getConsumersForSubscription(SUBSCRIPTION.getName());
  TestHelpers.generateConsumerRecords(KAFKA_TOPIC_NAME, partitions, recordsPerPartition, headers)
      .forEach(cr -> consumers.get(cr.partition() % consumers.size()).addRecord(cr));
}
Example #6
Source File: Kafka09ConsumerClientTest.java From incubator-gobblin with Apache License 2.0
@Test
public void testConsume() throws Exception {
  Config testConfig = ConfigFactory.parseMap(ImmutableMap.of(ConfigurationKeys.KAFKA_BROKERS, "test"));
  MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.NONE);
  consumer.assign(Arrays.asList(new TopicPartition("test_topic", 0)));

  HashMap<TopicPartition, Long> beginningOffsets = new HashMap<>();
  beginningOffsets.put(new TopicPartition("test_topic", 0), 0L);
  consumer.updateBeginningOffsets(beginningOffsets);

  ConsumerRecord<String, String> record0 = new ConsumerRecord<>("test_topic", 0, 0L, "key", "value0");
  ConsumerRecord<String, String> record1 = new ConsumerRecord<>("test_topic", 0, 1L, "key", "value1");
  ConsumerRecord<String, String> record2 = new ConsumerRecord<>("test_topic", 0, 2L, "key", "value2");
  consumer.addRecord(record0);
  consumer.addRecord(record1);
  consumer.addRecord(record2);

  try (Kafka09ConsumerClient<String, String> kafka09Client = new Kafka09ConsumerClient<>(testConfig, consumer)) {
    // Consume from 0 offset
    Set<KafkaConsumerRecord> consumedRecords =
        Sets.newHashSet(kafka09Client.consume(
            new KafkaPartition.Builder().withId(0).withTopicName("test_topic").build(), 0L, 100L));
    Set<Kafka09ConsumerRecord<String, String>> expected =
        ImmutableSet.<Kafka09ConsumerRecord<String, String>>of(
            new Kafka09ConsumerRecord<>(record0),
            new Kafka09ConsumerRecord<>(record1),
            new Kafka09ConsumerRecord<>(record2));
    Assert.assertEquals(consumedRecords, expected);
  }
}
Example #7
Source File: KafkaTransportsTest.java From baleen with Apache License 2.0
@Test
public void testKafkaTransportCanRecieve() throws UIMAException, IOException {
  BaleenCollectionReader kafkaTransportReceiver = createReciever();

  MockKafkaResource mockKafkaResource =
      (MockKafkaResource)
          kafkaTransportReceiver
              .getUimaContext()
              .getResourceObject(SharedKafkaResource.RESOURCE_KEY);
  MockConsumer<String, String> consumer = mockKafkaResource.getMockConsumer();
  consumer.assign(Arrays.asList(new TopicPartition(KafkaTransportReceiver.PARAM_TOPIC_DEFAULT, 0)));
  HashMap<TopicPartition, Long> beginningOffsets = new HashMap<>();
  beginningOffsets.put(new TopicPartition(KafkaTransportReceiver.PARAM_TOPIC_DEFAULT, 0), 0L);
  consumer.updateBeginningOffsets(beginningOffsets);

  consumer.addRecord(
      new ConsumerRecord<String, String>(
          KafkaTransportReceiver.PARAM_TOPIC_DEFAULT,
          0,
          0L,
          "mykey",
          JCasSerializationTester.TEST_JSON));

  JCasSerializationTester tester = new JCasSerializationTester();
  kafkaTransportReceiver.getNext(tester.getOut());
  tester.assertCompleteMatch();
  kafkaTransportReceiver.destroy();
}
Example #8
Source File: KafkaReadStreamMockTest.java From vertx-kafka-client with Apache License 2.0
private MockConsumer<String, String> createMockConsumer() {
  MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  Map<org.apache.kafka.common.TopicPartition, Long> beginningOffsets = new HashMap<>();
  beginningOffsets.put(new org.apache.kafka.common.TopicPartition(TOPIC, 0), 0L);
  consumer.updateBeginningOffsets(beginningOffsets);
  return consumer;
}
Example #9
Source File: ConsumerMockTestBase.java From vertx-kafka-client with Apache License 2.0
@Test
public void testBatch(TestContext ctx) throws Exception {
  int num = 50;
  MockConsumer<String, String> mock = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  KafkaReadStream<String, String> consumer = createConsumer(vertx, mock);
  Async doneLatch = ctx.async();
  AtomicInteger count = new AtomicInteger();
  consumer.handler(record -> {
    int val = count.getAndIncrement();
    if (val < num) {
      ctx.assertEquals("the_topic", record.topic());
      ctx.assertEquals(0, record.partition());
      ctx.assertEquals("key-" + val, record.key());
      ctx.assertEquals("value-" + val, record.value());
      if (val == num - 1) {
        consumer.close(v -> doneLatch.complete());
      }
    }
  });
  consumer.subscribe(Collections.singleton("the_topic"), v -> {
    mock.schedulePollTask(() -> {
      mock.rebalance(Collections.singletonList(new TopicPartition("the_topic", 0)));
      mock.seek(new TopicPartition("the_topic", 0), 0);
      for (int i = 0; i < num; i++) {
        mock.addRecord(new ConsumerRecord<>("the_topic", 0, i, "key-" + i, "value-" + i));
      }
    });
  });
}
Example #10
Source File: MirusSourceTaskTest.java From mirus with BSD 3-Clause "New" or "Revised" License
@Before
public void setUp() {
  mockConsumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  mockConsumer.updatePartitions(
      TOPIC,
      Arrays.asList(
          new PartitionInfo(TOPIC, 0, null, null, null),
          new PartitionInfo(TOPIC, 1, null, null, null)));
  mirusSourceTask = new MirusSourceTask(consumerProperties -> mockConsumer);

  // Always return offset = 0
  SourceTaskContext context =
      new SourceTaskContext() {
        @Override
        public Map<String, String> configs() {
          return null;
        }

        @Override
        public OffsetStorageReader offsetStorageReader() {
          return new OffsetStorageReader() {
            @Override
            public <T> Map<String, Object> offset(Map<String, T> partition) {
              return new HashMap<>(MirusSourceTask.offsetMap(0L));
            }

            @Override
            public <T> Map<Map<String, T>, Map<String, Object>> offsets(
                Collection<Map<String, T>> partitions) {
              return partitions.stream().collect(Collectors.toMap(p -> p, this::offset));
            }
          };
        }
      };
  mirusSourceTask.initialize(context);
  mirusSourceTask.start(mockTaskProperties());
}
Example #11
Source File: SourcePartitionValidatorTest.java From mirus with BSD 3-Clause "New" or "Revised" License
@Before
public void setUp() {
  this.mockConsumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  List<PartitionInfo> partitionInfoList =
      Arrays.asList(
          new PartitionInfo("topic1", 0, null, null, null),
          new PartitionInfo("topic1", 1, null, null, null));
  mockConsumer.updatePartitions("topic1", partitionInfoList);
}
Example #12
Source File: KafkaMonitorTest.java From mirus with BSD 3-Clause "New" or "Revised" License
@Test
public void shouldContinueRunningWhenExceptionEncountered() throws InterruptedException {
  Map<String, String> properties = getBaseProperties();
  SourceConfig config = new SourceConfig(properties);
  TaskConfigBuilder taskConfigBuilder = new TaskConfigBuilder(new RoundRobinTaskAssignor(), config);

  // Require two thrown exceptions to ensure that the KafkaMonitor run loop executes more than
  // once
  CountDownLatch exceptionThrownLatch = new CountDownLatch(2);
  MockConsumer<byte[], byte[]> consumer =
      new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public Map<String, List<PartitionInfo>> listTopics() {
          exceptionThrownLatch.countDown();
          throw new TimeoutException("KABOOM!");
        }
      };
  kafkaMonitor =
      new KafkaMonitor(
          mock(ConnectorContext.class), config, consumer, mockDestinationConsumer, taskConfigBuilder);
  Thread monitorThread = new Thread(kafkaMonitor);
  monitorThread.start();
  exceptionThrownLatch.await(2, TimeUnit.SECONDS);
  monitorThread.join(1);

  assertThat(monitorThread.getState(), not(State.TERMINATED));

  kafkaMonitor.stop();
  monitorThread.interrupt();
  monitorThread.join(5000);
}
Example #13
Source File: KafkaMonitorTest.java From mirus with BSD 3-Clause "New" or "Revised" License
private MockConsumer<byte[], byte[]> mockDestinationConsumer() {
  // Topic 5 is NOT present in destination
  MockConsumer<byte[], byte[]> mockConsumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  updateMockPartitions(mockConsumer, "topic1", 2);
  updateMockPartitions(mockConsumer, "topic2", 1);
  updateMockPartitions(mockConsumer, "topic3", 1);
  updateMockPartitions(mockConsumer, "topic4", 1);
  updateMockPartitions(mockConsumer, "reroute.incoming", 1);
  return mockConsumer;
}
Example #14
Source File: KafkaMonitorTest.java From mirus with BSD 3-Clause "New" or "Revised" License
private MockConsumer<byte[], byte[]> mockSourceConsumer() {
  MockConsumer<byte[], byte[]> mockConsumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  updateMockPartitions(mockConsumer, "topic1", 2);
  updateMockPartitions(mockConsumer, "topic2", 1);
  updateMockPartitions(mockConsumer, "topic3", 1);
  updateMockPartitions(mockConsumer, "topic4", 1);
  updateMockPartitions(mockConsumer, "topic5", 1);
  updateMockPartitions(mockConsumer, "reroute.outgoing", 1);
  return mockConsumer;
}
Example #15
Source File: KafkaMonitorTest.java From mirus with BSD 3-Clause "New" or "Revised" License
private void updateMockPartitions(
    MockConsumer<byte[], byte[]> mockConsumer, String topicName, int numPartitions) {
  List<PartitionInfo> partitionInfoList = new ArrayList<>();
  for (int i = 0; i < numPartitions; i++) {
    partitionInfoList.add(new PartitionInfo(topicName, i, null, null, null));
  }
  mockConsumer.updatePartitions(topicName, partitionInfoList);
}
Example #16
Source File: KafkaMonitorTest.java From mirus with BSD 3-Clause "New" or "Revised" License
@Test
public void shouldApplyTopicRenameTransforms() {
  Map<String, String> properties = getBaseProperties();
  properties.put(SourceConfigDefinition.TOPICS_REGEX.getKey(), "reroute.*");
  properties.put("transforms", "reroute");
  properties.put("transforms.reroute.type", "org.apache.kafka.connect.transforms.RegexRouter");
  properties.put("transforms.reroute.regex", "^reroute\\.outgoing$");
  properties.put("transforms.reroute.replacement", "reroute.incoming");
  SourceConfig config = new SourceConfig(properties);

  MockConsumer<byte[], byte[]> mockSource = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  updateMockPartitions(mockSource, "reroute.outgoing", 1);
  MockConsumer<byte[], byte[]> mockDest = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  updateMockPartitions(mockDest, "reroute.incoming", 1);

  TaskConfigBuilder taskConfigBuilder = new TaskConfigBuilder(new RoundRobinTaskAssignor(), config);
  KafkaMonitor monitor =
      new KafkaMonitor(mock(ConnectorContext.class), config, mockSource, mockDest, taskConfigBuilder);
  monitor.partitionsChanged();
  List<Map<String, String>> result = monitor.taskConfigs(3);

  List<TopicPartition> partitions = assignedTopicPartitionsFromTaskConfigs(result);
  assertThat(partitions, contains(new TopicPartition("reroute.outgoing", 0)));
}
Example #17
Source File: KafkaReadStreamMockTest.java From vertx-kafka-client with Apache License 2.0
private void sendNextBatch(MockConsumer<String, String> consumer) {
  for (int i = 0; i < SEND_BATCH && recordsMock.size() > 0; i++) {
    consumer.addRecord(recordsMock.pop());
  }
}
Example #18
Source File: KafkaReadStreamMockTest.java From vertx-kafka-client with Apache License 2.0
@Test
public void shouldNotLoseMessages(TestContext ctx) {
  Vertx vertx = Vertx.vertx();
  Async done = ctx.async();
  initRecords();
  MockConsumer<String, String> consumer = createMockConsumer();
  KafkaReadStream<String, String> readStream = KafkaReadStream.create(vertx, consumer);
  KafkaConsumer<String, String> consumerVertx = new KafkaConsumerImpl<>(readStream);

  AtomicLong partitionOffset = new AtomicLong(-1);
  consumerVertx.handler(r -> {
    long offset = r.offset();
    partitionOffset.addAndGet(1);
    ctx.assertEquals(partitionOffset.get(), offset);

    if (offset == TOTAL_MESSAGES - 1) {
      consumerVertx.close();
      done.complete();
    } else {
      if (timer != null) {
        vertx.cancelTimer(timer);
      }
      timer = vertx.setTimer(5, t -> {
        consumerVertx.pause();
        vertx.getOrCreateContext().runOnContext(t1 -> {
          consumerVertx.commit();
          consumerVertx.resume();
          sendNextBatch(consumer); // sends two batches of messages
          vertx.getOrCreateContext().runOnContext(t2 -> sendNextBatch(consumer));
        });
      });
    }
  });
  consumerVertx.exceptionHandler(t -> ctx.fail(t));

  Set<TopicPartition> partitions = new LinkedHashSet<>();
  partitions.add(new TopicPartition(TOPIC, 0));
  consumerVertx.assign(partitions, h -> sendNextBatch(consumer));
}
Example #19
Source File: MockLiKafkaConsumer.java From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
public MockLiKafkaConsumer(OffsetResetStrategy offsetResetStrategy) {
  _delegate = new MockConsumer<>(offsetResetStrategy);
}
Example #20
Source File: MockLiKafkaConsumer.java From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
public MockConsumer<byte[], byte[]> getDelegate() {
  return _delegate;
}
Example #21
Source File: MockKafkaResource.java From baleen with Apache License 2.0
/** @return the mock consumer for testing */
public MockConsumer<String, String> getMockConsumer() {
  return mockConsumer;
}
Example #22
Source File: MockKafkaClientFactory.java From kafka-pubsub-emulator with Apache License 2.0
List<MockConsumer<String, ByteBuffer>> getConsumersForSubscription(String subscription) {
  return createdConsumers.get(subscription);
}
Example #23
Source File: ElasticSearchReaderTest.java From garmadon with Apache License 2.0
@Before
public void setUp() {
  GarmadonReader.GarmadonMessageHandler garmadonMessageHandler = mock(GarmadonReader.GarmadonMessageHandler.class);
  MockConsumer kafkaConsumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  GarmadonReader.Builder builder = GarmadonReader.Builder.stream(kafkaConsumer);
  GarmadonReader garmadonReader = builder
      .intercept(GarmadonMessageFilter.ANY.INSTANCE, garmadonMessageHandler)
      .build(false);

  garmadonReaderBuilder = Mockito.mock(GarmadonReader.Builder.class);
  when(garmadonReaderBuilder.intercept(any(GarmadonMessageFilter.class),
      any(GarmadonReader.GarmadonMessageHandler.class))).thenReturn(garmadonReaderBuilder);
  when(garmadonReaderBuilder.build()).thenReturn(garmadonReader);

  bulkProcessor = Mockito.mock(BulkProcessor.class);
  prometheusHttpConsumerMetrics = Mockito.mock(PrometheusHttpConsumerMetrics.class);
  elasticSearchCacheManager = Mockito.mock(ElasticSearchCacheManager.class);

  elasticSearchReader = new ElasticSearchReader(garmadonReaderBuilder, bulkProcessor,
      "garmadon-index", prometheusHttpConsumerMetrics, elasticSearchCacheManager);

  header = EventHeaderProtos.Header.newBuilder()
      .setUsername("user")
      .setApplicationId("app_id")
      .setApplicationName("application_name")
      .setAttemptId("attempt_id")
      .setContainerId("container_id")
      .build();

  headerMap.put("pid", "");
  headerMap.put("main_class", "");
  headerMap.put("application_id", "app_id");
  headerMap.put("tags", new ArrayList<>());
  headerMap.put("hostname", "");
  headerMap.put("component", "");
  headerMap.put("application_name", "application_name");
  headerMap.put("framework", "");
  headerMap.put("attempt_id", "attempt_id");
  headerMap.put("container_id", "container_id");
  headerMap.put("username", "user");
  headerMap.put("executor_id", "");
  headerMap.put("timestamp", 0);
}
Example #24
Source File: CountryPopulationConsumerUnitTest.java From tutorials with MIT License
@BeforeEach
void setUp() {
  consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  updates = new ArrayList<>();
  countryPopulationConsumer =
      new CountryPopulationConsumer(consumer, ex -> this.pollException = ex, updates::add);
}