org.apache.pulsar.client.api.SubscriptionInitialPosition Java Examples
The following examples show how to use org.apache.pulsar.client.api.SubscriptionInitialPosition. This enum controls where a newly created subscription starts consuming: Earliest positions the cursor at the oldest available message on the topic, while Latest (the default) positions it after the most recent message, so the consumer only sees messages published from then on. The examples below are drawn from several open source projects.
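Before the project examples, here is a minimal, self-contained sketch of the typical usage pattern. The broker URL, topic name, and subscription name are illustrative assumptions, not values taken from any project on this page.

import org.apache.pulsar.client.api.Consumer;
import org.apache.pulsar.client.api.Message;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.Schema;
import org.apache.pulsar.client.api.SubscriptionInitialPosition;

public class SubscriptionInitialPositionSketch {
    public static void main(String[] args) throws Exception {
        // Assumed local broker address; adjust for your deployment.
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://localhost:6650")
                .build();

        // Earliest: a newly created subscription starts from the first available message.
        // Latest (the default): it only receives messages published after it is created.
        // The setting applies only when the subscription is first created; an existing
        // subscription keeps its cursor position.
        Consumer<String> consumer = client.newConsumer(Schema.STRING)
                .topic("my-topic")                   // hypothetical topic name
                .subscriptionName("my-subscription") // hypothetical subscription name
                .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
                .subscribe();

        Message<String> msg = consumer.receive();
        System.out.println("Received: " + msg.getValue());
        consumer.acknowledge(msg);

        consumer.close();
        client.close();
    }
}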
Example #1
Source File: PulsarTestSupport.java From hazelcast-jet-contrib with Apache License 2.0
protected static Consumer<Double> getConsumer(String topicName) throws PulsarClientException {
    if (!integerConsumerMap.containsKey(topicName)) {
        Consumer<Double> newConsumer = getClient()
                .newConsumer(Schema.DOUBLE)
                .topic(topicName)
                .consumerName("hazelcast-jet-consumer-" + topicName)
                .subscriptionName("hazelcast-jet-subscription")
                .subscriptionType(SubscriptionType.Exclusive)
                .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
                .receiverQueueSize(QUEUE_CAPACITY)
                .subscribe();
        integerConsumerMap.put(topicName, newConsumer);
        return newConsumer;
    } else {
        return integerConsumerMap.get(topicName);
    }
}
Example #2
Source File: ConsumerConnector.java From pulsar with Apache License 2.0
private void resetOffsets(Consumer<byte[]> consumer, SubscriptionInitialPosition strategy) {
    if (strategy == null) {
        return;
    }
    log.info("Resetting partition {} for group-id {} and seeking to {} position",
            consumer.getTopic(), consumer.getSubscription(), strategy);
    try {
        if (strategy == SubscriptionInitialPosition.Earliest) {
            consumer.seek(MessageId.earliest);
        } else {
            consumer.seek(MessageId.latest);
        }
    } catch (PulsarClientException e) {
        log.warn("Failed to reset offset for consumer {} to {}, {}",
                consumer.getTopic(), strategy, e.getMessage(), e);
    }
}
Example #3
Source File: PulsarSourceBuilderTest.java From pulsar with Apache License 2.0
@Test
public void testBuildWithConfPojo() throws PulsarClientException {
    ClientConfigurationData clientConf = new ClientConfigurationData();
    clientConf.setServiceUrl("testServiceUrl");

    ConsumerConfigurationData consumerConf = new ConsumerConfigurationData();
    consumerConf.setTopicNames(new HashSet<>(Arrays.asList("testTopic")));
    consumerConf.setSubscriptionName("testSubscriptionName");
    consumerConf.setSubscriptionInitialPosition(SubscriptionInitialPosition.Earliest);

    SourceFunction sourceFunction = pulsarSourceBuilder
            .pulsarAllClientConf(clientConf)
            .pulsarAllConsumerConf(consumerConf)
            .build();
    Assert.assertNotNull(sourceFunction);
}
Example #4
Source File: PulsarConsumerBuilder.java From hazelcast-jet-contrib with Apache License 2.0
private ConsumerContext(
        @Nonnull ILogger logger,
        @Nonnull PulsarClient client,
        @Nonnull List<String> topics,
        @Nonnull Map<String, Object> consumerConfig,
        @Nonnull SupplierEx<Schema<M>> schemaSupplier,
        @Nonnull SupplierEx<BatchReceivePolicy> batchReceivePolicySupplier,
        @Nonnull FunctionEx<Message<M>, T> projectionFn
) throws PulsarClientException {
    this.logger = logger;
    this.projectionFn = projectionFn;
    this.client = client;
    this.consumer = client.newConsumer(schemaSupplier.get())
            .topics(topics)
            .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
            .loadConf(consumerConfig)
            .batchReceivePolicy(batchReceivePolicySupplier.get())
            .subscriptionType(SubscriptionType.Shared)
            .subscribe();
}
Example #5
Source File: CompactionTest.java From pulsar with Apache License 2.0
@Test(timeOut = 20000, dataProvider = "lastDeletedBatching")
public void testAllEmptyCompactionLedger(boolean batchEnabled) throws Exception {
    final String topic = "persistent://my-property/use/my-ns/testAllEmptyCompactionLedger"
            + UUID.randomUUID().toString();

    final int messages = 10;

    // 1. Create producer and publish messages to the topic.
    ProducerBuilder<byte[]> builder = pulsarClient.newProducer().topic(topic);
    if (!batchEnabled) {
        builder.enableBatching(false);
    } else {
        builder.batchingMaxMessages(messages / 5);
    }
    Producer<byte[]> producer = builder.create();

    List<CompletableFuture<MessageId>> futures = new ArrayList<>(messages);
    for (int i = 0; i < messages; i++) {
        futures.add(producer.newMessage().keyBytes("1".getBytes()).value("".getBytes()).sendAsync());
    }
    FutureUtil.waitForAll(futures).get();

    // 2. Compact the topic.
    Compactor compactor = new TwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler);
    compactor.compact(topic).get();

    // A consumer with readCompacted enabled only gets compacted entries.
    try (Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topic).subscriptionName("sub1")
            .readCompacted(true).subscriptionInitialPosition(SubscriptionInitialPosition.Earliest).subscribe()) {
        Message<byte[]> m = consumer.receive(2, TimeUnit.SECONDS);
        assertNull(m);
    }
}
Example #6
Source File: PulsarKafkaConsumer.java From pulsar with Apache License 2.0
private SubscriptionInitialPosition resetOffsets(final TopicPartition partition) {
    log.info("Resetting partition {} and seeking to {} position", partition, strategy);
    if (strategy == SubscriptionInitialPosition.Earliest) {
        seekToBeginning(Collections.singleton(partition));
    } else {
        seekToEnd(Collections.singleton(partition));
    }
    return strategy;
}
Example #7
Source File: PulsarKafkaConsumer.java From pulsar with Apache License 2.0
private SubscriptionInitialPosition getStrategy(final String strategy) {
    switch (strategy) {
        case "earliest":
            return SubscriptionInitialPosition.Earliest;
        default:
            return SubscriptionInitialPosition.Latest;
    }
}
Example #8
Source File: ConsumerConnector.java From pulsar with Apache License 2.0
public ConsumerConnector(ConsumerConfig config) {
    checkNotNull(config, "ConsumerConfig can't be null");
    clientId = config.clientId();
    groupId = config.groupId();
    isAutoCommit = config.autoCommitEnable();
    if ("largest".equalsIgnoreCase(config.autoOffsetReset())) {
        strategy = SubscriptionInitialPosition.Latest;
    } else if ("smallest".equalsIgnoreCase(config.autoOffsetReset())) {
        strategy = SubscriptionInitialPosition.Earliest;
    }
    String consumerId = !config.consumerId().isEmpty() ? config.consumerId().get() : null;
    int maxMessage = config.queuedMaxMessages();
    String serviceUrl = config.zkConnect();
    Properties properties = config.props() != null && config.props().props() != null
            ? config.props().props()
            : new Properties();
    try {
        client = PulsarClientKafkaConfig.getClientBuilder(properties).serviceUrl(serviceUrl).build();
    } catch (PulsarClientException e) {
        throw new IllegalArgumentException(
                "Failed to create pulsar-client using url = " + serviceUrl + ", properties = " + properties, e);
    }

    topicStreams = Sets.newConcurrentHashSet();
    consumerBuilder = client.newConsumer();
    consumerBuilder.subscriptionName(groupId);
    if (properties.containsKey("queued.max.message.chunks") && config.queuedMaxMessages() > 0) {
        consumerBuilder.receiverQueueSize(maxMessage);
    }
    if (consumerId != null) {
        consumerBuilder.consumerName(consumerId);
    }
    if (properties.containsKey("auto.commit.interval.ms") && config.autoCommitIntervalMs() > 0) {
        consumerBuilder.acknowledgmentGroupTime(config.autoCommitIntervalMs(), TimeUnit.MILLISECONDS);
    }
    this.executor = Executors.newScheduledThreadPool(1, new DefaultThreadFactory("pulsar-kafka"));
}
Example #9
Source File: PulsarSourceBuilderTest.java From pulsar with Apache License 2.0
@Test
public void testBuild() throws PulsarClientException {
    SourceFunction sourceFunction = pulsarSourceBuilder
            .serviceUrl("testServiceUrl")
            .topic("testTopic")
            .subscriptionName("testSubscriptionName")
            .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
            .build();
    Assert.assertNotNull(sourceFunction);
}
Example #10
Source File: PulsarKafkaConsumer.java From pulsar with Apache License 2.0
private SubscriptionInitialPosition getStrategy(final String strategy) {
    if ("earliest".equals(strategy)) {
        return SubscriptionInitialPosition.Earliest;
    } else {
        return SubscriptionInitialPosition.Latest;
    }
}
Example #11
Source File: PulsarSourceBuilder.java From pulsar with Apache License 2.0
private PulsarSourceBuilder(DeserializationSchema<T> deserializationSchema) {
    this.deserializationSchema = deserializationSchema;
    clientConfigurationData = new ClientConfigurationData();
    consumerConfigurationData = new ConsumerConfigurationData<>();
    clientConfigurationData.setServiceUrl(SERVICE_URL);
    consumerConfigurationData.setTopicNames(new TreeSet<>());
    consumerConfigurationData.setSubscriptionName(SUBSCRIPTION_NAME);
    consumerConfigurationData.setSubscriptionInitialPosition(SubscriptionInitialPosition.Latest);
}
Example #12
Source File: PulsarKafkaConsumer.java From pulsar with Apache License 2.0
private SubscriptionInitialPosition resetOffsets(final TopicPartition partition) {
    log.info("Resetting partition {} and seeking to {} position", partition, strategy);
    if (strategy == SubscriptionInitialPosition.Earliest) {
        seekToBeginning(partition);
    } else {
        seekToEnd(partition);
    }
    return strategy;
}
Example #13
Source File: PulsarSourceTest.java From pulsar with Apache License 2.0
private static PulsarSourceConfig getPulsarConfigs(boolean multiple) {
    PulsarSourceConfig pulsarConfig = new PulsarSourceConfig();
    pulsarConfig.setProcessingGuarantees(FunctionConfig.ProcessingGuarantees.ATLEAST_ONCE);
    if (multiple) {
        pulsarConfig.setTopicSchema(multipleConsumerConfigs);
    } else {
        pulsarConfig.setTopicSchema(consumerConfigs);
    }
    pulsarConfig.setTypeClassName(String.class.getName());
    pulsarConfig.setSubscriptionPosition(SubscriptionInitialPosition.Latest);
    pulsarConfig.setSubscriptionType(SubscriptionType.Shared);
    return pulsarConfig;
}
Example #14
Source File: PulsarFunctionsTest.java From pulsar with Apache License 2.0
private void testDebeziumMySqlConnect(String converterClassName, boolean jsonWithEnvelope) throws Exception {

    final String tenant = TopicName.PUBLIC_TENANT;
    final String namespace = TopicName.DEFAULT_NAMESPACE;
    final String outputTopicName = "debe-output-topic-name";
    boolean isJsonConverter = converterClassName.endsWith("JsonConverter");
    final String consumeTopicName = "debezium/mysql-"
            + (isJsonConverter ? "json" : "avro")
            + "/dbserver1.inventory.products";
    final String sourceName = "test-source-debezium-mysql" + (isJsonConverter ? "json" : "avro")
            + "-" + functionRuntimeType + "-" + randomName(8);

    // This is the binlog count contained in the mysql container.
    final int numMessages = 47;

    if (pulsarCluster == null) {
        super.setupCluster();
        super.setupFunctionWorkers();
    }

    @Cleanup
    PulsarClient client = PulsarClient.builder()
            .serviceUrl(pulsarCluster.getPlainTextServiceUrl())
            .build();

    @Cleanup
    PulsarAdmin admin = PulsarAdmin.builder().serviceHttpUrl(pulsarCluster.getHttpServiceUrl()).build();
    initNamespace(admin);

    try {
        SchemaInfo lastSchemaInfo = admin.schemas().getSchemaInfo(consumeTopicName);
        log.info("lastSchemaInfo: {}", lastSchemaInfo == null ? "null" : lastSchemaInfo.toString());
    } catch (Exception e) {
        log.warn("failed to get schemaInfo for topic: {}, exceptions message: {}",
                consumeTopicName, e.getMessage());
    }

    admin.topics().createNonPartitionedTopic(outputTopicName);

    @Cleanup
    DebeziumMySqlSourceTester sourceTester = new DebeziumMySqlSourceTester(pulsarCluster, converterClassName);
    sourceTester.getSourceConfig().put("json-with-envelope", jsonWithEnvelope);

    // setup debezium mysql server
    DebeziumMySQLContainer mySQLContainer = new DebeziumMySQLContainer(pulsarCluster.getClusterName());
    sourceTester.setServiceContainer(mySQLContainer);

    // prepare the testing environment for source
    prepareSource(sourceTester);

    // submit the source connector
    submitSourceConnector(sourceTester, tenant, namespace, sourceName, outputTopicName);

    // get source info
    getSourceInfoSuccess(sourceTester, tenant, namespace, sourceName);

    // get source status
    Failsafe.with(statusRetryPolicy).run(() -> getSourceStatus(tenant, namespace, sourceName));

    // wait for source to process messages
    Failsafe.with(statusRetryPolicy).run(() ->
            waitForProcessingSourceMessages(tenant, namespace, sourceName, numMessages));

    @Cleanup
    Consumer consumer = client.newConsumer(getSchema(jsonWithEnvelope))
            .topic(consumeTopicName)
            .subscriptionName("debezium-source-tester")
            .subscriptionType(SubscriptionType.Exclusive)
            .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
            .subscribe();
    log.info("[debezium mysql test] create consumer finish. converterName: {}", converterClassName);

    // validate the source result
    sourceTester.validateSourceResult(consumer, 9, null, converterClassName);

    // prepare insert event
    sourceTester.prepareInsertEvent();

    // validate the source insert event
    sourceTester.validateSourceResult(consumer, 1, SourceTester.INSERT, converterClassName);

    // prepare update event
    sourceTester.prepareUpdateEvent();

    // validate the source update event
    sourceTester.validateSourceResult(consumer, 1, SourceTester.UPDATE, converterClassName);

    // prepare delete event
    sourceTester.prepareDeleteEvent();

    // validate the source delete event
    sourceTester.validateSourceResult(consumer, 1, SourceTester.DELETE, converterClassName);

    // delete the source
    deleteSource(tenant, namespace, sourceName);

    // get source info (source should be deleted)
    getSourceInfoNotFound(tenant, namespace, sourceName);
}
Example #15
Source File: CompactionTest.java From pulsar with Apache License 2.0
@Test(timeOut = 20000)
public void testBatchAndNonBatchWithoutEmptyPayload()
        throws PulsarClientException, ExecutionException, InterruptedException {
    final String topic = "persistent://my-property/use/my-ns/testBatchAndNonBatchWithoutEmptyPayload"
            + UUID.randomUUID().toString();

    // 1. Create producer and publish messages to the topic.
    Producer<byte[]> producer = pulsarClient.newProducer()
            .topic(topic)
            .enableBatching(true)
            .batchingMaxPublishDelay(1, TimeUnit.DAYS)
            .create();

    final String k1 = "k1";
    final String k2 = "k2";
    producer.newMessage().key(k1).value("0".getBytes()).send();
    List<CompletableFuture<MessageId>> futures = new ArrayList<>(7);
    for (int i = 0; i < 2; i++) {
        futures.add(producer.newMessage().key(k1).value((i + 1 + "").getBytes()).sendAsync());
    }
    producer.flush();
    producer.newMessage().key(k1).value("3".getBytes()).send();
    for (int i = 0; i < 2; i++) {
        futures.add(producer.newMessage().key(k1).value((i + 4 + "").getBytes()).sendAsync());
    }
    producer.flush();
    for (int i = 0; i < 3; i++) {
        futures.add(producer.newMessage().key(k2).value((i + "").getBytes()).sendAsync());
    }
    producer.newMessage().key(k2).value("3".getBytes()).send();
    producer.flush();
    FutureUtil.waitForAll(futures).get();

    // 2. Compact the topic.
    Compactor compactor = new TwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler);
    compactor.compact(topic).get();

    // A consumer with readCompacted enabled only gets compacted entries.
    try (Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topic).subscriptionName("sub1")
            .readCompacted(true).subscriptionInitialPosition(SubscriptionInitialPosition.Earliest).subscribe()) {
        Message<byte[]> m1 = consumer.receive(2, TimeUnit.SECONDS);
        Message<byte[]> m2 = consumer.receive(2, TimeUnit.SECONDS);
        assertNotNull(m1);
        assertNotNull(m2);
        assertEquals(m1.getKey(), k1);
        assertEquals(new String(m1.getValue()), "5");
        assertEquals(m2.getKey(), k2);
        assertEquals(new String(m2.getValue()), "3");
        Message<byte[]> none = consumer.receive(2, TimeUnit.SECONDS);
        assertNull(none);
    }
}
Example #16
Source File: CompactionTest.java From pulsar with Apache License 2.0
@Test(timeOut = 20000)
public void testBatchAndNonBatchWithEmptyPayload()
        throws PulsarClientException, ExecutionException, InterruptedException {
    final String topic = "persistent://my-property/use/my-ns/testBatchAndNonBatchWithEmptyPayload"
            + UUID.randomUUID().toString();

    // 1. Create producer and publish messages to the topic.
    Producer<byte[]> producer = pulsarClient.newProducer()
            .topic(topic)
            .enableBatching(true)
            .batchingMaxPublishDelay(1, TimeUnit.DAYS)
            .create();

    final String k1 = "k1";
    final String k2 = "k2";
    final String k3 = "k3";
    producer.newMessage().key(k1).value("0".getBytes()).send();
    List<CompletableFuture<MessageId>> futures = new ArrayList<>(7);
    for (int i = 0; i < 2; i++) {
        futures.add(producer.newMessage().key(k1).value((i + 1 + "").getBytes()).sendAsync());
    }
    producer.flush();
    producer.newMessage().key(k1).value("3".getBytes()).send();
    for (int i = 0; i < 2; i++) {
        futures.add(producer.newMessage().key(k1).value((i + 4 + "").getBytes()).sendAsync());
    }
    producer.flush();
    for (int i = 0; i < 3; i++) {
        futures.add(producer.newMessage().key(k2).value((i + 10 + "").getBytes()).sendAsync());
    }
    producer.flush();
    producer.newMessage().key(k2).value("".getBytes()).send();
    producer.newMessage().key(k3).value("0".getBytes()).send();
    FutureUtil.waitForAll(futures).get();

    // 2. Compact the topic.
    Compactor compactor = new TwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler);
    compactor.compact(topic).get();

    // A consumer with readCompacted enabled only gets compacted entries.
    try (Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topic).subscriptionName("sub1")
            .readCompacted(true).subscriptionInitialPosition(SubscriptionInitialPosition.Earliest).subscribe()) {
        Message<byte[]> m1 = consumer.receive();
        Message<byte[]> m2 = consumer.receive();
        assertNotNull(m1);
        assertNotNull(m2);
        assertEquals(m1.getKey(), k1);
        assertEquals(m2.getKey(), k3);
        assertEquals(new String(m1.getValue()), "5");
        assertEquals(new String(m2.getValue()), "0");
        Message<byte[]> none = consumer.receive(2, TimeUnit.SECONDS);
        assertNull(none);
    }
}
Example #17
Source File: CompactionTest.java From pulsar with Apache License 2.0
@Test(timeOut = 20000)
public void testBatchAndNonBatchEndOfEmptyPayload()
        throws PulsarClientException, ExecutionException, InterruptedException {
    final String topic = "persistent://my-property/use/my-ns/testBatchAndNonBatchWithEmptyPayload"
            + UUID.randomUUID().toString();

    // 1. Create producer and publish messages to the topic.
    Producer<byte[]> producer = pulsarClient.newProducer()
            .topic(topic)
            .enableBatching(true)
            .batchingMaxPublishDelay(1, TimeUnit.DAYS)
            .create();

    final String k1 = "k1";
    final String k2 = "k2";
    producer.newMessage().key(k1).value("0".getBytes()).send();
    List<CompletableFuture<MessageId>> futures = new ArrayList<>(7);
    for (int i = 0; i < 2; i++) {
        futures.add(producer.newMessage().key(k1).value((i + 1 + "").getBytes()).sendAsync());
    }
    producer.flush();
    producer.newMessage().key(k1).value("3".getBytes()).send();
    for (int i = 0; i < 2; i++) {
        futures.add(producer.newMessage().key(k1).value((i + 4 + "").getBytes()).sendAsync());
    }
    producer.flush();
    for (int i = 0; i < 3; i++) {
        futures.add(producer.newMessage().key(k2).value((i + 10 + "").getBytes()).sendAsync());
    }
    producer.flush();
    producer.newMessage().key(k2).value("".getBytes()).send();
    FutureUtil.waitForAll(futures).get();

    // 2. Compact the topic.
    Compactor compactor = new TwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler);
    compactor.compact(topic).get();

    // A consumer with readCompacted enabled only gets compacted entries.
    try (Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topic).subscriptionName("sub1")
            .readCompacted(true).subscriptionInitialPosition(SubscriptionInitialPosition.Earliest).subscribe()) {
        Message<byte[]> m1 = consumer.receive();
        assertNotNull(m1);
        assertEquals(m1.getKey(), k1);
        assertEquals(new String(m1.getValue()), "5");
        Message<byte[]> none = consumer.receive(2, TimeUnit.SECONDS);
        assertNull(none);
    }
}
Example #18
Source File: CompactionTest.java From pulsar with Apache License 2.0
@Test(timeOut = 20000, dataProvider = "lastDeletedBatching")
public void testCompactMultipleTimesWithoutEmptyMessage(boolean batchEnabled)
        throws PulsarClientException, ExecutionException, InterruptedException {
    final String topic = "persistent://my-property/use/my-ns/testCompactMultipleTimesWithoutEmptyMessage"
            + UUID.randomUUID().toString();

    final int messages = 10;
    final String key = "1";

    // 1. Create producer and publish messages to the topic.
    ProducerBuilder<byte[]> builder = pulsarClient.newProducer().topic(topic);
    if (!batchEnabled) {
        builder.enableBatching(false);
    } else {
        builder.batchingMaxMessages(messages / 5);
    }
    Producer<byte[]> producer = builder.create();

    List<CompletableFuture<MessageId>> futures = new ArrayList<>(messages);
    for (int i = 0; i < messages; i++) {
        futures.add(producer.newMessage().key(key).value((i + "").getBytes()).sendAsync());
    }
    FutureUtil.waitForAll(futures).get();

    // 2. Compact the topic.
    Compactor compactor = new TwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler);
    compactor.compact(topic).get();

    // 3. Send ten more messages.
    futures.clear();
    for (int i = 0; i < messages; i++) {
        futures.add(producer.newMessage().key(key).value((i + 10 + "").getBytes()).sendAsync());
    }
    FutureUtil.waitForAll(futures).get();

    // 4. Compact again.
    compactor.compact(topic).get();

    try (Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topic).subscriptionName("sub1")
            .readCompacted(true).subscriptionInitialPosition(SubscriptionInitialPosition.Earliest).subscribe()) {
        Message<byte[]> m1 = consumer.receive();
        assertNotNull(m1);
        assertEquals(m1.getKey(), key);
        assertEquals(new String(m1.getValue()), "19");
        Message<byte[]> none = consumer.receive(2, TimeUnit.SECONDS);
        assertNull(none);
    }
}
Example #19
Source File: PartitionedTopicsSchemaTest.java From pulsar with Apache License 2.0
/**
 * Test that the sequence id from a producer is correct when there are send errors.
 */
@Test
public void partitionedTopicWithSchema() throws Exception {
    admin.namespaces().createNamespace("prop/my-test", Collections.singleton("usc"));

    String topicName = "prop/my-test/my-topic";
    admin.topics().createPartitionedTopic(topicName, 16);

    int N = 10;

    PulsarClient client = PulsarClient.builder().serviceUrl(pulsar.getBrokerServiceUrl()).build();

    CompletableFuture<Producer<String>> producerFuture = client.newProducer(Schema.STRING)
            .topic(topicName)
            .createAsync();
    CompletableFuture<Consumer<String>> consumerFuture = client.newConsumer(Schema.STRING)
            .topic(topicName)
            .subscriptionName("sub")
            .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
            .subscribeAsync();

    CompletableFuture.allOf(producerFuture, consumerFuture).get();

    Producer<String> producer = producerFuture.get();
    Consumer<String> consumer = consumerFuture.get();

    for (int i = 0; i < N; i++) {
        producer.send("Hello-" + i);
    }

    consumer.close();
    producer.close();

    // Force topic reloading to re-open the schema multiple times in parallel
    admin.namespaces().unload("prop/my-test");

    producerFuture = client.newProducer(Schema.STRING)
            .topic(topicName)
            .createAsync();
    consumerFuture = client.newConsumer(Schema.STRING)
            .topic(topicName)
            .subscriptionName("sub")
            .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
            .subscribeAsync();

    // Re-opening the topic should succeed
    CompletableFuture.allOf(producerFuture, consumerFuture).get();

    consumer = consumerFuture.get();

    Set<String> messages = new TreeSet<>();

    for (int i = 0; i < N; i++) {
        Message<String> msg = consumer.receive();
        messages.add(msg.getValue());
        consumer.acknowledge(msg);
    }

    assertEquals(messages.size(), N);
    for (int i = 0; i < N; i++) {
        assertTrue(messages.contains("Hello-" + i));
    }

    client.close();
}
Example #20
Source File: ProxyTest.java From pulsar with Apache License 2.0
@Test
public void testRegexSubscription() throws Exception {
    @Cleanup
    PulsarClient client = PulsarClient.builder().serviceUrl(proxyService.getServiceUrl())
            .connectionsPerBroker(5).ioThreads(5).build();

    // create two topics by subscribing to a topic and closing it
    try (Consumer<byte[]> ignored = client.newConsumer()
            .topic("persistent://sample/test/local/regex-sub-topic1")
            .subscriptionName("proxy-ignored")
            .subscribe()) {
    }
    try (Consumer<byte[]> ignored = client.newConsumer()
            .topic("persistent://sample/test/local/regex-sub-topic2")
            .subscriptionName("proxy-ignored")
            .subscribe()) {
    }

    String subName = "regex-sub-proxy-test-" + System.currentTimeMillis();

    // make sure the regex subscription works
    String regexSubscriptionPattern = "persistent://sample/test/local/regex-sub-topic.*";
    log.info("Regex subscribe to topics {}", regexSubscriptionPattern);
    try (Consumer<byte[]> consumer = client.newConsumer()
            .topicsPattern(regexSubscriptionPattern)
            .subscriptionName(subName)
            .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
            .subscribe()) {
        log.info("Successfully subscribe to topics using regex {}", regexSubscriptionPattern);

        final int numMessages = 20;

        try (Producer<byte[]> producer = client.newProducer(Schema.BYTES)
                .topic("persistent://sample/test/local/topic1")
                .create()) {
            for (int i = 0; i < numMessages; i++) {
                producer.send(("message-" + i).getBytes(UTF_8));
            }
        }

        for (int i = 0; i < numMessages; i++) {
            Message<byte[]> msg = consumer.receive();
            assertEquals("message-" + i, new String(msg.getValue(), UTF_8));
        }
    }
}
Example #21
Source File: SchemaUpdateStrategyTest.java From pulsar with Apache License 2.0
private void testAutoUpdateBackward(String namespace, String topicName) throws Exception {
    ContainerExecResult result = pulsarCluster.runAdminCommandOnAnyBroker(
            "namespaces", "get-schema-autoupdate-strategy", namespace);
    Assert.assertEquals(result.getStdout().trim(), "FULL");
    pulsarCluster.runAdminCommandOnAnyBroker("namespaces", "set-schema-autoupdate-strategy",
            "--compatibility", "BACKWARD", namespace);

    try (PulsarClient pulsarClient = PulsarClient.builder()
            .serviceUrl(pulsarCluster.getPlainTextServiceUrl()).build()) {

        V1Data v1Data = new V1Data("test1", 1);
        try (Producer<V1Data> p = pulsarClient.newProducer(Schema.AVRO(V1Data.class)).topic(topicName).create()) {
            p.send(v1Data);
        }

        log.info("try with forward compat, should fail");
        try (Producer<V3Data> p = pulsarClient.newProducer(Schema.AVRO(V3Data.class)).topic(topicName).create()) {
            Assert.fail("Forward compat schema should be rejected");
        } catch (PulsarClientException e) {
            Assert.assertTrue(e.getMessage().contains("IncompatibleSchemaException"));
        }

        log.info("try with backward compat, should succeed");
        V2Data v2Data = new V2Data("test2");
        try (Producer<V2Data> p = pulsarClient.newProducer(Schema.AVRO(V2Data.class)).topic(topicName).create()) {
            p.send(v2Data);
        }

        Schema<GenericRecord> schema = Schema.AUTO_CONSUME();
        try (Consumer<GenericRecord> consumer = pulsarClient.newConsumer(schema)
                .topic(topicName)
                .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
                .subscriptionName("sub")
                .subscribe()) {
            log.info("Schema Info : {}", schema.getSchemaInfo().getSchemaDefinition());
            Message<GenericRecord> msg1 = consumer.receive();
            v1Data.assertEqualToRecord(msg1.getValue());
            Message<GenericRecord> msg2 = consumer.receive();
            v2Data.assertEqualToRecord(msg2.getValue());
        }
    }
}
Example #22
Source File: SchemaUpdateStrategyTest.java From pulsar with Apache License 2.0
private void testNone(String namespace, String topicName) throws Exception {
    ContainerExecResult result = pulsarCluster.runAdminCommandOnAnyBroker(
            "namespaces", "get-schema-autoupdate-strategy", namespace);
    Assert.assertEquals(result.getStdout().trim(), "FULL");
    pulsarCluster.runAdminCommandOnAnyBroker("namespaces", "set-schema-autoupdate-strategy",
            "--compatibility", "NONE", namespace);

    try (PulsarClient pulsarClient = PulsarClient.builder()
            .serviceUrl(pulsarCluster.getPlainTextServiceUrl()).build()) {

        V1Data v1Data = new V1Data("test1", 1);
        try (Producer<V1Data> p = pulsarClient.newProducer(Schema.AVRO(V1Data.class)).topic(topicName).create()) {
            p.send(v1Data);
        }

        log.info("try with forward compat, should succeed");
        V3Data v3Data = new V3Data("test3", 1, 2);
        try (Producer<V3Data> p = pulsarClient.newProducer(Schema.AVRO(V3Data.class)).topic(topicName).create()) {
            p.send(v3Data);
        }

        log.info("try with backward compat, should succeed");
        V2Data v2Data = new V2Data("test2");
        try (Producer<V2Data> p = pulsarClient.newProducer(Schema.AVRO(V2Data.class)).topic(topicName).create()) {
            p.send(v2Data);
        }

        Schema<GenericRecord> schema = Schema.AUTO_CONSUME();
        try (Consumer<GenericRecord> consumer = pulsarClient.newConsumer(schema)
                .topic(topicName)
                .subscriptionName("sub")
                .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
                .subscribe()) {
            log.info("Schema Info : {}", schema.getSchemaInfo().getSchemaDefinition());
            Message<GenericRecord> msg1 = consumer.receive();
            v1Data.assertEqualToRecord(msg1.getValue());
            Message<GenericRecord> msg2 = consumer.receive();
            v3Data.assertEqualToRecord(msg2.getValue());
            Message<GenericRecord> msg3 = consumer.receive();
            v2Data.assertEqualToRecord(msg3.getValue());
        }
    }
}
Example #23
Source File: SchemaUpdateStrategyTest.java From pulsar with Apache License 2.0
private void testAutoUpdateForward(String namespace, String topicName) throws Exception {
    ContainerExecResult result = pulsarCluster.runAdminCommandOnAnyBroker(
            "namespaces", "get-schema-autoupdate-strategy", namespace);
    Assert.assertEquals(result.getStdout().trim(), "FULL");
    pulsarCluster.runAdminCommandOnAnyBroker("namespaces", "set-schema-autoupdate-strategy",
            "--compatibility", "FORWARD", namespace);

    try (PulsarClient pulsarClient = PulsarClient.builder()
            .serviceUrl(pulsarCluster.getPlainTextServiceUrl()).build()) {

        V1Data v1Data = new V1Data("test1", 1);
        try (Producer<V1Data> p = pulsarClient.newProducer(Schema.AVRO(V1Data.class)).topic(topicName).create()) {
            p.send(v1Data);
        }

        log.info("try with backward compat, should fail");
        try (Producer<V2Data> p = pulsarClient.newProducer(Schema.AVRO(V2Data.class)).topic(topicName).create()) {
            Assert.fail("Backward compat schema should be rejected");
        } catch (PulsarClientException e) {
            Assert.assertTrue(e.getMessage().contains("IncompatibleSchemaException"));
        }

        log.info("try with forward compat, should succeed");
        V3Data v3Data = new V3Data("test2", 1, 2);
        try (Producer<V3Data> p = pulsarClient.newProducer(Schema.AVRO(V3Data.class)).topic(topicName).create()) {
            p.send(v3Data);
        }

        Schema<GenericRecord> schema = Schema.AUTO_CONSUME();
        try (Consumer<GenericRecord> consumer = pulsarClient.newConsumer(schema)
                .topic(topicName)
                .subscriptionName("sub")
                .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
                .subscribe()) {
            log.info("Schema Info : {}", schema.getSchemaInfo().getSchemaDefinition());
            Message<GenericRecord> msg1 = consumer.receive();
            v1Data.assertEqualToRecord(msg1.getValue());
            Message<GenericRecord> msg2 = consumer.receive();
            v3Data.assertEqualToRecord(msg2.getValue());
        }
    }
}
Example #24
Source File: KafkaApisTest.java From kop with Apache License 2.0
@Test(timeOut = 20000)
public void testReadUncommittedConsumerListOffsetEarliestOffsetEquals() throws Exception {
    String topicName = "testReadUncommittedConsumerListOffsetEarliest";
    TopicPartition tp = new TopicPartition(topicName, 0);

    // Use a producer to create some messages, then derive the limit offset from them.
    String pulsarTopicName = "persistent://public/default/" + topicName;

    // create partitioned topic.
    admin.topics().createPartitionedTopic(topicName, 1);

    // 1. Prepare the topic:
    //    use a kafka producer to produce 10 messages;
    //    use a pulsar consumer to get the message offset.
    @Cleanup
    KProducer kProducer = new KProducer(topicName, false, getKafkaBrokerPort());
    int totalMsgs = 10;
    String messageStrPrefix = topicName + "_message_";

    for (int i = 0; i < totalMsgs; i++) {
        String messageStr = messageStrPrefix + i;
        kProducer.getProducer()
                .send(new ProducerRecord<>(topicName, i, messageStr))
                .get();
        log.debug("Kafka Producer Sent message: ({}, {})", i, messageStr);
    }

    @Cleanup
    Consumer<byte[]> consumer = pulsarClient.newConsumer()
            .topic(pulsarTopicName)
            .subscriptionName(topicName + "_sub")
            .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
            .subscribe();
    Message<byte[]> msg = consumer.receive(100, TimeUnit.MILLISECONDS);
    assertNotNull(msg);
    MessageIdImpl messageId = (MessageIdImpl) ((TopicMessageIdImpl) msg.getMessageId()).getInnerMessageId();
    // The first entry should be the limit offset.
    long limitOffset = MessageIdUtils.getOffset(messageId.getLedgerId(), 0);
    log.info("After create {} messages, get messageId: {} expected earliest limit: {}",
            totalMsgs, messageId, limitOffset);

    // 2. The real test: for a ListOffset request, verify that Earliest returns the earliest offset.
    Map<TopicPartition, Long> targetTimes = Maps.newHashMap();
    targetTimes.put(tp, ListOffsetRequest.EARLIEST_TIMESTAMP);

    ListOffsetRequest.Builder builder = ListOffsetRequest.Builder
            .forConsumer(true, IsolationLevel.READ_UNCOMMITTED)
            .setTargetTimes(targetTimes);

    KafkaHeaderAndRequest request = buildRequest(builder);
    CompletableFuture<AbstractResponse> responseFuture = new CompletableFuture<>();
    kafkaRequestHandler.handleListOffsetRequest(request, responseFuture);

    AbstractResponse response = responseFuture.get();
    ListOffsetResponse listOffsetResponse = (ListOffsetResponse) response;
    assertEquals(listOffsetResponse.responseData().get(tp).error, Errors.NONE);
    assertEquals(listOffsetResponse.responseData().get(tp).offset, Long.valueOf(limitOffset));
    assertEquals(listOffsetResponse.responseData().get(tp).timestamp, Long.valueOf(0));
}
Example #25
Source File: GroupCoordinatorTest.java From kop with Apache License 2.0
@BeforeMethod
@Override
public void setup() throws Exception {
    super.internalSetup();

    protocols = newProtocols();

    scheduler = OrderedScheduler.newSchedulerBuilder()
            .name("test-scheduler")
            .numThreads(1)
            .build();

    admin.clusters().createCluster("test",
            new ClusterData("http://127.0.0.1:" + brokerWebservicePort));
    admin.tenants().createTenant("public",
            new TenantInfo(Sets.newHashSet("appid1", "appid2"), Sets.newHashSet("test")));
    admin.namespaces().createNamespace("public/default");
    admin.namespaces().setNamespaceReplicationClusters("public/default", Sets.newHashSet("test"));
    admin.namespaces().setRetention("public/default", new RetentionPolicies(20, 100));

    GroupConfig groupConfig = new GroupConfig(
            ConsumerMinSessionTimeout,
            ConsumerMaxSessionTimeout,
            GroupInitialRebalanceDelay
    );

    topicName = "test-coordinator-" + System.currentTimeMillis();
    OffsetConfig offsetConfig = OffsetConfig.builder().offsetsTopicName(topicName).build();

    timer = new MockTimer();

    producerBuilder = pulsarClient.newProducer(Schema.BYTEBUFFER);

    consumer = pulsarClient.newConsumer(Schema.BYTEBUFFER)
            .topic(topicName)
            .subscriptionName("test-sub")
            .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
            .subscribe();

    readerBuilder = pulsarClient.newReader(Schema.BYTEBUFFER)
            .startMessageId(MessageId.earliest);

    groupPartitionId = 0;
    otherGroupPartitionId = 1;
    otherGroupId = "otherGroupId";
    offsetConfig.offsetsTopicNumPartitions(4);
    groupMetadataManager = spy(new GroupMetadataManager(
            offsetConfig,
            producerBuilder,
            readerBuilder,
            scheduler,
            timer.time(),
            id -> {
                if (groupId.equals(id) || id.isEmpty()) {
                    return groupPartitionId;
                } else {
                    return otherGroupPartitionId;
                }
            }
    ));

    assertNotEquals(groupPartitionId, otherGroupPartitionId);

    DelayedOperationPurgatory<DelayedHeartbeat> heartbeatPurgatory =
            DelayedOperationPurgatory.<DelayedHeartbeat>builder()
                    .purgatoryName("Heartbeat")
                    .timeoutTimer(timer)
                    .reaperEnabled(false)
                    .build();
    DelayedOperationPurgatory<DelayedJoin> joinPurgatory =
            DelayedOperationPurgatory.<DelayedJoin>builder()
                    .purgatoryName("Rebalance")
                    .timeoutTimer(timer)
                    .reaperEnabled(false)
                    .build();

    groupCoordinator = new GroupCoordinator(
            groupConfig,
            groupMetadataManager,
            heartbeatPurgatory,
            joinPurgatory,
            timer.time(),
            new MockOffsetAcker((PulsarClientImpl) pulsarClient)
    );

    // start the group coordinator
    groupCoordinator.startup(false);

    // add the partition into the owned partition list
    groupPartitionId = groupMetadataManager.partitionFor(groupId);
    groupMetadataManager.addPartitionOwnership(groupPartitionId);
}
Example #26
Source File: GroupMetadataManagerTest.java From kop with Apache License 2.0
@Test
public void testExpireOffsetsWithActiveGroup() throws Exception {
    @Cleanup
    Consumer<ByteBuffer> consumer = pulsarClient.newConsumer(Schema.BYTEBUFFER)
            .topic(groupMetadataManager.getTopicPartitionName())
            .subscriptionName("test-sub")
            .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
            .subscribe();

    String memberId = "memberId";
    String clientId = "clientId";
    String clientHost = "localhost";
    TopicPartition topicPartition1 = new TopicPartition("foo", 0);
    TopicPartition topicPartition2 = new TopicPartition("foo", 1);
    long offset = 37;

    groupMetadataManager.addPartitionOwnership(groupPartitionId);
    GroupMetadata group = new GroupMetadata(groupId, Empty);
    groupMetadataManager.addGroup(group);

    MemberMetadata member = new MemberMetadata(
            memberId, groupId, clientId, clientHost,
            rebalanceTimeout,
            sessionTimeout,
            protocolType,
            ImmutableMap.<String, byte[]>builder()
                    .put("protocol", new byte[0])
                    .build()
    );
    CompletableFuture<JoinGroupResult> memberJoinFuture = new CompletableFuture<>();
    member.awaitingJoinCallback(memberJoinFuture);
    group.add(member);
    group.transitionTo(PreparingRebalance);
    group.initNextGeneration();

    long startMs = Time.SYSTEM.milliseconds();
    Map<TopicPartition, OffsetAndMetadata> offsets = ImmutableMap.<TopicPartition, OffsetAndMetadata>builder()
            .put(topicPartition1, OffsetAndMetadata.apply(offset, "", startMs, startMs + 1))
            .put(topicPartition2, OffsetAndMetadata.apply(offset, "", startMs, startMs + 3))
            .build();

    Map<TopicPartition, Errors> commitErrors =
            groupMetadataManager.storeOffsets(group, memberId, offsets).get();
    assertTrue(group.hasOffsets());

    assertFalse(commitErrors.isEmpty());
    assertEquals(Errors.NONE, commitErrors.get(topicPartition1));

    groupMetadataManager.cleanupGroupMetadata().get();

    // group should still be there, but the offsets should be gone
    assertEquals(Optional.of(group), groupMetadataManager.getGroup(groupId));
    assertEquals(Optional.empty(), group.offset(topicPartition1));
    assertEquals(Optional.empty(), group.offset(topicPartition2));

    Map<TopicPartition, PartitionData> cachedOffsets = groupMetadataManager.getOffsets(
            groupId,
            Optional.of(Lists.newArrayList(topicPartition1, topicPartition2))
    );
    assertEquals(OffsetFetchResponse.INVALID_OFFSET, cachedOffsets.get(topicPartition1).offset);
    assertEquals(OffsetFetchResponse.INVALID_OFFSET, cachedOffsets.get(topicPartition2).offset);
}
Example #27
Source File: GroupMetadataManagerTest.java From kop with Apache License 2.0
@Test
public void testExpireGroupWithOffsetsOnly() throws Exception {
    @Cleanup
    Consumer<ByteBuffer> consumer = pulsarClient.newConsumer(Schema.BYTEBUFFER)
            .topic(groupMetadataManager.getTopicPartitionName())
            .subscriptionName("test-sub")
            .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
            .subscribe();

    // verify that the group is removed properly, but no tombstone is written if
    // this is a group which is only using kafka for offset storage
    String memberId = "";
    TopicPartition topicPartition1 = new TopicPartition("foo", 0);
    TopicPartition topicPartition2 = new TopicPartition("foo", 1);
    long offset = 37;

    groupMetadataManager.addPartitionOwnership(groupPartitionId);
    GroupMetadata group = new GroupMetadata(groupId, Empty);
    groupMetadataManager.addGroup(group);

    long startMs = Time.SYSTEM.milliseconds();
    Map<TopicPartition, OffsetAndMetadata> offsets = ImmutableMap.<TopicPartition, OffsetAndMetadata>builder()
            .put(topicPartition1, OffsetAndMetadata.apply(offset, "", startMs, startMs + 1))
            .put(topicPartition2, OffsetAndMetadata.apply(offset, "", startMs, startMs + 3))
            .build();

    Map<TopicPartition, Errors> commitErrors =
            groupMetadataManager.storeOffsets(group, memberId, offsets).get();
    assertTrue(group.hasOffsets());

    assertFalse(commitErrors.isEmpty());
    assertEquals(Errors.NONE, commitErrors.get(topicPartition1));

    groupMetadataManager.cleanupGroupMetadata().get();

    Message<ByteBuffer> message = consumer.receive();
    // skip the `storeOffsets` op to bypass the placeholder message
    while (!message.hasKey()
            || GroupMetadataConstants.readMessageKey(ByteBuffer.wrap(message.getKeyBytes())) instanceof OffsetKey) {
        message = consumer.receive();
    }
    assertTrue(message.getEventTime() > 0L);
    assertTrue(message.hasKey());
    byte[] key = message.getKeyBytes();

    BaseKey groupKey = GroupMetadataConstants.readMessageKey(ByteBuffer.wrap(key));
    assertTrue(groupKey instanceof GroupMetadataKey);
    GroupMetadataKey gmk = (GroupMetadataKey) groupKey;
    assertEquals(groupId, gmk.key());

    ByteBuffer value = message.getValue();
    MemoryRecords memRecords = MemoryRecords.readableRecords(value);
    AtomicInteger verified = new AtomicInteger(2);
    memRecords.batches().forEach(batch -> {
        assertEquals(RecordBatch.CURRENT_MAGIC_VALUE, batch.magic());
        assertEquals(TimestampType.CREATE_TIME, batch.timestampType());
        for (Record record : batch) {
            verified.decrementAndGet();
            assertTrue(record.hasKey());
            assertFalse(record.hasValue());
            assertTrue(record.timestamp() > 0);
            BaseKey bk = GroupMetadataConstants.readMessageKey(record.key());
            assertTrue(bk instanceof OffsetKey);
            OffsetKey ok = (OffsetKey) bk;
            assertEquals(groupId, ok.key().group());
            assertEquals("foo", ok.key().topicPartition().topic());
        }
    });
    assertEquals(0, verified.get());
    assertEquals(Optional.empty(), groupMetadataManager.getGroup(groupId));
    Map<TopicPartition, PartitionData> cachedOffsets = groupMetadataManager.getOffsets(
            groupId,
            Optional.of(Lists.newArrayList(topicPartition1, topicPartition2))
    );
    assertEquals(OffsetFetchResponse.INVALID_OFFSET, cachedOffsets.get(topicPartition1).offset);
    assertEquals(OffsetFetchResponse.INVALID_OFFSET, cachedOffsets.get(topicPartition2).offset);
}
Example #28
Source File: GroupMetadataManagerTest.java From kop with Apache License 2.0
@Test
public void testGroupMetadataRemoval() throws Exception {
    @Cleanup
    Consumer<ByteBuffer> consumer = pulsarClient.newConsumer(Schema.BYTEBUFFER)
            .topic(groupMetadataManager.getTopicPartitionName())
            .subscriptionName("test-sub")
            .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
            .subscribe();

    TopicPartition topicPartition1 = new TopicPartition("foo", 0);
    TopicPartition topicPartition2 = new TopicPartition("foo", 1);

    groupMetadataManager.addPartitionOwnership(groupPartitionId);
    GroupMetadata group = new GroupMetadata(groupId, Empty);
    groupMetadataManager.addGroup(group);
    group.generationId(5);

    groupMetadataManager.cleanupGroupMetadata().get();

    Message<ByteBuffer> message = consumer.receive();
    while (message.getValue().array().length == 0) {
        // bypass the placeholder message above
        message = consumer.receive();
    }
    assertTrue(message.getEventTime() > 0L);
    assertTrue(message.hasKey());
    byte[] key = message.getKeyBytes();
    BaseKey groupKey = GroupMetadataConstants.readMessageKey(ByteBuffer.wrap(key));
    assertTrue(groupKey instanceof GroupMetadataKey);
    GroupMetadataKey groupMetadataKey = (GroupMetadataKey) groupKey;
    assertEquals(groupId, groupMetadataKey.key());

    ByteBuffer value = message.getValue();
    MemoryRecords memRecords = MemoryRecords.readableRecords(value);
    AtomicBoolean verified = new AtomicBoolean(false);
    memRecords.batches().forEach(batch -> {
        assertEquals(RecordBatch.CURRENT_MAGIC_VALUE, batch.magic());
        assertEquals(TimestampType.CREATE_TIME, batch.timestampType());
        for (Record record : batch) {
            assertFalse(verified.get());
            assertTrue(record.hasKey());
            assertFalse(record.hasValue());
            assertTrue(record.timestamp() > 0);
            BaseKey bk = GroupMetadataConstants.readMessageKey(record.key());
            assertTrue(bk instanceof GroupMetadataKey);
            GroupMetadataKey gmk = (GroupMetadataKey) bk;
            assertEquals(groupId, gmk.key());
            verified.set(true);
        }
    });
    assertTrue(verified.get());
    assertEquals(Optional.empty(), groupMetadataManager.getGroup(groupId));
    Map<TopicPartition, PartitionData> cachedOffsets = groupMetadataManager.getOffsets(
            groupId,
            Optional.of(Lists.newArrayList(topicPartition1, topicPartition2))
    );
    assertEquals(OffsetFetchResponse.INVALID_OFFSET, cachedOffsets.get(topicPartition1).offset);
    assertEquals(OffsetFetchResponse.INVALID_OFFSET, cachedOffsets.get(topicPartition2).offset);
}
Example #29
Source File: GroupMetadataManagerTest.java From kop with Apache License 2.0
@Test
public void testExpiredOffset() throws Exception {
    @Cleanup
    Consumer<ByteBuffer> consumer = pulsarClient.newConsumer(Schema.BYTEBUFFER)
            .topic(groupMetadataManager.getTopicPartitionName())
            .subscriptionName("test-sub")
            .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
            .subscribe();

    String memberId = "fakeMemberId";
    TopicPartition topicPartition1 = new TopicPartition("foo", 0);
    TopicPartition topicPartition2 = new TopicPartition("foo", 1);
    groupMetadataManager.addPartitionOwnership(groupPartitionId);
    long offset = 37L;

    GroupMetadata group = new GroupMetadata(groupId, Empty);
    groupMetadataManager.addGroup(group);

    // One offset expires soon; the other expires after 5 seconds.
    Map<TopicPartition, OffsetAndMetadata> offsets = ImmutableMap.<TopicPartition, OffsetAndMetadata>builder()
            .put(topicPartition1, OffsetAndMetadata.apply(
                    offset, "", Time.SYSTEM.milliseconds(), Time.SYSTEM.milliseconds() + 1))
            .put(topicPartition2, OffsetAndMetadata.apply(
                    offset, "", Time.SYSTEM.milliseconds(), Time.SYSTEM.milliseconds() + 5000))
            .build();

    Map<TopicPartition, Errors> commitErrors =
            groupMetadataManager.storeOffsets(group, memberId, offsets).get();
    assertTrue(group.hasOffsets());

    assertFalse(commitErrors.isEmpty());
    Errors maybeError = commitErrors.get(topicPartition1);
    assertEquals(Errors.NONE, maybeError);

    groupMetadataManager.cleanupGroupMetadata();

    assertEquals(Optional.of(group), groupMetadataManager.getGroup(groupId));
    assertEquals(Optional.empty(), group.offset(topicPartition1));
    assertEquals(Optional.of(offset), group.offset(topicPartition2).map(OffsetAndMetadata::offset));

    Map<TopicPartition, PartitionData> cachedOffsets = groupMetadataManager.getOffsets(
            groupId,
            Optional.of(Lists.newArrayList(topicPartition1, topicPartition2))
    );
    assertEquals(OffsetFetchResponse.INVALID_OFFSET, cachedOffsets.get(topicPartition1).offset);
    assertEquals(offset, cachedOffsets.get(topicPartition2).offset);
}
Example #30
Source File: GroupMetadataManagerTest.java From kop with Apache License 2.0
@Test
public void testTransactionalCommitOffsetAborted() throws Exception {
    @Cleanup
    Consumer<ByteBuffer> consumer = pulsarClient.newConsumer(Schema.BYTEBUFFER)
            .topic(groupMetadataManager.getTopicPartitionName())
            .subscriptionName("test-sub")
            .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
            .subscribe();

    String memberId = "";
    TopicPartition topicPartition = new TopicPartition("foo", 0);
    long offset = 37L;
    long producerId = 232L;
    short producerEpoch = 0;

    GroupMetadataManager spyGroupManager = spy(groupMetadataManager);
    spyGroupManager.addPartitionOwnership(groupPartitionId);

    GroupMetadata group = new GroupMetadata(groupId, Empty);
    spyGroupManager.addGroup(group);

    Map<TopicPartition, OffsetAndMetadata> offsets = ImmutableMap.<TopicPartition, OffsetAndMetadata>builder()
            .put(topicPartition, OffsetAndMetadata.apply(offset))
            .build();

    CompletableFuture<MessageId> writeOffsetMessageFuture = new CompletableFuture<>();
    AtomicReference<CompletableFuture<MessageId>> realWriteFutureRef = new AtomicReference<>();
    doAnswer(invocationOnMock -> {
        CompletableFuture<MessageId> realWriteFuture =
                (CompletableFuture<MessageId>) invocationOnMock.callRealMethod();
        realWriteFutureRef.set(realWriteFuture);
        return writeOffsetMessageFuture;
    }).when(spyGroupManager).storeOffsetMessage(
            any(String.class), any(byte[].class), any(ByteBuffer.class), anyLong()
    );

    CompletableFuture<Map<TopicPartition, Errors>> storeFuture = spyGroupManager.storeOffsets(
            group, memberId, offsets, producerId, producerEpoch
    );

    assertTrue(group.hasOffsets());
    assertTrue(group.allOffsets().isEmpty());

    // complete the write message
    writeOffsetMessageFuture.complete(realWriteFutureRef.get().get());

    Map<TopicPartition, Errors> commitErrors = storeFuture.get();

    assertFalse(commitErrors.isEmpty());
    Errors maybeError = commitErrors.get(topicPartition);
    assertEquals(Errors.NONE, maybeError);
    assertTrue(group.hasOffsets());
    assertTrue(group.allOffsets().isEmpty());

    group.completePendingTxnOffsetCommit(producerId, false);
    assertFalse(group.hasOffsets());
    assertTrue(group.allOffsets().isEmpty());
}