Java Code Examples for org.apache.pulsar.client.api.Producer#send()
The following examples show how to use org.apache.pulsar.client.api.Producer#send().
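Before the project examples, here is a minimal, self-contained sketch of the basic call: send() blocks until the broker acknowledges the message and returns its MessageId, or throws a PulsarClientException on failure (the asynchronous counterpart is sendAsync()). The class name, service URL, and topic below are illustrative placeholders, not values taken from the examples.

import org.apache.pulsar.client.api.MessageId;
import org.apache.pulsar.client.api.Producer;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.PulsarClientException;
import org.apache.pulsar.client.api.Schema;

public class SimpleSendExample {
    public static void main(String[] args) throws PulsarClientException {
        // Placeholder service URL; point this at your own broker or proxy
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://localhost:6650")
                .build();

        // A String-schema producer on a placeholder topic
        Producer<String> producer = client.newProducer(Schema.STRING)
                .topic("persistent://public/default/example-topic")
                .create();

        // send() is synchronous: it returns the MessageId once the broker has persisted the message
        MessageId msgId = producer.send("hello pulsar");
        System.out.println("Published message " + msgId);

        producer.close();
        client.close();
    }
}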
Example 1
Source File: TopicTerminationTest.java From pulsar with Apache License 2.0
@Test
public void testSimpleTermination() throws Exception {
    Producer<byte[]> producer = pulsarClient.newProducer().topic(topicName)
        .enableBatching(false)
        .messageRoutingMode(MessageRoutingMode.SinglePartition)
        .create();

    /* MessageId msgId1 = */ producer.send("test-msg-1".getBytes());
    /* MessageId msgId2 = */ producer.send("test-msg-2".getBytes());
    MessageId msgId3 = producer.send("test-msg-3".getBytes());

    MessageId lastMessageId = admin.topics().terminateTopicAsync(topicName).get();
    assertEquals(lastMessageId, msgId3);

    try {
        producer.send("test-msg-4".getBytes());
        fail("Should have thrown exception");
    } catch (PulsarClientException.TopicTerminatedException e) {
        // Expected
    }
}
Example 2
Source File: V1_ProducerConsumerTest.java From pulsar with Apache License 2.0
@Test
public void testSendBigMessageSize() throws Exception {
    log.info("-- Starting {} test --", methodName);

    final String topic = "persistent://my-property/use/my-ns/bigMsg";
    Producer<byte[]> producer = pulsarClient.newProducer().topic(topic).create();

    // Messages are allowed up to MaxMessageSize
    producer.newMessage().value(new byte[Commands.DEFAULT_MAX_MESSAGE_SIZE]);

    try {
        producer.send(new byte[Commands.DEFAULT_MAX_MESSAGE_SIZE + 1]);
        fail("Should have thrown exception");
    } catch (PulsarClientException.InvalidMessageException e) {
        // OK
    }
}
Example 3
Source File: producers.java From Groza with Apache License 2.0
public static void PulsarClient() throws PulsarClientException {
    PulsarClient client = PulsarClient.builder()
            .serviceUrl("pulsar://localhost:6650")
            .build();

    Producer<byte[]> producer = client.newProducer()
            .topic("my-topic")
            .create();

    producer.send("My message".getBytes());

    // Close the producer before shutting down the client
    producer.close();
    client.close();
}
Example 4
Source File: KeyStoreTlsProducerConsumerTestWithoutAuth.java From pulsar with Apache License 2.0
/**
 * Verifies that messages whose size is larger than 2^14 bytes (max size of a single TLS chunk) can be
 * produced/consumed.
 *
 * @throws Exception
 */
@Test(timeOut = 30000)
public void testTlsLargeSizeMessage() throws Exception {
    log.info("-- Starting {} test --", methodName);

    final int MESSAGE_SIZE = 16 * 1024 + 1;
    log.info("-- message size {} --", MESSAGE_SIZE);
    String topicName = "persistent://my-property/use/my-ns/testTlsLargeSizeMessage"
            + System.currentTimeMillis();

    internalSetUpForClient(true, pulsar.getBrokerServiceUrlTls());
    internalSetUpForNamespace();

    Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topicName)
            .subscriptionName("my-subscriber-name").subscribe();

    Producer<byte[]> producer = pulsarClient.newProducer().topic(topicName)
            .create();
    for (int i = 0; i < 10; i++) {
        byte[] message = new byte[MESSAGE_SIZE];
        Arrays.fill(message, (byte) i);
        producer.send(message);
    }

    Message<byte[]> msg = null;
    for (int i = 0; i < 10; i++) {
        msg = consumer.receive(5, TimeUnit.SECONDS);
        byte[] expected = new byte[MESSAGE_SIZE];
        Arrays.fill(expected, (byte) i);
        Assert.assertEquals(expected, msg.getData());
    }
    // Acknowledge the consumption of all messages at once
    consumer.acknowledgeCumulative(msg);
    consumer.close();
    log.info("-- Exiting {} test --", methodName);
}
Example 5
Source File: BrokerServiceTest.java From pulsar with Apache License 2.0
@Test
public void testStatsOfStorageSizeWithSubscription() throws Exception {
    final String topicName = "persistent://prop/ns-abc/no-subscription";
    Producer<byte[]> producer = pulsarClient.newProducer().topic(topicName).create();
    PersistentTopic topicRef = (PersistentTopic) pulsar.getBrokerService().getTopicReference(topicName).get();

    assertNotNull(topicRef);
    assertEquals(topicRef.getStats(false).storageSize, 0);

    for (int i = 0; i < 10; i++) {
        producer.send(new byte[10]);
    }

    assertTrue(topicRef.getStats(false).storageSize > 0);
}
Example 6
Source File: ProxyParserTest.java From pulsar with Apache License 2.0
@Test
public void testProducer() throws Exception {
    PulsarClient client = PulsarClient.builder().serviceUrl(proxyService.getServiceUrl())
            .build();
    Producer<byte[]> producer = client.newProducer(Schema.BYTES)
            .topic("persistent://sample/test/local/producer-topic")
            .create();

    for (int i = 0; i < 10; i++) {
        producer.send("test".getBytes());
    }

    client.close();
}
Example 7
Source File: PersistentTopicE2ETest.java From pulsar with Apache License 2.0
@Test(dataProvider = "codec") public void testCompression(CompressionType compressionType) throws Exception { final String topicName = "persistent://prop/ns-abc/topic0" + compressionType; // 1. producer connect Producer<byte[]> producer = pulsarClient.newProducer() .topic(topicName) .enableBatching(false) .messageRoutingMode(MessageRoutingMode.SinglePartition) .compressionType(compressionType) .create(); Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName("my-sub").subscribe(); PersistentTopic topicRef = (PersistentTopic) pulsar.getBrokerService().getTopicReference(topicName).get(); assertNotNull(topicRef); assertEquals(topicRef.getProducers().size(), 1); // 2. producer publish messages for (int i = 0; i < 10; i++) { String message = "my-message-" + i; producer.send(message.getBytes()); } for (int i = 0; i < 10; i++) { Message<byte[]> msg = consumer.receive(5, TimeUnit.SECONDS); assertNotNull(msg); assertEquals(msg.getData(), ("my-message-" + i).getBytes()); } // 3. producer disconnect producer.close(); consumer.close(); }
Example 8
Source File: ProxyKeyStoreTlsTestWithoutAuth.java From pulsar with Apache License 2.0
@Test
public void testPartitions() throws Exception {
    @Cleanup
    PulsarClient client = internalSetUpForClient(true, proxyService.getServiceUrlTls());
    String topicName = "persistent://sample/test/local/partitioned-topic" + System.currentTimeMillis();
    TenantInfo tenantInfo = createDefaultTenantInfo();
    admin.tenants().createTenant("sample", tenantInfo);
    admin.topics().createPartitionedTopic(topicName, 2);

    @Cleanup
    Producer<byte[]> producer = client.newProducer(Schema.BYTES).topic(topicName)
            .messageRoutingMode(MessageRoutingMode.RoundRobinPartition).create();

    // Create a consumer directly attached to broker
    @Cleanup
    Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topicName)
            .subscriptionName("my-sub").subscribe();

    for (int i = 0; i < 10; i++) {
        producer.send("test".getBytes());
    }

    for (int i = 0; i < 10; i++) {
        Message<byte[]> msg = consumer.receive(1, TimeUnit.SECONDS);
        checkNotNull(msg);
    }
}
Example 9
Source File: TlsProducerConsumerTest.java From pulsar with Apache License 2.0
/**
 * Verifies that messages whose size is larger than 2^14 bytes (max size of a single TLS chunk) can be
 * produced/consumed.
 *
 * @throws Exception
 */
@Test(timeOut = 30000)
public void testTlsLargeSizeMessage() throws Exception {
    log.info("-- Starting {} test --", methodName);

    final int MESSAGE_SIZE = 16 * 1024 + 1;
    log.info("-- message size {} --", MESSAGE_SIZE);

    internalSetUpForClient(true, pulsar.getBrokerServiceUrlTls());
    internalSetUpForNamespace();

    Consumer<byte[]> consumer = pulsarClient.newConsumer().topic("persistent://my-property/use/my-ns/my-topic1")
            .subscriptionName("my-subscriber-name").subscribe();

    Producer<byte[]> producer = pulsarClient.newProducer().topic("persistent://my-property/use/my-ns/my-topic1")
            .create();
    for (int i = 0; i < 10; i++) {
        byte[] message = new byte[MESSAGE_SIZE];
        Arrays.fill(message, (byte) i);
        producer.send(message);
    }

    Message<byte[]> msg = null;
    for (int i = 0; i < 10; i++) {
        msg = consumer.receive(5, TimeUnit.SECONDS);
        byte[] expected = new byte[MESSAGE_SIZE];
        Arrays.fill(expected, (byte) i);
        Assert.assertEquals(expected, msg.getData());
    }
    // Acknowledge the consumption of all messages at once
    consumer.acknowledgeCumulative(msg);
    consumer.close();
    log.info("-- Exiting {} test --", methodName);
}
Example 10
Source File: PersistentTopicE2ETest.java From pulsar with Apache License 2.0
@Test
public void testSimpleProducerEvents() throws Exception {
    final String topicName = "persistent://prop/ns-abc/topic0";

    // 1. producer connect
    Producer<byte[]> producer = pulsarClient.newProducer()
        .topic(topicName)
        .enableBatching(false)
        .messageRoutingMode(MessageRoutingMode.SinglePartition)
        .create();
    PersistentTopic topicRef = (PersistentTopic) pulsar.getBrokerService().getTopicReference(topicName).get();
    assertNotNull(topicRef);
    assertEquals(topicRef.getProducers().size(), 1);

    // 2. producer publish messages
    for (int i = 0; i < 10; i++) {
        String message = "my-message-" + i;
        producer.send(message.getBytes());
    }

    rolloverPerIntervalStats();
    assertTrue(topicRef.getProducers().values().iterator().next().getStats().msgRateIn > 0.0);

    // 3. producer disconnect
    producer.close();
    Thread.sleep(ASYNC_EVENT_COMPLETION_WAIT);

    assertEquals(topicRef.getProducers().size(), 0);
}
Example 11
Source File: BatchMessageTest.java From pulsar with Apache License 2.0
@Test(dataProvider = "codecAndContainerBuilder") public void testSendOverSizeMessage(CompressionType compressionType, BatcherBuilder builder) throws Exception { final int numMsgs = 10; final String topicName = "persistent://prop/ns-abc/testSendOverSizeMessage-" + UUID.randomUUID(); Producer<byte[]> producer = pulsarClient.newProducer().topic(topicName) .batchingMaxPublishDelay(1, TimeUnit.MILLISECONDS) .batchingMaxMessages(2) .enableBatching(true) .compressionType(compressionType) .batcherBuilder(builder) .create(); try { producer.send(new byte[1024 * 1024 * 10]); } catch (PulsarClientException e) { assertTrue(e instanceof PulsarClientException.InvalidMessageException); } for (int i = 0; i < numMsgs; i++) { producer.send(new byte[1024]); } producer.close(); }
Example 12
Source File: ProxyStatsTest.java From pulsar with Apache License 2.0
/**
 * Validate proxy topic stats api
 *
 * @throws Exception
 */
@Test
public void testTopicStats() throws Exception {
    proxyService.setProxyLogLevel(2);
    final String topicName = "persistent://sample/test/local/topic-stats";
    final String topicName2 = "persistent://sample/test/local/topic-stats-2";

    PulsarClient client = PulsarClient.builder().serviceUrl(proxyService.getServiceUrl()).build();

    Producer<byte[]> producer1 = client.newProducer(Schema.BYTES).topic(topicName).enableBatching(false)
            .producerName("producer1").messageRoutingMode(MessageRoutingMode.SinglePartition).create();
    Producer<byte[]> producer2 = client.newProducer(Schema.BYTES).topic(topicName2).enableBatching(false)
            .producerName("producer2").messageRoutingMode(MessageRoutingMode.SinglePartition).create();

    // Create a consumer directly attached to broker
    Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName("my-sub").subscribe();
    Consumer<byte[]> consumer2 = pulsarClient.newConsumer().topic(topicName2).subscriptionName("my-sub")
            .subscribe();

    int totalMessages = 10;
    for (int i = 0; i < totalMessages; i++) {
        producer1.send("test".getBytes());
        producer2.send("test".getBytes());
    }

    for (int i = 0; i < totalMessages; i++) {
        Message<byte[]> msg = consumer.receive(1, TimeUnit.SECONDS);
        checkNotNull(msg);
        consumer.acknowledge(msg);
        msg = consumer2.receive(1, TimeUnit.SECONDS);
    }

    Client httpClient = ClientBuilder.newClient(new ClientConfig().register(LoggingFeature.class));
    Response r = httpClient.target(proxyWebServer.getServiceUri()).path("/proxy-stats/topics").request()
            .get();
    Assert.assertEquals(r.getStatus(), Response.Status.OK.getStatusCode());
    String response = r.readEntity(String.class).trim();
    Map<String, TopicStats> topicStats = new Gson().fromJson(response, new TypeToken<Map<String, TopicStats>>() {
    }.getType());

    assertNotNull(topicStats.get(topicName));

    consumer.close();
    consumer2.close();
    client.close();
}
Example 13
Source File: PersistentTopicE2ETest.java From pulsar with Apache License 2.0
/**
 * Verify:
 * 1. Broker should not replay already acknowledged messages
 * 2. Dispatcher should not get stuck while dispatching new messages due to a previous replay of
 *    invalid/already-acked messages
 *
 * @throws Exception
 */
@Test
public void testMessageReplay() throws Exception {
    final String topicName = "persistent://prop/ns-abc/topic2";
    final String subName = "sub2";

    Message<byte[]> msg;
    int totalMessages = 10;
    int replayIndex = totalMessages / 2;

    Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName(subName)
            .subscriptionType(SubscriptionType.Shared).receiverQueueSize(1).subscribe();
    Producer<byte[]> producer = pulsarClient.newProducer()
        .topic(topicName)
        .enableBatching(false)
        .messageRoutingMode(MessageRoutingMode.SinglePartition)
        .create();

    PersistentTopic topicRef = (PersistentTopic) pulsar.getBrokerService().getTopicReference(topicName).get();
    assertNotNull(topicRef);
    PersistentSubscription subRef = topicRef.getSubscription(subName);
    PersistentDispatcherMultipleConsumers dispatcher = (PersistentDispatcherMultipleConsumers) subRef
            .getDispatcher();
    Field replayMap = PersistentDispatcherMultipleConsumers.class.getDeclaredField("messagesToRedeliver");
    replayMap.setAccessible(true);
    ConcurrentLongPairSet messagesToReplay = new ConcurrentLongPairSet(64, 1);

    assertNotNull(subRef);

    // (1) Produce messages
    for (int i = 0; i < totalMessages; i++) {
        String message = "my-message-" + i;
        producer.send(message.getBytes());
    }

    MessageIdImpl firstAckedMsg = null;
    // (2) Consume and ack messages except first message
    for (int i = 0; i < totalMessages; i++) {
        msg = consumer.receive();
        consumer.acknowledge(msg);
        MessageIdImpl msgId = (MessageIdImpl) msg.getMessageId();
        if (i == 0) {
            firstAckedMsg = msgId;
        }
        if (i < replayIndex) {
            // (3) accumulate acked messages for replay
            messagesToReplay.add(msgId.getLedgerId(), msgId.getEntryId());
        }
    }

    // (4) redelivery: should redeliver only unacked messages
    Thread.sleep(1000);

    replayMap.set(dispatcher, messagesToReplay);
    // (a) redelivery with only acked messages should clear the replay bucket
    dispatcher.redeliverUnacknowledgedMessages(dispatcher.getConsumers().get(0));
    assertEquals(messagesToReplay.size(), 0);

    // (b) fill the replay bucket with an already-acked entry again, then try to publish a new msg and read it
    messagesToReplay.add(firstAckedMsg.getLedgerId(), firstAckedMsg.getEntryId());
    replayMap.set(dispatcher, messagesToReplay);
    // send new message
    final String testMsg = "testMsg";
    producer.send(testMsg.getBytes());
    // consumer should be able to receive only the new message and not the already-acked one
    dispatcher.consumerFlow(dispatcher.getConsumers().get(0), 1);
    msg = consumer.receive(1, TimeUnit.SECONDS);
    assertNotNull(msg);
    assertEquals(msg.getData(), testMsg.getBytes());

    consumer.close();
    producer.close();
}
Example 14
Source File: ProxyAuthenticatedProducerConsumerTest.java From pulsar with Apache License 2.0
/**
 * <pre>
 * It verifies e2e tls + Authentication + Authorization (client -> proxy -> broker)
 *
 * 1. client connects to proxy over tls and passes auth-data
 * 2. proxy authenticates client, retrieves client-role
 *    and sends it to broker as originalPrincipal over tls
 * 3. client creates producer/consumer via proxy
 * 4. broker authorizes producer/consumer create request using originalPrincipal
 *
 * </pre>
 *
 * @throws Exception
 */
@SuppressWarnings("deprecation")
@Test
public void testTlsSyncProducerAndConsumer() throws Exception {
    log.info("-- Starting {} test --", methodName);
    final String proxyServiceUrl = proxyService.getServiceUrlTls();

    Map<String, String> authParams = Maps.newHashMap();
    authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH);
    authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH);
    Authentication authTls = new AuthenticationTls();
    authTls.configure(authParams);

    // create a client which connects to proxy over tls and passes authData
    PulsarClient proxyClient = createPulsarClient(authTls, proxyServiceUrl);

    admin.clusters().createCluster(configClusterName, new ClusterData(brokerUrl.toString(),
            brokerUrlTls.toString(), pulsar.getBrokerServiceUrl(), pulsar.getBrokerServiceUrlTls()));

    admin.tenants().createTenant("my-property",
            new TenantInfo(Sets.newHashSet("appid1", "appid2"), Sets.newHashSet("test")));
    admin.namespaces().createNamespace("my-property/my-ns", Sets.newHashSet("test"));

    Consumer<byte[]> consumer = proxyClient.newConsumer().topic("persistent://my-property/my-ns/my-topic1")
            .subscriptionName("my-subscriber-name").subscribe();

    Producer<byte[]> producer = proxyClient.newProducer(Schema.BYTES).topic("persistent://my-property/my-ns/my-topic1")
            .create();
    final int msgs = 10;
    for (int i = 0; i < msgs; i++) {
        String message = "my-message-" + i;
        producer.send(message.getBytes());
    }

    Message<byte[]> msg = null;
    Set<String> messageSet = Sets.newHashSet();
    int count = 0;
    for (int i = 0; i < 10; i++) {
        msg = consumer.receive(5, TimeUnit.SECONDS);
        String receivedMessage = new String(msg.getData());
        log.debug("Received message: [{}]", receivedMessage);
        String expectedMessage = "my-message-" + i;
        testMessageOrderAndDuplicates(messageSet, receivedMessage, expectedMessage);
        count++;
    }

    // Acknowledge the consumption of all messages at once
    Assert.assertEquals(msgs, count);
    consumer.acknowledgeCumulative(msg);
    consumer.close();
    log.info("-- Exiting {} test --", methodName);
}
Example 15
Source File: AdminApiTest2.java From pulsar with Apache License 2.0
@Test(timeOut = 30000)
public void testPreciseBacklog() throws PulsarClientException, PulsarAdminException, InterruptedException {
    final String topic = "persistent://prop-xyz/ns1/precise-back-log";
    final String subName = "sub-name";

    @Cleanup
    PulsarClient client = PulsarClient.builder().serviceUrl(pulsar.getWebServiceAddress()).build();

    @Cleanup
    Consumer<byte[]> consumer = client.newConsumer()
        .topic(topic)
        .subscriptionName(subName)
        .subscribe();

    @Cleanup
    Producer<byte[]> producer = client.newProducer()
        .topic(topic)
        .enableBatching(false)
        .create();

    producer.send("message-1".getBytes(StandardCharsets.UTF_8));
    Message<byte[]> message = consumer.receive();
    assertNotNull(message);

    // Mock the entries-added count. Precise backlog is disabled by default, so the backlog is the
    // entries-added count minus the consumed count. Since the message has not been acked, the backlog is 10.
    PersistentSubscription subscription = (PersistentSubscription) pulsar.getBrokerService().getTopicReference(topic).get().getSubscription(subName);
    assertNotNull(subscription);
    ((ManagedLedgerImpl) subscription.getCursor().getManagedLedger()).setEntriesAddedCounter(10L);

    TopicStats topicStats = admin.topics().getStats(topic);
    assertEquals(topicStats.subscriptions.get(subName).msgBacklog, 10);

    topicStats = admin.topics().getStats(topic, true);
    assertEquals(topicStats.subscriptions.get(subName).msgBacklog, 1);

    consumer.acknowledge(message);

    // wait for the ack to be sent
    Thread.sleep(500);

    // Consumer acked the message, so the precise backlog is 0
    topicStats = admin.topics().getStats(topic, true);
    assertEquals(topicStats.subscriptions.get(subName).msgBacklog, 0);

    topicStats = admin.topics().getStats(topic);
    assertEquals(topicStats.subscriptions.get(subName).msgBacklog, 9);
}
Example 16
Source File: SchemaCompatibilityCheckTest.java From pulsar with Apache License 2.0
@Test(dataProvider = "AllCheckSchemaCompatibilityStrategy") public void testIsAutoUpdateSchema(SchemaCompatibilityStrategy schemaCompatibilityStrategy) throws Exception { final String tenant = PUBLIC_TENANT; final String topic = "test-consumer-compatibility"; String namespace = "test-namespace-" + randomName(16); String fqtn = TopicName.get( TopicDomain.persistent.value(), tenant, namespace, topic ).toString(); NamespaceName namespaceName = NamespaceName.get(tenant, namespace); admin.namespaces().createNamespace( tenant + "/" + namespace, Sets.newHashSet(CLUSTER_NAME) ); assertEquals(admin.namespaces().getSchemaCompatibilityStrategy(namespaceName.toString()), SchemaCompatibilityStrategy.FULL); admin.namespaces().setSchemaCompatibilityStrategy(namespaceName.toString(), schemaCompatibilityStrategy); admin.schemas().createSchema(fqtn, Schema.AVRO(Schemas.PersonOne.class).getSchemaInfo()); admin.namespaces().setIsAllowAutoUpdateSchema(namespaceName.toString(), false); ProducerBuilder<Schemas.PersonTwo> producerThreeBuilder = pulsarClient .newProducer(Schema.AVRO(SchemaDefinition.<Schemas.PersonTwo>builder().withAlwaysAllowNull (false).withSupportSchemaVersioning(true). withPojo(Schemas.PersonTwo.class).build())) .topic(fqtn); try { producerThreeBuilder.create(); } catch (Exception e) { Assert.assertTrue(e.getMessage().contains("Schema not found and schema auto updating is disabled.")); } admin.namespaces().setIsAllowAutoUpdateSchema(namespaceName.toString(), true); ConsumerBuilder<Schemas.PersonTwo> comsumerBuilder = pulsarClient.newConsumer(Schema.AVRO( SchemaDefinition.<Schemas.PersonTwo>builder().withAlwaysAllowNull (false).withSupportSchemaVersioning(true). withPojo(Schemas.PersonTwo.class).build())) .subscriptionName("test") .topic(fqtn); Producer<Schemas.PersonTwo> producer = producerThreeBuilder.create(); Consumer<Schemas.PersonTwo> consumerTwo = comsumerBuilder.subscribe(); producer.send(new Schemas.PersonTwo(2, "Lucy")); Message<Schemas.PersonTwo> message = consumerTwo.receive(); Schemas.PersonTwo personTwo = message.getValue(); consumerTwo.acknowledge(message); assertEquals(personTwo.getId(), 2); assertEquals(personTwo.getName(), "Lucy"); producer.close(); consumerTwo.close(); admin.namespaces().setIsAllowAutoUpdateSchema(namespaceName.toString(), false); producer = producerThreeBuilder.create(); consumerTwo = comsumerBuilder.subscribe(); producer.send(new Schemas.PersonTwo(2, "Lucy")); message = consumerTwo.receive(); personTwo = message.getValue(); consumerTwo.acknowledge(message); assertEquals(personTwo.getId(), 2); assertEquals(personTwo.getName(), "Lucy"); consumerTwo.close(); producer.close(); }
Example 17
Source File: ProxyWithoutServiceDiscoveryTest.java From pulsar with Apache License 2.0
/**
 * <pre>
 * It verifies e2e tls + Authentication + Authorization (client -> proxy -> broker)
 *
 * 1. client connects to proxy over tls and passes auth-data
 * 2. proxy authenticates client, retrieves client-role
 *    and sends it to broker as originalPrincipal over tls
 * 3. client creates producer/consumer via proxy
 * 4. broker authorizes producer/consumer create request using originalPrincipal
 *
 * </pre>
 *
 * @throws Exception
 */
@Test
public void testDiscoveryService() throws Exception {
    log.info("-- Starting {} test --", methodName);

    Map<String, String> authParams = Maps.newHashMap();
    authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH);
    authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH);
    Authentication authTls = new AuthenticationTls();
    authTls.configure(authParams);

    // create a client which connects to proxy over tls and passes authData
    PulsarClient proxyClient = createPulsarClient(authTls, proxyService.getServiceUrlTls());

    admin.clusters().createCluster("without-service-discovery", new ClusterData(brokerUrl.toString()));

    admin.tenants().createTenant("my-property",
            new TenantInfo(Sets.newHashSet("appid1", "appid2"), Sets.newHashSet("without-service-discovery")));
    admin.namespaces().createNamespace("my-property/without-service-discovery/my-ns");

    Consumer<byte[]> consumer = proxyClient.newConsumer()
            .topic("persistent://my-property/without-service-discovery/my-ns/my-topic1")
            .subscriptionName("my-subscriber-name").subscribe();

    Producer<byte[]> producer = proxyClient.newProducer(Schema.BYTES)
            .topic("persistent://my-property/without-service-discovery/my-ns/my-topic1").create();
    final int msgs = 10;
    for (int i = 0; i < msgs; i++) {
        String message = "my-message-" + i;
        producer.send(message.getBytes());
    }

    Message<byte[]> msg = null;
    Set<String> messageSet = Sets.newHashSet();
    int count = 0;
    for (int i = 0; i < 10; i++) {
        msg = consumer.receive(5, TimeUnit.SECONDS);
        String receivedMessage = new String(msg.getData());
        log.debug("Received message: [{}]", receivedMessage);
        String expectedMessage = "my-message-" + i;
        testMessageOrderAndDuplicates(messageSet, receivedMessage, expectedMessage);
        count++;
    }

    // Acknowledge the consumption of all messages at once
    Assert.assertEquals(msgs, count);
    consumer.acknowledgeCumulative(msg);
    consumer.close();
    log.info("-- Exiting {} test --", methodName);
}
Example 18
Source File: ConsumedLedgersTrimTest.java From pulsar with Apache License 2.0
@Test
public void TestConsumedLedgersTrim() throws Exception {
    conf.setRetentionCheckIntervalInSeconds(1);
    super.baseSetup();
    final String topicName = "persistent://prop/ns-abc/TestConsumedLedgersTrim";
    final String subscriptionName = "my-subscriber-name";

    @Cleanup
    Producer<byte[]> producer = pulsarClient.newProducer()
            .topic(topicName)
            .producerName("producer-name")
            .create();
    @Cleanup
    Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName(subscriptionName)
            .subscribe();
    Topic topicRef = pulsar.getBrokerService().getTopicReference(topicName).get();
    Assert.assertNotNull(topicRef);

    PersistentTopic persistentTopic = (PersistentTopic) pulsar.getBrokerService().getOrCreateTopic(topicName).get();
    ManagedLedgerConfig managedLedgerConfig = persistentTopic.getManagedLedger().getConfig();
    managedLedgerConfig.setRetentionSizeInMB(1L);
    managedLedgerConfig.setRetentionTime(1, TimeUnit.SECONDS);
    managedLedgerConfig.setMaxEntriesPerLedger(2);
    managedLedgerConfig.setMinimumRolloverTime(1, TimeUnit.MILLISECONDS);

    int msgNum = 10;
    for (int i = 0; i < msgNum; i++) {
        producer.send(new byte[1024 * 1024]);
    }

    ManagedLedgerImpl managedLedger = (ManagedLedgerImpl) persistentTopic.getManagedLedger();
    Assert.assertEquals(managedLedger.getLedgersInfoAsList().size(), msgNum / 2);

    // no traffic, unconsumed ledger will be retained
    Thread.sleep(1200);
    Assert.assertEquals(managedLedger.getLedgersInfoAsList().size(), msgNum / 2);

    for (int i = 0; i < msgNum; i++) {
        Message<byte[]> msg = consumer.receive(2, TimeUnit.SECONDS);
        Assert.assertTrue(msg != null);
        consumer.acknowledge(msg);
    }
    Assert.assertEquals(managedLedger.getLedgersInfoAsList().size(), msgNum / 2);

    // no traffic, but consumed ledger will be cleaned
    Thread.sleep(1500);
    Assert.assertEquals(managedLedger.getLedgersInfoAsList().size(), 1);
}
Example 19
Source File: AdminApiTest2.java From pulsar with Apache License 2.0
@Test(timeOut = 30000)
public void testBacklogNoDelayedForPartitionedTopic() throws PulsarClientException, PulsarAdminException, InterruptedException {
    final String topic = "persistent://prop-xyz/ns1/precise-back-log-no-delayed-partitioned-topic";
    admin.topics().createPartitionedTopic(topic, 2);
    final String subName = "sub-name";

    @Cleanup
    PulsarClient client = PulsarClient.builder().serviceUrl(pulsar.getWebServiceAddress()).build();

    @Cleanup
    Consumer<byte[]> consumer = client.newConsumer()
        .topic(topic)
        .subscriptionName(subName)
        .subscriptionType(SubscriptionType.Shared)
        .subscribe();

    @Cleanup
    Producer<byte[]> producer = client.newProducer()
        .topic(topic)
        .enableBatching(false)
        .create();

    for (int i = 0; i < 10; i++) {
        if (i > 4) {
            producer.newMessage()
                .value("message-1".getBytes(StandardCharsets.UTF_8))
                .deliverAfter(10, TimeUnit.SECONDS)
                .send();
        } else {
            producer.send("message-1".getBytes(StandardCharsets.UTF_8));
        }
    }

    TopicStats topicStats = admin.topics().getPartitionedStats(topic, false, true);
    assertEquals(topicStats.subscriptions.get(subName).msgBacklog, 10);
    assertEquals(topicStats.subscriptions.get(subName).msgBacklogNoDelayed, 5);

    for (int i = 0; i < 5; i++) {
        consumer.acknowledge(consumer.receive());
    }
    // Wait for the acks to be sent.
    Thread.sleep(500);

    topicStats = admin.topics().getPartitionedStats(topic, false, true);
    assertEquals(topicStats.subscriptions.get(subName).msgBacklog, 5);
    assertEquals(topicStats.subscriptions.get(subName).msgBacklogNoDelayed, 0);
}
Example 20
Source File: TopicsConsumerImplTest.java From pulsar with Apache License 2.0
/**
 * Test Listener for github issue #2547
 */
@Test(timeOut = 30000)
public void testMultiTopicsMessageListener() throws Exception {
    String key = "MultiTopicsMessageListenerTest";
    final String subscriptionName = "my-ex-subscription-" + key;
    final String messagePredicate = "my-message-" + key + "-";
    final int totalMessages = 6;
    // set the latch larger than totalMessages, since timed-out messages get resent
    CountDownLatch latch = new CountDownLatch(totalMessages * 3);

    final String topicName1 = "persistent://prop/use/ns-abc/topic-1-" + key;
    List<String> topicNames = Lists.newArrayList(topicName1);

    TenantInfo tenantInfo = createDefaultTenantInfo();
    admin.tenants().createTenant("prop", tenantInfo);
    admin.topics().createPartitionedTopic(topicName1, 2);

    // 1. producer connect
    Producer<byte[]> producer1 = pulsarClient.newProducer().topic(topicName1)
        .enableBatching(false)
        .messageRoutingMode(MessageRoutingMode.SinglePartition)
        .create();

    // 2. Create consumer and do not ack in the message listener, so timed-out messages will be resent
    Consumer<byte[]> consumer = pulsarClient.newConsumer()
        .topics(topicNames)
        .subscriptionName(subscriptionName)
        .subscriptionType(SubscriptionType.Shared)
        .ackTimeout(1000, TimeUnit.MILLISECONDS)
        .receiverQueueSize(100)
        .messageListener((c1, msg) -> {
            assertNotNull(msg, "Message cannot be null");
            String receivedMessage = new String(msg.getData());
            latch.countDown();
            log.info("Received message [{}] in the listener, latch: {}",
                receivedMessage, latch.getCount());
            // since it is not acked, it should be redelivered
            //c1.acknowledgeAsync(msg);
        })
        .subscribe();
    assertTrue(consumer instanceof MultiTopicsConsumerImpl);
    MultiTopicsConsumerImpl topicsConsumer = (MultiTopicsConsumerImpl) consumer;

    // 3. producer publish messages
    for (int i = 0; i < totalMessages; i++) {
        producer1.send((messagePredicate + "producer1-" + i).getBytes());
    }

    // verify that await() does not time out, because the messages are redelivered several times
    latch.await();

    consumer.close();
}