Java Code Examples for io.vertx.junit5.VertxTestContext#failNow()
The following examples show how to use io.vertx.junit5.VertxTestContext#failNow().
Each example notes the original project and source file it was taken from.
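As a reference point for the examples below, here is a minimal, self-contained sketch of the pattern most of them follow: an asynchronous operation either completes the VertxTestContext or fails it immediately via failNow(). The sketch assumes the vertx-junit5 extension together with Vert.x 3.x-style callback APIs; the class and method names are illustrative only.

import io.vertx.core.Vertx;
import io.vertx.junit5.VertxExtension;
import io.vertx.junit5.VertxTestContext;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;

@ExtendWith(VertxExtension.class)
class FailNowSketchTest {

    @Test
    void serverStarts(Vertx vertx, VertxTestContext testContext) {
        // start an HTTP server on a random port and report the outcome to the test context
        vertx.createHttpServer()
            .requestHandler(req -> req.response().end("ok"))
            .listen(0, ar -> {
                if (ar.succeeded()) {
                    testContext.completeNow();        // marks the test as passed
                } else {
                    testContext.failNow(ar.cause());  // fails the test immediately with the async cause
                }
            });
    }
}

The helpers in the examples that follow (setup contexts, awaitCompletion timeouts, checkpoints) build on the same primitive: any failure on an asynchronous path is reported through failNow(), which ends the test right away.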
Example 1
Source File: TestRequest.java From vertx-web with Apache License 2.0 | 6 votes |
private Handler<AsyncResult<HttpResponse<Buffer>>> generateHandleResponse(VertxTestContext testContext, VertxTestContext.ExecutionBlock onEnd, Promise<HttpResponse<Buffer>> fut, StackTraceElement[] stackTrace) {
    return ar -> {
        if (ar.failed()) {
            testContext.failNow(ar.cause());
        } else {
            testContext.verify(() -> {
                try {
                    this.responseAsserts.forEach(c -> c.accept(ar.result()));
                } catch (AssertionError e) {
                    AssertionError newE = new AssertionError("Assertion error in response: " + e.getMessage(), e);
                    newE.setStackTrace(stackTrace);
                    throw newE;
                }
                onEnd.apply();
            });
            fut.complete(ar.result());
        }
    };
}
Example 2
Source File: TopicOperatorMockTest.java From strimzi-kafka-operator with Apache License 2.0 | 6 votes |
Topic getFromKafka(VertxTestContext context, String topicName) throws InterruptedException {
    AtomicReference<Topic> ref = new AtomicReference<>();
    Checkpoint async = context.checkpoint();
    Future<TopicMetadata> kafkaMetadata = session.kafka.topicMetadata(new TopicName(topicName));
    kafkaMetadata.map(metadata -> TopicSerialization.fromTopicMetadata(metadata)).onComplete(fromKafka -> {
        if (fromKafka.succeeded()) {
            ref.set(fromKafka.result());
        } else {
            context.failNow(fromKafka.cause());
        }
        async.flag();
    });
    if (!context.awaitCompletion(60, TimeUnit.SECONDS)) {
        context.failNow(new Throwable("Test timeout"));
    }
    return ref.get();
}
Example 3
Source File: PlatformFeaturesAvailabilityTest.java From strimzi-kafka-operator with Apache License 2.0 | 6 votes |
public HttpServer startMockApi(VertxTestContext context, String version, List<String> apis) throws InterruptedException {
    Checkpoint start = context.checkpoint();
    HttpServer server = vertx.createHttpServer().requestHandler(request -> {
        if (HttpMethod.GET.equals(request.method()) && apis.contains(request.uri())) {
            request.response().setStatusCode(200).end();
        } else if (HttpMethod.GET.equals(request.method()) && "/version".equals(request.uri())) {
            request.response().setStatusCode(200).end(version);
        } else {
            request.response().setStatusCode(404).end();
        }
    }).listen(0, res -> {
        if (res.succeeded()) {
            start.flag();
        } else {
            throw new RuntimeException(res.cause());
        }
    });
    if (!context.awaitCompletion(60, TimeUnit.SECONDS)) {
        context.failNow(new Throwable("Test timeout"));
    }
    return server;
}
Example 4
Source File: MqttPublishTestBase.java From hono with Eclipse Public License 2.0 | 5 votes |
/**
 * Verifies that a number of messages published by a device authenticating with a client certificate can be
 * successfully consumed via the AMQP Messaging Network.
 *
 * @param ctx The test context.
 * @throws InterruptedException if the test fails.
 */
@Test
public void testUploadMessagesUsingClientCertificate(final VertxTestContext ctx) throws InterruptedException {

    final SelfSignedCertificate deviceCert = SelfSignedCertificate.create(UUID.randomUUID().toString());
    final String tenantId = helper.getRandomTenantId();
    final String deviceId = helper.getRandomDeviceId(tenantId);

    final VertxTestContext setup = new VertxTestContext();

    helper.getCertificate(deviceCert.certificatePath())
        .compose(cert -> {
            final var tenant = Tenants.createTenantForTrustAnchor(cert);
            return helper.registry.addDeviceForTenant(tenantId, tenant, deviceId, cert);
        }).onComplete(setup.completing());

    assertThat(setup.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }

    doTestUploadMessages(
        ctx,
        tenantId,
        deviceId,
        connectToAdapter(deviceCert),
        false);
}
Example 5
Source File: KafkaRebalanceStateMachineTest.java From strimzi-kafka-operator with Apache License 2.0 | 5 votes |
private static void defaultStatusHandler(AsyncResult<KafkaRebalanceStatus> result, VertxTestContext context) {
    if (result.succeeded()) {
        context.completeNow();
    } else {
        context.failNow(result.cause());
    }
}
Example 6
Source File: KafkaRollerTest.java From strimzi-kafka-operator with Apache License 2.0 | 5 votes |
private void assertNoUnclosedAdminClient(VertxTestContext testContext, TestingKafkaRoller kafkaRoller) {
    if (!kafkaRoller.unclosedAdminClients.isEmpty()) {
        Throwable alloc = kafkaRoller.unclosedAdminClients.values().iterator().next();
        alloc.printStackTrace(System.out);
        testContext.failNow(new Throwable(kafkaRoller.unclosedAdminClients.size() + " unclosed AdminClient instances"));
    }
}
Example 7
Source File: TelemetryHttpIT.java From hono with Eclipse Public License 2.0 | 5 votes |
/**
 * Verifies that a number of telemetry messages uploaded to Hono's HTTP adapter
 * using QoS 1 can be successfully consumed via the AMQP Messaging Network.
 *
 * @param ctx The test context.
 * @throws InterruptedException if the test fails.
 */
@Test
public void testUploadUsingQoS1(final VertxTestContext ctx) throws InterruptedException {

    final VertxTestContext setup = new VertxTestContext();
    final Tenant tenant = new Tenant();
    final MultiMap requestHeaders = MultiMap.caseInsensitiveMultiMap()
        .add(HttpHeaders.CONTENT_TYPE, "binary/octet-stream")
        .add(HttpHeaders.AUTHORIZATION, authorization)
        .add(HttpHeaders.ORIGIN, ORIGIN_URI)
        .add(Constants.HEADER_QOS_LEVEL, "1");

    helper.registry.addDeviceForTenant(tenantId, tenant, deviceId, PWD)
        .onComplete(setup.completing());

    assertThat(setup.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }

    testUploadMessages(ctx, tenantId, count -> {
        return httpClient.create(
            getEndpointUri(),
            Buffer.buffer("hello " + count),
            requestHeaders,
            response -> response.statusCode() == HttpURLConnection.HTTP_ACCEPTED);
    });
}
Example 8
Source File: KafkaRebalanceStateMachineTest.java From strimzi-kafka-operator with Apache License 2.0 | 5 votes |
private static void checkOptimizationResults(AsyncResult<KafkaRebalanceStatus> result, VertxTestContext context, boolean shouldBeEmpty) {
    if (result.succeeded()) {
        assertEquals(shouldBeEmpty, result.result().getOptimizationResult().isEmpty());
        context.completeNow();
    } else {
        context.failNow(result.cause());
    }
}
Example 9
Source File: HttpTestBase.java From hono with Eclipse Public License 2.0 | 5 votes |
/**
 * Verifies that the adapter opens a connection if auto-provisioning is enabled for the device certificate.
 *
 * @param ctx The test context.
 * @throws InterruptedException if the test fails.
 */
@Test
public void testConnectSucceedsWithAutoProvisioning(final VertxTestContext ctx) throws InterruptedException {

    final VertxTestContext setup = new VertxTestContext();

    final MultiMap requestHeaders = MultiMap.caseInsensitiveMultiMap()
        .add(HttpHeaders.CONTENT_TYPE, "text/plain")
        .add(HttpHeaders.ORIGIN, ORIGIN_URI);

    helper.getCertificate(deviceCert.certificatePath())
        .compose(cert -> {
            final var tenant = Tenants.createTenantForTrustAnchor(cert);
            tenant.getTrustedCertificateAuthorities().get(0).setAutoProvisioningEnabled(true);
            return helper.registry.addTenant(tenantId, tenant);
        })
        .onComplete(setup.completing());

    assertThat(setup.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }

    testUploadMessages(ctx, tenantId, count -> httpClientWithClientCert.create(
        getEndpointUri(),
        Buffer.buffer("hello " + count),
        requestHeaders,
        response -> response.statusCode() == HttpURLConnection.HTTP_ACCEPTED
            && hasAccessControlExposedHeaders(response.headers())));
}
Example 10
Source File: HttpTestBase.java From hono with Eclipse Public License 2.0 | 5 votes |
/**
 * Verifies that a number of messages uploaded to Hono's HTTP adapter using client certificate based authentication
 * can be successfully consumed via the AMQP Messaging Network.
 *
 * @param ctx The test context.
 * @throws InterruptedException if the test fails.
 */
@Test
public void testUploadMessagesUsingClientCertificate(final VertxTestContext ctx) throws InterruptedException {

    final VertxTestContext setup = new VertxTestContext();

    final MultiMap requestHeaders = MultiMap.caseInsensitiveMultiMap()
        .add(HttpHeaders.CONTENT_TYPE, "text/plain")
        .add(HttpHeaders.ORIGIN, ORIGIN_URI);

    helper.getCertificate(deviceCert.certificatePath())
        .compose(cert -> {
            final var tenant = Tenants.createTenantForTrustAnchor(cert);
            return helper.registry.addDeviceForTenant(tenantId, tenant, deviceId, cert);
        })
        .onComplete(setup.completing());

    assertThat(setup.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }

    testUploadMessages(ctx, tenantId, count -> {
        return httpClientWithClientCert.create(
            getEndpointUri(),
            Buffer.buffer("hello " + count),
            requestHeaders,
            response -> response.statusCode() == HttpURLConnection.HTTP_ACCEPTED
                && hasAccessControlExposedHeaders(response.headers()));
    });
}
Example 11
Source File: HttpTestBase.java From hono with Eclipse Public License 2.0 | 4 votes |
/**
 * Verifies that the HTTP adapter returns empty responses when sending consecutive requests
 * for uploading telemetry data or events with a TTD but no command is pending for the device.
 *
 * @param ctx The test context.
 * @throws InterruptedException if the test fails.
 */
@Test
public void testUploadMessagesWithTtdThatDoNotReplyWithCommand(final VertxTestContext ctx) throws InterruptedException {

    final VertxTestContext setup = new VertxTestContext();
    final Tenant tenant = new Tenant();

    final MultiMap requestHeaders = MultiMap.caseInsensitiveMultiMap()
        .add(HttpHeaders.CONTENT_TYPE, "text/plain")
        .add(HttpHeaders.AUTHORIZATION, authorization)
        .add(HttpHeaders.ORIGIN, ORIGIN_URI)
        .add(Constants.HEADER_TIME_TILL_DISCONNECT, "2");

    helper.registry.addDeviceForTenant(tenantId, tenant, deviceId, PWD).onComplete(setup.completing());

    assertThat(setup.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }

    testUploadMessages(ctx, tenantId,
        msg -> {
            // do NOT send a command, but let the HTTP adapter's timer expire
            logger.trace("received message");
            return TimeUntilDisconnectNotification.fromMessage(msg)
                .map(notification -> {
                    ctx.verify(() -> {
                        assertThat(notification.getTtd()).isEqualTo(2);
                        assertThat(notification.getTenantId()).isEqualTo(tenantId);
                        assertThat(notification.getDeviceId()).isEqualTo(deviceId);
                    });
                    return Future.succeededFuture();
                })
                .orElseGet(() -> Future.succeededFuture());
        },
        count -> {
            return httpClient.create(
                getEndpointUri(),
                Buffer.buffer("hello " + count),
                requestHeaders,
                response -> response.statusCode() == HttpURLConnection.HTTP_ACCEPTED)
                .map(responseHeaders -> {
                    ctx.verify(() -> {
                        // assert that the response does not contain a command nor a request ID nor a payload
                        assertThat(responseHeaders.get(Constants.HEADER_COMMAND)).isNull();
                        assertThat(responseHeaders.get(Constants.HEADER_COMMAND_REQUEST_ID)).isNull();
                        assertThat(responseHeaders.get(HttpHeaders.CONTENT_LENGTH)).isEqualTo("0");
                    });
                    return responseHeaders;
                });
        },
        5);
}
Example 12
Source File: EventMqttIT.java From hono with Eclipse Public License 2.0 | 4 votes |
/**
 * Verifies that an event from a device for which a default TTL has been
 * specified cannot be consumed after the TTL has expired.
 *
 * @param ctx The vert.x test context.
 * @throws InterruptedException if test execution gets interrupted.
 */
@Test
public void testMessagesExpire(final VertxTestContext ctx) throws InterruptedException {

    // GIVEN a tenant for which all messages have a TTL of 500ms
    final String tenantId = helper.getRandomTenantId();
    final String deviceId = helper.getRandomDeviceId(tenantId);
    final Tenant tenant = new Tenant();
    tenant.setDefaults(Map.of(MessageHelper.SYS_HEADER_PROPERTY_TTL, 3)); // seconds
    final VertxTestContext setup = new VertxTestContext();
    helper.registry.addDeviceForTenant(tenantId, tenant, deviceId, "secret").onComplete(setup.completing());

    assertThat(setup.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
    }

    // WHEN a device that belongs to the tenant publishes an event
    final AtomicInteger receivedMessageCount = new AtomicInteger(0);
    connectToAdapter(IntegrationTestSupport.getUsername(deviceId, tenantId), "secret")
        .compose(connack -> send(
            tenantId,
            deviceId,
            Buffer.buffer("hello"),
            false,
            (sendAttempt, result) -> {
                if (sendAttempt.succeeded()) {
                    LOGGER.info("successfully sent event [tenant-id: {}, device-id: {}", tenantId, deviceId);
                    result.complete();
                } else {
                    result.fail(sendAttempt.cause());
                }
            }))
        .compose(ok -> {
            final Promise<MessageConsumer> consumerCreated = Promise.promise();
            VERTX.setTimer(4000, tid -> {
                LOGGER.info("opening event consumer for tenant [{}]", tenantId);
                // THEN no messages can be consumed after the TTL has expired
                createConsumer(tenantId, msg -> receivedMessageCount.incrementAndGet())
                    .onComplete(consumerCreated);
            });
            return consumerCreated.future();
        })
        .compose(c -> {
            final Promise<Void> done = Promise.promise();
            VERTX.setTimer(1000, tid -> {
                if (receivedMessageCount.get() > 0) {
                    done.fail(new IllegalStateException("should not have received any events after TTL has expired"));
                } else {
                    done.complete();
                }
            });
            return done.future();
        }).onComplete(ctx.completing());
}
Example 13
Source File: CommandAndControlAmqpIT.java From hono with Eclipse Public License 2.0 | 4 votes |
/**
 * Verifies that the adapter forwards commands and responses hence and forth between
 * an application and a device that have been sent using the async API.
 *
 * @param endpointConfig The endpoints to use for sending/receiving commands.
 * @param ctx The vert.x test context.
 * @throws InterruptedException if not all commands and responses are exchanged in time.
 */
@ParameterizedTest(name = IntegrationTestSupport.PARAMETERIZED_TEST_NAME_PATTERN)
@MethodSource("allCombinations")
public void testSendAsyncCommandsSucceeds(
        final AmqpCommandEndpointConfiguration endpointConfig,
        final VertxTestContext ctx) throws InterruptedException {

    final String commandTargetDeviceId = endpointConfig.isSubscribeAsGateway()
            ? helper.setupGatewayDeviceBlocking(tenantId, deviceId, 5)
            : deviceId;

    connectAndSubscribe(ctx, commandTargetDeviceId, endpointConfig,
            (cmdReceiver, cmdResponseSender) -> createCommandConsumer(ctx, cmdReceiver, cmdResponseSender));

    final String replyId = "reply-id";
    final int totalNoOfCommandsToSend = 60;
    final CountDownLatch commandsSucceeded = new CountDownLatch(totalNoOfCommandsToSend);
    final AtomicInteger commandsSent = new AtomicInteger(0);
    final AtomicLong lastReceivedTimestamp = new AtomicLong();

    final VertxTestContext setup = new VertxTestContext();
    final Future<MessageConsumer> asyncResponseConsumer = helper.applicationClientFactory.createAsyncCommandResponseConsumer(
            tenantId,
            replyId,
            response -> {
                lastReceivedTimestamp.set(System.currentTimeMillis());
                commandsSucceeded.countDown();
                if (commandsSucceeded.getCount() % 20 == 0) {
                    log.info("command responses received: {}", totalNoOfCommandsToSend - commandsSucceeded.getCount());
                }
            },
            null);
    final Future<AsyncCommandClient> asyncCommandClient = helper.applicationClientFactory.getOrCreateAsyncCommandClient(tenantId);
    CompositeFuture.all(asyncResponseConsumer, asyncCommandClient).onComplete(setup.completing());

    assertThat(setup.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
    }

    final long start = System.currentTimeMillis();

    while (commandsSent.get() < totalNoOfCommandsToSend) {
        final CountDownLatch commandSent = new CountDownLatch(1);
        context.runOnContext(go -> {
            final String correlationId = String.valueOf(commandsSent.getAndIncrement());
            final Buffer msg = Buffer.buffer("value: " + correlationId);
            asyncCommandClient.result().sendAsyncCommand(
                    commandTargetDeviceId, "setValue", "text/plain", msg, correlationId, replyId, null)
                    .onComplete(sendAttempt -> {
                        if (sendAttempt.failed()) {
                            log.debug("error sending command {}", correlationId, sendAttempt.cause());
                        }
                        if (commandsSent.get() % 20 == 0) {
                            log.info("commands sent: " + commandsSent.get());
                        }
                        commandSent.countDown();
                    });
        });
        commandSent.await();
    }

    final long timeToWait = totalNoOfCommandsToSend * 200;
    if (!commandsSucceeded.await(timeToWait, TimeUnit.MILLISECONDS)) {
        log.info("Timeout of {} milliseconds reached, stop waiting for command responses", timeToWait);
    }
    final long commandsCompleted = totalNoOfCommandsToSend - commandsSucceeded.getCount();
    log.info("commands sent: {}, responses received: {} after {} milliseconds",
            commandsSent.get(), commandsCompleted, lastReceivedTimestamp.get() - start);
    if (commandsCompleted == commandsSent.get()) {
        ctx.completeNow();
    } else {
        ctx.failNow(new IllegalStateException("did not complete all commands sent"));
    }
}
Example 14
Source File: CommandAndControlAmqpIT.java From hono with Eclipse Public License 2.0 | 4 votes |
private void testSendCommandSucceeds(
        final VertxTestContext ctx,
        final String commandTargetDeviceId,
        final AmqpCommandEndpointConfiguration endpointConfig,
        final BiFunction<ProtonReceiver, ProtonSender, ProtonMessageHandler> commandConsumerFactory,
        final Function<Buffer, Future<?>> commandSender,
        final int totalNoOfCommandsToSend) throws InterruptedException {

    connectAndSubscribe(ctx, commandTargetDeviceId, endpointConfig, commandConsumerFactory);

    final CountDownLatch commandsSucceeded = new CountDownLatch(totalNoOfCommandsToSend);
    final AtomicInteger commandsSent = new AtomicInteger(0);
    final AtomicLong lastReceivedTimestamp = new AtomicLong();
    final long start = System.currentTimeMillis();

    while (commandsSent.get() < totalNoOfCommandsToSend) {
        final int currentMessage = commandsSent.incrementAndGet();
        final CountDownLatch commandSent = new CountDownLatch(1);
        context.runOnContext(go -> {
            final Buffer payload = Buffer.buffer("value: " + currentMessage);
            commandSender.apply(payload).onComplete(sendAttempt -> {
                if (sendAttempt.failed()) {
                    log.debug("error sending command {}", currentMessage, sendAttempt.cause());
                } else {
                    lastReceivedTimestamp.set(System.currentTimeMillis());
                    commandsSucceeded.countDown();
                    log.debug("sent command no {}", currentMessage);
                    if (commandsSucceeded.getCount() % 20 == 0) {
                        log.info("commands succeeded: {}", totalNoOfCommandsToSend - commandsSucceeded.getCount());
                    }
                }
                commandSent.countDown();
            });
        });
        commandSent.await();
        if (currentMessage % 20 == 0) {
            log.info("commands sent: " + currentMessage);
        }
    }

    final long timeToWait = totalNoOfCommandsToSend * 200;
    if (!commandsSucceeded.await(timeToWait, TimeUnit.MILLISECONDS)) {
        log.info("Timeout of {} milliseconds reached, stop waiting for commands to succeed", timeToWait);
    }
    final long commandsCompleted = totalNoOfCommandsToSend - commandsSucceeded.getCount();
    log.info("commands sent: {}, commands succeeded: {} after {} milliseconds",
            commandsSent.get(), commandsCompleted, lastReceivedTimestamp.get() - start);
    if (commandsCompleted == commandsSent.get()) {
        ctx.completeNow();
    } else {
        ctx.failNow(new IllegalStateException("did not complete all commands sent"));
    }
}
Example 15
Source File: CommandAndControlAmqpIT.java From hono with Eclipse Public License 2.0 | 4 votes |
/**
 * Verifies that the adapter rejects malformed command messages sent by applications.
 *
 * @param endpointConfig The endpoints to use for sending/receiving commands.
 * @param ctx The vert.x test context.
 * @throws InterruptedException if not all commands and responses are exchanged in time.
 */
@ParameterizedTest(name = IntegrationTestSupport.PARAMETERIZED_TEST_NAME_PATTERN)
@MethodSource("allCombinations")
@Timeout(timeUnit = TimeUnit.SECONDS, value = 10)
public void testSendCommandFailsForMalformedMessage(
        final AmqpCommandEndpointConfiguration endpointConfig,
        final VertxTestContext ctx) throws InterruptedException {

    final String commandTargetDeviceId = endpointConfig.isSubscribeAsGateway()
            ? helper.setupGatewayDeviceBlocking(tenantId, deviceId, 5)
            : deviceId;

    final AtomicReference<MessageSender> sender = new AtomicReference<>();
    final String targetAddress = endpointConfig.getSenderLinkTargetAddress(tenantId);

    final VertxTestContext setup = new VertxTestContext();
    final Checkpoint preconditions = setup.checkpoint(2);

    connectToAdapter(tenantId, tenant, deviceId, password, () -> createEventConsumer(tenantId, msg -> {
        // expect empty notification with TTD -1
        ctx.verify(() -> assertThat(msg.getContentType()).isEqualTo(EventConstants.CONTENT_TYPE_EMPTY_NOTIFICATION));
        final TimeUntilDisconnectNotification notification = TimeUntilDisconnectNotification.fromMessage(msg).orElse(null);
        log.debug("received notification [{}]", notification);
        ctx.verify(() -> assertThat(notification).isNotNull());
        if (notification.getTtd() == -1) {
            preconditions.flag();
        }
    }))
    .compose(con -> subscribeToCommands(endpointConfig, tenantId, commandTargetDeviceId)
            .map(recv -> {
                recv.handler((delivery, msg) -> ctx
                        .failNow(new IllegalStateException("should not have received command")));
                return null;
            }))
    .compose(ok -> helper.applicationClientFactory.createGenericMessageSender(targetAddress))
    .map(s -> {
        log.debug("created generic sender for sending commands [target address: {}]", targetAddress);
        sender.set(s);
        preconditions.flag();
        return s;
    })
    .onComplete(setup.completing());

    assertThat(setup.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
    }

    final Checkpoint expectedFailures = ctx.checkpoint(2);

    log.debug("sending command message lacking subject");
    final Message messageWithoutSubject = ProtonHelper.message("input data");
    messageWithoutSubject.setAddress(endpointConfig.getCommandMessageAddress(tenantId, commandTargetDeviceId));
    messageWithoutSubject.setMessageId("message-id");
    messageWithoutSubject.setReplyTo("reply/to/address");
    sender.get().sendAndWaitForOutcome(messageWithoutSubject).onComplete(ctx.failing(t -> {
        ctx.verify(() -> assertThat(t).isInstanceOf(ClientErrorException.class));
        expectedFailures.flag();
    }));

    log.debug("sending command message lacking message ID and correlation ID");
    final Message messageWithoutId = ProtonHelper.message("input data");
    messageWithoutId.setAddress(endpointConfig.getCommandMessageAddress(tenantId, commandTargetDeviceId));
    messageWithoutId.setSubject("setValue");
    messageWithoutId.setReplyTo("reply/to/address");
    sender.get().sendAndWaitForOutcome(messageWithoutId).onComplete(ctx.failing(t -> {
        ctx.verify(() -> assertThat(t).isInstanceOf(ClientErrorException.class));
        expectedFailures.flag();
    }));
}
Example 16
Source File: KafkaAssemblyOperatorTest.java From strimzi-kafka-operator with Apache License 2.0 | 4 votes |
@ParameterizedTest @MethodSource("data") @Timeout(value = 2, timeUnit = TimeUnit.MINUTES) public void testReconcile(Params params, VertxTestContext context) { setFields(params); // create CM, Service, headless service, statefulset ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(openShift); ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); CrdOperator mockKafkaOps = supplier.kafkaOperator; KafkaSetOperator mockKsOps = supplier.kafkaSetOperations; SecretOperator mockSecretOps = supplier.secretOperations; String clusterCmNamespace = "test"; Kafka foo = getKafkaAssembly("foo"); Kafka bar = getKafkaAssembly("bar"); when(mockKafkaOps.listAsync(eq(clusterCmNamespace), any(Optional.class))).thenReturn( Future.succeededFuture(asList(foo, bar)) ); // when requested Custom Resource for a specific Kafka cluster when(mockKafkaOps.get(eq(clusterCmNamespace), eq("foo"))).thenReturn(foo); when(mockKafkaOps.get(eq(clusterCmNamespace), eq("bar"))).thenReturn(bar); when(mockKafkaOps.getAsync(eq(clusterCmNamespace), eq("foo"))).thenReturn(Future.succeededFuture(foo)); when(mockKafkaOps.getAsync(eq(clusterCmNamespace), eq("bar"))).thenReturn(Future.succeededFuture(bar)); when(mockKafkaOps.updateStatusAsync(any(Kafka.class))).thenReturn(Future.succeededFuture()); // providing certificates Secrets for existing clusters List<Secret> fooSecrets = ResourceUtils.createKafkaClusterInitialSecrets(clusterCmNamespace, "foo"); //ClusterCa fooCerts = ResourceUtils.createInitialClusterCa("foo", ModelUtils.findSecretWithName(fooSecrets, AbstractModel.clusterCaCertSecretName("foo"))); List<Secret> barSecrets = ResourceUtils.createKafkaClusterSecretsWithReplicas(clusterCmNamespace, "bar", bar.getSpec().getKafka().getReplicas(), bar.getSpec().getZookeeper().getReplicas()); ClusterCa barClusterCa = ResourceUtils.createInitialClusterCa("bar", findSecretWithName(barSecrets, AbstractModel.clusterCaCertSecretName("bar")), findSecretWithName(barSecrets, AbstractModel.clusterCaKeySecretName("bar"))); ClientsCa barClientsCa = ResourceUtils.createInitialClientsCa("bar", findSecretWithName(barSecrets, KafkaCluster.clientsCaCertSecretName("bar")), findSecretWithName(barSecrets, KafkaCluster.clientsCaKeySecretName("bar"))); // providing the list of ALL StatefulSets for all the Kafka clusters Labels newLabels = Labels.forStrimziKind(Kafka.RESOURCE_KIND); when(mockKsOps.list(eq(clusterCmNamespace), eq(newLabels))).thenReturn( asList(KafkaCluster.fromCrd(bar, VERSIONS).generateStatefulSet(openShift, null, null)) ); when(mockSecretOps.get(eq(clusterCmNamespace), eq(AbstractModel.clusterCaCertSecretName(foo.getMetadata().getName())))) .thenReturn( fooSecrets.get(0)); when(mockSecretOps.reconcile(eq(clusterCmNamespace), eq(AbstractModel.clusterCaCertSecretName(foo.getMetadata().getName())), any(Secret.class))).thenReturn(Future.succeededFuture()); // providing the list StatefulSets for already "existing" Kafka clusters Labels barLabels = Labels.forStrimziCluster("bar"); KafkaCluster barCluster = KafkaCluster.fromCrd(bar, VERSIONS); when(mockKsOps.list(eq(clusterCmNamespace), eq(barLabels))).thenReturn( asList(barCluster.generateStatefulSet(openShift, null, null)) ); when(mockSecretOps.list(eq(clusterCmNamespace), eq(barLabels))).thenAnswer( invocation -> new ArrayList<>(asList( barClientsCa.caKeySecret(), barClientsCa.caCertSecret(), barCluster.generateBrokersSecret(), barClusterCa.caCertSecret())) ); when(mockSecretOps.get(eq(clusterCmNamespace), 
eq(AbstractModel.clusterCaCertSecretName(bar.getMetadata().getName())))).thenReturn(barSecrets.get(0)); when(mockSecretOps.reconcile(eq(clusterCmNamespace), eq(AbstractModel.clusterCaCertSecretName(bar.getMetadata().getName())), any(Secret.class))).thenReturn(Future.succeededFuture()); Checkpoint fooAsync = context.checkpoint(); Checkpoint barAsync = context.checkpoint(); KafkaAssemblyOperator ops = new KafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(openShift, kubernetesVersion), certManager, passwordGenerator, supplier, config) { @Override public Future<Void> createOrUpdate(Reconciliation reconciliation, Kafka kafkaAssembly) { String name = kafkaAssembly.getMetadata().getName(); if ("foo".equals(name)) { fooAsync.flag(); } else if ("bar".equals(name)) { barAsync.flag(); } else { context.failNow(new AssertionError("Unexpected name " + name)); } return Future.succeededFuture(); } }; Checkpoint async = context.checkpoint(); // Now try to reconcile all the Kafka clusters ops.reconcileAll("test", clusterCmNamespace, context.succeeding(v -> async.flag())); }
Example 17
Source File: CommandAndControlAmqpIT.java From hono with Eclipse Public License 2.0 | 4 votes |
/**
 * Verifies that the adapter forwards the <em>released</em> disposition back to the
 * application if the device hasn't sent a disposition update for the delivery of
 * the command message sent to the device.
 *
 * @param ctx The vert.x test context.
 * @throws InterruptedException if not all commands and responses are exchanged in time.
 */
@Test
@Timeout(timeUnit = TimeUnit.SECONDS, value = 10)
public void testSendCommandFailsForCommandNotAcknowledgedByDevice(
        final VertxTestContext ctx) throws InterruptedException {

    final AmqpCommandEndpointConfiguration endpointConfig = new AmqpCommandEndpointConfiguration(SubscriberRole.DEVICE);

    final String commandTargetDeviceId = endpointConfig.isSubscribeAsGateway()
            ? helper.setupGatewayDeviceBlocking(tenantId, deviceId, 5)
            : deviceId;

    final AtomicInteger receivedMessagesCounter = new AtomicInteger(0);
    // command handler won't send a disposition update
    connectAndSubscribe(ctx, commandTargetDeviceId, endpointConfig,
            (cmdReceiver, cmdResponseSender) -> createNotSendingDeliveryUpdateCommandConsumer(ctx, cmdReceiver, receivedMessagesCounter));

    final int totalNoOfCommandsToSend = 2;
    final CountDownLatch commandsFailed = new CountDownLatch(totalNoOfCommandsToSend);
    final AtomicInteger commandsSent = new AtomicInteger(0);
    final AtomicLong lastReceivedTimestamp = new AtomicLong();
    final long start = System.currentTimeMillis();

    final VertxTestContext commandClientCreation = new VertxTestContext();
    final Future<CommandClient> commandClient = helper.applicationClientFactory.getOrCreateCommandClient(tenantId, "test-client")
            .onSuccess(c -> c.setRequestTimeout(1300)) // have to wait more than AmqpAdapterProperties.DEFAULT_SEND_MESSAGE_TO_DEVICE_TIMEOUT (1000ms) for the first command message
            .onComplete(commandClientCreation.completing());

    assertThat(commandClientCreation.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (commandClientCreation.failed()) {
        ctx.failNow(commandClientCreation.causeOfFailure());
    }

    while (commandsSent.get() < totalNoOfCommandsToSend) {
        final CountDownLatch commandSent = new CountDownLatch(1);
        context.runOnContext(go -> {
            final Buffer msg = Buffer.buffer("value: " + commandsSent.getAndIncrement());
            final Future<BufferResult> sendCmdFuture = commandClient.result().sendCommand(commandTargetDeviceId, "setValue", "text/plain", msg, null);
            sendCmdFuture.onComplete(sendAttempt -> {
                if (sendAttempt.succeeded()) {
                    log.debug("sending command {} succeeded unexpectedly", commandsSent.get());
                } else {
                    if (sendAttempt.cause() instanceof ServerErrorException
                            && ((ServerErrorException) sendAttempt.cause()).getErrorCode() == HttpURLConnection.HTTP_UNAVAILABLE) {
                        log.debug("sending command {} failed as expected: {}", commandsSent.get(), sendAttempt.cause().toString());
                        lastReceivedTimestamp.set(System.currentTimeMillis());
                        commandsFailed.countDown();
                        if (commandsFailed.getCount() % 20 == 0) {
                            log.info("commands failed as expected: {}", totalNoOfCommandsToSend - commandsFailed.getCount());
                        }
                    } else {
                        log.debug("sending command {} failed with an unexpected error", commandsSent.get(), sendAttempt.cause());
                    }
                }
                if (commandsSent.get() % 20 == 0) {
                    log.info("commands sent: " + commandsSent.get());
                }
                commandSent.countDown();
            });
        });
        commandSent.await();
    }

    // have to wait more than AmqpAdapterProperties.DEFAULT_SEND_MESSAGE_TO_DEVICE_TIMEOUT (1000ms) for each command message
    final long timeToWait = 300 + (totalNoOfCommandsToSend * 1300);
    if (!commandsFailed.await(timeToWait, TimeUnit.MILLISECONDS)) {
        log.info("Timeout of {} milliseconds reached, stop waiting for commands", timeToWait);
    }
    assertThat(receivedMessagesCounter.get()).isEqualTo(totalNoOfCommandsToSend);
    final long commandsCompleted = totalNoOfCommandsToSend - commandsFailed.getCount();
    log.info("commands sent: {}, commands failed: {} after {} milliseconds",
            commandsSent.get(), commandsCompleted, lastReceivedTimestamp.get() - start);
    if (commandsCompleted == commandsSent.get()) {
        ctx.completeNow();
    } else {
        ctx.failNow(new IllegalStateException("did not complete all commands sent"));
    }
}
Example 18
Source File: HttpTestBase.java From hono with Eclipse Public License 2.0 | 4 votes |
/**
 * Verifies that a number of messages uploaded to the HTTP adapter via a gateway using HTTP Basic auth can be
 * successfully consumed via the AMQP Messaging Network.
 *
 * @param ctx The test context.
 * @throws InterruptedException if the test fails.
 */
@Test
public void testUploadMessagesViaGateway(final VertxTestContext ctx) throws InterruptedException {

    // GIVEN a device that is connected via two gateways
    final Tenant tenant = new Tenant();
    final String gatewayOneId = helper.getRandomDeviceId(tenantId);
    final String gatewayTwoId = helper.getRandomDeviceId(tenantId);
    final Device device = new Device();
    device.setVia(Arrays.asList(gatewayOneId, gatewayTwoId));

    final VertxTestContext setup = new VertxTestContext();
    helper.registry.addDeviceForTenant(tenantId, tenant, gatewayOneId, PWD)
        .compose(ok -> helper.registry.addDeviceToTenant(tenantId, gatewayTwoId, PWD))
        .compose(ok -> helper.registry.registerDevice(tenantId, deviceId, device))
        .onComplete(setup.completing());

    assertThat(setup.awaitCompletion(5, TimeUnit.SECONDS)).isTrue();
    if (setup.failed()) {
        ctx.failNow(setup.causeOfFailure());
        return;
    }

    final MultiMap requestHeadersOne = MultiMap.caseInsensitiveMultiMap()
        .add(HttpHeaders.CONTENT_TYPE, "text/plain")
        .add(HttpHeaders.AUTHORIZATION, getBasicAuth(tenantId, gatewayOneId, PWD))
        .add(HttpHeaders.ORIGIN, ORIGIN_URI);

    final MultiMap requestHeadersTwo = MultiMap.caseInsensitiveMultiMap()
        .add(HttpHeaders.CONTENT_TYPE, "text/plain")
        .add(HttpHeaders.AUTHORIZATION, getBasicAuth(tenantId, gatewayTwoId, PWD))
        .add(HttpHeaders.ORIGIN, ORIGIN_URI);

    final String uri = String.format("%s/%s/%s", getEndpointUri(), tenantId, deviceId);

    testUploadMessages(
        ctx,
        tenantId,
        count -> {
            final MultiMap headers = (count.intValue() & 1) == 0 ? requestHeadersOne : requestHeadersTwo;
            return httpClient.update( // GW uses PUT when acting on behalf of a device
                uri,
                Buffer.buffer("hello " + count),
                headers,
                status -> status == HttpURLConnection.HTTP_ACCEPTED);
        });
}
Example 19
Source File: KafkaAssemblyOperatorTest.java From strimzi-kafka-operator with Apache License 2.0 | 4 votes |
@ParameterizedTest @MethodSource("data") @Timeout(value = 2, timeUnit = TimeUnit.MINUTES) public void testReconcileAllNamespaces(Params params, VertxTestContext context) { setFields(params); // create CM, Service, headless service, statefulset ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(openShift); ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); CrdOperator mockKafkaOps = supplier.kafkaOperator; KafkaSetOperator mockKsOps = supplier.kafkaSetOperations; SecretOperator mockSecretOps = supplier.secretOperations; Kafka foo = getKafkaAssembly("foo"); foo.getMetadata().setNamespace("namespace1"); Kafka bar = getKafkaAssembly("bar"); bar.getMetadata().setNamespace("namespace2"); when(mockKafkaOps.listAsync(eq("*"), any(Optional.class))).thenReturn( Future.succeededFuture(asList(foo, bar)) ); // when requested Custom Resource for a specific Kafka cluster when(mockKafkaOps.get(eq("namespace1"), eq("foo"))).thenReturn(foo); when(mockKafkaOps.get(eq("namespace2"), eq("bar"))).thenReturn(bar); when(mockKafkaOps.getAsync(eq("namespace1"), eq("foo"))).thenReturn(Future.succeededFuture(foo)); when(mockKafkaOps.getAsync(eq("namespace2"), eq("bar"))).thenReturn(Future.succeededFuture(bar)); when(mockKafkaOps.updateStatusAsync(any(Kafka.class))).thenReturn(Future.succeededFuture()); // providing certificates Secrets for existing clusters List<Secret> fooSecrets = ResourceUtils.createKafkaClusterInitialSecrets("namespace1", "foo"); List<Secret> barSecrets = ResourceUtils.createKafkaClusterSecretsWithReplicas("namespace2", "bar", bar.getSpec().getKafka().getReplicas(), bar.getSpec().getZookeeper().getReplicas()); ClusterCa barClusterCa = ResourceUtils.createInitialClusterCa("bar", findSecretWithName(barSecrets, AbstractModel.clusterCaCertSecretName("bar")), findSecretWithName(barSecrets, AbstractModel.clusterCaKeySecretName("bar"))); ClientsCa barClientsCa = ResourceUtils.createInitialClientsCa("bar", findSecretWithName(barSecrets, KafkaCluster.clientsCaCertSecretName("bar")), findSecretWithName(barSecrets, KafkaCluster.clientsCaKeySecretName("bar"))); // providing the list of ALL StatefulSets for all the Kafka clusters Labels newLabels = Labels.forStrimziKind(Kafka.RESOURCE_KIND); when(mockKsOps.list(eq("*"), eq(newLabels))).thenReturn( asList(KafkaCluster.fromCrd(bar, VERSIONS).generateStatefulSet(openShift, null, null)) ); // providing the list StatefulSets for already "existing" Kafka clusters Labels barLabels = Labels.forStrimziCluster("bar"); KafkaCluster barCluster = KafkaCluster.fromCrd(bar, VERSIONS); when(mockKsOps.list(eq("*"), eq(barLabels))).thenReturn( asList(barCluster.generateStatefulSet(openShift, null, null)) ); when(mockSecretOps.list(eq("*"), eq(barLabels))).thenAnswer( invocation -> new ArrayList<>(asList( barClientsCa.caKeySecret(), barClientsCa.caCertSecret(), barCluster.generateBrokersSecret(), barClusterCa.caCertSecret())) ); Checkpoint fooAsync = context.checkpoint(); Checkpoint barAsync = context.checkpoint(); KafkaAssemblyOperator ops = new KafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(openShift, kubernetesVersion), certManager, passwordGenerator, supplier, config) { @Override public Future<Void> createOrUpdate(Reconciliation reconciliation, Kafka kafkaAssembly) { String name = kafkaAssembly.getMetadata().getName(); if ("foo".equals(name)) { fooAsync.flag(); } else if ("bar".equals(name)) { barAsync.flag(); } else { context.failNow(new AssertionError("Unexpected name " + name)); } return 
Future.succeededFuture(); } }; Checkpoint async = context.checkpoint(); // Now try to reconcile all the Kafka clusters ops.reconcileAll("test", "*", context.succeeding(v -> async.flag())); }
Example 20
Source File: MqttPublishTestBase.java From hono with Eclipse Public License 2.0 | 3 votes |
/**
 * Asserts that the ratio between messages that have been received and messages
 * being sent is acceptable for the particular QoS used for publishing messages.
 * <p>
 * This default implementation asserts that received = sent.
 *
 * @param received The number of messages that have been received.
 * @param sent The number of messages that have been sent.
 * @param ctx The test context that will be failed if the ratio is not acceptable.
 */
protected void assertMessageReceivedRatio(final long received, final long sent, final VertxTestContext ctx) {
    if (received < sent) {
        final String msg = String.format("did not receive expected number of messages [expected: %d, received: %d]",
                sent, received);
        ctx.failNow(new IllegalStateException(msg));
    } else {
        ctx.completeNow();
    }
}