software.amazon.awssdk.services.kinesis.model.SubscribeToShardEvent Java Examples
The following examples show how to use
software.amazon.awssdk.services.kinesis.model.SubscribeToShardEvent.
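For orientation, here is a minimal, self-contained sketch of the flow in which SubscribeToShardEvent typically appears: build a SubscribeToShardRequest, attach a SubscribeToShardResponseHandler whose Visitor handles each event, and call subscribeToShard on a KinesisAsyncClient. This is an illustrative sketch, not taken from any of the projects below; the class name, consumer ARN, and shard ID are placeholder values.

import java.util.concurrent.CompletableFuture;
import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
import software.amazon.awssdk.services.kinesis.model.ShardIteratorType;
import software.amazon.awssdk.services.kinesis.model.StartingPosition;
import software.amazon.awssdk.services.kinesis.model.SubscribeToShardEvent;
import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest;
import software.amazon.awssdk.services.kinesis.model.SubscribeToShardResponseHandler;

public class SubscribeToShardEventSketch {
    public static void main(String[] args) {
        KinesisAsyncClient client = KinesisAsyncClient.create();

        // Placeholder consumer ARN and shard ID -- substitute values from your own stream.
        SubscribeToShardRequest request = SubscribeToShardRequest.builder()
            .consumerARN("arn:aws:kinesis:us-east-1:123456789012:stream/my-stream/consumer/my-consumer:1")
            .shardId("shardId-000000000000")
            .startingPosition(StartingPosition.builder().type(ShardIteratorType.LATEST).build())
            .build();

        // Handle each SubscribeToShardEvent as it arrives on the event stream.
        SubscribeToShardResponseHandler.Visitor visitor = new SubscribeToShardResponseHandler.Visitor() {
            @Override
            public void visit(SubscribeToShardEvent event) {
                System.out.println("Received " + event.records().size() + " record(s), "
                    + event.millisBehindLatest() + " ms behind latest");
            }
        };
        SubscribeToShardResponseHandler responseHandler = SubscribeToShardResponseHandler.builder()
            .onError(t -> System.err.println("Error during stream - " + t.getMessage()))
            .subscriber(visitor)
            .build();

        // The returned future completes when the subscription ends (typically after about five minutes).
        CompletableFuture<Void> future = client.subscribeToShard(request, responseHandler);
        future.join();
        client.close();
    }
}

The examples that follow show the same building blocks used with Reactor, RxJava, raw SdkPublisher transformations, and the Kinesis Client Library's fan-out publisher.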
Example #1
Source File: KinesisStreamReactorEx.java From aws-doc-sdk-examples with Apache License 2.0
/**
 * Uses Reactor via the onEventStream lifecycle method. This gives you full access to the publisher,
 * which can be used to create a Flux.
 */
private static CompletableFuture<Void> responseHandlerBuilder_Reactor(KinesisAsyncClient client, SubscribeToShardRequest request) {
    SubscribeToShardResponseHandler responseHandler = SubscribeToShardResponseHandler
        .builder()
        .onError(t -> System.err.println("Error during stream - " + t.getMessage()))
        .onEventStream(p -> Flux.from(p)
            .ofType(SubscribeToShardEvent.class)
            .flatMapIterable(SubscribeToShardEvent::records)
            .limitRate(1000)
            .buffer(25)
            .subscribe(e -> System.out.println("Record batch = " + e)))
        .build();
    return client.subscribeToShard(request, responseHandler);
}
Example #2
Source File: KinesisStreamRxJavaEx.java From aws-doc-sdk-examples with Apache License 2.0
/**
 * Uses RxJava via the onEventStream lifecycle method. This gives you full access to the publisher,
 * which can be used to create an Rx Flowable.
 */
private static CompletableFuture<Void> responseHandlerBuilder_RxJava(KinesisAsyncClient client, SubscribeToShardRequest request) {
    // snippet-start:[kinesis.java2.stream_rx_example.event_stream]
    SubscribeToShardResponseHandler responseHandler = SubscribeToShardResponseHandler
        .builder()
        .onError(t -> System.err.println("Error during stream - " + t.getMessage()))
        .onEventStream(p -> Flowable.fromPublisher(p)
            .ofType(SubscribeToShardEvent.class)
            .flatMapIterable(SubscribeToShardEvent::records)
            .limit(1000)
            .buffer(25)
            .subscribe(e -> System.out.println("Record batch = " + e)))
        .build();
    // snippet-end:[kinesis.java2.stream_rx_example.event_stream]
    return client.subscribeToShard(request, responseHandler);
}
Example #3
Source File: FanOutRecordsPublisher.java From amazon-kinesis-client with Apache License 2.0
@Override
public void onNext(SubscribeToShardEventStream recordBatchEvent) {
    synchronized (parent.lockObject) {
        if (flow.shouldSubscriptionCancel()) {
            log.debug(
                "{}: [SubscriptionLifetime]: (RecordSubscription#onNext) @ {} id: {} -- RecordFlow requires cancelling",
                parent.shardId, connectionStartedAt, subscribeToShardId);
            cancel();
            return;
        }
        recordBatchEvent.accept(new SubscribeToShardResponseHandler.Visitor() {
            @Override
            public void visit(SubscribeToShardEvent event) {
                flow.recordsReceived(event);
            }
        });
    }
}
Example #4
Source File: SubscribeToShardUnmarshallingTest.java From aws-sdk-java-v2 with Apache License 2.0
@Test
public void eventWithRecords_UnmarshalledCorrectly() throws Throwable {
    String data = BinaryUtils.toBase64("foobar".getBytes(StandardCharsets.UTF_8));
    AbortableInputStream content = new MessageWriter()
        .writeInitialResponse(new byte[0])
        .writeEvent("SubscribeToShardEvent",
            String.format("{\"ContinuationSequenceNumber\": \"1234\","
                + "\"MillisBehindLatest\": 0,"
                + "\"Records\": [{\"Data\": \"%s\"}]"
                + "}", data))
        .toInputStream();
    SubscribeToShardEvent event = SubscribeToShardEvent.builder()
        .continuationSequenceNumber("1234")
        .millisBehindLatest(0L)
        .records(Record.builder()
            .data(SdkBytes.fromUtf8String("foobar"))
            .build())
        .build();
    stubResponse(SdkHttpFullResponse.builder()
        .statusCode(200)
        .content(content)
        .build());
    List<SubscribeToShardEventStream> events = subscribeToShard();
    assertThat(events).containsOnly(event);
}
Example #5
Source File: KinesisStabilityTest.java From aws-sdk-java-v2 with Apache License 2.0
@Override
public void onEventStream(SdkPublisher<SubscribeToShardEventStream> publisher) {
    publisher.filter(SubscribeToShardEvent.class)
        .subscribe(b -> {
            log.debug(() -> "sequenceNumber " + b.records() + "_" + id);
            receivedData.addAll(b.records().stream().map(Record::data).collect(Collectors.toList()));
        });
}
Example #6
Source File: KinesisStreamEx.java From aws-doc-sdk-examples with Apache License 2.0
/**
 * Uses the publisherTransformer method to customize the publisher before ultimately subscribing to it
 */
// snippet-start:[kinesis.java2.stream_example.publish_transformer]
private static CompletableFuture<Void> responseHandlerBuilderPublisherTransformer(KinesisAsyncClient client, SubscribeToShardRequest request) {
    SubscribeToShardResponseHandler responseHandler = SubscribeToShardResponseHandler
        .builder()
        .onError(t -> System.err.println("Error during stream - " + t.getMessage()))
        .publisherTransformer(p -> p.filter(e -> e instanceof SubscribeToShardEvent).limit(100))
        .subscriber(e -> System.out.println("Received event - " + e))
        .build();
    return client.subscribeToShard(request, responseHandler);
}
Example #7
Source File: KinesisStreamEx.java From aws-doc-sdk-examples with Apache License 2.0
/**
 * Subscribes to the stream of events by implementing the SubscribeToShardResponseHandler.Visitor interface
 */
private static CompletableFuture<Void> responseHandlerBuilderVisitor(KinesisAsyncClient client, SubscribeToShardRequest request) {
    SubscribeToShardResponseHandler.Visitor visitor = new SubscribeToShardResponseHandler.Visitor() {
        @Override
        public void visit(SubscribeToShardEvent event) {
            System.out.println("Received subscribe to shard event " + event);
        }
    };
    SubscribeToShardResponseHandler responseHandler = SubscribeToShardResponseHandler
        .builder()
        .onError(t -> System.err.println("Error during stream - " + t.getMessage()))
        .subscriber(visitor)
        .build();
    return client.subscribeToShard(request, responseHandler);
}
Example #8
Source File: KinesisStreamEx.java From aws-doc-sdk-examples with Apache License 2.0
/**
 * Creates a SubscribeToShardResponseHandler the classic way, by implementing the interface.
 */
// snippet-start:[kinesis.java2.stream_example.custom_handler]
private static CompletableFuture<Void> responseHandlerBuilderClassic(KinesisAsyncClient client, SubscribeToShardRequest request) {
    SubscribeToShardResponseHandler responseHandler = new SubscribeToShardResponseHandler() {
        @Override
        public void responseReceived(SubscribeToShardResponse response) {
            System.out.println("Received initial response");
        }

        @Override
        public void onEventStream(SdkPublisher<SubscribeToShardEventStream> publisher) {
            publisher
                // Filter to only SubscribeToShardEvents
                .filter(SubscribeToShardEvent.class)
                // Flat map into a publisher of just records
                .flatMapIterable(SubscribeToShardEvent::records)
                // Limit to 1000 total records
                .limit(1000)
                // Batch records into lists of 25
                .buffer(25)
                // Print out each record batch
                .subscribe(batch -> System.out.println("Record Batch - " + batch));
        }

        @Override
        public void complete() {
            System.out.println("All records streamed successfully");
        }

        @Override
        public void exceptionOccurred(Throwable throwable) {
            System.err.println("Error during stream - " + throwable.getMessage());
        }
    };
    return client.subscribeToShard(request, responseHandler);
}
Example #9
Source File: KinesisStreamEx.java From aws-doc-sdk-examples with Apache License 2.0
/**
 * Subscribes to the publisher using the onEventStream lifecycle callback method, which allows greater control
 * over the publisher and allows transformation methods on the publisher, like map and buffer
 */
private static CompletableFuture<Void> responseHandlerBuilderOnEventStream(KinesisAsyncClient client, SubscribeToShardRequest request) {
    SubscribeToShardResponseHandler responseHandler = SubscribeToShardResponseHandler
        .builder()
        .onError(t -> System.err.println("Error during stream - " + t.getMessage()))
        .onEventStream(p -> p.filter(SubscribeToShardEvent.class).subscribe(new MySubscriber()))
        .build();
    return client.subscribeToShard(request, responseHandler);
}
Example #10
Source File: FanOutRecordsPublisher.java From amazon-kinesis-client with Apache License 2.0
private void recordsReceived(RecordFlow triggeringFlow, SubscribeToShardEvent recordBatchEvent) {
    synchronized (lockObject) {
        if (!hasValidSubscriber()) {
            log.debug(
                "{}: [SubscriptionLifetime] (FanOutRecordsPublisher#recordsReceived) @ {} id: {} -- Subscriber is null.",
                shardId, triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId);
            triggeringFlow.cancel();
            if (flow != null) {
                flow.cancel();
            }
            return;
        }
        if (!isActiveFlow(triggeringFlow)) {
            log.debug(
                "{}: [SubscriptionLifetime] (FanOutRecordsPublisher#recordsReceived) @ {} id: {} -- Received records for an inactive flow.",
                shardId, triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId);
            return;
        }
        List<KinesisClientRecord> records = recordBatchEvent.records().stream().map(KinesisClientRecord::fromRecord)
            .collect(Collectors.toList());
        ProcessRecordsInput input = ProcessRecordsInput.builder().cacheEntryTime(Instant.now())
            .millisBehindLatest(recordBatchEvent.millisBehindLatest())
            .isAtShardEnd(recordBatchEvent.continuationSequenceNumber() == null).records(records).build();
        FanoutRecordsRetrieved recordsRetrieved = new FanoutRecordsRetrieved(input,
            recordBatchEvent.continuationSequenceNumber(), triggeringFlow.subscribeToShardId);
        try {
            bufferCurrentEventAndScheduleIfRequired(recordsRetrieved, triggeringFlow);
        } catch (Throwable t) {
            log.warn("{}: Unable to buffer or schedule onNext for subscriber. Failing publisher."
                + " Last successful request details -- {}", shardId, lastSuccessfulRequestDetails);
            errorOccurred(triggeringFlow, t);
        }
    }
}
Example #11
Source File: SubscribeToShardIntegrationTest.java From aws-sdk-java-v2 with Apache License 2.0
@Test
public void subscribeToShard_ReceivesAllData() {
    List<SdkBytes> producedData = new ArrayList<>();
    ScheduledExecutorService producer = Executors.newScheduledThreadPool(1);
    // Delay it a bit to allow us to subscribe first
    producer.scheduleAtFixedRate(() -> putRecord().ifPresent(producedData::add), 10, 1, TimeUnit.SECONDS);

    List<SdkBytes> receivedData = new ArrayList<>();
    // Add every event's data to the receivedData list
    Consumer<SubscribeToShardEvent> eventConsumer = s -> receivedData.addAll(
        s.records().stream()
            .map(Record::data)
            .collect(Collectors.toList()));
    asyncClient.subscribeToShard(r -> r.consumerARN(consumerArn)
            .shardId(shardId)
            .startingPosition(s -> s.type(ShardIteratorType.LATEST)),
        SubscribeToShardResponseHandler.builder()
            .onEventStream(p -> p.filter(SubscribeToShardEvent.class)
                .subscribe(eventConsumer))
            .onResponse(this::verifyHttpMetadata)
            .build())
        .join();
    producer.shutdown();
    // Make sure all the data we received was data we published; we may have published more
    // if the producer isn't shut down immediately after we finish subscribing.
    assertThat(producedData).containsSequence(receivedData);
}
Example #12
Source File: FanOutRecordsPublisherTest.java From amazon-kinesis-client with Apache License 2.0
@Test
public void testContinuesAfterSequence() {
    FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN);
    ArgumentCaptor<FanOutRecordsPublisher.RecordSubscription> captor = ArgumentCaptor
        .forClass(FanOutRecordsPublisher.RecordSubscription.class);
    ArgumentCaptor<FanOutRecordsPublisher.RecordFlow> flowCaptor = ArgumentCaptor
        .forClass(FanOutRecordsPublisher.RecordFlow.class);
    doNothing().when(publisher).subscribe(captor.capture());
    source.start(new ExtendedSequenceNumber("0"),
        InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST));
    NonFailingSubscriber nonFailingSubscriber = new NonFailingSubscriber();
    source.subscribe(new ShardConsumerNotifyingSubscriber(nonFailingSubscriber, source));
    SubscribeToShardRequest expected = SubscribeToShardRequest.builder().consumerARN(CONSUMER_ARN).shardId(SHARD_ID)
        .startingPosition(StartingPosition.builder().sequenceNumber("0")
            .type(ShardIteratorType.AT_SEQUENCE_NUMBER).build())
        .build();
    verify(kinesisClient).subscribeToShard(argThat(new SubscribeToShardRequestMatcher(expected)), flowCaptor.capture());
    flowCaptor.getValue().onEventStream(publisher);
    captor.getValue().onSubscribe(subscription);
    List<Record> records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList());
    List<KinesisClientRecordMatcher> matchers = records.stream().map(KinesisClientRecordMatcher::new)
        .collect(Collectors.toList());
    batchEvent = SubscribeToShardEvent.builder().millisBehindLatest(100L).records(records)
        .continuationSequenceNumber("3").build();
    captor.getValue().onNext(batchEvent);
    captor.getValue().onComplete();
    flowCaptor.getValue().complete();
    ArgumentCaptor<FanOutRecordsPublisher.RecordSubscription> nextSubscribeCaptor = ArgumentCaptor
        .forClass(FanOutRecordsPublisher.RecordSubscription.class);
    ArgumentCaptor<FanOutRecordsPublisher.RecordFlow> nextFlowCaptor = ArgumentCaptor
        .forClass(FanOutRecordsPublisher.RecordFlow.class);
    SubscribeToShardRequest nextExpected = SubscribeToShardRequest.builder().consumerARN(CONSUMER_ARN)
        .shardId(SHARD_ID).startingPosition(StartingPosition.builder().sequenceNumber("3")
            .type(ShardIteratorType.AFTER_SEQUENCE_NUMBER).build())
        .build();
    verify(kinesisClient).subscribeToShard(argThat(new SubscribeToShardRequestMatcher(nextExpected)),
        nextFlowCaptor.capture());
    reset(publisher);
    doNothing().when(publisher).subscribe(nextSubscribeCaptor.capture());
    nextFlowCaptor.getValue().onEventStream(publisher);
    nextSubscribeCaptor.getValue().onSubscribe(subscription);
    List<Record> nextRecords = Stream.of(4, 5, 6).map(this::makeRecord).collect(Collectors.toList());
    List<KinesisClientRecordMatcher> nextMatchers = nextRecords.stream().map(KinesisClientRecordMatcher::new)
        .collect(Collectors.toList());
    batchEvent = SubscribeToShardEvent.builder().millisBehindLatest(100L).records(nextRecords)
        .continuationSequenceNumber("6").build();
    nextSubscribeCaptor.getValue().onNext(batchEvent);
    verify(subscription, times(4)).request(1);
    assertThat(nonFailingSubscriber.received.size(), equalTo(2));
    verifyRecords(nonFailingSubscriber.received.get(0).records(), matchers);
    verifyRecords(nonFailingSubscriber.received.get(1).records(), nextMatchers);
}
Example #13
Source File: FanOutRecordsPublisherTest.java From amazon-kinesis-client with Apache License 2.0
@Test
public void largeRequestTest() throws Exception {
    FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN);
    ArgumentCaptor<FanOutRecordsPublisher.RecordSubscription> captor = ArgumentCaptor
        .forClass(FanOutRecordsPublisher.RecordSubscription.class);
    ArgumentCaptor<FanOutRecordsPublisher.RecordFlow> flowCaptor = ArgumentCaptor
        .forClass(FanOutRecordsPublisher.RecordFlow.class);
    doNothing().when(publisher).subscribe(captor.capture());
    source.start(ExtendedSequenceNumber.LATEST,
        InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST));
    List<ProcessRecordsInput> receivedInput = new ArrayList<>();
    source.subscribe(new ShardConsumerNotifyingSubscriber(new Subscriber<RecordsRetrieved>() {
        Subscription subscription;
        @Override
        public void onSubscribe(Subscription s) {
            subscription = s;
            subscription.request(3);
        }
        @Override
        public void onNext(RecordsRetrieved input) {
            receivedInput.add(input.processRecordsInput());
            subscription.request(1);
        }
        @Override
        public void onError(Throwable t) {
            log.error("Caught throwable in subscriber", t);
            fail("Caught throwable in subscriber");
        }
        @Override
        public void onComplete() {
            fail("OnComplete called when not expected");
        }
    }, source));
    verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture());
    flowCaptor.getValue().onEventStream(publisher);
    captor.getValue().onSubscribe(subscription);
    List<Record> records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList());
    List<KinesisClientRecordMatcher> matchers = records.stream().map(KinesisClientRecordMatcher::new)
        .collect(Collectors.toList());
    batchEvent = SubscribeToShardEvent.builder().millisBehindLatest(100L).records(records).build();
    captor.getValue().onNext(batchEvent);
    captor.getValue().onNext(batchEvent);
    captor.getValue().onNext(batchEvent);
    verify(subscription, times(4)).request(1);
    assertThat(receivedInput.size(), equalTo(3));
    receivedInput.stream().map(ProcessRecordsInput::records).forEach(clientRecordsList -> {
        assertThat(clientRecordsList.size(), equalTo(matchers.size()));
        for (int i = 0; i < clientRecordsList.size(); ++i) {
            assertThat(clientRecordsList.get(i), matchers.get(i));
        }
    });
}
Example #14
Source File: FanOutRecordsPublisherTest.java From amazon-kinesis-client with Apache License 2.0
@Test
public void testIfStreamOfEventsAreDeliveredInOrderWithBackpressureAdheringServicePublisherHavingInitialBurstOverLimit()
        throws Exception {
    FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN);
    ArgumentCaptor<FanOutRecordsPublisher.RecordSubscription> captor = ArgumentCaptor
        .forClass(FanOutRecordsPublisher.RecordSubscription.class);
    ArgumentCaptor<FanOutRecordsPublisher.RecordFlow> flowCaptor = ArgumentCaptor
        .forClass(FanOutRecordsPublisher.RecordFlow.class);
    List<Record> records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList());
    Consumer<Integer> servicePublisherAction = contSeqNum -> captor.getValue().onNext(
        SubscribeToShardEvent.builder()
            .millisBehindLatest(100L)
            .continuationSequenceNumber(contSeqNum + "")
            .records(records)
            .build());
    CountDownLatch servicePublisherTaskCompletionLatch = new CountDownLatch(1);
    int totalServicePublisherEvents = 1000;
    int initialDemand = 11;
    BackpressureAdheringServicePublisher servicePublisher =
        new BackpressureAdheringServicePublisher(servicePublisherAction, totalServicePublisherEvents,
            servicePublisherTaskCompletionLatch, initialDemand);
    doNothing().when(publisher).subscribe(captor.capture());
    source.start(ExtendedSequenceNumber.LATEST,
        InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST));
    List<ProcessRecordsInput> receivedInput = new ArrayList<>();
    AtomicBoolean onErrorSet = new AtomicBoolean(false);
    Subscriber<RecordsRetrieved> shardConsumerSubscriber = new ShardConsumerNotifyingSubscriber(
        new Subscriber<RecordsRetrieved>() {
            private Subscription subscription;
            private int lastSeenSeqNum = 0;
            @Override
            public void onSubscribe(Subscription s) {
                subscription = s;
                subscription.request(1);
                servicePublisher.request(1);
            }
            @Override
            public void onNext(RecordsRetrieved input) {
                receivedInput.add(input.processRecordsInput());
                assertEquals("" + ++lastSeenSeqNum,
                    ((FanOutRecordsPublisher.FanoutRecordsRetrieved) input).continuationSequenceNumber());
                subscription.request(1);
                servicePublisher.request(1);
            }
            @Override
            public void onError(Throwable t) {
                log.error("Caught throwable in subscriber", t);
                onErrorSet.set(true);
                servicePublisherTaskCompletionLatch.countDown();
            }
            @Override
            public void onComplete() {
                fail("OnComplete called when not expected");
            }
        }, source);
    ExecutorService executorService = getTestExecutor();
    Scheduler testScheduler = getScheduler(getInitiallyBlockingExecutor(getSpiedExecutor(executorService)));
    int bufferSize = 8;
    Flowable.fromPublisher(source).subscribeOn(testScheduler).observeOn(testScheduler, true, bufferSize)
        .subscribe(shardConsumerSubscriber);
    verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture());
    flowCaptor.getValue().onEventStream(publisher);
    captor.getValue().onSubscribe(subscription);
    List<KinesisClientRecordMatcher> matchers = records.stream().map(KinesisClientRecordMatcher::new)
        .collect(Collectors.toList());
    executorService.submit(servicePublisher);
    servicePublisherTaskCompletionLatch.await(5000, TimeUnit.MILLISECONDS);
    assertTrue("onError should have triggered", onErrorSet.get());
    receivedInput.stream().map(ProcessRecordsInput::records).forEach(clientRecordsList -> {
        assertThat(clientRecordsList.size(), equalTo(matchers.size()));
        for (int i = 0; i < clientRecordsList.size(); ++i) {
            assertThat(clientRecordsList.get(i), matchers.get(i));
        }
    });
}
Example #15
Source File: FanOutRecordsPublisherTest.java From amazon-kinesis-client with Apache License 2.0
@Test
public void testIfStreamOfEventsAreDeliveredInOrderWithBackpressureAdheringServicePublisherHavingInitialBurstWithinLimit()
        throws Exception {
    FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN);
    ArgumentCaptor<FanOutRecordsPublisher.RecordSubscription> captor = ArgumentCaptor
        .forClass(FanOutRecordsPublisher.RecordSubscription.class);
    ArgumentCaptor<FanOutRecordsPublisher.RecordFlow> flowCaptor = ArgumentCaptor
        .forClass(FanOutRecordsPublisher.RecordFlow.class);
    List<Record> records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList());
    Consumer<Integer> servicePublisherAction = contSeqNum -> captor.getValue().onNext(
        SubscribeToShardEvent.builder()
            .millisBehindLatest(100L)
            .continuationSequenceNumber(contSeqNum + "")
            .records(records)
            .build());
    CountDownLatch servicePublisherTaskCompletionLatch = new CountDownLatch(2);
    int totalServicePublisherEvents = 1000;
    int initialDemand = 9;
    BackpressureAdheringServicePublisher servicePublisher =
        new BackpressureAdheringServicePublisher(servicePublisherAction, totalServicePublisherEvents,
            servicePublisherTaskCompletionLatch, initialDemand);
    doNothing().when(publisher).subscribe(captor.capture());
    source.start(ExtendedSequenceNumber.LATEST,
        InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST));
    List<ProcessRecordsInput> receivedInput = new ArrayList<>();
    Subscriber<RecordsRetrieved> shardConsumerSubscriber = new ShardConsumerNotifyingSubscriber(
        new Subscriber<RecordsRetrieved>() {
            private Subscription subscription;
            private int lastSeenSeqNum = 0;
            @Override
            public void onSubscribe(Subscription s) {
                subscription = s;
                subscription.request(1);
                servicePublisher.request(1);
            }
            @Override
            public void onNext(RecordsRetrieved input) {
                receivedInput.add(input.processRecordsInput());
                assertEquals("" + ++lastSeenSeqNum,
                    ((FanOutRecordsPublisher.FanoutRecordsRetrieved) input).continuationSequenceNumber());
                subscription.request(1);
                servicePublisher.request(1);
                if (receivedInput.size() == totalServicePublisherEvents) {
                    servicePublisherTaskCompletionLatch.countDown();
                }
            }
            @Override
            public void onError(Throwable t) {
                log.error("Caught throwable in subscriber", t);
                fail("Caught throwable in subscriber");
            }
            @Override
            public void onComplete() {
                fail("OnComplete called when not expected");
            }
        }, source);
    ExecutorService executorService = getTestExecutor();
    Scheduler testScheduler = getScheduler(getInitiallyBlockingExecutor(getSpiedExecutor(executorService)));
    int bufferSize = 8;
    Flowable.fromPublisher(source).subscribeOn(testScheduler).observeOn(testScheduler, true, bufferSize)
        .subscribe(shardConsumerSubscriber);
    verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture());
    flowCaptor.getValue().onEventStream(publisher);
    captor.getValue().onSubscribe(subscription);
    List<KinesisClientRecordMatcher> matchers = records.stream().map(KinesisClientRecordMatcher::new)
        .collect(Collectors.toList());
    executorService.submit(servicePublisher);
    servicePublisherTaskCompletionLatch.await(5000, TimeUnit.MILLISECONDS);
    assertThat(receivedInput.size(), equalTo(totalServicePublisherEvents));
    receivedInput.stream().map(ProcessRecordsInput::records).forEach(clientRecordsList -> {
        assertThat(clientRecordsList.size(), equalTo(matchers.size()));
        for (int i = 0; i < clientRecordsList.size(); ++i) {
            assertThat(clientRecordsList.get(i), matchers.get(i));
        }
    });
    assertThat(source.getCurrentSequenceNumber(), equalTo(totalServicePublisherEvents + ""));
}
Example #16
Source File: FanOutRecordsPublisherTest.java From amazon-kinesis-client with Apache License 2.0
@Test
public void testIfStreamOfEventsAndOnErrorAreDeliveredInOrderWithBackpressureAdheringServicePublisher()
        throws Exception {
    FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN);
    ArgumentCaptor<FanOutRecordsPublisher.RecordSubscription> captor = ArgumentCaptor
        .forClass(FanOutRecordsPublisher.RecordSubscription.class);
    ArgumentCaptor<FanOutRecordsPublisher.RecordFlow> flowCaptor = ArgumentCaptor
        .forClass(FanOutRecordsPublisher.RecordFlow.class);
    List<Record> records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList());
    Consumer<Integer> servicePublisherAction = contSeqNum -> captor.getValue().onNext(
        SubscribeToShardEvent.builder()
            .millisBehindLatest(100L)
            .continuationSequenceNumber(contSeqNum + "")
            .records(records)
            .build());
    CountDownLatch servicePublisherTaskCompletionLatch = new CountDownLatch(2);
    CountDownLatch onErrorReceiveLatch = new CountDownLatch(1);
    int totalServicePublisherEvents = 1000;
    int initialDemand = 9;
    int triggerErrorAtNthEvent = 241;
    BackpressureAdheringServicePublisher servicePublisher = new BackpressureAdheringServicePublisher(
        servicePublisherAction, totalServicePublisherEvents, servicePublisherTaskCompletionLatch, initialDemand);
    servicePublisher.setErrorTrigger(triggerErrorAtNthEvent,
        () -> flowCaptor.getValue().exceptionOccurred(new RuntimeException("Service Exception")));
    doNothing().when(publisher).subscribe(captor.capture());
    source.start(ExtendedSequenceNumber.LATEST,
        InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST));
    final boolean[] isOnErrorThrown = { false };
    List<ProcessRecordsInput> receivedInput = new ArrayList<>();
    Subscriber<RecordsRetrieved> shardConsumerSubscriber = new ShardConsumerNotifyingSubscriber(
        new Subscriber<RecordsRetrieved>() {
            private Subscription subscription;
            private int lastSeenSeqNum = 0;
            @Override
            public void onSubscribe(Subscription s) {
                subscription = s;
                subscription.request(1);
                servicePublisher.request(1);
            }
            @Override
            public void onNext(RecordsRetrieved input) {
                receivedInput.add(input.processRecordsInput());
                assertEquals("" + ++lastSeenSeqNum,
                    ((FanOutRecordsPublisher.FanoutRecordsRetrieved) input).continuationSequenceNumber());
                subscription.request(1);
                servicePublisher.request(1);
                if (receivedInput.size() == triggerErrorAtNthEvent) {
                    servicePublisherTaskCompletionLatch.countDown();
                }
            }
            @Override
            public void onError(Throwable t) {
                log.error("Caught throwable in subscriber", t);
                isOnErrorThrown[0] = true;
                onErrorReceiveLatch.countDown();
            }
            @Override
            public void onComplete() {
                fail("OnComplete called when not expected");
            }
        }, source);
    ExecutorService executorService = getTestExecutor();
    Scheduler testScheduler = getScheduler(getInitiallyBlockingExecutor(getSpiedExecutor(executorService)));
    int bufferSize = 8;
    Flowable.fromPublisher(source).subscribeOn(testScheduler).observeOn(testScheduler, true, bufferSize)
        .subscribe(shardConsumerSubscriber);
    verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture());
    flowCaptor.getValue().onEventStream(publisher);
    captor.getValue().onSubscribe(subscription);
    List<KinesisClientRecordMatcher> matchers = records.stream().map(KinesisClientRecordMatcher::new)
        .collect(Collectors.toList());
    executorService.submit(servicePublisher);
    servicePublisherTaskCompletionLatch.await(5000, TimeUnit.MILLISECONDS);
    assertThat(receivedInput.size(), equalTo(triggerErrorAtNthEvent));
    receivedInput.stream().map(ProcessRecordsInput::records).forEach(clientRecordsList -> {
        assertThat(clientRecordsList.size(), equalTo(matchers.size()));
        for (int i = 0; i < clientRecordsList.size(); ++i) {
            assertThat(clientRecordsList.get(i), matchers.get(i));
        }
    });
    assertThat(source.getCurrentSequenceNumber(), equalTo(triggerErrorAtNthEvent + ""));
    onErrorReceiveLatch.await(5000, TimeUnit.MILLISECONDS);
    assertTrue("OnError should have been thrown", isOnErrorThrown[0]);
}
Example #17
Source File: FanOutRecordsPublisherTest.java From amazon-kinesis-client with Apache License 2.0
@Test
public void testIfStreamOfEventsAreDeliveredInOrderWithBackpressureAdheringServicePublisher() throws Exception {
    FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN);
    ArgumentCaptor<FanOutRecordsPublisher.RecordSubscription> captor = ArgumentCaptor
        .forClass(FanOutRecordsPublisher.RecordSubscription.class);
    ArgumentCaptor<FanOutRecordsPublisher.RecordFlow> flowCaptor = ArgumentCaptor
        .forClass(FanOutRecordsPublisher.RecordFlow.class);
    List<Record> records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList());
    Consumer<Integer> servicePublisherAction = contSeqNum -> captor.getValue().onNext(
        SubscribeToShardEvent.builder()
            .millisBehindLatest(100L)
            .continuationSequenceNumber(contSeqNum + "")
            .records(records)
            .build());
    CountDownLatch servicePublisherTaskCompletionLatch = new CountDownLatch(2);
    int totalServicePublisherEvents = 1000;
    int initialDemand = 0;
    BackpressureAdheringServicePublisher servicePublisher =
        new BackpressureAdheringServicePublisher(servicePublisherAction, totalServicePublisherEvents,
            servicePublisherTaskCompletionLatch, initialDemand);
    doNothing().when(publisher).subscribe(captor.capture());
    source.start(ExtendedSequenceNumber.LATEST,
        InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST));
    List<ProcessRecordsInput> receivedInput = new ArrayList<>();
    Subscriber<RecordsRetrieved> shardConsumerSubscriber = new ShardConsumerNotifyingSubscriber(
        new Subscriber<RecordsRetrieved>() {
            private Subscription subscription;
            private int lastSeenSeqNum = 0;
            @Override
            public void onSubscribe(Subscription s) {
                subscription = s;
                subscription.request(1);
                servicePublisher.request(1);
            }
            @Override
            public void onNext(RecordsRetrieved input) {
                receivedInput.add(input.processRecordsInput());
                assertEquals("" + ++lastSeenSeqNum,
                    ((FanOutRecordsPublisher.FanoutRecordsRetrieved) input).continuationSequenceNumber());
                subscription.request(1);
                servicePublisher.request(1);
                if (receivedInput.size() == totalServicePublisherEvents) {
                    servicePublisherTaskCompletionLatch.countDown();
                }
            }
            @Override
            public void onError(Throwable t) {
                log.error("Caught throwable in subscriber", t);
                fail("Caught throwable in subscriber");
            }
            @Override
            public void onComplete() {
                fail("OnComplete called when not expected");
            }
        }, source);
    ExecutorService executorService = getTestExecutor();
    Scheduler testScheduler = getScheduler(getInitiallyBlockingExecutor(getSpiedExecutor(executorService)));
    int bufferSize = 8;
    Flowable.fromPublisher(source).subscribeOn(testScheduler).observeOn(testScheduler, true, bufferSize)
        .subscribe(shardConsumerSubscriber);
    verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture());
    flowCaptor.getValue().onEventStream(publisher);
    captor.getValue().onSubscribe(subscription);
    List<KinesisClientRecordMatcher> matchers = records.stream().map(KinesisClientRecordMatcher::new)
        .collect(Collectors.toList());
    executorService.submit(servicePublisher);
    servicePublisherTaskCompletionLatch.await(5000, TimeUnit.MILLISECONDS);
    assertThat(receivedInput.size(), equalTo(totalServicePublisherEvents));
    receivedInput.stream().map(ProcessRecordsInput::records).forEach(clientRecordsList -> {
        assertThat(clientRecordsList.size(), equalTo(matchers.size()));
        for (int i = 0; i < clientRecordsList.size(); ++i) {
            assertThat(clientRecordsList.get(i), matchers.get(i));
        }
    });
    assertThat(source.getCurrentSequenceNumber(), equalTo(totalServicePublisherEvents + ""));
}
Example #18
Source File: FanOutRecordsPublisherTest.java From amazon-kinesis-client with Apache License 2.0
@Test
public void testIfEventsAreNotDeliveredToShardConsumerWhenPreviousEventDeliveryTaskGetsRejected() throws Exception {
    FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN);
    ArgumentCaptor<FanOutRecordsPublisher.RecordSubscription> captor = ArgumentCaptor
        .forClass(FanOutRecordsPublisher.RecordSubscription.class);
    ArgumentCaptor<FanOutRecordsPublisher.RecordFlow> flowCaptor = ArgumentCaptor
        .forClass(FanOutRecordsPublisher.RecordFlow.class);
    doNothing().when(publisher).subscribe(captor.capture());
    source.start(ExtendedSequenceNumber.LATEST,
        InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST));
    List<ProcessRecordsInput> receivedInput = new ArrayList<>();
    Subscriber<RecordsRetrieved> shardConsumerSubscriber = new ShardConsumerNotifyingSubscriber(
        new Subscriber<RecordsRetrieved>() {
            Subscription subscription;
            @Override
            public void onSubscribe(Subscription s) {
                subscription = s;
                subscription.request(1);
            }
            @Override
            public void onNext(RecordsRetrieved input) {
                receivedInput.add(input.processRecordsInput());
                subscription.request(1);
            }
            @Override
            public void onError(Throwable t) {
                log.error("Caught throwable in subscriber", t);
                fail("Caught throwable in subscriber");
            }
            @Override
            public void onComplete() {
                fail("OnComplete called when not expected");
            }
        }, source);
    Scheduler testScheduler = getScheduler(getOverwhelmedBlockingExecutor(getSpiedExecutor(getTestExecutor())));
    int bufferSize = 8;
    Flowable.fromPublisher(source).subscribeOn(testScheduler).observeOn(testScheduler, true, bufferSize)
        .subscribe(new SafeSubscriber<>(shardConsumerSubscriber));
    verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture());
    flowCaptor.getValue().onEventStream(publisher);
    captor.getValue().onSubscribe(subscription);
    List<Record> records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList());
    List<KinesisClientRecordMatcher> matchers = records.stream().map(KinesisClientRecordMatcher::new)
        .collect(Collectors.toList());
    Stream.of("1000", "2000", "3000")
        .map(contSeqNum -> SubscribeToShardEvent.builder()
            .millisBehindLatest(100L)
            .continuationSequenceNumber(contSeqNum)
            .records(records).build())
        .forEach(batchEvent -> captor.getValue().onNext(batchEvent));
    verify(subscription, times(2)).request(1);
    assertThat(receivedInput.size(), equalTo(1));
    receivedInput.stream().map(ProcessRecordsInput::records).forEach(clientRecordsList -> {
        assertThat(clientRecordsList.size(), equalTo(matchers.size()));
        for (int i = 0; i < clientRecordsList.size(); ++i) {
            assertThat(clientRecordsList.get(i), matchers.get(i));
        }
    });
    assertThat(source.getCurrentSequenceNumber(), equalTo("1000"));
}
Example #19
Source File: FanOutRecordsPublisherTest.java From amazon-kinesis-client with Apache License 2.0
@Test
public void testIfAllEventsReceivedWhenNoTasksRejectedByExecutor() throws Exception {
    FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN);
    ArgumentCaptor<FanOutRecordsPublisher.RecordSubscription> captor = ArgumentCaptor
        .forClass(FanOutRecordsPublisher.RecordSubscription.class);
    ArgumentCaptor<FanOutRecordsPublisher.RecordFlow> flowCaptor = ArgumentCaptor
        .forClass(FanOutRecordsPublisher.RecordFlow.class);
    doNothing().when(publisher).subscribe(captor.capture());
    source.start(ExtendedSequenceNumber.LATEST,
        InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST));
    List<ProcessRecordsInput> receivedInput = new ArrayList<>();
    Subscriber<RecordsRetrieved> shardConsumerSubscriber = new ShardConsumerNotifyingSubscriber(
        new Subscriber<RecordsRetrieved>() {
            Subscription subscription;
            @Override
            public void onSubscribe(Subscription s) {
                subscription = s;
                subscription.request(1);
            }
            @Override
            public void onNext(RecordsRetrieved input) {
                receivedInput.add(input.processRecordsInput());
                subscription.request(1);
            }
            @Override
            public void onError(Throwable t) {
                log.error("Caught throwable in subscriber", t);
                fail("Caught throwable in subscriber");
            }
            @Override
            public void onComplete() {
                fail("OnComplete called when not expected");
            }
        }, source);
    Scheduler testScheduler = getScheduler(getBlockingExecutor(getSpiedExecutor(getTestExecutor())));
    int bufferSize = 8;
    Flowable.fromPublisher(source).subscribeOn(testScheduler).observeOn(testScheduler, true, bufferSize)
        .subscribe(shardConsumerSubscriber);
    verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture());
    flowCaptor.getValue().onEventStream(publisher);
    captor.getValue().onSubscribe(subscription);
    List<Record> records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList());
    List<KinesisClientRecordMatcher> matchers = records.stream().map(KinesisClientRecordMatcher::new)
        .collect(Collectors.toList());
    Stream.of("1000", "2000", "3000")
        .map(contSeqNum -> SubscribeToShardEvent.builder()
            .millisBehindLatest(100L)
            .continuationSequenceNumber(contSeqNum)
            .records(records).build())
        .forEach(batchEvent -> captor.getValue().onNext(batchEvent));
    verify(subscription, times(4)).request(1);
    assertThat(receivedInput.size(), equalTo(3));
    receivedInput.stream().map(ProcessRecordsInput::records).forEach(clientRecordsList -> {
        assertThat(clientRecordsList.size(), equalTo(matchers.size()));
        for (int i = 0; i < clientRecordsList.size(); ++i) {
            assertThat(clientRecordsList.get(i), matchers.get(i));
        }
    });
    assertThat(source.getCurrentSequenceNumber(), equalTo("3000"));
}
Example #20
Source File: FanOutRecordsPublisherTest.java From amazon-kinesis-client with Apache License 2.0
@Test
public void simpleTest() throws Exception {
    FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN);
    ArgumentCaptor<FanOutRecordsPublisher.RecordSubscription> captor = ArgumentCaptor
        .forClass(FanOutRecordsPublisher.RecordSubscription.class);
    ArgumentCaptor<FanOutRecordsPublisher.RecordFlow> flowCaptor = ArgumentCaptor
        .forClass(FanOutRecordsPublisher.RecordFlow.class);
    doNothing().when(publisher).subscribe(captor.capture());
    source.start(ExtendedSequenceNumber.LATEST,
        InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST));
    List<ProcessRecordsInput> receivedInput = new ArrayList<>();
    source.subscribe(new ShardConsumerNotifyingSubscriber(new Subscriber<RecordsRetrieved>() {
        Subscription subscription;
        @Override
        public void onSubscribe(Subscription s) {
            subscription = s;
            subscription.request(1);
        }
        @Override
        public void onNext(RecordsRetrieved input) {
            receivedInput.add(input.processRecordsInput());
            subscription.request(1);
        }
        @Override
        public void onError(Throwable t) {
            log.error("Caught throwable in subscriber", t);
            fail("Caught throwable in subscriber");
        }
        @Override
        public void onComplete() {
            fail("OnComplete called when not expected");
        }
    }, source));
    verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture());
    flowCaptor.getValue().onEventStream(publisher);
    captor.getValue().onSubscribe(subscription);
    List<Record> records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList());
    List<KinesisClientRecordMatcher> matchers = records.stream().map(KinesisClientRecordMatcher::new)
        .collect(Collectors.toList());
    batchEvent = SubscribeToShardEvent.builder().millisBehindLatest(100L).records(records).build();
    captor.getValue().onNext(batchEvent);
    captor.getValue().onNext(batchEvent);
    captor.getValue().onNext(batchEvent);
    verify(subscription, times(4)).request(1);
    assertThat(receivedInput.size(), equalTo(3));
    receivedInput.stream().map(ProcessRecordsInput::records).forEach(clientRecordsList -> {
        assertThat(clientRecordsList.size(), equalTo(matchers.size()));
        for (int i = 0; i < clientRecordsList.size(); ++i) {
            assertThat(clientRecordsList.get(i), matchers.get(i));
        }
    });
}
Example #21
Source File: FanOutRecordsPublisher.java From amazon-kinesis-client with Apache License 2.0
private void recordsReceived(SubscribeToShardEvent event) {
    parent.recordsReceived(this, event);
}