software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest Java Examples
The following examples show how to use
software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest.
Each example is drawn from an open-source project; the source file, project, and license are noted above the example.
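Before the full examples, the snippet below is a minimal orientation sketch, not taken from any of the projects above; it combines the request-builder and response-handler patterns that the examples demonstrate. The consumer ARN and shard ID are placeholder values.

// Minimal orientation sketch (not from the projects below): build a SubscribeToShardRequest
// and subscribe with a simple consumer-based response handler.
import software.amazon.awssdk.services.kinesis.KinesisAsyncClient;
import software.amazon.awssdk.services.kinesis.model.ShardIteratorType;
import software.amazon.awssdk.services.kinesis.model.StartingPosition;
import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest;
import software.amazon.awssdk.services.kinesis.model.SubscribeToShardResponseHandler;

public class SubscribeToShardSketch {
    public static void main(String[] args) {
        KinesisAsyncClient client = KinesisAsyncClient.create();

        SubscribeToShardRequest request = SubscribeToShardRequest.builder()
                .consumerARN("arn:aws:kinesis:us-east-1:123456789012:stream/ExampleStream/consumer/ExampleConsumer:1234567890") // placeholder
                .shardId("shardId-000000000000")                                                                                // placeholder
                .startingPosition(StartingPosition.builder().type(ShardIteratorType.LATEST).build())
                .build();

        SubscribeToShardResponseHandler responseHandler = SubscribeToShardResponseHandler.builder()
                .onError(t -> System.err.println("Error during stream - " + t.getMessage()))
                .subscriber(e -> System.out.println("Received event - " + e))
                .build();

        // subscribeToShard returns a CompletableFuture<Void> that completes when the subscription ends.
        client.subscribeToShard(request, responseHandler).join();
        client.close();
    }
}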
Example #1
Source File: FanOutRecordsPublisherTest.java From amazon-kinesis-client with Apache License 2.0

@Test
public void testResourceNotFoundForShard() {
    FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN);

    ArgumentCaptor<FanOutRecordsPublisher.RecordFlow> flowCaptor = ArgumentCaptor
            .forClass(FanOutRecordsPublisher.RecordFlow.class);
    ArgumentCaptor<RecordsRetrieved> inputCaptor = ArgumentCaptor.forClass(RecordsRetrieved.class);

    source.subscribe(subscriber);

    verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture());
    FanOutRecordsPublisher.RecordFlow recordFlow = flowCaptor.getValue();
    recordFlow.exceptionOccurred(new RuntimeException(ResourceNotFoundException.builder().build()));

    verify(subscriber).onSubscribe(any());
    verify(subscriber, never()).onError(any());
    verify(subscriber).onNext(inputCaptor.capture());
    verify(subscriber).onComplete();

    ProcessRecordsInput input = inputCaptor.getValue().processRecordsInput();
    assertThat(input.isAtShardEnd(), equalTo(true));
    assertThat(input.records().isEmpty(), equalTo(true));
}
Example #2
Source File: KinesisStreamReactorEx.java From aws-doc-sdk-examples with Apache License 2.0

/**
 * Uses Reactor via the onEventStream lifecycle method. This gives you full access to the publisher,
 * which can be used to create a Flux.
 */
private static CompletableFuture<Void> responseHandlerBuilder_Reactor(KinesisAsyncClient client,
                                                                      SubscribeToShardRequest request) {
    SubscribeToShardResponseHandler responseHandler = SubscribeToShardResponseHandler
            .builder()
            .onError(t -> System.err.println("Error during stream - " + t.getMessage()))
            .onEventStream(p -> Flux.from(p)
                    .ofType(SubscribeToShardEvent.class)
                    .flatMapIterable(SubscribeToShardEvent::records)
                    .limitRate(1000)
                    .buffer(25)
                    .subscribe(e -> System.out.println("Record batch = " + e)))
            .build();
    return client.subscribeToShard(request, responseHandler);
}
Example #3
Source File: FanOutRecordsPublisherTest.java From amazon-kinesis-client with Apache License 2.0

@Test
public void testReadTimeoutExceptionForShard() {
    FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN);

    ArgumentCaptor<FanOutRecordsPublisher.RecordFlow> flowCaptor = ArgumentCaptor
            .forClass(FanOutRecordsPublisher.RecordFlow.class);

    source.subscribe(subscriber);

    verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture());
    FanOutRecordsPublisher.RecordFlow recordFlow = flowCaptor.getValue();
    recordFlow.exceptionOccurred(new RuntimeException(ReadTimeoutException.INSTANCE));

    verify(subscriber).onSubscribe(any());
    verify(subscriber).onError(any(RetryableRetrievalException.class));
    verify(subscriber, never()).onNext(any());
    verify(subscriber, never()).onComplete();
}
Example #4
Source File: IteratorBuilderTest.java From amazon-kinesis-client with Apache License 2.0

static WrappedRequest<SubscribeToShardRequest> wrapped(SubscribeToShardRequest.Builder builder) {
    SubscribeToShardRequest req = builder.build();
    return new WrappedRequest<SubscribeToShardRequest>() {
        @Override
        public ShardIteratorType shardIteratorType() {
            return req.startingPosition().type();
        }

        @Override
        public String sequenceNumber() {
            return req.startingPosition().sequenceNumber();
        }

        @Override
        public Instant timestamp() {
            return req.startingPosition().timestamp();
        }

        @Override
        public SubscribeToShardRequest request() {
            return req;
        }
    };
}
Example #5
Source File: KinesisStreamRxJavaEx.java From aws-doc-sdk-examples with Apache License 2.0

/**
 * Uses RxJava via the onEventStream lifecycle method. This gives you full access to the publisher,
 * which can be used to create an Rx Flowable.
 */
private static CompletableFuture<Void> responseHandlerBuilder_RxJava(KinesisAsyncClient client,
                                                                     SubscribeToShardRequest request) {
    // snippet-start:[kinesis.java2.stream_rx_example.event_stream]
    SubscribeToShardResponseHandler responseHandler = SubscribeToShardResponseHandler
            .builder()
            .onError(t -> System.err.println("Error during stream - " + t.getMessage()))
            .onEventStream(p -> Flowable.fromPublisher(p)
                    .ofType(SubscribeToShardEvent.class)
                    .flatMapIterable(SubscribeToShardEvent::records)
                    .limit(1000)
                    .buffer(25)
                    .subscribe(e -> System.out.println("Record batch = " + e)))
            .build();
    // snippet-end:[kinesis.java2.stream_rx_example.event_stream]
    return client.subscribeToShard(request, responseHandler);
}
Example #6
Source File: FanOutRecordsPublisher.java From amazon-kinesis-client with Apache License 2.0

private void subscribeToShard(String sequenceNumber) {
    synchronized (lockObject) {
        // Clear the delivery queue so that any stale entries from previous subscription are discarded.
        resetRecordsDeliveryStateOnSubscriptionOnInit();
        SubscribeToShardRequest.Builder builder = KinesisRequestsBuilder.subscribeToShardRequestBuilder()
                .shardId(shardId).consumerARN(consumerArn);
        SubscribeToShardRequest request;
        if (isFirstConnection) {
            request = IteratorBuilder.request(builder, sequenceNumber, initialPositionInStreamExtended).build();
        } else {
            request = IteratorBuilder.reconnectRequest(builder, sequenceNumber, initialPositionInStreamExtended)
                    .build();
        }

        Instant connectionStart = Instant.now();
        int subscribeInvocationId = subscribeToShardId.incrementAndGet();
        String instanceId = shardId + "-" + subscribeInvocationId;
        log.debug(
                "{}: [SubscriptionLifetime]: (FanOutRecordsPublisher#subscribeToShard) @ {} id: {} -- Starting subscribe to shard",
                shardId, connectionStart, instanceId);
        flow = new RecordFlow(this, connectionStart, instanceId);
        kinesis.subscribeToShard(request, flow);
    }
}
Example #7
Source File: KinesisStreamEx.java From aws-doc-sdk-examples with Apache License 2.0

public static void main(String[] args) {
    // snippet-start:[kinesis.java2.stream_example.setup]
    Region region = Region.US_EAST_1;
    KinesisAsyncClient client = KinesisAsyncClient.builder()
            .region(region)
            .build();

    SubscribeToShardRequest request = SubscribeToShardRequest.builder()
            .consumerARN(CONSUMER_ARN)
            // Note: this value from the original example looks like a stream ARN; a shard ID is
            // normally of the form "shardId-000000000000" (compare the other main() examples below).
            .shardId("arn:aws:kinesis:us-east-1:814548047983:stream/StockTradeStream")
            .startingPosition(s -> s.type(ShardIteratorType.LATEST)).build();
    // snippet-end:[kinesis.java2.stream_example.setup]

    SubscribeToShardResponseHandler responseHandler = SubscribeToShardResponseHandler
            .builder()
            .onError(t -> System.err.println("Error during stream - " + t.getMessage()))
            .subscriber(MySubscriber::new)
            .build();

    client.subscribeToShard(request, responseHandler);
    client.close();
}
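Several of the KinesisStreamEx examples pass a MySubscriber instance to the response handler, but its definition is not included in this listing. Below is a hypothetical minimal sketch of such a Reactive Streams subscriber, assuming it implements Subscriber<SubscribeToShardEventStream>; it is not the original class from aws-doc-sdk-examples.

// Hypothetical sketch only -- the real MySubscriber used by KinesisStreamEx is not shown on this page.
// A subscriber supplied via SubscribeToShardResponseHandler.Builder#subscriber receives the
// event stream and must manage its own demand.
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;
import software.amazon.awssdk.services.kinesis.model.SubscribeToShardEventStream;

public class MySubscriber implements Subscriber<SubscribeToShardEventStream> {
    private Subscription subscription;

    @Override
    public void onSubscribe(Subscription s) {
        subscription = s;
        subscription.request(1); // request the first event
    }

    @Override
    public void onNext(SubscribeToShardEventStream event) {
        System.out.println("Received event " + event);
        subscription.request(1); // request the next event
    }

    @Override
    public void onError(Throwable t) {
        System.err.println("Error during stream - " + t.getMessage());
    }

    @Override
    public void onComplete() {
        System.out.println("Subscription completed");
    }
}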
Example #8
Source File: SubscribeToShardUnmarshallingTest.java From aws-sdk-java-v2 with Apache License 2.0

private List<SubscribeToShardEventStream> subscribeToShard() throws Throwable {
    try {
        List<SubscribeToShardEventStream> events = new ArrayList<>();
        client.subscribeToShard(SubscribeToShardRequest.builder().build(),
                SubscribeToShardResponseHandler.builder()
                        .subscriber(events::add)
                        .build())
                .join();
        return events;
    } catch (CompletionException e) {
        throw e.getCause();
    }
}
Example #9
Source File: KinesisStreamEx.java From aws-doc-sdk-examples with Apache License 2.0

/**
 * Subscribes to the publisher using the onEventStream lifecycle callback method, which allows greater control
 * over the publisher and allows transformation methods on the publisher, like map and buffer.
 */
private static CompletableFuture<Void> responseHandlerBuilderOnEventStream(KinesisAsyncClient client,
                                                                           SubscribeToShardRequest request) {
    SubscribeToShardResponseHandler responseHandler = SubscribeToShardResponseHandler
            .builder()
            .onError(t -> System.err.println("Error during stream - " + t.getMessage()))
            .onEventStream(p -> p.filter(SubscribeToShardEvent.class).subscribe(new MySubscriber()))
            .build();
    return client.subscribeToShard(request, responseHandler);
}
Example #10
Source File: KinesisStreamEx.java From aws-doc-sdk-examples with Apache License 2.0

/**
 * Uses the SubscribeToShardResponseHandler.Builder and a traditional subscriber.
 */
// snippet-start:[kinesis.java2.stream_example.subscribe]
private static CompletableFuture<Void> responseHandlerBuilderSubscriber(KinesisAsyncClient client,
                                                                        SubscribeToShardRequest request) {
    SubscribeToShardResponseHandler responseHandler = SubscribeToShardResponseHandler
            .builder()
            .onError(t -> System.err.println("Error during stream - " + t.getMessage()))
            .subscriber(MySubscriber::new)
            .build();
    return client.subscribeToShard(request, responseHandler);
}
Example #11
Source File: KinesisStreamEx.java From aws-doc-sdk-examples with Apache License 2.0

/**
 * Creates a SubscribeToShardResponseHandler the classic way by implementing the interface.
 */
// snippet-start:[kinesis.java2.stream_example.custom_handler]
private static CompletableFuture<Void> responseHandlerBuilderClassic(KinesisAsyncClient client,
                                                                     SubscribeToShardRequest request) {
    SubscribeToShardResponseHandler responseHandler = new SubscribeToShardResponseHandler() {

        @Override
        public void responseReceived(SubscribeToShardResponse response) {
            System.out.println("Received initial response");
        }

        @Override
        public void onEventStream(SdkPublisher<SubscribeToShardEventStream> publisher) {
            publisher
                    // Filter to only SubscribeToShardEvents
                    .filter(SubscribeToShardEvent.class)
                    // Flat map into a publisher of just records
                    .flatMapIterable(SubscribeToShardEvent::records)
                    // Limit to 1000 total records
                    .limit(1000)
                    // Batch records into lists of 25
                    .buffer(25)
                    // Print out each record batch
                    .subscribe(batch -> System.out.println("Record Batch - " + batch));
        }

        @Override
        public void complete() {
            System.out.println("All records stream successfully");
        }

        @Override
        public void exceptionOccurred(Throwable throwable) {
            System.err.println("Error during stream - " + throwable.getMessage());
        }
    };
    return client.subscribeToShard(request, responseHandler);
}
Example #12
Source File: KinesisStreamEx.java From aws-doc-sdk-examples with Apache License 2.0

/**
 * Subscribes to the stream of events by implementing the SubscribeToShardResponseHandler.Visitor interface.
 */
private static CompletableFuture<Void> responseHandlerBuilderVisitor(KinesisAsyncClient client,
                                                                     SubscribeToShardRequest request) {
    SubscribeToShardResponseHandler.Visitor visitor = new SubscribeToShardResponseHandler.Visitor() {
        @Override
        public void visit(SubscribeToShardEvent event) {
            System.out.println("Received subscribe to shard event " + event);
        }
    };
    SubscribeToShardResponseHandler responseHandler = SubscribeToShardResponseHandler
            .builder()
            .onError(t -> System.err.println("Error during stream - " + t.getMessage()))
            .subscriber(visitor)
            .build();
    return client.subscribeToShard(request, responseHandler);
}
Example #13
Source File: KinesisStreamEx.java From aws-doc-sdk-examples with Apache License 2.0

/**
 * Creates a SubscribeToShardResponseHandler.Visitor using the builder, which lets you register an event handler
 * for all events you're interested in instead of implementing the interface.
 */
// snippet-start:[kinesis.java2.stream_example.visitor]
private static CompletableFuture<Void> responseHandlerBuilderVisitorBuilder(KinesisAsyncClient client,
                                                                            SubscribeToShardRequest request) {
    SubscribeToShardResponseHandler.Visitor visitor = SubscribeToShardResponseHandler.Visitor
            .builder()
            .onSubscribeToShardEvent(e -> System.out.println("Received subscribe to shard event " + e))
            .build();
    SubscribeToShardResponseHandler responseHandler = SubscribeToShardResponseHandler
            .builder()
            .onError(t -> System.err.println("Error during stream - " + t.getMessage()))
            .subscriber(visitor)
            .build();
    return client.subscribeToShard(request, responseHandler);
}
Example #14
Source File: KinesisStreamEx.java From aws-doc-sdk-examples with Apache License 2.0

/**
 * Uses the publisherTransformer method to customize the publisher before ultimately subscribing to it.
 */
// snippet-start:[kinesis.java2.stream_example.publish_transformer]
private static CompletableFuture<Void> responseHandlerBuilderPublisherTransformer(KinesisAsyncClient client,
                                                                                  SubscribeToShardRequest request) {
    SubscribeToShardResponseHandler responseHandler = SubscribeToShardResponseHandler
            .builder()
            .onError(t -> System.err.println("Error during stream - " + t.getMessage()))
            .publisherTransformer(p -> p.filter(e -> e instanceof SubscribeToShardEvent).limit(100))
            .subscriber(e -> System.out.println("Received event - " + e))
            .build();
    return client.subscribeToShard(request, responseHandler);
}
Example #15
Source File: KinesisStreamEx.java From aws-doc-sdk-examples with Apache License 2.0

/**
 * Uses the SubscribeToShardResponseHandler.Builder and a simple consumer of events to subscribe.
 */
private static CompletableFuture<Void> responseHandlerBuilderConsumer(KinesisAsyncClient client,
                                                                      SubscribeToShardRequest request) {
    SubscribeToShardResponseHandler responseHandler = SubscribeToShardResponseHandler
            .builder()
            .onError(t -> System.err.println("Error during stream - " + t.getMessage()))
            .subscriber(e -> System.out.println("Received event - " + e))
            .build();
    return client.subscribeToShard(request, responseHandler);
}
Example #16
Source File: KinesisStreamEx.java From aws-doc-sdk-examples with Apache License 2.0

private static CompletableFuture<Void> responseHandlerBuilder(KinesisAsyncClient client,
                                                              SubscribeToShardRequest request) {
    SubscribeToShardResponseHandler responseHandler = SubscribeToShardResponseHandler
            .builder()
            .onError(t -> System.err.println("Error during stream - " + t.getMessage()))
            .onComplete(() -> System.out.println("All records stream successfully"))
            // Must supply some type of subscriber
            .subscriber(e -> System.out.println("Received event - " + e))
            .build();
    return client.subscribeToShard(request, responseHandler);
}
Example #17
Source File: KinesisStreamRxJavaEx.java From aws-doc-sdk-examples with Apache License 2.0

public static void main(String[] args) {
    KinesisAsyncClient client = KinesisAsyncClient.create();

    SubscribeToShardRequest request = SubscribeToShardRequest.builder()
            .consumerARN(CONSUMER_ARN)
            .shardId("shardId-000000000000")
            .startingPosition(StartingPosition.builder().type(ShardIteratorType.LATEST).build())
            .build();

    responseHandlerBuilder_RxJava(client, request).join();

    client.close();
}
Example #18
Source File: KinesisStreamRxJavaEx.java From aws-doc-sdk-examples with Apache License 2.0

/**
 * Because a Flowable is also a publisher, the publisherTransformer method integrates nicely with RxJava.
 * Notice that you must adapt to an SdkPublisher.
 */
private static CompletableFuture<Void> responseHandlerBuilder_OnEventStream_RxJava(KinesisAsyncClient client,
                                                                                   SubscribeToShardRequest request) {
    // snippet-start:[kinesis.java2.stream_rx_example.publish_transform]
    SubscribeToShardResponseHandler responseHandler = SubscribeToShardResponseHandler
            .builder()
            .onError(t -> System.err.println("Error during stream - " + t.getMessage()))
            .publisherTransformer(p -> SdkPublisher.adapt(Flowable.fromPublisher(p).limit(100)))
            .build();
    // snippet-end:[kinesis.java2.stream_rx_example.publish_transform]
    return client.subscribeToShard(request, responseHandler);
}
Example #19
Source File: KinesisStreamReactorEx.java From aws-doc-sdk-examples with Apache License 2.0

public static void main(String[] args) {
    KinesisAsyncClient client = KinesisAsyncClient.create();

    SubscribeToShardRequest request = SubscribeToShardRequest.builder()
            .consumerARN(CONSUMER_ARN)
            .shardId("shardId-000000000000")
            .startingPosition(StartingPosition.builder().type(ShardIteratorType.LATEST).build())
            .build();

    responseHandlerBuilder_Reactor(client, request).join();

    client.close();
}
Example #20
Source File: KinesisStreamReactorEx.java From aws-doc-sdk-examples with Apache License 2.0

/**
 * Because a Flux is also a publisher, the publisherTransformer method integrates nicely with Reactor.
 * Notice that you must adapt to an SdkPublisher.
 */
private static CompletableFuture<Void> responseHandlerBuilder_OnEventStream_Reactor(KinesisAsyncClient client,
                                                                                    SubscribeToShardRequest request) {
    SubscribeToShardResponseHandler responseHandler = SubscribeToShardResponseHandler
            .builder()
            .onError(t -> System.err.println("Error during stream - " + t.getMessage()))
            .publisherTransformer(p -> Flux.from(p).limitRate(100).as(SdkPublisher::adapt))
            .build();
    return client.subscribeToShard(request, responseHandler);
}
Example #21
Source File: KinesisRetryPolicy.java From aws-sdk-java-v2 with Apache License 2.0

private static boolean isNotSubscribeToShard(RetryPolicyContext context) {
    return !(context.originalRequest() instanceof SubscribeToShardRequest);
}
Example #22
Source File: SubscribeToShardRequestMatcher.java From amazon-kinesis-client with Apache License 2.0

public SubscribeToShardRequestMatcher(SubscribeToShardRequest left) {
    super();
    this.left = left;
}
Example #23
Source File: IteratorBuilderTest.java From amazon-kinesis-client with Apache License 2.0

private SubscribeToShardRequest.Builder stsBase() {
    return SubscribeToShardRequest.builder().shardId(SHARD_ID).consumerARN(CONSUMER_ARN);
}
Example #24
Source File: IteratorBuilderTest.java From amazon-kinesis-client with Apache License 2.0

private void verifyStsBase(SubscribeToShardRequest req) {
    assertThat(req.shardId(), equalTo(SHARD_ID));
    assertThat(req.consumerARN(), equalTo(CONSUMER_ARN));
}
Example #25
Source File: FanOutRecordsPublisherTest.java From amazon-kinesis-client with Apache License 2.0

@Test
public void acquireTimeoutTriggersLogMethodForActiveFlow() {
    AtomicBoolean acquireTimeoutLogged = new AtomicBoolean(false);
    FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN) {
        @Override
        protected void logAcquireTimeoutMessage(Throwable t) {
            super.logAcquireTimeoutMessage(t);
            acquireTimeoutLogged.set(true);
        }
    };

    ArgumentCaptor<FanOutRecordsPublisher.RecordSubscription> captor = ArgumentCaptor
            .forClass(FanOutRecordsPublisher.RecordSubscription.class);
    ArgumentCaptor<FanOutRecordsPublisher.RecordFlow> flowCaptor = ArgumentCaptor
            .forClass(FanOutRecordsPublisher.RecordFlow.class);

    doNothing().when(publisher).subscribe(captor.capture());

    source.start(ExtendedSequenceNumber.LATEST,
            InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST));

    RecordingSubscriber subscriber = new RecordingSubscriber();
    source.subscribe(subscriber);

    verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture());

    Throwable exception = new CompletionException(
            "software.amazon.awssdk.core.exception.SdkClientException",
            SdkClientException.create(null, new Throwable(
                    "Acquire operation took longer than the configured maximum time. This indicates that a "
                            + "request cannot get a connection from the pool within the specified maximum time. "
                            + "This can be due to high request rate.\n"
                            + "Consider taking any of the following actions to mitigate the issue: increase max "
                            + "connections, increase acquire timeout, or slowing the request rate.\n"
                            + "Increasing the max connections can increase client throughput (unless the network "
                            + "interface is already fully utilized), but can eventually start to hit operation "
                            + "system limitations on the number of file descriptors used by the process. "
                            + "If you already are fully utilizing your network interface or cannot further "
                            + "increase your connection count, increasing the acquire timeout gives extra time "
                            + "for requests to acquire a connection before timing out. "
                            + "If the connections doesn't free up, the subsequent requests will still timeout.\n"
                            + "If the above mechanisms are not able to fix the issue, try smoothing out your "
                            + "requests so that large traffic bursts cannot overload the client, being more "
                            + "efficient with the number of times you need to call AWS, or by increasing the "
                            + "number of hosts sending requests.")));

    flowCaptor.getValue().exceptionOccurred(exception);

    Optional<OnErrorEvent> onErrorEvent = subscriber.events.stream()
            .filter(e -> e instanceof OnErrorEvent)
            .map(e -> (OnErrorEvent) e)
            .findFirst();

    assertThat(onErrorEvent, equalTo(Optional.of(new OnErrorEvent(exception))));
    assertThat(acquireTimeoutLogged.get(), equalTo(true));
}
Example #26
Source File: FanOutRecordsPublisherTest.java From amazon-kinesis-client with Apache License 2.0

@Test
public void testContinuesAfterSequence() {
    FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN);

    ArgumentCaptor<FanOutRecordsPublisher.RecordSubscription> captor = ArgumentCaptor
            .forClass(FanOutRecordsPublisher.RecordSubscription.class);
    ArgumentCaptor<FanOutRecordsPublisher.RecordFlow> flowCaptor = ArgumentCaptor
            .forClass(FanOutRecordsPublisher.RecordFlow.class);

    doNothing().when(publisher).subscribe(captor.capture());

    source.start(new ExtendedSequenceNumber("0"),
            InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST));

    NonFailingSubscriber nonFailingSubscriber = new NonFailingSubscriber();

    source.subscribe(new ShardConsumerNotifyingSubscriber(nonFailingSubscriber, source));

    SubscribeToShardRequest expected = SubscribeToShardRequest.builder().consumerARN(CONSUMER_ARN).shardId(SHARD_ID)
            .startingPosition(StartingPosition.builder().sequenceNumber("0")
                    .type(ShardIteratorType.AT_SEQUENCE_NUMBER).build())
            .build();

    verify(kinesisClient).subscribeToShard(argThat(new SubscribeToShardRequestMatcher(expected)),
            flowCaptor.capture());

    flowCaptor.getValue().onEventStream(publisher);
    captor.getValue().onSubscribe(subscription);

    List<Record> records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList());
    List<KinesisClientRecordMatcher> matchers = records.stream().map(KinesisClientRecordMatcher::new)
            .collect(Collectors.toList());

    batchEvent = SubscribeToShardEvent.builder().millisBehindLatest(100L).records(records)
            .continuationSequenceNumber("3").build();

    captor.getValue().onNext(batchEvent);
    captor.getValue().onComplete();
    flowCaptor.getValue().complete();

    ArgumentCaptor<FanOutRecordsPublisher.RecordSubscription> nextSubscribeCaptor = ArgumentCaptor
            .forClass(FanOutRecordsPublisher.RecordSubscription.class);
    ArgumentCaptor<FanOutRecordsPublisher.RecordFlow> nextFlowCaptor = ArgumentCaptor
            .forClass(FanOutRecordsPublisher.RecordFlow.class);

    SubscribeToShardRequest nextExpected = SubscribeToShardRequest.builder().consumerARN(CONSUMER_ARN)
            .shardId(SHARD_ID).startingPosition(StartingPosition.builder().sequenceNumber("3")
                    .type(ShardIteratorType.AFTER_SEQUENCE_NUMBER).build())
            .build();

    verify(kinesisClient).subscribeToShard(argThat(new SubscribeToShardRequestMatcher(nextExpected)),
            nextFlowCaptor.capture());

    reset(publisher);
    doNothing().when(publisher).subscribe(nextSubscribeCaptor.capture());

    nextFlowCaptor.getValue().onEventStream(publisher);
    nextSubscribeCaptor.getValue().onSubscribe(subscription);

    List<Record> nextRecords = Stream.of(4, 5, 6).map(this::makeRecord).collect(Collectors.toList());
    List<KinesisClientRecordMatcher> nextMatchers = nextRecords.stream().map(KinesisClientRecordMatcher::new)
            .collect(Collectors.toList());

    batchEvent = SubscribeToShardEvent.builder().millisBehindLatest(100L).records(nextRecords)
            .continuationSequenceNumber("6").build();
    nextSubscribeCaptor.getValue().onNext(batchEvent);

    verify(subscription, times(4)).request(1);

    assertThat(nonFailingSubscriber.received.size(), equalTo(2));

    verifyRecords(nonFailingSubscriber.received.get(0).records(), matchers);
    verifyRecords(nonFailingSubscriber.received.get(1).records(), nextMatchers);
}
Example #27
Source File: SubscribeToShardRequestMatcher.java From amazon-kinesis-client with Apache License 2.0

public boolean matches(Object rightObject) {
    SubscribeToShardRequest right = (SubscribeToShardRequest) rightObject;
    return left.shardId().equals(right.shardId())
            && left.consumerARN().equals(right.consumerARN())
            && left.startingPosition().equals(right.startingPosition());
}
Example #28
Source File: FanOutRecordsPublisherTest.java From amazon-kinesis-client with Apache License 2.0

@Test
public void largeRequestTest() throws Exception {
    FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN);

    ArgumentCaptor<FanOutRecordsPublisher.RecordSubscription> captor = ArgumentCaptor
            .forClass(FanOutRecordsPublisher.RecordSubscription.class);
    ArgumentCaptor<FanOutRecordsPublisher.RecordFlow> flowCaptor = ArgumentCaptor
            .forClass(FanOutRecordsPublisher.RecordFlow.class);

    doNothing().when(publisher).subscribe(captor.capture());

    source.start(ExtendedSequenceNumber.LATEST,
            InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST));

    List<ProcessRecordsInput> receivedInput = new ArrayList<>();

    source.subscribe(new ShardConsumerNotifyingSubscriber(new Subscriber<RecordsRetrieved>() {
        Subscription subscription;

        @Override
        public void onSubscribe(Subscription s) {
            subscription = s;
            subscription.request(3);
        }

        @Override
        public void onNext(RecordsRetrieved input) {
            receivedInput.add(input.processRecordsInput());
            subscription.request(1);
        }

        @Override
        public void onError(Throwable t) {
            log.error("Caught throwable in subscriber", t);
            fail("Caught throwable in subscriber");
        }

        @Override
        public void onComplete() {
            fail("OnComplete called when not expected");
        }
    }, source));

    verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture());

    flowCaptor.getValue().onEventStream(publisher);
    captor.getValue().onSubscribe(subscription);

    List<Record> records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList());
    List<KinesisClientRecordMatcher> matchers = records.stream().map(KinesisClientRecordMatcher::new)
            .collect(Collectors.toList());

    batchEvent = SubscribeToShardEvent.builder().millisBehindLatest(100L).records(records).build();

    captor.getValue().onNext(batchEvent);
    captor.getValue().onNext(batchEvent);
    captor.getValue().onNext(batchEvent);

    verify(subscription, times(4)).request(1);
    assertThat(receivedInput.size(), equalTo(3));

    receivedInput.stream().map(ProcessRecordsInput::records).forEach(clientRecordsList -> {
        assertThat(clientRecordsList.size(), equalTo(matchers.size()));
        for (int i = 0; i < clientRecordsList.size(); ++i) {
            assertThat(clientRecordsList.get(i), matchers.get(i));
        }
    });
}
Example #29
Source File: FanOutRecordsPublisherTest.java From amazon-kinesis-client with Apache License 2.0

@Test
public void testIfStreamOfEventsAreDeliveredInOrderWithBackpressureAdheringServicePublisherHavingInitialBurstOverLimit()
        throws Exception {
    FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN);

    ArgumentCaptor<FanOutRecordsPublisher.RecordSubscription> captor = ArgumentCaptor
            .forClass(FanOutRecordsPublisher.RecordSubscription.class);
    ArgumentCaptor<FanOutRecordsPublisher.RecordFlow> flowCaptor = ArgumentCaptor
            .forClass(FanOutRecordsPublisher.RecordFlow.class);

    List<Record> records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList());

    Consumer<Integer> servicePublisherAction = contSeqNum -> captor.getValue().onNext(
            SubscribeToShardEvent.builder()
                    .millisBehindLatest(100L)
                    .continuationSequenceNumber(contSeqNum + "")
                    .records(records)
                    .build());

    CountDownLatch servicePublisherTaskCompletionLatch = new CountDownLatch(1);
    int totalServicePublisherEvents = 1000;
    int initialDemand = 11;
    BackpressureAdheringServicePublisher servicePublisher =
            new BackpressureAdheringServicePublisher(servicePublisherAction, totalServicePublisherEvents,
                    servicePublisherTaskCompletionLatch, initialDemand);

    doNothing().when(publisher).subscribe(captor.capture());

    source.start(ExtendedSequenceNumber.LATEST,
            InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST));

    List<ProcessRecordsInput> receivedInput = new ArrayList<>();
    AtomicBoolean onErrorSet = new AtomicBoolean(false);

    Subscriber<RecordsRetrieved> shardConsumerSubscriber = new ShardConsumerNotifyingSubscriber(
            new Subscriber<RecordsRetrieved>() {
                private Subscription subscription;
                private int lastSeenSeqNum = 0;

                @Override
                public void onSubscribe(Subscription s) {
                    subscription = s;
                    subscription.request(1);
                    servicePublisher.request(1);
                }

                @Override
                public void onNext(RecordsRetrieved input) {
                    receivedInput.add(input.processRecordsInput());
                    assertEquals("" + ++lastSeenSeqNum,
                            ((FanOutRecordsPublisher.FanoutRecordsRetrieved) input).continuationSequenceNumber());
                    subscription.request(1);
                    servicePublisher.request(1);
                }

                @Override
                public void onError(Throwable t) {
                    log.error("Caught throwable in subscriber", t);
                    onErrorSet.set(true);
                    servicePublisherTaskCompletionLatch.countDown();
                }

                @Override
                public void onComplete() {
                    fail("OnComplete called when not expected");
                }
            }, source);

    ExecutorService executorService = getTestExecutor();
    Scheduler testScheduler = getScheduler(getInitiallyBlockingExecutor(getSpiedExecutor(executorService)));
    int bufferSize = 8;

    Flowable.fromPublisher(source).subscribeOn(testScheduler).observeOn(testScheduler, true, bufferSize)
            .subscribe(shardConsumerSubscriber);

    verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture());
    flowCaptor.getValue().onEventStream(publisher);
    captor.getValue().onSubscribe(subscription);

    List<KinesisClientRecordMatcher> matchers = records.stream().map(KinesisClientRecordMatcher::new)
            .collect(Collectors.toList());

    executorService.submit(servicePublisher);
    servicePublisherTaskCompletionLatch.await(5000, TimeUnit.MILLISECONDS);

    assertTrue("onError should have triggered", onErrorSet.get());

    receivedInput.stream().map(ProcessRecordsInput::records).forEach(clientRecordsList -> {
        assertThat(clientRecordsList.size(), equalTo(matchers.size()));
        for (int i = 0; i < clientRecordsList.size(); ++i) {
            assertThat(clientRecordsList.get(i), matchers.get(i));
        }
    });
}
Example #30
Source File: FanOutRecordsPublisherTest.java From amazon-kinesis-client with Apache License 2.0

@Test
public void testIfStreamOfEventsAreDeliveredInOrderWithBackpressureAdheringServicePublisherHavingInitialBurstWithinLimit()
        throws Exception {
    FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN);

    ArgumentCaptor<FanOutRecordsPublisher.RecordSubscription> captor = ArgumentCaptor
            .forClass(FanOutRecordsPublisher.RecordSubscription.class);
    ArgumentCaptor<FanOutRecordsPublisher.RecordFlow> flowCaptor = ArgumentCaptor
            .forClass(FanOutRecordsPublisher.RecordFlow.class);

    List<Record> records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList());

    Consumer<Integer> servicePublisherAction = contSeqNum -> captor.getValue().onNext(
            SubscribeToShardEvent.builder()
                    .millisBehindLatest(100L)
                    .continuationSequenceNumber(contSeqNum + "")
                    .records(records)
                    .build());

    CountDownLatch servicePublisherTaskCompletionLatch = new CountDownLatch(2);
    int totalServicePublisherEvents = 1000;
    int initialDemand = 9;
    BackpressureAdheringServicePublisher servicePublisher =
            new BackpressureAdheringServicePublisher(servicePublisherAction, totalServicePublisherEvents,
                    servicePublisherTaskCompletionLatch, initialDemand);

    doNothing().when(publisher).subscribe(captor.capture());

    source.start(ExtendedSequenceNumber.LATEST,
            InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST));

    List<ProcessRecordsInput> receivedInput = new ArrayList<>();

    Subscriber<RecordsRetrieved> shardConsumerSubscriber = new ShardConsumerNotifyingSubscriber(
            new Subscriber<RecordsRetrieved>() {
                private Subscription subscription;
                private int lastSeenSeqNum = 0;

                @Override
                public void onSubscribe(Subscription s) {
                    subscription = s;
                    subscription.request(1);
                    servicePublisher.request(1);
                }

                @Override
                public void onNext(RecordsRetrieved input) {
                    receivedInput.add(input.processRecordsInput());
                    assertEquals("" + ++lastSeenSeqNum,
                            ((FanOutRecordsPublisher.FanoutRecordsRetrieved) input).continuationSequenceNumber());
                    subscription.request(1);
                    servicePublisher.request(1);
                    if (receivedInput.size() == totalServicePublisherEvents) {
                        servicePublisherTaskCompletionLatch.countDown();
                    }
                }

                @Override
                public void onError(Throwable t) {
                    log.error("Caught throwable in subscriber", t);
                    fail("Caught throwable in subscriber");
                }

                @Override
                public void onComplete() {
                    fail("OnComplete called when not expected");
                }
            }, source);

    ExecutorService executorService = getTestExecutor();
    Scheduler testScheduler = getScheduler(getInitiallyBlockingExecutor(getSpiedExecutor(executorService)));
    int bufferSize = 8;

    Flowable.fromPublisher(source).subscribeOn(testScheduler).observeOn(testScheduler, true, bufferSize)
            .subscribe(shardConsumerSubscriber);

    verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture());
    flowCaptor.getValue().onEventStream(publisher);
    captor.getValue().onSubscribe(subscription);

    List<KinesisClientRecordMatcher> matchers = records.stream().map(KinesisClientRecordMatcher::new)
            .collect(Collectors.toList());

    executorService.submit(servicePublisher);
    servicePublisherTaskCompletionLatch.await(5000, TimeUnit.MILLISECONDS);

    assertThat(receivedInput.size(), equalTo(totalServicePublisherEvents));

    receivedInput.stream().map(ProcessRecordsInput::records).forEach(clientRecordsList -> {
        assertThat(clientRecordsList.size(), equalTo(matchers.size()));
        for (int i = 0; i < clientRecordsList.size(); ++i) {
            assertThat(clientRecordsList.get(i), matchers.get(i));
        }
    });

    assertThat(source.getCurrentSequenceNumber(), equalTo(totalServicePublisherEvents + ""));
}