com.amazonaws.services.kinesis.AmazonKinesis Java Examples
The following examples show how to use com.amazonaws.services.kinesis.AmazonKinesis.
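Before the project-specific examples, here is a minimal, self-contained sketch of basic AmazonKinesis usage with the v1 AWS SDK for Java (the SDK these examples are written against). The class name, the "example-stream" stream name, the partition key, and the region are illustrative placeholders, not values taken from any example below.

import java.nio.ByteBuffer;

import com.amazonaws.regions.Regions;
import com.amazonaws.services.kinesis.AmazonKinesis;
import com.amazonaws.services.kinesis.AmazonKinesisClientBuilder;
import com.amazonaws.services.kinesis.model.PutRecordRequest;
import com.amazonaws.services.kinesis.model.PutRecordResult;

public class MinimalKinesisUsage {

    public static void main(String[] args) {
        // Build a client using the default credentials provider chain;
        // Regions.US_EAST_1 is a placeholder region.
        AmazonKinesis kinesis = AmazonKinesisClientBuilder.standard()
                .withRegion(Regions.US_EAST_1)
                .build();

        // Publish a single record; "example-stream" is a placeholder stream name.
        PutRecordRequest request = new PutRecordRequest()
                .withStreamName("example-stream")
                .withPartitionKey("example-partition-key")
                .withData(ByteBuffer.wrap("hello, kinesis".getBytes()));
        PutRecordResult result = kinesis.putRecord(request);
        System.out.println("Wrote record to shard " + result.getShardId());

        // Release the client's underlying HTTP resources when finished.
        kinesis.shutdown();
    }
}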
Example #1
Source File: KinesisStreamProvisionerTests.java From spring-cloud-stream-binder-aws-kinesis with Apache License 2.0
@Test
void testProvisionProducerSuccessfulWithExistingStream() {
    AmazonKinesis amazonKinesisMock = mock(AmazonKinesis.class);
    KinesisBinderConfigurationProperties binderProperties = new KinesisBinderConfigurationProperties();
    KinesisStreamProvisioner provisioner = new KinesisStreamProvisioner(amazonKinesisMock, binderProperties);

    ExtendedProducerProperties<KinesisProducerProperties> extendedProducerProperties =
            new ExtendedProducerProperties<>(new KinesisProducerProperties());
    String name = "test-stream";

    DescribeStreamResult describeStreamResult =
            describeStreamResultWithShards(Collections.singletonList(new Shard()));

    when(amazonKinesisMock.describeStream(any(DescribeStreamRequest.class)))
            .thenReturn(describeStreamResult);

    ProducerDestination destination =
            provisioner.provisionProducerDestination(name, extendedProducerProperties);

    verify(amazonKinesisMock).describeStream(any(DescribeStreamRequest.class));

    assertThat(destination.getName()).isEqualTo(name);
}
Example #2
Source File: SampleAggregatorProducer.java From kinesis-aggregation with Apache License 2.0
/**
 * Use the callback mechanism and a lambda function to send aggregated
 * records to Kinesis.
 */
private static void sendViaCallback(AmazonKinesis producer, String streamName, RecordAggregator aggregator) {
    // add a lambda callback to be called when a full record is ready to
    // transmit
    aggregator.onRecordComplete((aggRecord) -> {
        sendRecord(producer, streamName, aggRecord);
    });

    System.out.println("Creating " + ProducerConfig.RECORDS_TO_TRANSMIT + " records...");
    for (int i = 1; i <= ProducerConfig.RECORDS_TO_TRANSMIT; i++) {
        String pk = ProducerUtils.randomPartitionKey();
        String ehk = ProducerUtils.randomExplicitHashKey();
        byte[] data = ProducerUtils.randomData(i, ProducerConfig.RECORD_SIZE_BYTES);

        try {
            aggregator.addUserRecord(pk, ehk, data);
        } catch (Exception e) {
            e.printStackTrace();
            System.err.println("Failed to add user record: " + e.getMessage());
        }
    }

    flushAndFinish(producer, streamName, aggregator);
}
Example #3
Source File: KinesisProxyTest.java From flink with Apache License 2.0
@Test
public void testGetShardWithNoNewShards() throws Exception {
    // given
    String fakeStreamName = "fake-stream";

    AmazonKinesis mockClient = mock(AmazonKinesis.class);
    KinesisProxy kinesisProxy = getProxy(mockClient);

    Mockito.when(mockClient.listShards(
        new ListShardsRequest()
            .withStreamName(fakeStreamName)
            .withExclusiveStartShardId(KinesisShardIdGenerator.generateFromShardOrder(1))
    )).thenReturn(new ListShardsResult().withShards(Collections.emptyList()));

    HashMap<String, String> streamHashMap = new HashMap<>();
    streamHashMap.put(fakeStreamName, KinesisShardIdGenerator.generateFromShardOrder(1));

    // when
    GetShardListResult shardListResult = kinesisProxy.getShardList(streamHashMap);

    // then
    Assert.assertFalse(shardListResult.hasRetrievedShards());
}
Example #4
Source File: AWSUtil.java From flink with Apache License 2.0
/**
 * Creates an Amazon Kinesis Client.
 * @param configProps configuration properties containing the access key, secret key, and region
 * @param awsClientConfig preconfigured AWS SDK client configuration
 * @return a new Amazon Kinesis Client
 */
public static AmazonKinesis createKinesisClient(Properties configProps, ClientConfiguration awsClientConfig) {
    // set a Flink-specific user agent
    awsClientConfig.setUserAgentPrefix(String.format(USER_AGENT_FORMAT,
            EnvironmentInformation.getVersion(),
            EnvironmentInformation.getRevisionInformation().commitId));

    // utilize automatic refreshment of credentials by directly passing the AWSCredentialsProvider
    AmazonKinesisClientBuilder builder = AmazonKinesisClientBuilder.standard()
            .withCredentials(AWSUtil.getCredentialsProvider(configProps))
            .withClientConfiguration(awsClientConfig);

    if (configProps.containsKey(AWSConfigConstants.AWS_ENDPOINT)) {
        // Set signingRegion as null, to facilitate mocking Kinesis for local tests
        builder.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(
                configProps.getProperty(AWSConfigConstants.AWS_ENDPOINT), null));
    } else {
        builder.withRegion(Regions.fromName(configProps.getProperty(AWSConfigConstants.AWS_REGION)));
    }
    return builder.build();
}
Example #5
Source File: SpringLocalstackDockerRunnerTest.java From spring-localstack with Apache License 2.0
@Test
public void testKinesis() throws Exception {
    AmazonKinesis kinesis = amazonDockerClientsHolder.amazonKinesis();

    ListStreamsResult streamsResult = kinesis.listStreams();
    assertThat(streamsResult.getStreamNames().size(), is(0));

    CreateStreamRequest createStreamRequest = new CreateStreamRequest()
        .withStreamName("test-stream")
        .withShardCount(2);
    kinesis.createStream(createStreamRequest);

    streamsResult = kinesis.listStreams();
    assertThat(streamsResult.getStreamNames(), hasItem("test-stream"));
}
Example #6
Source File: KinesisUtil.java From datacollector with Apache License 2.0
public static List<com.amazonaws.services.kinesis.model.Record> getPreviewRecords(
    ClientConfiguration awsClientConfig,
    KinesisConfigBean conf,
    int maxBatchSize,
    GetShardIteratorRequest getShardIteratorRequest
) throws StageException {
    AmazonKinesis kinesisClient = getKinesisClient(awsClientConfig, conf);

    GetShardIteratorResult getShardIteratorResult = kinesisClient.getShardIterator(getShardIteratorRequest);
    String shardIterator = getShardIteratorResult.getShardIterator();

    GetRecordsRequest getRecordsRequest = new GetRecordsRequest();
    getRecordsRequest.setShardIterator(shardIterator);
    getRecordsRequest.setLimit(maxBatchSize);

    GetRecordsResult getRecordsResult = kinesisClient.getRecords(getRecordsRequest);
    return getRecordsResult.getRecords();
}
Example #7
Source File: DynamoDBStreamsProxy.java From flink with Apache License 2.0
/**
 * Creates an AmazonDynamoDBStreamsAdapterClient.
 * Uses it as the internal client interacting with the DynamoDB streams.
 *
 * @param configProps configuration properties
 * @return an AWS DynamoDB streams adapter client
 */
@Override
protected AmazonKinesis createKinesisClient(Properties configProps) {
    ClientConfiguration awsClientConfig = new ClientConfigurationFactory().getConfig();
    setAwsClientConfigProperties(awsClientConfig, configProps);

    AWSCredentialsProvider credentials = getCredentialsProvider(configProps);
    awsClientConfig.setUserAgentPrefix(
            String.format(
                    USER_AGENT_FORMAT,
                    EnvironmentInformation.getVersion(),
                    EnvironmentInformation.getRevisionInformation().commitId));

    AmazonDynamoDBStreamsAdapterClient adapterClient =
            new AmazonDynamoDBStreamsAdapterClient(credentials, awsClientConfig);

    if (configProps.containsKey(AWS_ENDPOINT)) {
        adapterClient.setEndpoint(configProps.getProperty(AWS_ENDPOINT));
    } else {
        adapterClient.setRegion(Region.getRegion(
                Regions.fromName(configProps.getProperty(AWS_REGION))));
    }

    return adapterClient;
}
Example #8
Source File: KinesisProxyTest.java From Flink-CEPplus with Apache License 2.0
@Test
public void testGetShardWithNoNewShards() throws Exception {
    // given
    String fakeStreamName = "fake-stream";

    AmazonKinesis mockClient = mock(AmazonKinesis.class);
    KinesisProxy kinesisProxy = getProxy(mockClient);

    Mockito.when(mockClient.listShards(
        new ListShardsRequest()
            .withStreamName(fakeStreamName)
            .withExclusiveStartShardId(KinesisShardIdGenerator.generateFromShardOrder(1))
    )).thenReturn(new ListShardsResult().withShards(Collections.emptyList()));

    HashMap<String, String> streamHashMap = new HashMap<>();
    streamHashMap.put(fakeStreamName, KinesisShardIdGenerator.generateFromShardOrder(1));

    // when
    GetShardListResult shardListResult = kinesisProxy.getShardList(streamHashMap);

    // then
    Assert.assertFalse(shardListResult.hasRetrievedShards());
}
Example #9
Source File: AWSUtil.java From Flink-CEPplus with Apache License 2.0
/**
 * Creates an Amazon Kinesis Client.
 * @param configProps configuration properties containing the access key, secret key, and region
 * @param awsClientConfig preconfigured AWS SDK client configuration
 * @return a new Amazon Kinesis Client
 */
public static AmazonKinesis createKinesisClient(Properties configProps, ClientConfiguration awsClientConfig) {
    // set a Flink-specific user agent
    awsClientConfig.setUserAgentPrefix(String.format(USER_AGENT_FORMAT,
            EnvironmentInformation.getVersion(),
            EnvironmentInformation.getRevisionInformation().commitId));

    // utilize automatic refreshment of credentials by directly passing the AWSCredentialsProvider
    AmazonKinesisClientBuilder builder = AmazonKinesisClientBuilder.standard()
            .withCredentials(AWSUtil.getCredentialsProvider(configProps))
            .withClientConfiguration(awsClientConfig);

    if (configProps.containsKey(AWSConfigConstants.AWS_ENDPOINT)) {
        // Set signingRegion as null, to facilitate mocking Kinesis for local tests
        builder.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(
                configProps.getProperty(AWSConfigConstants.AWS_ENDPOINT), null));
    } else {
        builder.withRegion(Regions.fromName(configProps.getProperty(AWSConfigConstants.AWS_REGION)));
    }
    return builder.build();
}
Example #10
Source File: DynamoDBStreamsProxy.java From Flink-CEPplus with Apache License 2.0
/**
 * Creates an AmazonDynamoDBStreamsAdapterClient.
 * Uses it as the internal client interacting with the DynamoDB streams.
 *
 * @param configProps configuration properties
 * @return an AWS DynamoDB streams adapter client
 */
@Override
protected AmazonKinesis createKinesisClient(Properties configProps) {
    ClientConfiguration awsClientConfig = new ClientConfigurationFactory().getConfig();
    setAwsClientConfigProperties(awsClientConfig, configProps);

    AWSCredentialsProvider credentials = getCredentialsProvider(configProps);
    awsClientConfig.setUserAgentPrefix(
            String.format(
                    USER_AGENT_FORMAT,
                    EnvironmentInformation.getVersion(),
                    EnvironmentInformation.getRevisionInformation().commitId));

    AmazonDynamoDBStreamsAdapterClient adapterClient =
            new AmazonDynamoDBStreamsAdapterClient(credentials, awsClientConfig);

    if (configProps.containsKey(AWS_ENDPOINT)) {
        adapterClient.setEndpoint(configProps.getProperty(AWS_ENDPOINT));
    } else {
        adapterClient.setRegion(Region.getRegion(
                Regions.fromName(configProps.getProperty(AWS_REGION))));
    }

    return adapterClient;
}
Example #11
Source File: AWSUtil.java From flink with Apache License 2.0
/**
 * Creates an Amazon Kinesis Client.
 * @param configProps configuration properties containing the access key, secret key, and region
 * @param awsClientConfig preconfigured AWS SDK client configuration
 * @return a new Amazon Kinesis Client
 */
public static AmazonKinesis createKinesisClient(Properties configProps, ClientConfiguration awsClientConfig) {
    // set a Flink-specific user agent
    awsClientConfig.setUserAgentPrefix(String.format(USER_AGENT_FORMAT,
            EnvironmentInformation.getVersion(),
            EnvironmentInformation.getRevisionInformation().commitId));

    // utilize automatic refreshment of credentials by directly passing the AWSCredentialsProvider
    AmazonKinesisClientBuilder builder = AmazonKinesisClientBuilder.standard()
            .withCredentials(AWSUtil.getCredentialsProvider(configProps))
            .withClientConfiguration(awsClientConfig);

    if (configProps.containsKey(AWSConfigConstants.AWS_ENDPOINT)) {
        // If an endpoint is specified, we give preference to using an endpoint and use the region property to
        // sign the request.
        builder.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(
                configProps.getProperty(AWSConfigConstants.AWS_ENDPOINT),
                configProps.getProperty(AWSConfigConstants.AWS_REGION)));
    } else {
        builder.withRegion(Regions.fromName(configProps.getProperty(AWSConfigConstants.AWS_REGION)));
    }
    return builder.build();
}
Example #12
Source File: AWSServiceFactory.java From camel-kafka-connector with Apache License 2.0
public static AWSService<AmazonKinesis> createKinesisService() {
    String awsInstanceType = System.getProperty("aws-service.kinesis.instance.type");
    LOG.info("Creating a {} AWS kinesis instance", getInstanceTypeName(awsInstanceType));

    if (awsInstanceType == null || awsInstanceType.equals("local-aws-container")) {
        return new AWSKinesisLocalContainerService();
    }

    if (awsInstanceType.equals("remote")) {
        return new AWSRemoteService<>(AWSClientUtils::newKinesisClient);
    }

    LOG.error("Invalid AWS instance type: {}. Must be either 'remote' or 'local-aws-container'", awsInstanceType);
    throw new UnsupportedOperationException("Invalid AWS instance type");
}
Example #13
Source File: KinesisDatasetRuntime.java From components with Apache License 2.0
@Override
public Set<String> listStreams() {
    AmazonKinesis amazonKinesis = KinesisClient.create(properties);
    ListStreamsResult listStreamsResult = amazonKinesis.listStreams();
    List<String> streamNames = listStreamsResult.getStreamNames();
    Set<String> streamNamesCollection = new HashSet<>(streamNames);
    // Page through the remaining streams, using the last name seen as the
    // exclusive start point for the next listStreams call.
    while (listStreamsResult.isHasMoreStreams() && !streamNames.isEmpty()) {
        listStreamsResult = amazonKinesis.listStreams(streamNames.get(streamNames.size() - 1));
        streamNames = listStreamsResult.getStreamNames();
        streamNamesCollection.addAll(streamNames);
    }
    return streamNamesCollection;
}
Example #14
Source File: TalendKinesisProvider.java From components with Apache License 2.0
@Override
public AmazonKinesis getKinesisClient() {
    AmazonKinesisClientBuilder clientBuilder = AmazonKinesisClientBuilder.standard()
            .withCredentials(getCredentialsProvier());
    if (specifyEndpoint) {
        clientBuilder.withEndpointConfiguration(
                new AwsClientBuilder.EndpointConfiguration(endpoint, region.getName()));
    } else {
        clientBuilder.setRegion(region.getName());
    }
    return clientBuilder.build();
}
Example #15
Source File: KinesisDatastoreRuntime.java From components with Apache License 2.0
@Override
public Iterable<ValidationResult> doHealthChecks(RuntimeContainer container) {
    AmazonKinesis amazonKinesis = KinesisClient.create(properties);
    try {
        ListStreamsResult listStreamsResult = amazonKinesis.listStreams();
        return Arrays.asList(ValidationResult.OK);
    } catch (Exception e) {
        return Arrays.asList(new ValidationResult(ValidationResult.Result.ERROR, e.getMessage()));
    }
}
Example #16
Source File: SampleAggregatorProducer.java From kinesis-aggregation with Apache License 2.0
public static void main(String[] args) {
    if (args.length != 2) {
        System.err.println("USAGE: SampleAggregatorProducer <stream name> <region>");
        System.exit(1);
    }

    String streamName = args[0];
    String regionName = args[1];
    final AmazonKinesis producer = ProducerUtils.getKinesisProducer(regionName);
    final RecordAggregator aggregator = new RecordAggregator();

    sendViaCallback(producer, streamName, aggregator);
    // sendViaBatch(producer, streamName, aggregator);
}
Example #17
Source File: SampleAggregatorProducer.java From kinesis-aggregation with Apache License 2.0
/**
 * Use the synchronous batch mechanism to send aggregated records to
 * Kinesis.
 */
@SuppressWarnings("unused")
private static void sendViaBatch(AmazonKinesis producer, String streamName, RecordAggregator aggregator) {
    System.out.println("Creating " + ProducerConfig.RECORDS_TO_TRANSMIT + " records...");
    for (int i = 1; i <= ProducerConfig.RECORDS_TO_TRANSMIT; i++) {
        String pk = ProducerUtils.randomPartitionKey();
        String ehk = ProducerUtils.randomExplicitHashKey();
        byte[] data = ProducerUtils.randomData(i, ProducerConfig.RECORD_SIZE_BYTES);

        // addUserRecord returns non-null when a full record is ready to
        // transmit
        try {
            final AggRecord aggRecord = aggregator.addUserRecord(pk, ehk, data);
            if (aggRecord != null) {
                ForkJoinPool.commonPool().execute(() -> {
                    sendRecord(producer, streamName, aggRecord);
                });
            }
        } catch (Exception e) {
            e.printStackTrace();
            System.err.println("Failed to add user record: " + e.getMessage());
        }
    }

    flushAndFinish(producer, streamName, aggregator);
}
Example #18
Source File: ProducerBase.java From aws-big-data-blog with Apache License 2.0
/**
 * @param eventsQueue The queue that holds the records to send to Kinesis
 * @param kinesisClient Reference to the Kinesis client
 * @param streamName The stream name to send items to
 */
public ProducerBase(BlockingQueue<Event> eventsQueue, AmazonKinesis kinesisClient, String streamName) {
    this.eventsQueue = eventsQueue;
    this.kinesisClient = kinesisClient;
    this.streamName = streamName;
}
Example #19
Source File: KinesisUtil.java From datacollector with Apache License 2.0
public static long getShardCount(
    ClientConfiguration awsClientConfig,
    KinesisConfigBean conf,
    String streamName
) throws StageException {
    AmazonKinesis kinesisClient = getKinesisClient(awsClientConfig, conf);

    try {
        long numShards = 0;
        String lastShardId = null;
        StreamDescription description;
        do {
            if (lastShardId == null) {
                description = kinesisClient.describeStream(streamName).getStreamDescription();
            } else {
                description = kinesisClient.describeStream(streamName, lastShardId).getStreamDescription();
            }
            for (Shard shard : description.getShards()) {
                if (shard.getSequenceNumberRange().getEndingSequenceNumber() == null) {
                    // Then this shard is open, so we should count it. Shards with an ending sequence number
                    // are closed and cannot be written to, so we skip counting them.
                    ++numShards;
                }
            }

            int pageSize = description.getShards().size();
            lastShardId = description.getShards().get(pageSize - 1).getShardId();
        } while (description.getHasMoreShards());

        LOG.debug("Connected successfully to stream: '{}' with '{}' shards.", streamName, numShards);
        return numShards;
    } finally {
        kinesisClient.shutdown();
    }
}
Example #20
Source File: SampleAggregatorProducer.java From kinesis-aggregation with Apache License 2.0
/**
 * Flush out and send any remaining records from the aggregator and then
 * wait for all pending transmissions to finish.
 */
private static void flushAndFinish(AmazonKinesis producer, String streamName, RecordAggregator aggregator) {
    // Do one final flush & send to get any remaining records that haven't
    // triggered a callback yet
    AggRecord finalRecord = aggregator.clearAndGet();
    ForkJoinPool.commonPool().execute(() -> {
        sendRecord(producer, streamName, finalRecord);
    });

    // Wait up to 2 minutes for all the publisher threads to finish
    System.out.println("Waiting for all transmissions to complete...");
    ForkJoinPool.commonPool().awaitQuiescence(2, TimeUnit.MINUTES);
    System.out.println("Transmissions complete.");
}
Example #21
Source File: SampleAggregatorProducer.java From kinesis-aggregation with Apache License 2.0
/**
 * Send an aggregated record to Kinesis using the specified producer and
 * stream name.
 */
private static void sendRecord(AmazonKinesis producer, String streamName, AggRecord aggRecord) {
    if (aggRecord == null || aggRecord.getNumUserRecords() == 0) {
        return;
    }

    System.out.println("Submitting record EHK=" + aggRecord.getExplicitHashKey()
            + " NumRecords=" + aggRecord.getNumUserRecords()
            + " NumBytes=" + aggRecord.getSizeBytes());
    try {
        producer.putRecord(aggRecord.toPutRecordRequest(streamName));
    } catch (Exception e) {
        e.printStackTrace();
    }
    System.out.println("Completed record EHK=" + aggRecord.getExplicitHashKey());
}
Example #22
Source File: SampleAggregatorProducerKCLCompliant.java From kinesis-aggregation with Apache License 2.0
public static void main(String[] args) {
    if (args.length != 2) {
        System.err.println("USAGE: SampleAggregatorProducerKCLCompliant <stream name> <region>");
        System.exit(1);
    }

    String streamName = args[0];
    String regionName = args[1];
    final AmazonKinesis producer = ProducerUtils.getKinesisProducer(regionName);
    final RecordAggregator aggregator = new RecordAggregator();

    sendViaBatch(producer, streamName, aggregator);
}
Example #23
Source File: SampleAggregatorProducerKCLCompliant.java From kinesis-aggregation with Apache License 2.0
/**
 * Use the synchronous batch mechanism to send aggregated records to
 * Kinesis.
 */
@SuppressWarnings("unused")
private static void sendViaBatch(AmazonKinesis producer, String streamName, RecordAggregator aggregator) {
    System.out.println("Creating " + ProducerConfig.RECORDS_TO_TRANSMIT + " records...");
    String pk = ProducerUtils.randomPartitionKey();
    String ehk = ProducerUtils.randomExplicitHashKey();
    for (int i = 1; i <= ProducerConfig.RECORDS_TO_TRANSMIT; i++) {
        byte[] data = ProducerUtils.randomData(i, ProducerConfig.RECORD_SIZE_BYTES);

        // addUserRecord returns non-null when a full record is ready to
        // transmit
        try {
            final AggRecord aggRecord = aggregator.addUserRecord(pk, ehk, data);
            if (aggRecord != null) {
                pk = ProducerUtils.randomPartitionKey();
                ehk = ProducerUtils.randomExplicitHashKey();
                ForkJoinPool.commonPool().execute(() -> {
                    sendRecord(producer, streamName, aggRecord);
                });
            }
        } catch (Exception e) {
            e.printStackTrace();
            System.err.println("Failed to add user record: " + e.getMessage());
        }
    }

    flushAndFinish(producer, streamName, aggregator);
}
Example #24
Source File: SampleAggregatorProducerKCLCompliant.java From kinesis-aggregation with Apache License 2.0
/**
 * Flush out and send any remaining records from the aggregator and then
 * wait for all pending transmissions to finish.
 */
private static void flushAndFinish(AmazonKinesis producer, String streamName, RecordAggregator aggregator) {
    // Do one final flush & send to get any remaining records that haven't
    // triggered a callback yet
    AggRecord finalRecord = aggregator.clearAndGet();
    ForkJoinPool.commonPool().execute(() -> {
        sendRecord(producer, streamName, finalRecord);
    });

    // Wait up to 2 minutes for all the publisher threads to finish
    System.out.println("Waiting for all transmissions to complete...");
    ForkJoinPool.commonPool().awaitQuiescence(2, TimeUnit.MINUTES);
    System.out.println("Transmissions complete.");
}
Example #25
Source File: SampleAggregatorProducerKCLCompliant.java From kinesis-aggregation with Apache License 2.0
/**
 * Send an aggregated record to Kinesis using the specified producer and
 * stream name.
 */
private static void sendRecord(AmazonKinesis producer, String streamName, AggRecord aggRecord) {
    if (aggRecord == null || aggRecord.getNumUserRecords() == 0) {
        return;
    }

    System.out.println("Submitting record EHK=" + aggRecord.getExplicitHashKey()
            + " NumRecords=" + aggRecord.getNumUserRecords()
            + " NumBytes=" + aggRecord.getSizeBytes());
    try {
        producer.putRecord(aggRecord.toPutRecordRequest(streamName));
    } catch (Exception e) {
        e.printStackTrace();
    }
    System.out.println("Completed record EHK=" + aggRecord.getExplicitHashKey());
}
Example #26
Source File: ProducerUtils.java From kinesis-aggregation with Apache License 2.0
/**
 * Create a new Kinesis producer for publishing to Kinesis.
 *
 * @param region The region of the Kinesis stream to publish to.
 *
 * @return An Amazon Kinesis producer for publishing to a Kinesis stream.
 */
public static AmazonKinesis getKinesisProducer(String region) {
    ClientConfiguration config = new ClientConfiguration();
    config.setMaxConnections(25);
    config.setConnectionTimeout(60000);
    config.setSocketTimeout(60000);

    AmazonKinesis producer = new AmazonKinesisClient(new DefaultAWSCredentialsProviderChain(), config);
    producer.setRegion(Region.getRegion(Regions.fromName(region)));

    return producer;
}
Example #27
Source File: KinesisStreamProvisionerTests.java From spring-cloud-stream-binder-aws-kinesis with Apache License 2.0
@Test
void testProvisionConsumerResourceNotFoundException() {
    AmazonKinesis amazonKinesisMock = mock(AmazonKinesis.class);
    KinesisBinderConfigurationProperties binderProperties = new KinesisBinderConfigurationProperties();
    binderProperties.setAutoCreateStream(false);
    KinesisStreamProvisioner provisioner = new KinesisStreamProvisioner(amazonKinesisMock, binderProperties);

    int instanceCount = 1;
    int concurrency = 1;
    ExtendedConsumerProperties<KinesisConsumerProperties> extendedConsumerProperties =
            new ExtendedConsumerProperties<>(new KinesisConsumerProperties());
    extendedConsumerProperties.setInstanceCount(instanceCount);
    extendedConsumerProperties.setConcurrency(concurrency);

    String name = "test-stream";
    String group = "test-group";

    when(amazonKinesisMock.describeStream(any(DescribeStreamRequest.class)))
            .thenThrow(new ResourceNotFoundException("Stream not found"));

    assertThatThrownBy(() -> provisioner.provisionConsumerDestination(name, group, extendedConsumerProperties))
            .isInstanceOf(ProvisioningException.class)
            .hasMessageContaining("The stream [test-stream] was not found and auto creation is disabled.")
            .hasCauseInstanceOf(ResourceNotFoundException.class);

    verify(amazonKinesisMock, times(1)).describeStream(any(DescribeStreamRequest.class));
    verify(amazonKinesisMock, never()).createStream(name, instanceCount * concurrency);
}
Example #28
Source File: KinesisStreamProvisionerTests.java From spring-cloud-stream-binder-aws-kinesis with Apache License 2.0
@Test
void testProvisionConsumerSuccessfulWithNewStream() {
    AmazonKinesis amazonKinesisMock = mock(AmazonKinesis.class);
    KinesisBinderConfigurationProperties binderProperties = new KinesisBinderConfigurationProperties();
    KinesisStreamProvisioner provisioner = new KinesisStreamProvisioner(amazonKinesisMock, binderProperties);

    int instanceCount = 1;
    int concurrency = 1;
    ExtendedConsumerProperties<KinesisConsumerProperties> extendedConsumerProperties =
            new ExtendedConsumerProperties<>(new KinesisConsumerProperties());
    extendedConsumerProperties.setInstanceCount(instanceCount);
    extendedConsumerProperties.setConcurrency(concurrency);

    String name = "test-stream";
    String group = "test-group";

    DescribeStreamResult describeStreamResult =
            describeStreamResultWithShards(Collections.singletonList(new Shard()));

    when(amazonKinesisMock.describeStream(any(DescribeStreamRequest.class)))
            .thenThrow(new ResourceNotFoundException("I got nothing"))
            .thenReturn(describeStreamResult);

    when(amazonKinesisMock.createStream(name, instanceCount * concurrency))
            .thenReturn(new CreateStreamResult());

    ConsumerDestination destination =
            provisioner.provisionConsumerDestination(name, group, extendedConsumerProperties);

    verify(amazonKinesisMock, times(2)).describeStream(any(DescribeStreamRequest.class));
    verify(amazonKinesisMock).createStream(name, instanceCount * concurrency);

    assertThat(destination.getName()).isEqualTo(name);
}