com.amazonaws.services.kinesis.model.PutRecordsRequest Java Examples
The following examples show how to use com.amazonaws.services.kinesis.model.PutRecordsRequest (AWS SDK for Java v1). Each example is taken from the open-source project named in its Source File line.
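All of the examples below follow the same pattern: build a list of PutRecordsRequestEntry objects (payload bytes plus a partition key), attach it to a PutRecordsRequest along with the stream name, and send the whole batch in a single putRecords call. Before the project-specific examples, here is a minimal, self-contained sketch of that pattern; the stream name "my-stream", the record count, and the default-client builder are placeholders, not taken from any of the projects below.

import com.amazonaws.services.kinesis.AmazonKinesis;
import com.amazonaws.services.kinesis.AmazonKinesisClientBuilder;
import com.amazonaws.services.kinesis.model.PutRecordsRequest;
import com.amazonaws.services.kinesis.model.PutRecordsRequestEntry;
import com.amazonaws.services.kinesis.model.PutRecordsResult;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

public class PutRecordsSketch {
    public static void main(String[] args) {
        // Placeholder stream name; the default client uses the standard region/credentials chain.
        String streamName = "my-stream";
        AmazonKinesis kinesis = AmazonKinesisClientBuilder.defaultClient();

        // One PutRecordsRequestEntry per record: payload bytes plus a partition key.
        List<PutRecordsRequestEntry> entries = new ArrayList<>();
        for (int i = 0; i < 10; i++) {
            entries.add(new PutRecordsRequestEntry()
                    .withData(ByteBuffer.wrap(("record-" + i).getBytes(StandardCharsets.UTF_8)))
                    .withPartitionKey(Long.toString(i)));
        }

        // A single batched call; PutRecords accepts at most 500 records and 5 MB per request.
        PutRecordsRequest request = new PutRecordsRequest()
                .withStreamName(streamName)
                .withRecords(entries);
        PutRecordsResult result = kinesis.putRecords(request);

        // PutRecords is not all-or-nothing: individual records can fail while others succeed.
        System.out.println("Failed records: " + result.getFailedRecordCount());
    }
}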
Example #1
Source File: TestRecordAccess.java From presto with Apache License 2.0
private void createJsonMessages(String streamName, int count, int idStart)
{
    String jsonFormat = "{\"id\" : %d, \"name\" : \"%s\"}";
    PutRecordsRequest putRecordsRequest = new PutRecordsRequest();
    putRecordsRequest.setStreamName(streamName);
    List<PutRecordsRequestEntry> putRecordsRequestEntryList = new ArrayList<>();
    for (int i = 0; i < count; i++) {
        PutRecordsRequestEntry putRecordsRequestEntry = new PutRecordsRequestEntry();
        long id = idStart + i;
        String name = UUID.randomUUID().toString();
        String jsonVal = String.format(jsonFormat, id, name);
        // UTF_8 is a static import (e.g. java.nio.charset.StandardCharsets.UTF_8)
        putRecordsRequestEntry.setData(ByteBuffer.wrap(jsonVal.getBytes(UTF_8)));
        putRecordsRequestEntry.setPartitionKey(Long.toString(id));
        putRecordsRequestEntryList.add(putRecordsRequestEntry);
    }
    putRecordsRequest.setRecords(putRecordsRequestEntryList);
    mockClient.putRecords(putRecordsRequest);
}
Example #2
Source File: MockKinesisClient.java From presto with Apache License 2.0
@Override
public PutRecordsResult putRecords(PutRecordsRequest putRecordsRequest)
        throws AmazonClientException
{
    // Setup method to add a batch of new records:
    InternalStream theStream = this.getStream(putRecordsRequest.getStreamName());
    if (theStream != null) {
        PutRecordsResult result = new PutRecordsResult();
        List<PutRecordsResultEntry> resultList = new ArrayList<>();
        for (PutRecordsRequestEntry entry : putRecordsRequest.getRecords()) {
            PutRecordResult putResult = theStream.putRecord(entry.getData(), entry.getPartitionKey());
            resultList.add((new PutRecordsResultEntry())
                    .withShardId(putResult.getShardId())
                    .withSequenceNumber(putResult.getSequenceNumber()));
        }
        result.setRecords(resultList);
        return result;
    }
    else {
        throw new AmazonClientException("This stream does not exist!");
    }
}
Example #3
Source File: TestMinimalFunctionality.java From presto-kinesis with Apache License 2.0
private void createMessages(String streamName, int count)
        throws Exception
{
    PutRecordsRequest putRecordsRequest = new PutRecordsRequest();
    putRecordsRequest.setStreamName(streamName);
    List<PutRecordsRequestEntry> putRecordsRequestEntryList = new ArrayList<>();
    for (int i = 0; i < count; i++) {
        PutRecordsRequestEntry putRecordsRequestEntry = new PutRecordsRequestEntry();
        putRecordsRequestEntry.setData(ByteBuffer.wrap(UUID.randomUUID().toString().getBytes()));
        putRecordsRequestEntry.setPartitionKey(Long.toString(i));
        putRecordsRequestEntryList.add(putRecordsRequestEntry);
    }
    putRecordsRequest.setRecords(putRecordsRequestEntryList);
    embeddedKinesisStream.getKinesisClient().putRecords(putRecordsRequest);
}
Example #4
Source File: MockKinesisClient.java From presto-kinesis with Apache License 2.0
@Override
public PutRecordsResult putRecords(PutRecordsRequest putRecordsRequest)
        throws AmazonServiceException, AmazonClientException
{
    // Setup method to add a batch of new records:
    InternalStream theStream = this.getStream(putRecordsRequest.getStreamName());
    if (theStream != null) {
        PutRecordsResult result = new PutRecordsResult();
        ArrayList<PutRecordsResultEntry> resultList = new ArrayList<PutRecordsResultEntry>();
        for (PutRecordsRequestEntry entry : putRecordsRequest.getRecords()) {
            PutRecordResult putResult = theStream.putRecord(entry.getData(), entry.getPartitionKey());
            resultList.add((new PutRecordsResultEntry())
                    .withShardId(putResult.getShardId())
                    .withSequenceNumber(putResult.getSequenceNumber()));
        }
        result.setRecords(resultList);
        return result;
    }
    else {
        throw new AmazonClientException("This stream does not exist!");
    }
}
Example #5
Source File: TestRecordAccess.java From presto-kinesis with Apache License 2.0
private void createJsonMessages(String streamName, int count, int idStart)
        throws Exception
{
    String jsonFormat = "{\"id\" : %d, \"name\" : \"%s\"}";
    PutRecordsRequest putRecordsRequest = new PutRecordsRequest();
    putRecordsRequest.setStreamName(streamName);
    List<PutRecordsRequestEntry> putRecordsRequestEntryList = new ArrayList<>();
    for (int i = 0; i < count; i++) {
        PutRecordsRequestEntry putRecordsRequestEntry = new PutRecordsRequestEntry();
        long id = idStart + i;
        String name = UUID.randomUUID().toString();
        String jsonVal = String.format(jsonFormat, id, name);
        // Note: uses the platform default charset; StandardCharsets.UTF_8 would be more explicit
        putRecordsRequestEntry.setData(ByteBuffer.wrap(jsonVal.getBytes()));
        putRecordsRequestEntry.setPartitionKey(Long.toString(id));
        putRecordsRequestEntryList.add(putRecordsRequestEntry);
    }
    putRecordsRequest.setRecords(putRecordsRequestEntryList);
    mockClient.putRecords(putRecordsRequest);
}
Example #6
Source File: TestRecordAccess.java From presto-kinesis with Apache License 2.0
private void createDummyMessages(String streamName, int count)
        throws Exception
{
    PutRecordsRequest putRecordsRequest = new PutRecordsRequest();
    putRecordsRequest.setStreamName(streamName);
    List<PutRecordsRequestEntry> putRecordsRequestEntryList = new ArrayList<>();
    for (int i = 0; i < count; i++) {
        PutRecordsRequestEntry putRecordsRequestEntry = new PutRecordsRequestEntry();
        putRecordsRequestEntry.setData(ByteBuffer.wrap(UUID.randomUUID().toString().getBytes()));
        putRecordsRequestEntry.setPartitionKey(Long.toString(i));
        putRecordsRequestEntryList.add(putRecordsRequestEntry);
    }
    putRecordsRequest.setRecords(putRecordsRequestEntryList);
    mockClient.putRecords(putRecordsRequest);
}
Example #7
Source File: KinesisTestProducer.java From attic-apex-malhar with Apache License 2.0
private void generateRecords()
{
    // Create dummy messages and flush them in batches of batchSize
    int recordNo = 1;
    while (recordNo <= sendCount) {
        String dataStr = "Record_" + recordNo;
        PutRecordsRequestEntry putRecordsEntry = new PutRecordsRequestEntry();
        putRecordsEntry.setData(ByteBuffer.wrap(dataStr.getBytes()));
        putRecordsEntry.setPartitionKey(dataStr);
        putRecordsRequestEntryList.add(putRecordsEntry);
        if ((putRecordsRequestEntryList.size() == batchSize) || (recordNo == sendCount)) {
            PutRecordsRequest putRecordsRequest = new PutRecordsRequest();
            putRecordsRequest.setStreamName(streamName);
            putRecordsRequest.setRecords(putRecordsRequestEntryList);
            client.putRecords(putRecordsRequest);
            putRecordsRequestEntryList.clear();
        }
        recordNo++;
    }
}
Example #8
Source File: TestRecordAccess.java From presto with Apache License 2.0
private void createDummyMessages(String streamName, int count)
{
    PutRecordsRequest putRecordsRequest = new PutRecordsRequest();
    putRecordsRequest.setStreamName(streamName);
    List<PutRecordsRequestEntry> putRecordsRequestEntryList = new ArrayList<>();
    for (int i = 0; i < count; i++) {
        PutRecordsRequestEntry putRecordsRequestEntry = new PutRecordsRequestEntry();
        putRecordsRequestEntry.setData(ByteBuffer.wrap(UUID.randomUUID().toString().getBytes(UTF_8)));
        putRecordsRequestEntry.setPartitionKey(Long.toString(i));
        putRecordsRequestEntryList.add(putRecordsRequestEntry);
    }
    putRecordsRequest.setRecords(putRecordsRequestEntryList);
    mockClient.putRecords(putRecordsRequest);
}
Example #9
Source File: RecordBatcher.java From aws-big-data-blog with Apache License 2.0
public Optional<PutRecordsRequest> flush() {
    if (entries.size() > 0) {
        PutRecordsRequest r = new PutRecordsRequest();
        r.setRecords(entries);
        entries = new ArrayList<>();
        requestSize = 0;
        return Optional.of(r);
    } else {
        return Optional.empty();
    }
}
Example #10
Source File: RecordBatcher.java From aws-big-data-blog with Apache License 2.0
public Optional<PutRecordsRequest> put(PutRecordsRequestEntry entry) {
    int newRequestSize = requestSize + entry.getData().remaining() + entry.getPartitionKey().length();
    if (entries.size() < maxCount && newRequestSize <= maxSize) {
        requestSize = newRequestSize;
        entries.add(entry);
        return Optional.empty();
    } else {
        Optional<PutRecordsRequest> ret = flush();
        put(entry);
        return ret;
    }
}
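The two RecordBatcher methods above accumulate entries until either the record count or the byte-size limit would be exceeded, at which point put() hands back a full PutRecordsRequest for the caller to send. A minimal, illustrative driver loop for that contract follows; the batcher, kinesis client, streamName, and pendingEntries variables are assumptions for the sake of the sketch, and note that flush() does not set the stream name itself.

// Hypothetical caller of RecordBatcher; not part of the original class.
for (PutRecordsRequestEntry entry : pendingEntries) {
    // put() returns a full request once adding the entry would exceed maxCount or maxSize
    batcher.put(entry).ifPresent(request -> {
        request.setStreamName(streamName);
        kinesis.putRecords(request);
    });
}
// Send whatever remains in the final, partially filled batch
batcher.flush().ifPresent(request -> {
    request.setStreamName(streamName);
    kinesis.putRecords(request);
});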
Example #11
Source File: AbstractKinesisOutputOperator.java From attic-apex-malhar with Apache License 2.0
private void flushRecords()
{
    try {
        PutRecordsRequest putRecordsRequest = new PutRecordsRequest();
        putRecordsRequest.setStreamName(streamName);
        putRecordsRequest.setRecords(putRecordsRequestEntryList);
        client.putRecords(putRecordsRequest);
        putRecordsRequestEntryList.clear();
        logger.debug("Records flushed.");
    } catch (AmazonClientException e) {
        logger.warn("PutRecordsRequest exception.", e);
        throw new RuntimeException(e);
    }
}
Example #12
Source File: SampleNormalProducer.java From kinesis-aggregation with Apache License 2.0
public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.println("USAGE: SampleNormalProducer <stream name> <region>");
        System.exit(1);
    }

    String streamName = args[0];
    String regionName = args[1];
    AmazonKinesis producer = ProducerUtils.getKinesisProducer(regionName);

    System.out.println("Creating " + ProducerConfig.RECORDS_TO_TRANSMIT + " records...");
    List<PutRecordsRequestEntry> entries = new LinkedList<>();
    for (int i = 1; i <= ProducerConfig.RECORDS_TO_TRANSMIT; i++) {
        byte[] data = ProducerUtils.randomData(i, ProducerConfig.RECORD_SIZE_BYTES);
        entries.add(new PutRecordsRequestEntry()
                .withPartitionKey(ProducerUtils.randomPartitionKey())
                .withExplicitHashKey(ProducerUtils.randomExplicitHashKey())
                .withData(ByteBuffer.wrap(data)));
    }

    PutRecordsRequest request = new PutRecordsRequest()
            .withRecords(entries)
            .withStreamName(streamName);

    System.out.println("Sending " + ProducerConfig.RECORDS_TO_TRANSMIT + " records...");
    producer.putRecords(request);
    System.out.println("Complete.");
}
Example #13
Source File: KinesisServiceImpl.java From Serverless-Programming-Cookbook with MIT License
private void flushBatch(final String streamName, final LambdaLogger logger) {
    final PutRecordsResult result = this.kinesisClient.putRecords(new PutRecordsRequest()
            .withStreamName(streamName)
            .withRecords(this.kinesisBatch));

    result.getRecords().forEach(r -> {
        if (!(StringUtils.hasValue(r.getErrorCode()))) {
            String successMessage = "Successfully processed record with sequence number: "
                    + r.getSequenceNumber() + ", shard id: " + r.getShardId();
            logger.log(successMessage);
        } else {
            this.documentAddedCount--;
            String errorMessage = "Did not process record with sequence number: " + r.getSequenceNumber()
                    + ", error code: " + r.getErrorCode()
                    + ", error message: " + r.getErrorMessage();
            logger.log(errorMessage);
            this.isError = true;
        }
    });

    // You may also implement retry logic for the failed records only (e.g. create a list of failed records,
    // add the error records to it, and retry them until a maximum retry count is reached).
    /*
    if (result.getFailedRecordCount() != null && result.getFailedRecordCount() > 0) {
        result.getRecords().forEach(r -> {
            if ((r != null) && (StringUtils.hasValue(r.getErrorCode()))) {
                // add this record to the retry list.
            }
        });
    }
    */
}
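The commented-out block above only collects the failed result entries. A fuller retry would map each failed result back to its original request entry (the result list is index-aligned with the request list) and resend only those. Here is a minimal sketch of that idea; it is not part of the original KinesisServiceImpl, and the maxRetries parameter and method name are hypothetical.

// Illustrative helper: retry only the records that PutRecords reported as failed.
private void putWithRetries(String streamName, List<PutRecordsRequestEntry> batch, int maxRetries) {
    List<PutRecordsRequestEntry> pending = batch;
    for (int attempt = 0; attempt <= maxRetries && !pending.isEmpty(); attempt++) {
        PutRecordsResult result = this.kinesisClient.putRecords(new PutRecordsRequest()
                .withStreamName(streamName)
                .withRecords(pending));
        if (result.getFailedRecordCount() == null || result.getFailedRecordCount() == 0) {
            return; // everything was accepted
        }
        // Result entries correspond to request entries by position,
        // so a failed result at index i maps to request entry i.
        List<PutRecordsRequestEntry> failed = new ArrayList<>();
        List<PutRecordsResultEntry> results = result.getRecords();
        for (int i = 0; i < results.size(); i++) {
            if (results.get(i).getErrorCode() != null) {
                failed.add(pending.get(i));
            }
        }
        pending = failed;
    }
}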
Example #14
Source File: TestMinimalFunctionality.java From presto with Apache License 2.0
private void createMessages(String streamName, long count)
{
    PutRecordsRequest putRecordsRequest = new PutRecordsRequest();
    putRecordsRequest.setStreamName(streamName);
    List<PutRecordsRequestEntry> putRecordsRequestEntryList = new ArrayList<>();
    for (int i = 0; i < count; i++) {
        PutRecordsRequestEntry putRecordsRequestEntry = new PutRecordsRequestEntry();
        putRecordsRequestEntry.setData(ByteBuffer.wrap(UUID.randomUUID().toString().getBytes(UTF_8)));
        putRecordsRequestEntry.setPartitionKey(Long.toString(i));
        putRecordsRequestEntryList.add(putRecordsRequestEntry);
    }
    putRecordsRequest.setRecords(putRecordsRequestEntryList);
    embeddedKinesisStream.getKinesisClient().putRecords(putRecordsRequest);
}
Example #15
Source File: AmazonKinesisMock.java From beam with Apache License 2.0
@Override
public PutRecordsResult putRecords(PutRecordsRequest putRecordsRequest) {
    throw new RuntimeException("Not implemented");
}
Example #16
Source File: ZPublisher.java From bidder with Apache License 2.0
/**
 * Run the Kinesis logger in a loop.
 */
public void runKinesisLogger() {
    Object msg = null;
    String str = null;
    int i;
    List<PutRecordsRequestEntry> putRecordsRequestEntryList = new ArrayList<>();

    while (!me.isInterrupted()) {
        try {
            if ((msg = queue.poll()) != null) {
                i = 1;
                PutRecordsRequest putRecordsRequest = new PutRecordsRequest();
                // Drain the queue into entries, sending at most 100 records per request
                while (msg != null) {
                    str = serialize(msg);
                    byte[] bytes = str.getBytes();
                    PutRecordsRequestEntry putRecordsRequestEntry = new PutRecordsRequestEntry();
                    putRecordsRequestEntry.setPartitionKey(kinesis.getPartition());
                    putRecordsRequestEntry.setData(ByteBuffer.wrap(bytes));
                    putRecordsRequestEntryList.add(putRecordsRequestEntry);
                    if (i++ == 100)
                        msg = null;
                    else
                        msg = queue.poll();
                }
                putRecordsRequest.setRecords(putRecordsRequestEntryList);
                putRecordsRequest.setStreamName(kinesis.getStream());
                PutRecordsResult putRecordsResult = kinesis.getKinesis().putRecords(putRecordsRequest);
                putRecordsRequestEntryList.clear();
            }
            Thread.sleep(1);

            /*
            while ((msg = queue.poll()) != null) {
                str = serialize(msg);
                var bytes = str.getBytes();

                PutRecordRequest putRecord = new PutRecordRequest();
                putRecord.setStreamName(kinesis.getStream());
                putRecord.setPartitionKey(kinesis.getPartition());
                putRecord.setData(ByteBuffer.wrap(bytes));
                try {
                    kinesis.getKinesis().putRecord(putRecord);
                } catch (Exception ex) {
                    ex.printStackTrace();
                }
            }
            Thread.sleep(1);
            */
        } catch (Exception e) {
            e.printStackTrace();
            // return;
        }
    }
}
Example #17
Source File: CamelSourceAWSKinesisITCase.java From camel-kafka-connector with Apache License 2.0
private void putRecords() {
    PutRecordsRequest putRecordsRequest = new PutRecordsRequest();
    putRecordsRequest.setStreamName(AWSCommon.DEFAULT_KINESIS_STREAM);

    List<PutRecordsRequestEntry> putRecordsRequestEntryList = new ArrayList<>();

    LOG.debug("Adding data to the Kinesis stream");
    for (int i = 0; i < expect; i++) {
        PutRecordsRequestEntry putRecordsRequestEntry = new PutRecordsRequestEntry();
        putRecordsRequestEntry.setData(ByteBuffer.wrap(String.valueOf(i).getBytes()));

        String partition = String.format("partitionKey-%d", i);
        putRecordsRequestEntry.setPartitionKey(partition);

        LOG.debug("Added data {} (as bytes) to partition {}", i, partition);
        putRecordsRequestEntryList.add(putRecordsRequestEntry);
    }
    LOG.debug("Done creating the data records");

    int retries = 5;
    do {
        try {
            putRecordsRequest.setRecords(putRecordsRequestEntryList);
            PutRecordsResult putRecordsResult = awsKinesisClient.putRecords(putRecordsRequest);

            if (putRecordsResult.getFailedRecordCount() == 0) {
                LOG.debug("Done putting the data records into the stream");
            } else {
                fail("Unable to put all the records into the stream");
            }

            break;
        } catch (AmazonServiceException e) {
            retries--;

            /*
             This works around the "... Cannot deserialize instance of `...AmazonKinesisException` out of NOT_AVAILABLE token"
             error. It may take some time for the local Kinesis backend to be fully up, even though the container is
             reportedly up and running; therefore, it tries a few more times.
             */
            LOG.trace("Failed to put the records: {}. Retrying in 2 seconds ...", e.getMessage());

            if (retries == 0) {
                LOG.error("Failed to put the records: {}", e.getMessage(), e);
                throw e;
            }

            try {
                Thread.sleep(TimeUnit.SECONDS.toMillis(2));
            } catch (InterruptedException ex) {
                break;
            }
        }
    } while (retries > 0);
}
Example #18
Source File: BatchedClickEventsToKinesis.java From aws-big-data-blog with Apache License 2.0
protected void flush() {
    kinesis.putRecords(new PutRecordsRequest()
            .withStreamName(STREAM_NAME)
            .withRecords(entries));
    entries.clear();
}