com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer Java Examples
The following examples show how to use
com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer.
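Every example below uses the same underlying pattern: the Kinesis Client Library (KCL) passes an IRecordProcessorCheckpointer into an IRecordProcessor, and the processor calls checkpoint() (or checkpoint(Record) / checkpoint(String sequenceNumber)) to record how far it has read in its shard. As a quick orientation, here is a minimal sketch of that pattern; it is not taken from any of the projects below, the class name SampleProcessor, the checkpoint interval, and the error handling are illustrative assumptions, and the import location of ShutdownReason varies across KCL 1.x releases.

import java.util.List;

import com.amazonaws.services.kinesis.clientlibrary.exceptions.InvalidStateException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibDependencyException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.ShutdownException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.ThrottlingException;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessor;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason;
import com.amazonaws.services.kinesis.model.Record;

public class SampleProcessor implements IRecordProcessor {

    private static final long CHECKPOINT_INTERVAL_MILLIS = 60_000L;

    private String kinesisShardId;
    private long nextCheckpointTimeInMillis;

    @Override
    public void initialize(String shardId) {
        this.kinesisShardId = shardId;
        nextCheckpointTimeInMillis = System.currentTimeMillis() + CHECKPOINT_INTERVAL_MILLIS;
    }

    @Override
    public void processRecords(List<Record> records, IRecordProcessorCheckpointer checkpointer) {
        for (Record record : records) {
            // Business logic goes here: record.getData() holds the payload.
        }
        // Checkpoint periodically rather than after every batch to limit writes to the lease table.
        if (System.currentTimeMillis() > nextCheckpointTimeInMillis) {
            checkpoint(checkpointer);
            nextCheckpointTimeInMillis = System.currentTimeMillis() + CHECKPOINT_INTERVAL_MILLIS;
        }
    }

    @Override
    public void shutdown(IRecordProcessorCheckpointer checkpointer, ShutdownReason reason) {
        // Checkpoint on TERMINATE so the KCL can continue with the shard's children after a reshard.
        if (reason == ShutdownReason.TERMINATE) {
            checkpoint(checkpointer);
        }
    }

    private void checkpoint(IRecordProcessorCheckpointer checkpointer) {
        try {
            checkpointer.checkpoint();
        } catch (KinesisClientLibDependencyException | InvalidStateException
                | ThrottlingException | ShutdownException e) {
            // Log and rely on the next checkpoint attempt; production code may also retry here.
            System.err.println("Checkpoint failed for shard " + kinesisShardId + ": " + e.getMessage());
        }
    }
}

Checkpointing when the shutdown reason is TERMINATE, as most of the shutdown() examples below do, is what lets the KCL resume from the child shards after a reshard; checkpointing too aggressively, by contrast, can trigger ThrottlingException, which Example #4 handles with a retry loop.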
Example #1
Source File: KinesisConnectorRecordProcessorTests.java From amazon-kinesis-connectors with Apache License 2.0 | 6 votes |
@Before
@SuppressWarnings("unchecked")
public void setUp() {
    // control object used to create mock dependencies
    control = EasyMock.createControl();
    // mock dependencies
    emitter = control.createMock(IEmitter.class);
    transformer = control.createMock(ITransformer.class);
    buffer = control.createMock(IBuffer.class);
    filter = control.createMock(IFilter.class);
    checkpointer = control.createMock(IRecordProcessorCheckpointer.class);
    // use a real configuration to get actual default values (not anything created by EasyMock)
    configuration = new KinesisConnectorConfiguration(new Properties(), new DefaultAWSCredentialsProviderChain());
}
Example #2
Source File: KinesisClientLibraryPipelinedRecordProcessor.java From amazon-kinesis-connectors with Apache License 2.0 | 6 votes |
@Override public void shutdown(IRecordProcessorCheckpointer checkpointer, ShutdownReason reason) { LOG.info("Shutting down pipelined processor for shard: " + shardId + " with reason:" + reason); queueConsumer.shutdown = true; try { if (queueConsumerExecutor.awaitTermination(maxProcessRecordsWaitTimeMs, TimeUnit.MILLISECONDS)) { List<Record> records = new ArrayList<Record>(); recordQueue.drainTo(records); // No need to protect the checkpointer any longer. Record processing is in sync with record fetching. recordProcessor.processRecords(records, checkpointer); recordProcessor.shutdown(checkpointer, reason); } else { LOG.warn("Queue consumer took longer than " + maxProcessRecordsWaitTimeMs + " ms to complete. Shutdown task failed."); } } catch (InterruptedException e) { LOG.error("Interrupted while draining queue", e); Thread.currentThread().interrupt(); } }
Example #3
Source File: TestKinesisRecordProcessor.java From samza with Apache License 2.0 | 6 votes |
static Map<KinesisRecordProcessor, List<Record>> generateRecords(int numRecordsPerShard,
        List<KinesisRecordProcessor> processors) {
    Map<KinesisRecordProcessor, List<Record>> processorRecordMap = new HashMap<>();
    processors.forEach(processor -> {
        try {
            // Create records and call process records
            IRecordProcessorCheckpointer checkpointer = Mockito.mock(IRecordProcessorCheckpointer.class);
            doNothing().when(checkpointer).checkpoint(anyString());
            doNothing().when(checkpointer).checkpoint();
            ProcessRecordsInput processRecordsInput = Mockito.mock(ProcessRecordsInput.class);
            when(processRecordsInput.getCheckpointer()).thenReturn(checkpointer);
            when(processRecordsInput.getMillisBehindLatest()).thenReturn(1000L);
            List<Record> inputRecords = createRecords(numRecordsPerShard);
            processorRecordMap.put(processor, inputRecords);
            when(processRecordsInput.getRecords()).thenReturn(inputRecords);
            processor.processRecords(processRecordsInput);
        } catch (ShutdownException | InvalidStateException ex) {
            throw new RuntimeException(ex);
        }
    });
    return processorRecordMap;
}
Example #4
Source File: StreamSetsRecordProcessor.java From datacollector with Apache License 2.0 | 6 votes |
private void retryCheckpoint(IRecordProcessorCheckpointer checkpointer, Record checkpointRecord)
        throws InvalidStateException, ShutdownException, InterruptedException {
    if (throttleMaxRetries > 0) {
        LOG.debug("Retry checkpointing batch at record: {}", checkpointRecord.toString());
        int retryCount = 0;
        boolean success = false;
        while (retryCount < throttleMaxRetries && !success) {
            Thread.sleep(throttleWaitTime);
            try {
                checkpointer.checkpoint(checkpointRecord);
                success = true;
            } catch (ThrottlingException te) {
                // Still throttled; wait and try again until the retry budget is exhausted.
            }
            retryCount++;
        }
        if (success) {
            LOG.debug("Successfully checkpointed at record {}, after {} retries.", checkpointRecord.toString(), retryCount);
        } else {
            LOG.debug("Could not checkpoint batch at record {} after {} retries.", checkpointRecord.toString(), retryCount);
        }
    }
}
Example #5
Source File: MyRecordProcessor.java From aws-big-data-blog with Apache License 2.0 | 6 votes |
@Override
public void processRecords(List<Record> records, IRecordProcessorCheckpointer checkpointer) {
    LOG.info(String.format("Received %s Records", records.size()));

    // add a call to your business logic here!
    //
    // myLinkedClasses.doSomething(records)
    //

    try {
        checkpointer.checkpoint();
    } catch (KinesisClientLibDependencyException | InvalidStateException | ThrottlingException
            | ShutdownException e) {
        e.printStackTrace();
        super.shutdown(checkpointer, ShutdownReason.ZOMBIE);
    }
}
Example #6
Source File: KinesisConnectorRecordProcessor.java From amazon-kinesis-connectors with Apache License 2.0 | 6 votes |
@Override public void shutdown(IRecordProcessorCheckpointer checkpointer, ShutdownReason reason) { LOG.info("Shutting down record processor with shardId: " + shardId + " with reason " + reason); if (isShutdown) { LOG.warn("Record processor for shardId: " + shardId + " has been shutdown multiple times."); return; } switch (reason) { case TERMINATE: emit(checkpointer, transformToOutput(buffer.getRecords())); try { checkpointer.checkpoint(); } catch (KinesisClientLibDependencyException | InvalidStateException | ThrottlingException | ShutdownException e) { LOG.error(e); } break; case ZOMBIE: break; default: throw new IllegalStateException("invalid shutdown reason"); } emitter.shutdown(); isShutdown = true; }
Example #7
Source File: ManagedClientProcessor.java From aws-big-data-blog with Apache License 2.0 | 6 votes |
/**
 * {@inheritDoc}
 */
@Override
public void shutdown(IRecordProcessorCheckpointer checkpointer, ShutdownReason reason) {
    LOG.info("Shutting down record processor for shard: " + kinesisShardId);
    // Important to checkpoint after reaching end of shard, so we can start
    // processing data from child shards.
    if (reason == ShutdownReason.TERMINATE) {
        try {
            checkpoint(checkpointer);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
Example #8
Source File: KinesisRecordProcessor.java From aws-big-data-blog with Apache License 2.0 | 5 votes |
/**
 * {@inheritDoc}
 */
public void shutdown(IRecordProcessorCheckpointer checkpointer, ShutdownReason reason) {
    LOG.info("Shutting down record processor for shard: " + kinesisShardId);
    // Important to checkpoint after reaching end of shard, so we can start processing data from child shards.
    if (reason == ShutdownReason.TERMINATE) {
        checkpoint(checkpointer);
    }
}
Example #9
Source File: KinesisClientLibraryPipelinedRecordProcessor.java From amazon-kinesis-connectors with Apache License 2.0 | 5 votes |
@Override
public void processRecords(List<Record> records, IRecordProcessorCheckpointer checkpointer) {
    queueConsumer.setCheckpointer(checkpointer);
    for (Record record : records) {
        try {
            recordQueue.put(record);
        } catch (InterruptedException e) {
            LOG.error("Interrupted while adding a record to the queue", e);
            Thread.currentThread().interrupt();
        }
    }
}
Example #10
Source File: KinesisConnectorRecordProcessor.java From amazon-kinesis-connectors with Apache License 2.0 | 5 votes |
@Override
public void processRecords(List<Record> records, IRecordProcessorCheckpointer checkpointer) {
    // Note: This method will be called even for empty record lists. This is needed for checking the buffer time
    // threshold.
    if (isShutdown) {
        LOG.warn("processRecords called on shutdown record processor for shardId: " + shardId);
        return;
    }
    if (shardId == null) {
        throw new IllegalStateException("Record processor not initialized");
    }
    // Transform each Amazon Kinesis Record and add the result to the buffer
    for (Record record : records) {
        try {
            if (transformer instanceof ITransformer) {
                ITransformer<T, U> singleTransformer = (ITransformer<T, U>) transformer;
                filterAndBufferRecord(singleTransformer.toClass(record), record);
            } else if (transformer instanceof ICollectionTransformer) {
                ICollectionTransformer<T, U> listTransformer = (ICollectionTransformer<T, U>) transformer;
                Collection<T> transformedRecords = listTransformer.toClass(record);
                for (T transformedRecord : transformedRecords) {
                    filterAndBufferRecord(transformedRecord, record);
                }
            } else {
                throw new RuntimeException("Transformer must implement ITransformer or ICollectionTransformer");
            }
        } catch (IOException e) {
            LOG.error(e);
        }
    }
    if (buffer.shouldFlush()) {
        List<U> emitItems = transformToOutput(buffer.getRecords());
        emit(checkpointer, emitItems);
    }
}
Example #11
Source File: StreamsRecordProcessor.java From aws-dynamodb-examples with Apache License 2.0 | 5 votes |
@Override
public void shutdown(IRecordProcessorCheckpointer checkpointer, ShutdownReason reason) {
    if (reason == ShutdownReason.TERMINATE) {
        try {
            checkpointer.checkpoint();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
Example #12
Source File: StreamsRecordProcessor.java From aws-dynamodb-examples with Apache License 2.0 | 5 votes |
@Override
public void processRecords(List<Record> records, IRecordProcessorCheckpointer checkpointer) {
    for (Record record : records) {
        String data = new String(record.getData().array(), Charset.forName("UTF-8"));
        System.out.println(data);
        if (record instanceof RecordAdapter) {
            com.amazonaws.services.dynamodbv2.model.Record streamRecord = ((RecordAdapter) record).getInternalObject();
            switch (streamRecord.getEventName()) {
                case "INSERT":
                case "MODIFY":
                    StreamsAdapterDemoHelper.putItem(dynamoDBClient, tableName, streamRecord.getDynamodb().getNewImage());
                    break;
                case "REMOVE":
                    StreamsAdapterDemoHelper.deleteItem(dynamoDBClient, tableName,
                            streamRecord.getDynamodb().getKeys().get("Id").getN());
            }
        }
        checkpointCounter += 1;
        if (checkpointCounter % 10 == 0) {
            try {
                checkpointer.checkpoint();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }
}
Example #13
Source File: StreamsRecordProcessor.java From aws-big-data-blog with Apache License 2.0 | 5 votes |
@Override public void shutdown(IRecordProcessorCheckpointer checkpointer, ShutdownReason reason) { LOG.info("Shutting down record processor for shard: " + kinesisShardId); // Important to checkpoint after reaching end of shard, so we can start // processing data from child shards. if (reason == ShutdownReason.TERMINATE) { checkpoint(checkpointer); } }
Example #14
Source File: StreamsRecordProcessor.java From aws-big-data-blog with Apache License 2.0 | 5 votes |
@Override public void processRecords(List<Record> records, IRecordProcessorCheckpointer checkpointer) { LOG.info("Processing " + records.size() + " records from " + kinesisShardId); // Process records and perform all exception handling. try { processRecordsWithRetries(records); checkpoint(checkpointer); } catch (Exception e) { System.err.println("Unhandled Exception while processing record set. Shutdown"); } }
Example #15
Source File: KinesisRecordProcessor.java From aws-big-data-blog with Apache License 2.0 | 5 votes |
/**
 * {@inheritDoc}
 */
public void processRecords(List<Record> records, IRecordProcessorCheckpointer checkpointer) {
    LOG.info("Processing " + records.size() + " records from " + kinesisShardId);
    // Process records and perform all exception handling.
    processRecordsWithRetries(records);
    // Checkpoint once every checkpoint interval.
    if (System.currentTimeMillis() > nextCheckpointTimeInMillis) {
        checkpoint(checkpointer);
        nextCheckpointTimeInMillis = System.currentTimeMillis() + CHECKPOINT_INTERVAL_MILLIS;
    }
}
Example #16
Source File: KinesisRecordProcessor.java From amazon-kinesis-video-streams-parser-library with Apache License 2.0 | 5 votes |
/**
 * {@inheritDoc}
 */
@Override
public void shutdown(final IRecordProcessorCheckpointer checkpointer, final ShutdownReason reason) {
    LOG.info("Shutting down record processor for shard: " + kinesisShardId);
    // Important to checkpoint after reaching end of shard, so we can start processing data from child shards.
    if (reason == ShutdownReason.TERMINATE) {
        checkpoint(checkpointer);
    }
}
Example #17
Source File: KinesisRecordProcessor.java From amazon-kinesis-video-streams-parser-library with Apache License 2.0 | 5 votes |
/**
 * {@inheritDoc}
 */
@Override
public void processRecords(final List<Record> records, final IRecordProcessorCheckpointer checkpointer) {
    LOG.info("Processing " + records.size() + " records from " + kinesisShardId);
    // Process records and perform all exception handling.
    processRecordsWithRetries(records);
    // Checkpoint once every checkpoint interval.
    if (System.currentTimeMillis() > nextCheckpointTimeInMillis) {
        checkpoint(checkpointer);
        nextCheckpointTimeInMillis = System.currentTimeMillis() + CHECKPOINT_INTERVAL_MILLIS;
    }
}
Example #18
Source File: DynamoDBTableReplicator.java From podyn with Apache License 2.0 | 4 votes |
protected IRecordProcessor createStreamProcessor() {
    return new IRecordProcessor() {

        @Override
        public void initialize(InitializationInput initializationInput) {
        }

        public List<Record> extractDynamoStreamRecords(List<com.amazonaws.services.kinesis.model.Record> kinesisRecords) {
            List<Record> dynamoRecords = new ArrayList<>(kinesisRecords.size());
            for (com.amazonaws.services.kinesis.model.Record kinesisRecord : kinesisRecords) {
                if (kinesisRecord instanceof RecordAdapter) {
                    Record dynamoRecord = ((RecordAdapter) kinesisRecord).getInternalObject();
                    dynamoRecords.add(dynamoRecord);
                }
            }
            return dynamoRecords;
        }

        @Override
        public void processRecords(ProcessRecordsInput processRecordsInput) {
            List<Record> records = extractDynamoStreamRecords(processRecordsInput.getRecords());
            DynamoDBTableReplicator.this.processRecords(records);
            checkpoint(processRecordsInput.getCheckpointer());
        }

        @Override
        public void shutdown(ShutdownInput shutdownInput) {
            if (shutdownInput.getShutdownReason() == ShutdownReason.TERMINATE) {
                checkpoint(shutdownInput.getCheckpointer());
            }
        }

        void checkpoint(IRecordProcessorCheckpointer checkpointer) {
            try {
                checkpointer.checkpoint();
            } catch (KinesisClientLibDependencyException | InvalidStateException | ThrottlingException
                    | ShutdownException e) {
                LOG.warn(e);
            }
        }
    };
}
Example #19
Source File: ManagedClientProcessor.java From aws-big-data-blog with Apache License 2.0 | 4 votes |
/**
 * {@inheritDoc}
 */
@Override
public abstract void processRecords(List<Record> records, IRecordProcessorCheckpointer checkpointer);
Example #20
Source File: TestKinesisRecordProcessor.java From samza with Apache License 2.0 | 4 votes |
static IRecordProcessorCheckpointer getCheckpointer(KinesisRecordProcessor processor)
        throws NoSuchFieldException, IllegalAccessException {
    Field f = processor.getClass().getDeclaredField("checkpointer");
    f.setAccessible(true);
    return (IRecordProcessorCheckpointer) f.get(processor);
}
Example #21
Source File: KinesisClientLibraryPipelinedRecordProcessor.java From amazon-kinesis-connectors with Apache License 2.0 | 4 votes |
public void setCheckpointer(IRecordProcessorCheckpointer checkpointer) {
    this.checkpointer = protectCheckpointer(checkpointer);
}