com.amazonaws.services.kinesis.producer.Attempt Java Examples
The following examples show how to use com.amazonaws.services.kinesis.producer.Attempt, the Kinesis Producer Library (KPL) class that describes a single attempt to put a user record, including its error code, error message, delay, and duration.
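All of the snippets below follow the same basic pattern: a put made through the KPL either succeeds with a UserRecordResult or fails with a UserRecordFailedException, and in both cases getAttempts() exposes the list of Attempt objects recorded for that record. As a minimal, hypothetical sketch of that pattern (the AttemptInspector class and its summarize method are illustrative only and not taken from any of the projects listed; only the Attempt and UserRecordFailedException calls appear in the examples), each Attempt can be summarized like this:

import java.util.List;

import com.amazonaws.services.kinesis.producer.Attempt;
import com.amazonaws.services.kinesis.producer.UserRecordFailedException;

// Hypothetical helper class; the name is illustrative, not from any project below.
public class AttemptInspector {

    /** Builds a readable summary of every Attempt recorded for a failed user record. */
    public static String summarize(UserRecordFailedException e) {
        StringBuilder sb = new StringBuilder();
        List<Attempt> attempts = e.getResult().getAttempts();
        for (Attempt attempt : attempts) {
            sb.append(attempt.isSuccessful() ? "OK" : "FAILED")
              .append(" ").append(attempt.getErrorCode())
              .append(": ").append(attempt.getErrorMessage())
              .append(" (delay=").append(attempt.getDelay())
              .append(" ms, duration=").append(attempt.getDuration()).append(" ms)")
              .append(System.lineSeparator());
        }
        return sb.toString();
    }
}

The examples that follow show the same calls as used in real projects: logging the last failed Attempt, concatenating every error message, or converting the attempt list into structured records.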
Example #1
Source File: FlinkKinesisProducer.java (from Flink-CEPplus, Apache License 2.0)

/**
 * Check if there are any asynchronous exceptions. If so, rethrow the exception.
 */
private void checkAndPropagateAsyncError() throws Exception {
    if (thrownException != null) {
        String errorMessages = "";
        if (thrownException instanceof UserRecordFailedException) {
            List<Attempt> attempts = ((UserRecordFailedException) thrownException).getResult().getAttempts();
            for (Attempt attempt : attempts) {
                if (attempt.getErrorMessage() != null) {
                    errorMessages += attempt.getErrorMessage() + "\n";
                }
            }
        }
        if (failOnError) {
            throw new RuntimeException("An exception was thrown while processing a record: " + errorMessages, thrownException);
        } else {
            LOG.warn("An exception was thrown while processing a record: {}", thrownException, errorMessages);
            // reset, prevent double throwing
            thrownException = null;
        }
    }
}
Example #2
Source File: FlinkKinesisProducer.java (from flink, Apache License 2.0)

/**
 * Check if there are any asynchronous exceptions. If so, rethrow the exception.
 */
private void checkAndPropagateAsyncError() throws Exception {
    if (thrownException != null) {
        String errorMessages = "";
        if (thrownException instanceof UserRecordFailedException) {
            List<Attempt> attempts = ((UserRecordFailedException) thrownException).getResult().getAttempts();
            for (Attempt attempt : attempts) {
                if (attempt.getErrorMessage() != null) {
                    errorMessages += attempt.getErrorMessage() + "\n";
                }
            }
        }
        if (failOnError) {
            throw new RuntimeException("An exception was thrown while processing a record: " + errorMessages, thrownException);
        } else {
            LOG.warn("An exception was thrown while processing a record: {}", thrownException, errorMessages);
            // reset, prevent double throwing
            thrownException = null;
        }
    }
}
Example #3
Source File: KinesisEventProducer.java (from koupler, MIT License)

public void send(String event) throws UnsupportedEncodingException {
    byte[] bytes = event.getBytes("UTF-8");
    this.metrics.queueEvent(bytes.length);
    ByteBuffer data = ByteBuffer.wrap(bytes);
    String partitionKey = getPartitionKey(event);
    if (partitionKey != null) {
        ListenableFuture<UserRecordResult> f = producer.addUserRecord(streamName, partitionKey, data);
        Futures.addCallback(f, new FutureCallback<UserRecordResult>() {
            @Override
            public void onFailure(Throwable t) {
                if (t instanceof UserRecordFailedException) {
                    Attempt last = Iterables.getLast(((UserRecordFailedException) t).getResult().getAttempts());
                    LOGGER.error(String.format("Record failed to put - %s : %s", last.getErrorCode(), last.getErrorMessage()));
                }
                LOGGER.error("Exception during put", t);
            }

            @Override
            public void onSuccess(UserRecordResult result) {
                metrics.ackEvent();
            }
        });
    }
}
Example #4
Source File: KinesisTarget.java (from datacollector, Apache License 2.0)

private void getAndCheck(Future<UserRecordResult> future) throws StageException {
    try {
        UserRecordResult result = future.get();
        if (!result.isSuccessful()) {
            for (Attempt attempt : result.getAttempts()) {
                LOG.error("Failed to put record: {}", attempt.getErrorMessage());
            }
            throw new StageException(Errors.KINESIS_00, result.getAttempts().get(0).getErrorMessage());
        }
        if (responseConf.sendResponseToOrigin &&
                this.responseConf.responseType.equals(ResponseType.DESTINATION_RESPONSE)) {
            getContext().toSourceResponse(generateDestResponseRecord(result));
        }
    } catch (InterruptedException | ExecutionException e) {
        LOG.error("Pipeline is shutting down.", e);
        // We should flush if we encounter an error.
        kinesisProducer.flushSync();
    }
}
Example #5
Source File: KinesisTarget.java (from datacollector, Apache License 2.0)

private Record generateDestResponseRecord(UserRecordResult result) {
    Record record = getContext().createRecord("responseRecord");
    List<Field> attemptsVal = new ArrayList<>();

    for (Attempt attempt : result.getAttempts()) {
        LinkedHashMap<String, Field> attemptVal = new LinkedHashMap<>();
        attemptVal.put("delay", Field.create(attempt.getDelay()));
        attemptVal.put("duration", Field.create(attempt.getDuration()));
        attemptVal.put("errorMessage", Field.create(attempt.getErrorMessage()));
        attemptVal.put("errorCode", Field.create(attempt.getErrorCode()));
        attemptVal.put("success", Field.create(attempt.isSuccessful()));
        attemptsVal.add(Field.createListMap(attemptVal));
    }

    LinkedHashMap<String, Field> resultVal = new LinkedHashMap<>();
    resultVal.put("sequenceNumber", Field.create(result.getSequenceNumber()));
    resultVal.put("shardId", Field.create(result.getShardId()));
    resultVal.put("successful", Field.create(result.isSuccessful()));
    resultVal.put("attempts", Field.create(attemptsVal));

    record.set(Field.createListMap(resultVal));
    return record;
}
Example #6
Source File: FlinkKinesisProducer.java (from flink, Apache License 2.0)

/**
 * Check if there are any asynchronous exceptions. If so, rethrow the exception.
 */
private void checkAndPropagateAsyncError() throws Exception {
    if (thrownException != null) {
        String errorMessages = "";
        if (thrownException instanceof UserRecordFailedException) {
            List<Attempt> attempts = ((UserRecordFailedException) thrownException).getResult().getAttempts();
            for (Attempt attempt : attempts) {
                if (attempt.getErrorMessage() != null) {
                    errorMessages += attempt.getErrorMessage() + "\n";
                }
            }
        }
        if (failOnError) {
            throw new RuntimeException("An exception was thrown while processing a record: " + errorMessages, thrownException);
        } else {
            LOG.warn("An exception was thrown while processing a record: {}.", errorMessages, thrownException);
            // reset, prevent double throwing
            thrownException = null;
        }
    }
}
Example #7
Source File: Stream.java (from amazon-neptune-tools, Apache License 2.0)

private static String formatAttempts(List<Attempt> attempts) {
    StringBuilder builder = new StringBuilder();
    for (Attempt attempt : attempts) {
        builder.append("[");
        builder.append(attempt.getErrorCode()).append(":").append(attempt.getErrorMessage());
        builder.append("(").append(attempt.getDelay()).append(",").append(attempt.getDuration()).append(")");
        builder.append("]");
    }
    return builder.toString();
}
Example #8
Source File: AmazonKinesisSinkTask.java (from kinesis-kafka-connector, Apache License 2.0)

@Override
public void onFailure(Throwable t) {
    if (t instanceof UserRecordFailedException) {
        Attempt last = Iterables.getLast(((UserRecordFailedException) t).getResult().getAttempts());
        throw new DataException("Kinesis Producer was not able to publish data - " + last.getErrorCode() + "-"
                + last.getErrorMessage());
    }
    throw new DataException("Exception during Kinesis put", t);
}
Example #9
Source File: KinesisIO.java (from beam, Apache License 2.0)

/** If any write has asynchronously failed, fail the bundle with a useful error. */
private void checkForFailures(String message) throws IOException {
    if (failures.isEmpty()) {
        return;
    }

    StringBuilder logEntry = new StringBuilder();
    logEntry.append(message).append(System.lineSeparator());
    int i = 0;
    while (!failures.isEmpty()) {
        i++;
        KinesisWriteException exc = failures.remove();

        logEntry.append(System.lineSeparator()).append(exc.getMessage());
        Throwable cause = exc.getCause();
        if (cause != null) {
            logEntry.append(": ").append(cause.getMessage());
            if (cause instanceof UserRecordFailedException) {
                List<Attempt> attempts = ((UserRecordFailedException) cause).getResult().getAttempts();
                for (Attempt attempt : attempts) {
                    if (attempt.getErrorMessage() != null) {
                        logEntry.append(System.lineSeparator()).append(attempt.getErrorMessage());
                    }
                }
            }
        }
    }

    String errorMessage = String.format(
            "Some errors occurred writing to Kinesis. First %d errors: %s", i, logEntry.toString());
    throw new IOException(errorMessage);
}
Example #10
Source File: SampleProducer.java (from real-time-analytics-spark-streaming, Apache License 2.0)

/**
 * The main method.
 *
 * @param args The command line args for the Sample Producer. It takes 3 optional position parameters:
 *             1. The stream name to use (default-data-stream is default)
 *             2. The region name to use (us-east-1 is default)
 *             3. The duration of the test in seconds, 5 is the default.
 */
public static void main(String[] args) throws Exception {
    final String streamName = getArgIfPresent(args, 0, STREAM_NAME);
    final String region = getArgIfPresent(args, 1, REGION);
    final String secondsToRunString = getArgIfPresent(args, 2, String.valueOf(SECONDS_TO_RUN_DEFAULT));
    final int secondsToRun = Integer.parseInt(secondsToRunString);
    if (secondsToRun <= 0) {
        LOGGER.error("Seconds to Run should be a positive integer");
        System.exit(1);
    }

    final KinesisProducer producer = getKinesisProducer(region);
    final AtomicLong sequenceNumber = new AtomicLong(0);
    final AtomicLong completed = new AtomicLong(0);

    FutureCallback<UserRecordResult> callback = new FutureCallback<UserRecordResult>() {
        @Override
        public void onFailure(Throwable t) {
            // If we see any failures, we will log them.
            if (t instanceof UserRecordFailedException) {
                Attempt last = Iterables.getLast(((UserRecordFailedException) t).getResult().getAttempts());
                LOGGER.error(String.format("Record failed to put - %s : %s", last.getErrorCode(), last.getErrorMessage()));
            }
            LOGGER.error("Exception during put", t);
        }

        @Override
        public void onSuccess(UserRecordResult result) {
            completed.getAndIncrement();
        }
    };

    final ExecutorService callbackThreadPool = Executors.newCachedThreadPool();

    // The lines within run() are the essence of the KPL API.
    final Runnable putOneRecord = new Runnable() {
        @Override
        public void run() {
            ByteBuffer data = generateData();
            // TIMESTAMP is our partition key
            ListenableFuture<UserRecordResult> f =
                    producer.addUserRecord(streamName, TIMESTAMP, randomExplicitHashKey(), data);
            Futures.addCallback(f, callback, callbackThreadPool);
        }
    };

    EXECUTOR.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            long put = sequenceNumber.get();
            long total = RECORDS_PER_SECOND * secondsToRun;
            double putPercent = 100.0 * put / total;
            long done = completed.get();
            double donePercent = 100.0 * done / total;
            LOGGER.info(String.format(
                    "Put %d of %d so far (%.2f %%), %d have completed (%.2f %%)",
                    put, total, putPercent, done, donePercent
            ));
        }
    }, 1, 1, TimeUnit.SECONDS);

    executeAtTargetRate(EXECUTOR, putOneRecord, sequenceNumber, secondsToRun, RECORDS_PER_SECOND);

    EXECUTOR.awaitTermination(secondsToRun + 1, TimeUnit.SECONDS);
    LOGGER.info("Waiting for remaining puts to finish...");
    producer.flushSync();
    LOGGER.info("All records complete.");

    producer.destroy();
    LOGGER.info("Finished.");
}
Example #11
Source File: SampleKPLProducer.java (from kinesis-aggregation, Apache License 2.0)

public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.println("Usage SampleKPLProducer <stream name> <region>");
        System.exit(1);
    }

    String streamName = args[0];
    String regionName = args[1];

    final KinesisProducer producer = getKinesisProducer(regionName);

    final AtomicLong sequenceNumber = new AtomicLong(0);
    final AtomicLong completed = new AtomicLong(0);

    final FutureCallback<UserRecordResult> callback = new FutureCallback<UserRecordResult>() {
        @Override
        public void onFailure(Throwable t) {
            if (t instanceof UserRecordFailedException) {
                Attempt last = Iterables.getLast(((UserRecordFailedException) t).getResult().getAttempts());
                System.err.println(String.format("Record failed to put - %s : %s", last.getErrorCode(), last.getErrorMessage()));
            }
            System.err.println("Exception during put: " + t.getMessage());
            t.printStackTrace();
            System.exit(1);
        }

        @Override
        public void onSuccess(UserRecordResult result) {
            completed.getAndIncrement();
        }
    };

    final Runnable putOneRecord = new Runnable() {
        @Override
        public void run() {
            byte[] data = ProducerUtils.randomData(sequenceNumber.get(), ProducerConfig.RECORD_SIZE_BYTES);
            ListenableFuture<UserRecordResult> f = producer.addUserRecord(streamName,
                    ProducerUtils.randomPartitionKey(), ProducerUtils.randomExplicitHashKey(), ByteBuffer.wrap(data));
            Futures.addCallback(f, callback);
        }
    };

    EXECUTOR.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            long put = sequenceNumber.get();
            long total = RECORDS_PER_SECOND * SECONDS_TO_RUN;
            double putPercent = 100.0 * put / total;
            long done = completed.get();
            double donePercent = 100.0 * done / total;
            System.out.println(String.format("Put %d of %d so far (%.2f %%), %d have completed (%.2f %%)",
                    put, total, putPercent, done, donePercent));
        }
    }, 1, 1, TimeUnit.SECONDS);

    System.out.println(String.format("Starting puts... will run for %d seconds at %d records per second",
            SECONDS_TO_RUN, RECORDS_PER_SECOND));

    executeAtTargetRate(EXECUTOR, putOneRecord, sequenceNumber, SECONDS_TO_RUN, RECORDS_PER_SECOND);

    EXECUTOR.awaitTermination(SECONDS_TO_RUN + 1, TimeUnit.SECONDS);
    System.out.println("Waiting for remaining puts to finish...");
    producer.flushSync();
    System.out.println("All records complete.");

    producer.destroy();
    System.out.println("Finished.");
}