Java Code Examples for com.amazonaws.services.dynamodbv2.model.ConsumedCapacity#getCapacityUnits()
The following examples show how to use com.amazonaws.services.dynamodbv2.model.ConsumedCapacity#getCapacityUnits(). Each example notes the source file, project, and license it was taken from.
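Before the project examples, here is a minimal, self-contained sketch of the call pattern they all share: enable consumed-capacity reporting on the request, then read the units from the result. The table name ("MyTable"), key attribute, and client construction are hypothetical placeholders, not taken from any of the projects below.

import java.util.Collections;

import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder;
import com.amazonaws.services.dynamodbv2.model.AttributeValue;
import com.amazonaws.services.dynamodbv2.model.ConsumedCapacity;
import com.amazonaws.services.dynamodbv2.model.QueryRequest;
import com.amazonaws.services.dynamodbv2.model.QueryResult;
import com.amazonaws.services.dynamodbv2.model.ReturnConsumedCapacity;

public class ConsumedCapacityPrimer {
    public static void main(String[] args) {
        AmazonDynamoDB client = AmazonDynamoDBClientBuilder.defaultClient();

        // Ask DynamoDB to report consumed capacity; without this,
        // the result's ConsumedCapacity is null.
        QueryRequest request = new QueryRequest()
                .withTableName("MyTable")                  // placeholder table
                .withKeyConditionExpression("Id = :id")    // placeholder key schema
                .withExpressionAttributeValues(
                        Collections.singletonMap(":id", new AttributeValue().withS("42")))
                .withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL);

        QueryResult result = client.query(request);
        ConsumedCapacity capacity = result.getConsumedCapacity();
        if (capacity != null) {
            // getCapacityUnits() returns a Double; null-check before unboxing.
            System.out.println("Query consumed " + capacity.getCapacityUnits() + " capacity units");
        }
    }
}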
Example 1
Source File: QueryWorker.java From dynamodb-janusgraph-storage-backend with Apache License 2.0
@Override
public QueryResultWrapper next() throws BackendException {
    final Query backoff = new ExponentialBackoff.Query(request, delegate, permitsToConsume);
    final QueryResult result = backoff.runWithBackoff();

    final ConsumedCapacity consumedCapacity = result.getConsumedCapacity();
    if (null != consumedCapacity) {
        permitsToConsume = Math.max((int) (consumedCapacity.getCapacityUnits() - 1.0), 1);
        totalCapacityUnits += consumedCapacity.getCapacityUnits();
    }

    if (result.getLastEvaluatedKey() != null && !result.getLastEvaluatedKey().isEmpty()) {
        request.setExclusiveStartKey(result.getLastEvaluatedKey());
    } else {
        markComplete();
    }

    // Update the returned-item count
    returnedCount += result.getCount();
    // Update the scanned-item count
    scannedCount += result.getScannedCount();
    // Accumulate the scanned items
    finalItemList.addAll(result.getItems());
    return new QueryResultWrapper(titanKey, result);
}
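Here the capacity consumed by one page of the query sizes the permit request for the next page: permitsToConsume is derived from getCapacityUnits(), so a paginated query backs off in proportion to what it actually consumed.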
Example 2
Source File: AbstractDynamoDBRecordWriter.java From emr-dynamodb-connector with Apache License 2.0
@Override
public void write(K key, V value) throws IOException {
    if (value == null) {
        throw new RuntimeException("Null record encountered. At least the key columns must be "
            + "specified.");
    }

    verifyInterval();
    if (progressable != null) {
        progressable.progress();
    }

    DynamoDBItemWritable item = convertValueToDynamoDBItem(key, value);
    BatchWriteItemResult result = client.putBatch(tableName, item.getItem(),
        permissibleWritesPerSecond - writesPerSecond, reporter, deletionMode);

    batchSize++;
    totalItemsWritten++;

    if (result != null) {
        if (result.getConsumedCapacity() != null) {
            for (ConsumedCapacity consumedCapacity : result.getConsumedCapacity()) {
                double consumedUnits = consumedCapacity.getCapacityUnits();
                totalIOPSConsumed += consumedUnits;
            }
        }

        int unprocessedItems = 0;
        for (List<WriteRequest> requests : result.getUnprocessedItems().values()) {
            unprocessedItems += requests.size();
        }
        // Only items that were actually processed count against the write rate;
        // unprocessed items stay in the batch and are retried.
        writesPerSecond += batchSize - unprocessedItems;
        batchSize = unprocessedItems;
    }
}
Example 3
Source File: ScanSegmentWorker.java From dynamodb-import-export-tool with Apache License 2.0
@Override
public SegmentedScanResult call() {
    ScanResult result = runWithBackoff();

    final ConsumedCapacity cc = result.getConsumedCapacity();
    if (cc != null && cc.getCapacityUnits() != null) {
        lastConsumedCapacity = cc.getCapacityUnits().intValue();
    } else if (result.getScannedCount() != null && result.getCount() != null) {
        // No consumed-capacity data returned: estimate it from the
        // scanned/returned item ratio and the serialized result size.
        final boolean isConsistent = request.getConsistentRead();
        int itemSize = isConsistent
            ? BootstrapConstants.STRONGLY_CONSISTENT_READ_ITEM_SIZE
            : BootstrapConstants.EVENTUALLY_CONSISTENT_READ_ITEM_SIZE;
        lastConsumedCapacity = (result.getScannedCount() / (int) Math.max(1.0, result.getCount()))
            * (ItemSizeCalculator.calculateScanResultSizeInBytes(result) / itemSize);
    }

    if (result.getLastEvaluatedKey() != null && !result.getLastEvaluatedKey().isEmpty()) {
        hasNext = true;
        request.setExclusiveStartKey(result.getLastEvaluatedKey());
    } else {
        hasNext = false;
    }

    if (lastConsumedCapacity > 0) {
        rateLimiter.acquire(lastConsumedCapacity);
    }
    return new SegmentedScanResult(result, request.getSegment());
}
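Example 3 acquires permits from a rate limiter sized by the reported units, and Example 1 passes a similar permit count into its backoff helper, so that the next request stays within the table's provisioned throughput. Below is a minimal sketch of that pattern, assuming Guava's RateLimiter and a hypothetical budget of 100 read capacity units per second; the table name is also a placeholder.

import java.util.Map;

import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder;
import com.amazonaws.services.dynamodbv2.model.AttributeValue;
import com.amazonaws.services.dynamodbv2.model.ConsumedCapacity;
import com.amazonaws.services.dynamodbv2.model.ReturnConsumedCapacity;
import com.amazonaws.services.dynamodbv2.model.ScanRequest;
import com.amazonaws.services.dynamodbv2.model.ScanResult;
import com.google.common.util.concurrent.RateLimiter;

public class ThrottledScan {
    public static void main(String[] args) {
        AmazonDynamoDB client = AmazonDynamoDBClientBuilder.defaultClient();
        // Hypothetical budget: 100 read capacity units per second.
        RateLimiter rateLimiter = RateLimiter.create(100.0);

        ScanRequest scanRequest = new ScanRequest()
                .withTableName("MyTable") // placeholder table
                .withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL);

        Map<String, AttributeValue> lastKey = null;
        do {
            scanRequest.setExclusiveStartKey(lastKey);
            ScanResult result = client.scan(scanRequest);

            ConsumedCapacity cc = result.getConsumedCapacity();
            if (cc != null && cc.getCapacityUnits() != null) {
                // Block until permits covering the last call's consumption accumulate.
                rateLimiter.acquire(Math.max(1, cc.getCapacityUnits().intValue()));
            }
            lastKey = result.getLastEvaluatedKey();
        } while (lastKey != null && !lastKey.isEmpty());
    }
}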
Example 4
Source File: LowLevelBatchWrite.java From aws-doc-sdk-examples with Apache License 2.0
private static void writeMultipleItemsBatchWrite() {
    try {
        // Create a map for the requests in the batch
        Map<String, List<WriteRequest>> requestItems = new HashMap<String, List<WriteRequest>>();

        // Create a PutRequest for a new Forum item
        Map<String, AttributeValue> forumItem = new HashMap<String, AttributeValue>();
        forumItem.put("Name", new AttributeValue().withS("Amazon RDS"));
        forumItem.put("Threads", new AttributeValue().withN("0"));

        List<WriteRequest> forumList = new ArrayList<WriteRequest>();
        forumList.add(new WriteRequest().withPutRequest(new PutRequest().withItem(forumItem)));
        requestItems.put(table1Name, forumList);

        // Create a PutRequest for a new Thread item
        Map<String, AttributeValue> threadItem = new HashMap<String, AttributeValue>();
        threadItem.put("ForumName", new AttributeValue().withS("Amazon RDS"));
        threadItem.put("Subject", new AttributeValue().withS("Amazon RDS Thread 1"));
        threadItem.put("Message", new AttributeValue().withS("ElastiCache Thread 1 message"));
        threadItem.put("KeywordTags", new AttributeValue().withSS(Arrays.asList("cache", "in-memory")));

        List<WriteRequest> threadList = new ArrayList<WriteRequest>();
        threadList.add(new WriteRequest().withPutRequest(new PutRequest().withItem(threadItem)));

        // Create a DeleteRequest for a Thread item
        Map<String, AttributeValue> threadDeleteKey = new HashMap<String, AttributeValue>();
        threadDeleteKey.put("ForumName", new AttributeValue().withS("Amazon S3"));
        threadDeleteKey.put("Subject", new AttributeValue().withS("S3 Thread 100"));

        threadList.add(new WriteRequest().withDeleteRequest(new DeleteRequest().withKey(threadDeleteKey)));
        requestItems.put(table2Name, threadList);

        BatchWriteItemResult result;
        BatchWriteItemRequest batchWriteItemRequest = new BatchWriteItemRequest()
            .withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL);

        do {
            System.out.println("Making the request.");

            batchWriteItemRequest.withRequestItems(requestItems);
            result = client.batchWriteItem(batchWriteItemRequest);

            // Print consumed capacity units
            for (ConsumedCapacity consumedCapacity : result.getConsumedCapacity()) {
                String tableName = consumedCapacity.getTableName();
                Double consumedCapacityUnits = consumedCapacity.getCapacityUnits();
                System.out.println("Consumed capacity units for table " + tableName + ": "
                    + consumedCapacityUnits);
            }

            // Check for unprocessed keys, which can happen if provisioned
            // throughput is exceeded
            System.out.println("Unprocessed Put and Delete requests: \n" + result.getUnprocessedItems());
            requestItems = result.getUnprocessedItems();
        } while (result.getUnprocessedItems().size() > 0);
    }
    catch (AmazonServiceException ase) {
        System.err.println("Failed to write items: ");
        ase.printStackTrace(System.err);
    }
}
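Note that BatchWriteItemResult.getConsumedCapacity() returns a List<ConsumedCapacity> with one entry per table touched by the batch, which is why this example (and Example 5 below) iterates over the list instead of reading a single value as the Query and Scan results allow.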
Example 5
Source File: LowLevelBatchWrite.java From aws-dynamodb-examples with Apache License 2.0
private static void writeMultipleItemsBatchWrite() {
    try {
        // Create a map for the requests in the batch
        Map<String, List<WriteRequest>> requestItems = new HashMap<String, List<WriteRequest>>();

        // Create a PutRequest for a new Forum item
        Map<String, AttributeValue> forumItem = new HashMap<String, AttributeValue>();
        forumItem.put("Name", new AttributeValue().withS("Amazon RDS"));
        forumItem.put("Threads", new AttributeValue().withN("0"));

        List<WriteRequest> forumList = new ArrayList<WriteRequest>();
        forumList.add(new WriteRequest().withPutRequest(new PutRequest().withItem(forumItem)));
        requestItems.put(table1Name, forumList);

        // Create a PutRequest for a new Thread item
        Map<String, AttributeValue> threadItem = new HashMap<String, AttributeValue>();
        threadItem.put("ForumName", new AttributeValue().withS("Amazon RDS"));
        threadItem.put("Subject", new AttributeValue().withS("Amazon RDS Thread 1"));
        threadItem.put("Message", new AttributeValue().withS("ElastiCache Thread 1 message"));
        threadItem.put("KeywordTags", new AttributeValue().withSS(Arrays.asList("cache", "in-memory")));

        List<WriteRequest> threadList = new ArrayList<WriteRequest>();
        threadList.add(new WriteRequest().withPutRequest(new PutRequest().withItem(threadItem)));

        // Create a DeleteRequest for a Thread item
        Map<String, AttributeValue> threadDeleteKey = new HashMap<String, AttributeValue>();
        threadDeleteKey.put("ForumName", new AttributeValue().withS("Amazon S3"));
        threadDeleteKey.put("Subject", new AttributeValue().withS("S3 Thread 100"));

        threadList.add(new WriteRequest().withDeleteRequest(new DeleteRequest().withKey(threadDeleteKey)));
        requestItems.put(table2Name, threadList);

        BatchWriteItemResult result;
        BatchWriteItemRequest batchWriteItemRequest = new BatchWriteItemRequest()
            .withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL);

        do {
            System.out.println("Making the request.");

            batchWriteItemRequest.withRequestItems(requestItems);
            result = client.batchWriteItem(batchWriteItemRequest);

            // Print consumed capacity units
            for (ConsumedCapacity consumedCapacity : result.getConsumedCapacity()) {
                String tableName = consumedCapacity.getTableName();
                Double consumedCapacityUnits = consumedCapacity.getCapacityUnits();
                System.out.println("Consumed capacity units for table " + tableName + ": "
                    + consumedCapacityUnits);
            }

            // Check for unprocessed keys, which can happen if provisioned
            // throughput is exceeded
            System.out.println("Unprocessed Put and Delete requests: \n" + result.getUnprocessedItems());
            requestItems = result.getUnprocessedItems();
        } while (result.getUnprocessedItems().size() > 0);
    }
    catch (AmazonServiceException ase) {
        System.err.println("Failed to write items: ");
        ase.printStackTrace(System.err);
    }
}