com.amazonaws.services.dynamodbv2.document.DynamoDB Scala Examples
The following examples show how to use com.amazonaws.services.dynamodbv2.document.DynamoDB, the entry point of the DynamoDB document API in the AWS SDK for Java.
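Before the project examples, here is a minimal, self-contained sketch of the document API itself: build a low-level client, wrap it in DynamoDB, and write and read a single item. The endpoint, region, and table name are assumptions for a local DynamoDB instance (the table name happens to match the test table created in Example 4) and are not taken from any of the projects below.

import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder
import com.amazonaws.services.dynamodbv2.document.{DynamoDB, Item}

object DynamoDBQuickStart {
  def main(args: Array[String]): Unit = {
    // Build the low-level client; the endpoint and region assume a local DynamoDB instance.
    val client = AmazonDynamoDBClientBuilder.standard()
      .withEndpointConfiguration(new EndpointConfiguration("http://localhost:8000", "us-east-1"))
      .build()

    // Wrap it in the document-API facade.
    val dynamoDB = new DynamoDB(client)

    // "TestFruit" is an assumed, pre-existing table with a string hash key "name".
    val table = dynamoDB.getTable("TestFruit")
    table.putItem(new Item().withString("name", "cherry").withString("color", "red"))
    println(table.getItem("name", "cherry").toJSONPretty)

    dynamoDB.shutdown()
  }
}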
Example 1
Source File: DynamoBatchWriter.scala From spark-dynamodb with Apache License 2.0
package com.audienceproject.spark.dynamodb.datasource

import com.amazonaws.services.dynamodbv2.document.DynamoDB
import com.audienceproject.shaded.google.common.util.concurrent.RateLimiter
import com.audienceproject.spark.dynamodb.connector.{ColumnSchema, TableConnector}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.sources.v2.writer.{DataWriter, WriterCommitMessage}

import scala.collection.mutable.ArrayBuffer

// Buffers incoming rows and writes them to DynamoDB in batches of `batchSize`,
// throttled by a rate limiter sized to the connector's configured write limit.
class DynamoBatchWriter(batchSize: Int,
                        columnSchema: ColumnSchema,
                        connector: TableConnector,
                        client: DynamoDB)
  extends DataWriter[InternalRow] {

  protected val buffer: ArrayBuffer[InternalRow] = new ArrayBuffer[InternalRow](batchSize)
  protected val rateLimiter: RateLimiter = RateLimiter.create(connector.writeLimit)

  override def write(record: InternalRow): Unit = {
    buffer += record.copy()
    if (buffer.size == batchSize) {
      flush()
    }
  }

  override def commit(): WriterCommitMessage = {
    // Write out any remaining buffered rows before committing the task.
    flush()
    new WriterCommitMessage {}
  }

  override def abort(): Unit = {}

  protected def flush(): Unit = {
    if (buffer.nonEmpty) {
      connector.putItems(columnSchema, buffer)(client, rateLimiter)
      buffer.clear()
    }
  }
}
Example 2
Source File: DynamoBatchDeleteWriter.scala From spark-dynamodb with Apache License 2.0
package com.audienceproject.spark.dynamodb.datasource

import com.amazonaws.services.dynamodbv2.document.DynamoDB
import com.audienceproject.spark.dynamodb.connector.{ColumnSchema, TableConnector}

// Same buffering behaviour as DynamoBatchWriter, but flushes the buffered rows
// as delete requests instead of put requests.
class DynamoBatchDeleteWriter(batchSize: Int,
                              columnSchema: ColumnSchema,
                              connector: TableConnector,
                              client: DynamoDB)
  extends DynamoBatchWriter(batchSize, columnSchema, connector, client) {

  protected override def flush(): Unit = {
    if (buffer.nonEmpty) {
      connector.deleteItems(columnSchema, buffer)(client, rateLimiter)
      buffer.clear()
    }
  }
}
Example 3
Source File: RegionTest.scala From spark-dynamodb with Apache License 2.0
package com.audienceproject.spark.dynamodb

import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.dynamodbv2.{AmazonDynamoDB, AmazonDynamoDBClientBuilder}
import com.amazonaws.services.dynamodbv2.document.DynamoDB
import com.amazonaws.services.dynamodbv2.model.{AttributeDefinition, CreateTableRequest, KeySchemaElement, ProvisionedThroughput}
import com.audienceproject.spark.dynamodb.implicits._

class RegionTest extends AbstractInMemoryTest {

  test("Inserting from a local Dataset") {
    val tableName = "RegionTest1"
    dynamoDB.createTable(new CreateTableRequest()
      .withTableName(tableName)
      .withAttributeDefinitions(new AttributeDefinition("name", "S"))
      .withKeySchema(new KeySchemaElement("name", "HASH"))
      .withProvisionedThroughput(new ProvisionedThroughput(5L, 5L)))

    val client: AmazonDynamoDB = AmazonDynamoDBClientBuilder.standard()
      .withEndpointConfiguration(new EndpointConfiguration(System.getProperty("aws.dynamodb.endpoint"), "eu-central-1"))
      .build()
    val dynamoDBEU: DynamoDB = new DynamoDB(client)
    dynamoDBEU.createTable(new CreateTableRequest()
      .withTableName(tableName)
      .withAttributeDefinitions(new AttributeDefinition("name", "S"))
      .withKeySchema(new KeySchemaElement("name", "HASH"))
      .withProvisionedThroughput(new ProvisionedThroughput(5L, 5L)))

    import spark.implicits._

    val newItemsDs = spark.createDataset(Seq(
      ("lemon", "yellow", 0.1),
      ("orange", "orange", 0.2),
      ("pomegranate", "red", 0.2)
    ))
      .withColumnRenamed("_1", "name")
      .withColumnRenamed("_2", "color")
      .withColumnRenamed("_3", "weight")

    newItemsDs.write.option("region", "eu-central-1").dynamodb(tableName)

    val validationDs = spark.read.dynamodb(tableName)
    assert(validationDs.count() === 0)
    val validationDsEU = spark.read.option("region", "eu-central-1").dynamodb(tableName)
    assert(validationDsEU.count() === 3)
  }
}
Example 4
Source File: AbstractInMemoryTest.scala From spark-dynamodb with Apache License 2.0
package com.audienceproject.spark.dynamodb

import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.dynamodbv2.document.{DynamoDB, Item}
import com.amazonaws.services.dynamodbv2.local.main.ServerRunner
import com.amazonaws.services.dynamodbv2.local.server.DynamoDBProxyServer
import com.amazonaws.services.dynamodbv2.model.{AttributeDefinition, CreateTableRequest, KeySchemaElement, ProvisionedThroughput}
import com.amazonaws.services.dynamodbv2.{AmazonDynamoDB, AmazonDynamoDBClientBuilder}
import org.apache.spark.sql.SparkSession
import org.scalatest.{BeforeAndAfterAll, FunSuite}

class AbstractInMemoryTest extends FunSuite with BeforeAndAfterAll {

  val server: DynamoDBProxyServer = ServerRunner.createServerFromCommandLineArgs(Array("-inMemory"))

  val client: AmazonDynamoDB = AmazonDynamoDBClientBuilder.standard()
    .withEndpointConfiguration(new EndpointConfiguration(System.getProperty("aws.dynamodb.endpoint"), "us-east-1"))
    .build()
  val dynamoDB: DynamoDB = new DynamoDB(client)

  val spark: SparkSession = SparkSession.builder
    .master("local")
    .appName(this.getClass.getName)
    .getOrCreate()

  spark.sparkContext.setLogLevel("ERROR")

  override def beforeAll(): Unit = {
    server.start()

    // Create a test table.
    dynamoDB.createTable(new CreateTableRequest()
      .withTableName("TestFruit")
      .withAttributeDefinitions(new AttributeDefinition("name", "S"))
      .withKeySchema(new KeySchemaElement("name", "HASH"))
      .withProvisionedThroughput(new ProvisionedThroughput(5L, 5L)))

    // Populate with test data.
    val table = dynamoDB.getTable("TestFruit")
    for ((name, color, weight) <- Seq(
      ("apple", "red", 0.2), ("banana", "yellow", 0.15), ("watermelon", "red", 0.5),
      ("grape", "green", 0.01), ("pear", "green", 0.2), ("kiwi", "green", 0.05),
      ("blackberry", "purple", 0.01), ("blueberry", "purple", 0.01), ("plum", "purple", 0.1)
    )) {
      table.putItem(new Item()
        .withString("name", name)
        .withString("color", color)
        .withDouble("weightKg", weight))
    }
  }

  override def afterAll(): Unit = {
    client.deleteTable("TestFruit")
    server.stop()
  }
}
Example 5
Source File: ClientProvider.scala From reactive-nakadi with MIT License
package org.zalando.react.nakadi.commit.handlers.aws

import com.amazonaws.regions.Regions
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain
import com.amazonaws.services.dynamodbv2.document.DynamoDB
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient
import org.zalando.react.nakadi.properties.CommitProperties

trait Provider {
  def client: DynamoDB
  def leaseProperties: CommitProperties
}

class ClientProvider(override val leaseProperties: CommitProperties) extends Provider {

  private val credentialsProviderChain = new DefaultAWSCredentialsProviderChain()
  private val region = Regions.fromName(leaseProperties.awsCommitRegion)

  override val client: DynamoDB = {
    val c = new AmazonDynamoDBClient(credentialsProviderChain)
    c.configureRegion(region)
    new DynamoDB(c)
  }
}
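For context, a short sketch of how such a provider might be used to persist a commit record through the document API. The commitProperties value, table name, and attribute names here are invented for illustration and are not taken from reactive-nakadi.

// Hypothetical usage; commitProperties, the table name, and the attributes are assumptions.
val provider = new ClientProvider(commitProperties)
val leaseTable = provider.client.getTable("commit-leases")
leaseTable.putItem(new Item()
  .withPrimaryKey("partitionId", "0")
  .withString("checkpointId", "offset-123"))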
Example 6
Source File: BaseIntegrationSpec.scala From spark-dynamodb with Apache License 2.0
package com.github.traviscrawford.spark.dynamodb

import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder
import com.amazonaws.services.dynamodbv2.document.DynamoDB
import com.amazonaws.services.dynamodbv2.document.Item
import com.amazonaws.services.dynamodbv2.model._
import org.apache.spark.sql.SparkSession
import org.scalatest._

import scala.collection.JavaConversions._

trait BaseIntegrationSpec extends FlatSpec with Matchers {
  protected val spark = BaseIntegrationSpec.spark

  protected val LocalDynamoDBPort = System.getProperty("dynamodb.port")
  protected val LocalDynamoDBEndpoint = s"http://localhost:$LocalDynamoDBPort"
  protected val TestUsersTableName = "test_users"
  protected val UserIdKey = "user_id"
  protected val UsernameKey = "username"
  protected val CreatedAtKey = "__createdAt"

  override def withFixture(test: NoArgTest): Outcome = {
    initializeTestUsersTable()
    super.withFixture(test)
  }

  private def initializeTestUsersTable(): Unit = {
    val amazonDynamoDBClient = AmazonDynamoDBClientBuilder.standard()
      .withEndpointConfiguration(new EndpointConfiguration(LocalDynamoDBEndpoint, "us-west-2"))
      .build()

    val dynamodb = new DynamoDB(amazonDynamoDBClient)

    try {
      dynamodb.getTable(TestUsersTableName).delete()
    } catch {
      case _: ResourceNotFoundException => // pass
    }

    val createTableRequest = new CreateTableRequest()
      .withTableName(TestUsersTableName)
      .withAttributeDefinitions(Seq(new AttributeDefinition(UserIdKey, "N")))
      .withKeySchema(Seq(new KeySchemaElement(UserIdKey, "HASH")))
      .withProvisionedThroughput(new ProvisionedThroughput(10L, 10L))

    val table = dynamodb.createTable(createTableRequest)
    assert(table.getTableName == TestUsersTableName)

    val items = Seq(
      new Item().withNumber(UserIdKey, 1).withString(UsernameKey, "a").withNumber(CreatedAtKey, 11),
      new Item().withNumber(UserIdKey, 2).withString(UsernameKey, "b").withNumber(CreatedAtKey, 22),
      new Item().withNumber(UserIdKey, 3).withString(UsernameKey, "c").withNumber(CreatedAtKey, 33))

    items.foreach(table.putItem)
  }
}

object BaseIntegrationSpec {
  private val spark = SparkSession.builder
    .master("local")
    .appName(this.getClass.getName)
    .getOrCreate()
}