com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration Scala Examples

The following examples show how to use com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration. Each example is taken from an open-source project; the source file, project, and license are noted in the header above it.
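
As a quick orientation: an EndpointConfiguration pairs a service endpoint URL with the region to use for request signing, and is passed to an AWS SDK for Java v1 client builder instead of (never together with) withRegion. A minimal sketch, using a placeholder localhost endpoint:

import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}

object EndpointConfigurationSketch {
  // Endpoint override plus the signing region (both values are placeholders).
  private val endpointConfig = new EndpointConfiguration("http://localhost:9000", "us-east-1")

  // A builder accepts either withRegion or withEndpointConfiguration, not both.
  val s3: AmazonS3 = AmazonS3ClientBuilder.standard()
    .withEndpointConfiguration(endpointConfig)
    .build()
}
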
Example 1
Source File: RegionTest.scala    From spark-dynamodb   with Apache License 2.0
package com.audienceproject.spark.dynamodb

import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.dynamodbv2.{AmazonDynamoDB, AmazonDynamoDBClientBuilder}
import com.amazonaws.services.dynamodbv2.document.DynamoDB
import com.amazonaws.services.dynamodbv2.model.{AttributeDefinition, CreateTableRequest, KeySchemaElement, ProvisionedThroughput}
import com.audienceproject.spark.dynamodb.implicits._

class RegionTest extends AbstractInMemoryTest {

    test("Inserting from a local Dataset") {
        val tableName = "RegionTest1"
        dynamoDB.createTable(new CreateTableRequest()
            .withTableName(tableName)
            .withAttributeDefinitions(new AttributeDefinition("name", "S"))
            .withKeySchema(new KeySchemaElement("name", "HASH"))
            .withProvisionedThroughput(new ProvisionedThroughput(5L, 5L)))
        val client: AmazonDynamoDB = AmazonDynamoDBClientBuilder.standard()
            .withEndpointConfiguration(new EndpointConfiguration(System.getProperty("aws.dynamodb.endpoint"), "eu-central-1"))
            .build()
        val dynamoDBEU: DynamoDB = new DynamoDB(client)
        dynamoDBEU.createTable(new CreateTableRequest()
            .withTableName(tableName)
            .withAttributeDefinitions(new AttributeDefinition("name", "S"))
            .withKeySchema(new KeySchemaElement("name", "HASH"))
            .withProvisionedThroughput(new ProvisionedThroughput(5L, 5L)))

        import spark.implicits._

        val newItemsDs = spark.createDataset(Seq(
            ("lemon", "yellow", 0.1),
            ("orange", "orange", 0.2),
            ("pomegranate", "red", 0.2)
        ))
            .withColumnRenamed("_1", "name")
            .withColumnRenamed("_2", "color")
            .withColumnRenamed("_3", "weight")
        newItemsDs.write.option("region","eu-central-1").dynamodb(tableName)

        val validationDs = spark.read.dynamodb(tableName)
        assert(validationDs.count() === 0)
        val validationDsEU = spark.read.option("region","eu-central-1").dynamodb(tableName)
        assert(validationDsEU.count() === 3)
    }

} 
Example 2
Source File: S3Utils.scala    From elastiknn   with Apache License 2.0
package com.klibisz.elastiknn.benchmarks

import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.{AWSCredentials, AWSStaticCredentialsProvider}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}

object S3Utils {

  
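  /** Returns a client for a local MinIO server on http://localhost:9000, using MinIO's default "minioadmin" credentials, the S3 v4 signer, and path-style access. */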
  def minioClient(): AmazonS3 = {
    val endpointConfig = new EndpointConfiguration("http://localhost:9000", "us-east-1")
    val clientConfig = new ClientConfiguration()
    clientConfig.setSignerOverride("AWSS3V4SignerType")
    AmazonS3ClientBuilder.standard
      .withPathStyleAccessEnabled(true)
      .withEndpointConfiguration(endpointConfig)
      .withClientConfiguration(clientConfig)
      .withCredentials(new AWSStaticCredentialsProvider(new AWSCredentials {
        override def getAWSAccessKeyId: String = "minioadmin"
        override def getAWSSecretKey: String = "minioadmin"
      }))
      .build()
  }

  def defaultClient(): AmazonS3 = AmazonS3ClientBuilder.defaultClient()

} 
Example 3
Source File: DynamoDBClient.scala    From vinyldns   with Apache License 2.0
package vinyldns.dynamodb.repository

import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.dynamodbv2.{AmazonDynamoDBClient, AmazonDynamoDBClientBuilder}

object DynamoDBClient {

  def apply(dynamoDBDataStoreSettings: DynamoDBDataStoreSettings): AmazonDynamoDBClient = {
    val dynamoAKID = dynamoDBDataStoreSettings.key
    val dynamoSecret = dynamoDBDataStoreSettings.secret
    val dynamoEndpoint = dynamoDBDataStoreSettings.endpoint
    val dynamoRegion = dynamoDBDataStoreSettings.region

    System.getProperties.setProperty("aws.accessKeyId", dynamoAKID)
    System.getProperties.setProperty("aws.secretKey", dynamoSecret)
    val credentials = new BasicAWSCredentials(dynamoAKID, dynamoSecret)
    AmazonDynamoDBClientBuilder
      .standard()
      .withCredentials(new AWSStaticCredentialsProvider(credentials))
      .withEndpointConfiguration(new EndpointConfiguration(dynamoEndpoint, dynamoRegion))
      .build()
      .asInstanceOf[AmazonDynamoDBClient]
  }
} 
Example 4
Source File: SqsMessageQueueProvider.scala    From vinyldns   with Apache License 2.0
package vinyldns.sqs.queue
import cats.effect.IO
import cats.implicits._
import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.sqs.model.QueueDoesNotExistException
import com.amazonaws.services.sqs.{AmazonSQSAsync, AmazonSQSAsyncClientBuilder}
import org.slf4j.LoggerFactory
import pureconfig._
import pureconfig.generic.auto._
import pureconfig.module.catseffect.syntax._
import cats.effect.Blocker
import vinyldns.core.queue.{MessageQueue, MessageQueueConfig, MessageQueueProvider}

import scala.util.matching.Regex
import cats.effect.ContextShift

class SqsMessageQueueProvider extends MessageQueueProvider {
  import SqsMessageQueueProvider._

  private implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)

  def load(config: MessageQueueConfig): IO[MessageQueue] =
    for {
      settingsConfig <- Blocker[IO].use(
        ConfigSource.fromConfig(config.settings).loadF[IO, SqsMessageQueueSettings](_)
      )
      _ <- IO.fromEither(validateQueueName(settingsConfig.queueName))
      client <- setupClient(settingsConfig)
      queueUrl <- setupQueue(client, settingsConfig.queueName)
      _ <- IO(logger.error(s"Queue URL: $queueUrl\n"))
    } yield new SqsMessageQueue(queueUrl, client)

  def validateQueueName(queueName: String): Either[InvalidQueueName, String] = {

    
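    // Queue names may be 1-80 alphanumeric, hyphen or underscore characters; FIFO queue names (ending in ".fifo") do not match and are rejected.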
    val validQueueNameRegex: Regex = """^([\w\-]{1,80})$""".r

    validQueueNameRegex
      .findFirstIn(queueName)
      .map(Right(_))
      .getOrElse(Left(InvalidQueueName(queueName)))
  }

  def setupClient(sqsMessageQueueSettings: SqsMessageQueueSettings): IO[AmazonSQSAsync] =
    IO {
      logger.error(
        s"Setting up queue client with settings: " +
          s"service endpoint: ${sqsMessageQueueSettings.serviceEndpoint}; " +
          s"signing region: ${sqsMessageQueueSettings.serviceEndpoint}; " +
          s"queue name: ${sqsMessageQueueSettings.queueName}"
      )
      AmazonSQSAsyncClientBuilder
        .standard()
        .withEndpointConfiguration(
          new EndpointConfiguration(
            sqsMessageQueueSettings.serviceEndpoint,
            sqsMessageQueueSettings.signingRegion
          )
        )
        .withCredentials(
          new AWSStaticCredentialsProvider(
            new BasicAWSCredentials(
              sqsMessageQueueSettings.accessKey,
              sqsMessageQueueSettings.secretKey
            )
          )
        )
        .build()
    }

  def setupQueue(client: AmazonSQSAsync, queueName: String): IO[String] =
    // Create queue if it doesn't exist
    IO {
      logger.error(s"Setting up queue with name [$queueName]")
      client.getQueueUrl(queueName).getQueueUrl
    }.recoverWith {
      case _: QueueDoesNotExistException => IO(client.createQueue(queueName).getQueueUrl)
    }
}

object SqsMessageQueueProvider {
  final case class InvalidQueueName(queueName: String)
      extends Throwable(
        s"Invalid queue name: $queueName. Must be 1-80 alphanumeric, hyphen or underscore characters. FIFO queues " +
          "(queue names ending in \".fifo\") are not supported."
      )

  private val logger = LoggerFactory.getLogger(classOf[SqsMessageQueueProvider])
} 
Example 5
Source File: SnsNotifierProvider.scala    From vinyldns   with Apache License 2.0
package vinyldns.api.notifier.sns

import vinyldns.core.notifier.{Notifier, NotifierConfig, NotifierProvider}
import vinyldns.core.domain.membership.UserRepository
import pureconfig._
import pureconfig.generic.auto._
import pureconfig.module.catseffect.syntax._
import cats.effect.{Blocker, ContextShift, IO}
import com.amazonaws.services.sns.AmazonSNS
import org.slf4j.LoggerFactory
import com.amazonaws.services.sns.AmazonSNSClientBuilder
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.auth.AWSStaticCredentialsProvider
import com.amazonaws.auth.BasicAWSCredentials

class SnsNotifierProvider extends NotifierProvider {

  private implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)
  private val logger = LoggerFactory.getLogger(classOf[SnsNotifierProvider])

  def load(config: NotifierConfig, userRepository: UserRepository): IO[Notifier] =
    for {
      snsConfig <- Blocker[IO].use(
        ConfigSource.fromConfig(config.settings).loadF[IO, SnsNotifierConfig](_)
      )
      client <- createClient(snsConfig)
    } yield new SnsNotifier(snsConfig, client)

  def createClient(config: SnsNotifierConfig): IO[AmazonSNS] = IO {
    logger.error(
      "Setting up sns notifier client with settings: " +
        s"service endpoint: ${config.serviceEndpoint}; " +
        s"signing region: ${config.signingRegion}; " +
        s"topic name: ${config.topicArn}"
    )
    AmazonSNSClientBuilder.standard
      .withEndpointConfiguration(
        new EndpointConfiguration(config.serviceEndpoint, config.signingRegion)
      )
      .withCredentials(
        new AWSStaticCredentialsProvider(
          new BasicAWSCredentials(config.accessKey, config.secretKey)
        )
      )
      .build()
  }

} 
Example 6
Source File: BaseIntegrationSpec.scala    From spark-dynamodb   with Apache License 2.0
package com.github.traviscrawford.spark.dynamodb

import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder
import com.amazonaws.services.dynamodbv2.document.DynamoDB
import com.amazonaws.services.dynamodbv2.document.Item
import com.amazonaws.services.dynamodbv2.model._
import org.apache.spark.sql.SparkSession
import org.scalatest._

import scala.collection.JavaConversions._


trait BaseIntegrationSpec extends FlatSpec with Matchers {
  protected val spark = BaseIntegrationSpec.spark

  protected val LocalDynamoDBPort = System.getProperty("dynamodb.port")
  protected val LocalDynamoDBEndpoint = s"http://localhost:$LocalDynamoDBPort"
  protected val TestUsersTableName = "test_users"
  protected val UserIdKey = "user_id"
  protected val UsernameKey = "username"
  protected val CreatedAtKey = "__createdAt"

  override def withFixture(test: NoArgTest): Outcome = {
    initializeTestUsersTable()
    super.withFixture(test)
  }

  private def initializeTestUsersTable(): Unit = {
    val amazonDynamoDBClient = AmazonDynamoDBClientBuilder.standard()
      .withEndpointConfiguration(new EndpointConfiguration(LocalDynamoDBEndpoint, "us-west-2"))
      .build()

    val dynamodb = new DynamoDB(amazonDynamoDBClient)

    try {
      dynamodb.getTable(TestUsersTableName).delete()
    } catch {
      case _: ResourceNotFoundException => // pass
    }

    val createTableRequest = new CreateTableRequest()
      .withTableName(TestUsersTableName)
      .withAttributeDefinitions(Seq(new AttributeDefinition(UserIdKey, "N")))
      .withKeySchema(Seq(new KeySchemaElement(UserIdKey, "HASH")))
      .withProvisionedThroughput(new ProvisionedThroughput(10L, 10L))

    val table = dynamodb.createTable(createTableRequest)

    assert(table.getTableName == TestUsersTableName)

    val items = Seq(
      new Item().withNumber(UserIdKey, 1).withString(UsernameKey, "a").withNumber(CreatedAtKey, 11),
      new Item().withNumber(UserIdKey, 2).withString(UsernameKey, "b").withNumber(CreatedAtKey, 22),
      new Item().withNumber(UserIdKey, 3).withString(UsernameKey, "c").withNumber(CreatedAtKey, 33))

    items.foreach(table.putItem)
  }
}

object BaseIntegrationSpec {
  private val spark = SparkSession.builder
    .master("local")
    .appName(this.getClass.getName)
    .getOrCreate()
} 
Example 7
Source File: DynamoDBContainerSpecSupport.scala    From reactive-aws-clients   with MIT License
package com.github.j5ik2o.reactive.aws.dynamodb

import com.amazonaws.auth.{ AWSStaticCredentialsProvider, BasicAWSCredentials }
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.regions.Regions
import com.amazonaws.services.dynamodbv2.{ AmazonDynamoDB, AmazonDynamoDBClientBuilder }
import com.github.j5ik2o.reactive.aws.test.RandomPortSupport
import com.spotify.docker.client.{ DefaultDockerClient, DockerClient }
import com.whisk.docker.impl.spotify.SpotifyDockerFactory
import com.whisk.docker.scalatest.DockerTestKit
import com.whisk.docker.{
  DockerCommandExecutor,
  DockerContainer,
  DockerContainerState,
  DockerFactory,
  DockerReadyChecker
}
import org.scalatest.Suite

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }

trait DynamoDBContainerSpecSupport extends DockerTestKit with RandomPortSupport {
  this: Suite =>

  protected val connectTimeout: FiniteDuration     = 3 seconds
  protected val readTimeout: FiniteDuration        = 3 seconds
  protected val readyCheckInterval: FiniteDuration = 1 seconds

  protected val dockerClient: DockerClient =
    DefaultDockerClient
      .fromEnv()
      .connectTimeoutMillis(connectTimeout.toMillis)
      .readTimeoutMillis(readTimeout.toMillis).build()

  protected lazy val accessKeyId     = "x"
  protected lazy val secretAccessKey = "x"
  protected lazy val endpoint        = s"http://127.0.0.1:$dynamoDBPort"

  protected def dynamoDbClient: AmazonDynamoDB =
    AmazonDynamoDBClientBuilder
      .standard().withCredentials(
        new AWSStaticCredentialsProvider(
          new BasicAWSCredentials(accessKeyId, secretAccessKey)
        )
      ).withEndpointConfiguration(
        new EndpointConfiguration(endpoint, Regions.AP_NORTHEAST_1.getName)
      ).build()

  override implicit def dockerFactory: DockerFactory =
    new SpotifyDockerFactory(dockerClient)

  protected class DynamoDBDockerReadyChecker(dynamoDbClient: AmazonDynamoDB) extends DockerReadyChecker {
    override def apply(container: DockerContainerState)(
        implicit docker: DockerCommandExecutor,
        ec: ExecutionContext
    ): Future[Boolean] = Future.successful {
      try {
        dynamoDbClient.listTables(1)
        Thread.sleep(readyCheckInterval.toMillis)
        true
      } catch {
        case _: Exception =>
          Thread.sleep(readyCheckInterval.toMillis)
          false
      }
    }
  }

  protected lazy val dynamoDBPort: Int = temporaryServerPort()

  protected lazy val dynamoDBContainer: DockerContainer =
    DockerContainer("amazon/dynamodb-local:1.12.0")
      .withPorts(8000 -> Some(dynamoDBPort))
      .withReadyChecker(new DynamoDBDockerReadyChecker(dynamoDbClient))

  abstract override def dockerContainers: List[DockerContainer] =
    dynamoDBContainer :: super.dockerContainers
} 
Example 8
Source File: DynamoDBEmbeddedSpecSupport.scala    From reactive-aws-clients   with MIT License
package com.github.j5ik2o.reactive.aws.dynamodb

import java.io.File
import java.util.logging.{ Level, Logger }

import com.amazonaws.auth.{ AWSStaticCredentialsProvider, BasicAWSCredentials }
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.regions.Regions
import com.amazonaws.services.dynamodbv2.local.server.{
  DynamoDBProxyServer,
  LocalDynamoDBRequestHandler,
  LocalDynamoDBServerHandler
}
import com.amazonaws.services.dynamodbv2.{ AmazonDynamoDB, AmazonDynamoDBClientBuilder }
import com.github.j5ik2o.reactive.aws.test.RandomPortSupport
import org.scalatest.{ BeforeAndAfterAll, Suite }
import org.seasar.util.io.ResourceUtil

import scala.concurrent.duration._

@SuppressWarnings(Array("org.wartremover.warts.Null", "org.wartremover.warts.Var", "org.wartremover.warts.While"))
trait DynamoDBEmbeddedSpecSupport extends BeforeAndAfterAll with RandomPortSupport { this: Suite =>

  protected val waitIntervalForDynamoDBLocal: FiniteDuration = 500 milliseconds

  protected def sqlite4javaLibraryPath: File = new File(ResourceUtil.getBuildDir(getClass), "/../../../native-libs")

  protected val region: Regions = Regions.AP_NORTHEAST_1

  protected lazy val accessKeyId: String = "x"

  protected lazy val secretAccessKey: String = "x"

  protected lazy val dynamoDBPort: Int = temporaryServerPort()

  protected lazy val dynamoDBEndpoint: String = s"http://127.0.0.1:$dynamoDBPort"

  protected lazy val dynamoDBProxyServer: DynamoDBProxyServer = {
    System.setProperty("sqlite4java.library.path", sqlite4javaLibraryPath.toString)
    val inMemory = true
    // scalastyle:off
    val dbPath     = null
    val sharedDb   = false
    val corsParams = null
    // scalastyle:on
    new DynamoDBProxyServer(
      dynamoDBPort,
      new LocalDynamoDBServerHandler(
        new LocalDynamoDBRequestHandler(0, inMemory, dbPath, sharedDb, false),
        corsParams
      )
    )
  }

  protected lazy val dynamoDBClient: AmazonDynamoDB = {
    AmazonDynamoDBClientBuilder
      .standard().withCredentials(
        new AWSStaticCredentialsProvider(
          new BasicAWSCredentials(accessKeyId, secretAccessKey)
        )
      )
      .withEndpointConfiguration(
        new EndpointConfiguration(dynamoDBEndpoint, region.getName)
      ).build()
  }

  protected def waitDynamoDBLocal(): Unit = {
    var isWaken: Boolean = false
    while (!isWaken) {
      try {
        dynamoDBClient.listTables()
        isWaken = true
      } catch {
        case _: Exception =>
          Thread.sleep(waitIntervalForDynamoDBLocal.toMillis)
      }
    }
  }

  protected def startDynamoDBLocal(): Unit = {
    Logger.getLogger("com.almworks.sqlite4java").setLevel(Level.OFF)
    dynamoDBProxyServer.start()
  }

  protected def shutdownDynamoDBLocal(): Unit = {
    dynamoDBProxyServer.stop()
  }

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    startDynamoDBLocal()
    waitDynamoDBLocal()
  }

  override protected def afterAll(): Unit = {
    shutdownDynamoDBLocal()
    super.afterAll()
  }

} 
Example 9
Source File: AwsInitializers.scala    From graphcool-framework   with Apache License 2.0
package cool.graph.aws

import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.kinesis.{AmazonKinesis, AmazonKinesisClientBuilder}
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}
import com.amazonaws.services.sns.{AmazonSNS, AmazonSNSAsyncClientBuilder}

object AwsInitializers {
  lazy val accessKeyId = sys.env.getOrElse("AWS_ACCESS_KEY_ID", "")
  lazy val accessKey   = sys.env.getOrElse("AWS_SECRET_ACCESS_KEY", "")
  lazy val credentials = new BasicAWSCredentials(accessKeyId, accessKey)

  def createKinesis(): AmazonKinesis = {
    AmazonKinesisClientBuilder
      .standard()
      .withCredentials(new AWSStaticCredentialsProvider(credentials))
      .withEndpointConfiguration(new EndpointConfiguration(sys.env("KINESIS_ENDPOINT"), sys.env("AWS_REGION")))
      .build()
  }

  def createSns(): AmazonSNS = {
    AmazonSNSAsyncClientBuilder.standard
      .withCredentials(new AWSStaticCredentialsProvider(credentials))
      .withEndpointConfiguration(new EndpointConfiguration(sys.env("SNS_ENDPOINT"), sys.env("AWS_REGION")))
      .build
  }

  def createS3(): AmazonS3 = {
    AmazonS3ClientBuilder.standard
      .withCredentials(new AWSStaticCredentialsProvider(credentials))
      .withEndpointConfiguration(new EndpointConfiguration(sys.env("FILEUPLOAD_S3_ENDPOINT"), sys.env("AWS_REGION")))
      .build
  }

  def createExportDataS3(): AmazonS3 = {
    AmazonS3ClientBuilder.standard
      .withCredentials(new AWSStaticCredentialsProvider(credentials))
      .withEndpointConfiguration(new EndpointConfiguration(sys.env("DATA_EXPORT_S3_ENDPOINT"), sys.env("AWS_REGION")))
      .build
  }

  // This is still in the old SBS AWS account
  def createS3Fileupload(): AmazonS3 = {
    val credentials = new BasicAWSCredentials(
      sys.env("FILEUPLOAD_S3_AWS_ACCESS_KEY_ID"),
      sys.env("FILEUPLOAD_S3_AWS_SECRET_ACCESS_KEY")
    )

    AmazonS3ClientBuilder.standard
      .withCredentials(new AWSStaticCredentialsProvider(credentials))
      .withEndpointConfiguration(new EndpointConfiguration(sys.env("FILEUPLOAD_S3_ENDPOINT"), sys.env("AWS_REGION")))
      .build
  }
} 
Example 10
Source File: LambdaDeploymentAccount.scala    From graphcool-framework   with Apache License 2.0
package cool.graph.shared.functions.lambda

import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}
import cool.graph.shared.models.Project
import play.api.libs.json.Json
import software.amazon.awssdk.auth.{AwsCredentials, StaticCredentialsProvider}
import software.amazon.awssdk.regions.Region
import software.amazon.awssdk.services.lambda.LambdaAsyncClient

object LambdaDeploymentAccount {
  implicit val lambdaDeploymentBucket        = Json.format[LambdaDeploymentBucket]
  implicit val lambdaDeploymentAccountFormat = Json.format[LambdaDeploymentAccount]
}

case class LambdaDeploymentAccount(
    id: String,
    accessKeyID: String,
    accessKey: String,
    deployIamArn: String,
    deploymentEnabled: Boolean,
    deploymentBuckets: Vector[LambdaDeploymentBucket]
) {
  lazy val credentialsProvider = new StaticCredentialsProvider(new AwsCredentials(accessKeyID, accessKey))
  lazy val s3Credentials       = new BasicAWSCredentials(accessKeyID, accessKey)

  def bucket(project: Project): String = {
    val region = getRegion(project.region.toString)
    deploymentBuckets.find(_.region == region).getOrElse(sys.error("Region is not supported for lambda deployment")).deploymentBucket
  }

  def lambdaClient(project: Project): LambdaAsyncClient =
    LambdaAsyncClient
      .builder()
      .region(Region.of(project.region.toString))
      .credentialsProvider(credentialsProvider)
      .build()

  def s3Client(project: Project): AmazonS3 = {
    val region = getRegion(project.region.toString)
    AmazonS3ClientBuilder.standard
      .withCredentials(new AWSStaticCredentialsProvider(s3Credentials))
      .withEndpointConfiguration(new EndpointConfiguration(s"s3-$region.amazonaws.com", region))
      .build
  }

  private def getRegion(region: String) = Region.of(region).toString
}

case class LambdaDeploymentBucket(region: String, deploymentBucket: String) 
Example 11
Source File: AbstractInMemoryTest.scala    From spark-dynamodb   with Apache License 2.0
package com.audienceproject.spark.dynamodb

import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.dynamodbv2.document.{DynamoDB, Item}
import com.amazonaws.services.dynamodbv2.local.main.ServerRunner
import com.amazonaws.services.dynamodbv2.local.server.DynamoDBProxyServer
import com.amazonaws.services.dynamodbv2.model.{AttributeDefinition, CreateTableRequest, KeySchemaElement, ProvisionedThroughput}
import com.amazonaws.services.dynamodbv2.{AmazonDynamoDB, AmazonDynamoDBClientBuilder}
import org.apache.spark.sql.SparkSession
import org.scalatest.{BeforeAndAfterAll, FunSuite}

class AbstractInMemoryTest extends FunSuite with BeforeAndAfterAll {

    val server: DynamoDBProxyServer = ServerRunner.createServerFromCommandLineArgs(Array("-inMemory"))

    val client: AmazonDynamoDB = AmazonDynamoDBClientBuilder.standard()
        .withEndpointConfiguration(new EndpointConfiguration(System.getProperty("aws.dynamodb.endpoint"), "us-east-1"))
        .build()
    val dynamoDB: DynamoDB = new DynamoDB(client)

    val spark: SparkSession = SparkSession.builder
        .master("local")
        .appName(this.getClass.getName)
        .getOrCreate()

    spark.sparkContext.setLogLevel("ERROR")

    override def beforeAll(): Unit = {
        server.start()

        // Create a test table.
        dynamoDB.createTable(new CreateTableRequest()
            .withTableName("TestFruit")
            .withAttributeDefinitions(new AttributeDefinition("name", "S"))
            .withKeySchema(new KeySchemaElement("name", "HASH"))
            .withProvisionedThroughput(new ProvisionedThroughput(5L, 5L)))

        // Populate with test data.
        val table = dynamoDB.getTable("TestFruit")
        for ((name, color, weight) <- Seq(
            ("apple", "red", 0.2), ("banana", "yellow", 0.15), ("watermelon", "red", 0.5),
            ("grape", "green", 0.01), ("pear", "green", 0.2), ("kiwi", "green", 0.05),
            ("blackberry", "purple", 0.01), ("blueberry", "purple", 0.01), ("plum", "purple", 0.1)
        )) {
            table.putItem(new Item()
                .withString("name", name)
                .withString("color", color)
                .withDouble("weightKg", weight))
        }
    }

    override def afterAll(): Unit = {
        client.deleteTable("TestFruit")
        server.stop()
    }

} 
Example 12
Source File: AwsEndpointConfiguration.scala    From embulk-output-s3_parquet   with MIT License
package org.embulk.output.s3_parquet.aws

import java.util.Optional

import com.amazonaws.client.builder.AwsClientBuilder
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.regions.{DefaultAwsRegionProviderChain, Regions}
import org.embulk.config.{Config, ConfigDefault}
import org.embulk.output.s3_parquet.aws.AwsEndpointConfiguration.Task

import scala.util.Try

object AwsEndpointConfiguration {

  trait Task {

    @Config("endpoint")
    @ConfigDefault("null")
    def getEndpoint: Optional[String]

    @Config("region")
    @ConfigDefault("null")
    def getRegion: Optional[String]

  }

  def apply(task: Task): AwsEndpointConfiguration = {
    new AwsEndpointConfiguration(task)
  }
}

class AwsEndpointConfiguration(task: Task) {

  def configureAwsClientBuilder[S <: AwsClientBuilder[S, T], T](
      builder: AwsClientBuilder[S, T]
  ): Unit = {
    if (task.getRegion.isPresent && task.getEndpoint.isPresent) {
      val ec =
        new EndpointConfiguration(task.getEndpoint.get, task.getRegion.get)
      builder.setEndpointConfiguration(ec)
    }
    else if (task.getRegion.isPresent && !task.getEndpoint.isPresent) {
      builder.setRegion(task.getRegion.get)
    }
    else if (!task.getRegion.isPresent && task.getEndpoint.isPresent) {
      val r: String = Try(new DefaultAwsRegionProviderChain().getRegion)
        .getOrElse(Regions.DEFAULT_REGION.getName)
      val e: String = task.getEndpoint.get
      val ec = new EndpointConfiguration(e, r)
      builder.setEndpointConfiguration(ec)
    }
  }

} 
Example 13
Source File: S3Minio.scala    From openwhisk   with Apache License 2.0
package org.apache.openwhisk.core.database.s3

import java.net.ServerSocket

import actionContainers.ActionContainer
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.s3.AmazonS3ClientBuilder
import com.typesafe.config.ConfigFactory
import common.{SimpleExec, StreamLogging}
import org.scalatest.{BeforeAndAfterAll, FlatSpec}
import org.apache.openwhisk.common.{Logging, TransactionId}
import org.apache.openwhisk.core.database.{AttachmentStore, DocumentSerializer}

import scala.concurrent.duration._
import scala.reflect.ClassTag

trait S3Minio extends FlatSpec with BeforeAndAfterAll with StreamLogging {
  def makeS3Store[D <: DocumentSerializer: ClassTag]()(implicit actorSystem: ActorSystem,
                                                       logging: Logging,
                                                       materializer: ActorMaterializer): AttachmentStore = {
    val config = ConfigFactory.parseString(s"""
      |whisk {
      |     s3 {
      |      alpakka {
      |         aws {
      |           credentials {
      |             provider = static
      |             access-key-id = "$accessKey"
      |             secret-access-key = "$secretAccessKey"
      |           }
      |           region {
      |             provider = static
      |             default-region = us-west-2
      |           }
      |         }
      |         endpoint-url = "http://localhost:$port"
      |      }
      |      bucket = "$bucket"
      |      $prefixConfig
      |     }
      |}
      """.stripMargin).withFallback(ConfigFactory.load())
    S3AttachmentStoreProvider.makeStore[D](config)
  }

  private val accessKey = "TESTKEY"
  private val secretAccessKey = "TESTSECRET"
  private val port = freePort()
  private val bucket = "test-ow-travis"

  private def prefixConfig = {
    if (bucketPrefix.nonEmpty) s"prefix = $bucketPrefix" else ""
  }

  protected def bucketPrefix: String = ""

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    dockerExec(
      s"run -d -e MINIO_ACCESS_KEY=$accessKey -e MINIO_SECRET_KEY=$secretAccessKey -p $port:9000 minio/minio server /data")
    println(s"Started minio on $port")
    createTestBucket()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    val containerId = dockerExec("ps -q --filter ancestor=minio/minio")
    containerId.split("\n").map(_.trim).foreach(id => dockerExec(s"stop $id"))
    println(s"Stopped minio container")
  }

  def createTestBucket(): Unit = {
    val endpoint = new EndpointConfiguration(s"http://localhost:$port", "us-west-2")
    val client = AmazonS3ClientBuilder.standard
      .withPathStyleAccessEnabled(true)
      .withEndpointConfiguration(endpoint)
      .withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretAccessKey)))
      .build

    org.apache.openwhisk.utils.retry(client.createBucket(bucket), 6, Some(1.minute))
    println(s"Created bucket $bucket")
  }

  private def dockerExec(cmd: String): String = {
    implicit val tid: TransactionId = TransactionId.testing
    val command = s"${ActionContainer.dockerCmd} $cmd"
    val cmdSeq = command.split(" ").map(_.trim).filter(_.nonEmpty)
    val (out, err, code) = SimpleExec.syncRunCmd(cmdSeq)
    assert(code == 0, s"Error occurred for command '$command'. Exit code: $code, Error: $err")
    out
  }

  private def freePort(): Int = {
    val socket = new ServerSocket(0)
    try socket.getLocalPort
    finally if (socket != null) socket.close()
  }
} 
Example 14
Source File: AmazonKinesis.scala    From aws-kinesis-scala   with Apache License 2.0
package jp.co.bizreach.kinesis

import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.regions.Regions
import com.amazonaws.services.kinesis.{AmazonKinesisClientBuilder, AmazonKinesis => AWSKinesis}
import jp.co.bizreach.kinesis.action.PutRecordAction

object AmazonKinesis {
  def apply()(implicit region: Regions): AmazonKinesis = {
    new AmazonKinesis(AmazonKinesisClientBuilder.standard
      .withRegion(region)
      .build()) with PutRecordAction
  }
  def apply(credentials: AWSCredentialsProvider)(implicit region: Regions): AmazonKinesis = {
    new AmazonKinesis(AmazonKinesisClientBuilder.standard
      .withCredentials(credentials)
      .withRegion(region)
      .build()) with PutRecordAction
  }
  def apply(endpointConfiguration: EndpointConfiguration): AmazonKinesis = {
    new AmazonKinesis(AmazonKinesisClientBuilder.standard
      .withEndpointConfiguration(endpointConfiguration)
      .build()) with PutRecordAction
  }
  def apply(config: ClientConfiguration)(implicit region: Regions): AmazonKinesis = {
    new AmazonKinesis(AmazonKinesisClientBuilder.standard
      .withClientConfiguration(config)
      .withRegion(region)
      .build()) with PutRecordAction
  }
  def apply(config: ClientConfiguration, endpointConfiguration: EndpointConfiguration): AmazonKinesis = {
    new AmazonKinesis(AmazonKinesisClientBuilder.standard
      .withClientConfiguration(config)
      .withEndpointConfiguration(endpointConfiguration)
      .build()) with PutRecordAction
  }
  def apply(credentials: AWSCredentialsProvider, endpointConfiguration: EndpointConfiguration): AmazonKinesis = {
    new AmazonKinesis(AmazonKinesisClientBuilder.standard
      .withCredentials(credentials)
      .withEndpointConfiguration(endpointConfiguration)
      .build()) with PutRecordAction
  }
  def apply(credentials: AWSCredentialsProvider, config: ClientConfiguration)(implicit region: Regions): AmazonKinesis = {
    new AmazonKinesis(AmazonKinesisClientBuilder.standard
      .withCredentials(credentials)
      .withClientConfiguration(config)
      .withRegion(region)
      .build()) with PutRecordAction
  }
  def apply(credentials: AWSCredentialsProvider, config: ClientConfiguration, endpointConfiguration: EndpointConfiguration): AmazonKinesis = {
    new AmazonKinesis(AmazonKinesisClientBuilder.standard
      .withCredentials(credentials)
      .withClientConfiguration(config)
      .withEndpointConfiguration(endpointConfiguration)
      .build()) with PutRecordAction
  }
  def apply(client: AWSKinesis): AmazonKinesis = {
    new AmazonKinesis(client) with PutRecordAction
  }
}

class AmazonKinesis(client: AWSKinesis){
  self: PutRecordAction =>

  
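  // Sends the records, retrying failed entries; each result is Left for an entry that ultimately failed and Right for a success.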
  def putRecordsWithRetry(request: PutRecordsRequest): Seq[Either[PutRecordsResultEntry, PutRecordsResultEntry]] = {
    withPutsRetry(request.records){ entry =>
      client.putRecords(PutRecordsRequest(request.streamName, entry))
    }
  }

  def shutdown(): Unit = {
    client.shutdown()
  }
} 
Example 15
Source File: KinesisRDDWriter.scala    From aws-kinesis-scala   with Apache License 2.0
package jp.co.bizreach.kinesis.spark

import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.regions.Regions
import jp.co.bizreach.kinesis._
import org.apache.commons.codec.digest.DigestUtils
import org.apache.spark.TaskContext
import org.json4s.jackson.JsonMethods
import org.json4s.{DefaultFormats, Extraction, Formats}
import org.slf4j.LoggerFactory

class KinesisRDDWriter[A <: AnyRef](streamName: String, region: Regions,
                                    credentials: SparkAWSCredentials,
                                    chunk: Int, endpoint: Option[String]) extends Serializable {
  private val logger = LoggerFactory.getLogger(getClass)

  def write(task: TaskContext, data: Iterator[A]): Unit = {
    // send data, including retry
    def put(a: Seq[PutRecordsEntry]) = endpoint.map(e => KinesisRDDWriter.endpointClient(credentials)(e)(region))
      .getOrElse(KinesisRDDWriter.client(credentials)(region))
      .putRecordsWithRetry(PutRecordsRequest(streamName, a))
      .zipWithIndex.collect { case (Left(e), i) => a(i) -> s"${e.errorCode}: ${e.errorMessage}" }

    val errors = data.foldLeft(
      (Nil: Seq[PutRecordsEntry], Nil: Seq[(PutRecordsEntry, String)])
    ){ (z, x) =>
      val (records, failed) = z
      val payload = serialize(x)
      val entry   = PutRecordsEntry(DigestUtils.sha256Hex(payload), payload)

      // record exceeds max size
      if (entry.recordSize > recordMaxDataSize)
        records -> ((entry -> "per-record size limit") +: failed)

      // execute
      else if (records.size >= chunk || (records.map(_.recordSize).sum + entry.recordSize) >= recordsMaxDataSize)
        (entry +: Nil) -> (put(records) ++ failed)

      // buffering
      else
        (entry +: records) -> failed
    } match {
      case (Nil, e)  => e
      case (rest, e) => put(rest) ++ e
    }

    // failed records
    if (errors.nonEmpty) dump(errors)
  }

  protected def dump(errors: Seq[(PutRecordsEntry, String)]): Unit =
    logger.error(
      s"""Could not put record, count: ${errors.size}, following details:
         |${errors map { case (entry, message) => message + "\n" + new String(entry.data, "UTF-8") } mkString "\n"}
       """.stripMargin)

  protected def serialize(a: A)(implicit formats: Formats = DefaultFormats): Array[Byte] =
    JsonMethods.mapper.writeValueAsBytes(Extraction.decompose(a)(formats))

}

object KinesisRDDWriter {
  private val cache = collection.concurrent.TrieMap.empty[Regions, AmazonKinesis]


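  // Cache one client per region so repeated calls on the same JVM reuse the client.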
  private val client: SparkAWSCredentials => Regions => AmazonKinesis = {
    credentials => implicit region =>
      cache.getOrElseUpdate(region, AmazonKinesis(credentials.provider))
  }

  private val endpointClient: SparkAWSCredentials => String => Regions => AmazonKinesis = {
    credentials => endpoint => implicit region =>
      cache.getOrElseUpdate(region, AmazonKinesis(credentials.provider, new EndpointConfiguration(endpoint, region.getName)))
  }

} 
Example 16
Source File: StsSdkHelpers.scala    From rokku   with Apache License 2.0
package com.ing.wbaa.testkit.awssdk

import akka.http.scaladsl.model.Uri
import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.regions.Regions
import com.amazonaws.services.securitytoken.{AWSSecurityTokenService, AWSSecurityTokenServiceClientBuilder}


trait StsSdkHelpers {
  def getAmazonSTSSdk(uri: Uri): AWSSecurityTokenService = {
    AWSSecurityTokenServiceClientBuilder
      .standard()
      .withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials("accesskey", "secretkey")))
      .withEndpointConfiguration(new EndpointConfiguration(
        s"${uri.scheme}://${uri.authority.host.address}:${uri.authority.port}", Regions.DEFAULT_REGION.getName)
      )
      .build()
  }
} 
Example 17
Source File: S3SdkHelpers.scala    From rokku   with Apache License 2.0
package com.ing.wbaa.testkit.awssdk

import java.io.File

import akka.http.scaladsl.model.Uri.Authority
import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.{AWSCredentials, AWSStaticCredentialsProvider, BasicSessionCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.s3.transfer.TransferManagerBuilder
import com.amazonaws.services.s3.transfer.model.UploadResult
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}
import com.typesafe.config.ConfigFactory

import scala.collection.JavaConverters._


trait S3SdkHelpers {
  val awsRegion = ConfigFactory.load().getString("rokku.storage.s3.region")

  def getAmazonS3(authority: Authority,
                  credentials: AWSCredentials = new BasicSessionCredentials("accesskey", "secretkey", "token")
                 ): AmazonS3 = {
    val cliConf = new ClientConfiguration()
    cliConf.setMaxErrorRetry(1)

    AmazonS3ClientBuilder
      .standard()
      .withClientConfiguration(cliConf)
      .withCredentials(new AWSStaticCredentialsProvider(credentials))
      .withPathStyleAccessEnabled(true)
      .withEndpointConfiguration(new EndpointConfiguration(s"http://s3.localhost:${authority.port}", awsRegion))
      .build()
  }

  def getKeysInBucket(sdk: AmazonS3, bucket: String): List[String] =
    sdk
      .listObjectsV2(bucket)
      .getObjectSummaries
      .asScala.toList
      .map(_.getKey)

  def doMultiPartUpload(sdk: AmazonS3, bucket: String, file: String, key: String): UploadResult = {
    val upload = TransferManagerBuilder
      .standard()
      .withS3Client(sdk)
      .build()
      .upload(bucket, key, new File(file))

    upload.waitForUploadResult()
  }
} 
Example 18
Source File: RokkuS3ProxyVirtualHostedItTest.scala    From rokku   with Apache License 2.0
package com.ing.wbaa.rokku.proxy

import akka.http.scaladsl.model.Uri.Authority
import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.{AWSCredentials, AWSStaticCredentialsProvider, BasicSessionCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}

class RokkuS3ProxyVirtualHostedItTest extends RokkuS3ProxyItTest {

  override def getAmazonS3(authority: Authority,
                           credentials: AWSCredentials = new BasicSessionCredentials("accesskey", "secretkey", "token")
                          ): AmazonS3 = {
    val cliConf = new ClientConfiguration()
    cliConf.setMaxErrorRetry(1)

    AmazonS3ClientBuilder
      .standard()
      .withClientConfiguration(cliConf)
      .withCredentials(new AWSStaticCredentialsProvider(credentials))
      .withPathStyleAccessEnabled(false)
      .withEndpointConfiguration(new EndpointConfiguration(s"http://s3.localhost:${authority.port}", awsRegion))
      .build()
  }
} 
Example 19
Source File: SqsClientSettingsSpec.scala    From akka-stream-sqs   with Apache License 2.0
package me.snov.akka.sqs.client

import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.typesafe.config.ConfigFactory
import org.scalatest.{FlatSpec, Matchers}
import org.scalatest.mockito.MockitoSugar._

class SqsClientSettingsSpec extends FlatSpec with Matchers {

  it should "parse configuration" in {
    val conf = ConfigFactory.parseString(
      """
        reactive-sqs {
          endpoint = "http://localhost:9324/"
          region = "eu-west-1"
          queue-url = "http://localhost:9324/queue/queue1"
          max-number-of-messages = 10
          visibility-timeout = 60
          wait-time-seconds = 5
        }
      """)
      .getConfig("reactive-sqs")

    val settings = SqsSettings(
      conf,
      Some(mock[AWSCredentialsProvider]),
      Some(mock[ClientConfiguration])
    )

    settings.endpoint.get.getServiceEndpoint shouldBe "http://localhost:9324/"
    settings.endpoint.get.getSigningRegion shouldBe "eu-west-1"
    settings.queueUrl shouldBe "http://localhost:9324/queue/queue1"
    settings.maxNumberOfMessages shouldBe 10
    settings.visibilityTimeout shouldBe Some(60)
    settings.waitTimeSeconds shouldBe 5
  }

  it should "support optional parameters" in {
    val conf = ConfigFactory.parseString(
      """
        reactive-sqs {
          queue-url = "http://localhost:9324/queue/queue1"
          wait-time-seconds = 5
        }
      """)
      .getConfig("reactive-sqs")

    val settings = SqsSettings(
      conf,
      Some(mock[AWSCredentialsProvider]),
      Some(mock[ClientConfiguration])
    )

    settings.endpoint shouldBe None
    settings.queueUrl shouldBe "http://localhost:9324/queue/queue1"
    settings.maxNumberOfMessages shouldBe 10
    settings.visibilityTimeout shouldBe None
    settings.waitTimeSeconds shouldBe 5
  }
} 
Example 20
Source File: SqsSettings.scala    From akka-stream-sqs   with Apache License 2.0
package me.snov.akka.sqs.client

import akka.actor.ActorSystem
import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.{AWSCredentialsProvider, DefaultAWSCredentialsProviderChain}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.sqs.AmazonSQSAsync
import com.typesafe.config.Config
import collection.JavaConverters._

object SqsSettings {
  private val defaultAWSCredentialsProvider = new DefaultAWSCredentialsProviderChain()
  private val defaultAWSClientConfiguration = new ClientConfiguration()
  private val defaultMaxNumberOfMessages = 10
  private val defaultWaitTimeSeconds = 10
  private val configurationRoot = "akka-stream-sqs"

  def apply(
             queueUrl: String,
             maxNumberOfMessages: Int = defaultMaxNumberOfMessages,
             waitTimeSeconds: Int = defaultWaitTimeSeconds,
             awsCredentialsProvider: Option[AWSCredentialsProvider] = None,
             awsClientConfiguration: Option[ClientConfiguration] = None,
             awsClient: Option[AmazonSQSAsync] = None,
             endpoint: Option[EndpointConfiguration] = None,
             visibilityTimeout: Option[Int] = None,
             messageAttributes: Seq[String] = List()
           ): SqsSettings =
    new SqsSettings(
      queueUrl = queueUrl,
      maxNumberOfMessages = maxNumberOfMessages,
      waitTimeSeconds = waitTimeSeconds,
      awsClient = awsClient,
      endpoint = endpoint,
      awsCredentialsProvider = awsCredentialsProvider.getOrElse(defaultAWSCredentialsProvider),
      awsClientConfiguration = awsClientConfiguration.getOrElse(defaultAWSClientConfiguration),
      visibilityTimeout = visibilityTimeout,
      messageAttributes = messageAttributes
    )

  def apply(system: ActorSystem): SqsSettings = apply(system, None, None)

  def apply(
             system: ActorSystem,
             awsCredentialsProvider: Option[AWSCredentialsProvider],
             awsClientConfiguration: Option[ClientConfiguration]
           ): SqsSettings =
    apply(system.settings.config.getConfig(configurationRoot), awsCredentialsProvider, awsClientConfiguration)

  def apply(config: Config): SqsSettings = apply(config, None, None)

  def apply(
             config: Config,
             awsCredentialsProvider: Option[AWSCredentialsProvider],
             awsClientConfiguration: Option[ClientConfiguration]
           ): SqsSettings = {
    apply(
      queueUrl = config.getString("queue-url"),
      maxNumberOfMessages = if (config.hasPath("max-number-of-messages")) config.getInt("max-number-of-messages") else defaultMaxNumberOfMessages,
      waitTimeSeconds = if (config.hasPath("wait-time-seconds")) config.getInt("wait-time-seconds") else defaultWaitTimeSeconds,
      awsCredentialsProvider = awsCredentialsProvider,
      awsClientConfiguration = awsClientConfiguration,
      endpoint = if (config.hasPath("endpoint") && config.hasPath("region")) Some(new EndpointConfiguration(config.getString("endpoint"), config.getString("region"))) else None,
      visibilityTimeout = if (config.hasPath("visibility-timeout")) Some(config.getInt("visibility-timeout")) else None,
      messageAttributes = if (config.hasPath("message-attributes")) config.getStringList("message-attributes").asScala else List()
    )
  }
}

case class SqsSettings(queueUrl: String,
                       maxNumberOfMessages: Int,
                       waitTimeSeconds: Int,
                       awsClient: Option[AmazonSQSAsync],
                       endpoint: Option[EndpointConfiguration],
                       awsCredentialsProvider: AWSCredentialsProvider,
                       awsClientConfiguration: ClientConfiguration,
                       visibilityTimeout: Option[Int],
                       messageAttributes: Seq[String]) 
Example 21
Source File: KinesisTestConsumer.scala    From reactive-kinesis   with Apache License 2.0
package com.weightwatchers.reactive.kinesis.common

import java.util.Collections

import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord
import com.amazonaws.services.kinesis.model._
import com.amazonaws.services.kinesis.{AmazonKinesisAsyncClient, _}
import com.weightwatchers.reactive.kinesis.consumer.KinesisConsumer.ConsumerConf

import scala.collection.JavaConverters._
import scala.concurrent.duration.FiniteDuration

object KinesisTestConsumer {

  // The excerpt omits the client construction from the original file; a minimal stand-in
  // so the methods below compile (the endpoint and region values here are assumptions):
  private val client: AmazonKinesis = AmazonKinesisClientBuilder.standard
    .withEndpointConfiguration(new EndpointConfiguration("http://localhost:4567", "us-east-1"))
    .build()

  def retrieveRecords(streamName: String, batchSize: Int): List[String] = {
    getShards(streamName)
      .flatMap { shard =>
        val getRecordsRequest = new GetRecordsRequest
        getRecordsRequest.setShardIterator(getShardIterator(streamName, shard))
        getRecordsRequest.setLimit(batchSize)
        client.getRecords(getRecordsRequest).getRecords.asScala.toList
      }
      .flatMap { record: Record =>
        UserRecord
          .deaggregate(Collections.singletonList(record))
          .asScala
          .map { ur =>
            new String(ur.getData.array(), java.nio.charset.StandardCharsets.UTF_8)
          }
      }
  }

  private def getShardIterator(streamName: String, shard: Shard) = {
    client
      .getShardIterator(streamName, shard.getShardId, "TRIM_HORIZON")
      .getShardIterator
  }

  private def getShards(streamName: String) = {
    client
      .describeStream(streamName)
      .getStreamDescription
      .getShards
      .asScala
      .toList
  }

  def shutdown(): Unit = client.shutdown()

} 
Example 22
Source File: V1DynamoDBClientBuilderUtils.scala    From akka-persistence-dynamodb   with Apache License 2.0
package com.github.j5ik2o.akka.persistence.dynamodb.utils

import akka.actor.DynamicAccess
import com.amazonaws.auth.{ AWSStaticCredentialsProvider, BasicAWSCredentials }
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.dynamodbv2.{ AmazonDynamoDBAsyncClientBuilder, AmazonDynamoDBClientBuilder }
import com.github.j5ik2o.akka.persistence.dynamodb.client.v1.{
  MonitoringListenerProvider,
  RequestHandlersProvider,
  RequestMetricCollectorProvider
}
import com.github.j5ik2o.akka.persistence.dynamodb.config.PluginConfig

object V1DynamoDBClientBuilderUtils {

  def setupSync(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig): AmazonDynamoDBClientBuilder = {
    val cc = V1ClientConfigurationUtils.setup(dynamicAccess, pluginConfig)

    val monitoringListenerProvider     = MonitoringListenerProvider.create(dynamicAccess, pluginConfig)
    val requestHandlersProvider        = RequestHandlersProvider.create(dynamicAccess, pluginConfig)
    val requestMetricCollectorProvider = RequestMetricCollectorProvider.create(dynamicAccess, pluginConfig)

    val builder = AmazonDynamoDBClientBuilder
      .standard().withClientConfiguration(cc)

    //    builder.setClientSideMonitoringConfigurationProvider()
    monitoringListenerProvider.create.foreach { m => builder.setMonitoringListener(m) }
    builder.setRequestHandlers(requestHandlersProvider.create: _*)
    requestMetricCollectorProvider.create.foreach { r => builder.setMetricsCollector(r) }

    (pluginConfig.clientConfig.accessKeyId, pluginConfig.clientConfig.secretAccessKey) match {
      case (Some(a), Some(s)) =>
        builder.setCredentials(
          new AWSStaticCredentialsProvider(new BasicAWSCredentials(a, s))
        )
      case _ =>
    }
    (pluginConfig.clientConfig.region, pluginConfig.clientConfig.endpoint) match {
      case (Some(r), Some(e)) =>
        builder.setEndpointConfiguration(new EndpointConfiguration(e, r))
      case (Some(r), _) =>
        builder.setRegion(r)
      case _ =>
    }
    builder
  }

  def setupAsync(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig): AmazonDynamoDBAsyncClientBuilder = {
    val cc      = V1ClientConfigurationUtils.setup(dynamicAccess, pluginConfig)
    val builder = AmazonDynamoDBAsyncClientBuilder.standard().withClientConfiguration(cc)
    (pluginConfig.clientConfig.accessKeyId, pluginConfig.clientConfig.secretAccessKey) match {
      case (Some(a), Some(s)) =>
        builder.setCredentials(
          new AWSStaticCredentialsProvider(new BasicAWSCredentials(a, s))
        )
      case _ =>
    }
    (pluginConfig.clientConfig.region, pluginConfig.clientConfig.endpoint) match {
      case (Some(r), Some(e)) =>
        builder.setEndpointConfiguration(new EndpointConfiguration(e, r))
      case (Some(r), _) =>
        builder.setRegion(r)
      case _ =>
    }
    builder
  }

}