com.amazonaws.auth.AWSStaticCredentialsProvider Scala Examples
The following examples show how to use com.amazonaws.auth.AWSStaticCredentialsProvider.
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
Example 1
Source File: LambdaDeploymentAccount.scala From graphcool-framework with Apache License 2.0 | 5 votes |
package cool.graph.shared.functions.lambda

import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}
import cool.graph.shared.models.Project
import play.api.libs.json.Json
import software.amazon.awssdk.auth.{AwsCredentials, StaticCredentialsProvider}
import software.amazon.awssdk.regions.Region
import software.amazon.awssdk.services.lambda.LambdaAsyncClient

object LambdaDeploymentAccount {
  // Play-JSON formats for (de)serializing the deployment configuration.
  implicit val lambdaDeploymentBucket        = Json.format[LambdaDeploymentBucket]
  implicit val lambdaDeploymentAccountFormat = Json.format[LambdaDeploymentAccount]
}

// One AWS account used to deploy Lambda functions, plus the per-region S3
// buckets that hold deployment artifacts.
case class LambdaDeploymentAccount(
    id: String,
    accessKeyID: String,
    accessKey: String,
    deployIamArn: String,
    deploymentEnabled: Boolean,
    deploymentBuckets: Vector[LambdaDeploymentBucket]
) {
  // NOTE(review): this class deliberately mixes AWS SDK v2 credentials (for the
  // Lambda client below) with SDK v1 credentials (for the S3 client) — the two
  // SDKs do not share credential types.
  lazy val credentialsProvider = new StaticCredentialsProvider(new AwsCredentials(accessKeyID, accessKey))
  lazy val s3Credentials       = new BasicAWSCredentials(accessKeyID, accessKey)

  // Resolves the deployment bucket for the project's region; fails fast with
  // sys.error when no bucket is configured for that region.
  def bucket(project: Project): String = {
    val region = getRegion(project.region.toString)
    deploymentBuckets.find(_.region == region).getOrElse(sys.error("Region is not supported for lambda deployment")).deploymentBucket
  }

  // Async Lambda client (SDK v2) bound to the project's region.
  def lambdaClient(project: Project): LambdaAsyncClient =
    LambdaAsyncClient
      .builder()
      .region(Region.of(project.region.toString))
      .credentialsProvider(credentialsProvider)
      .build()

  // S3 client (SDK v1) pointed at the region-specific S3 endpoint.
  def s3Client(project: Project): AmazonS3 = {
    val region = getRegion(project.region.toString)
    AmazonS3ClientBuilder.standard
      .withCredentials(new AWSStaticCredentialsProvider(s3Credentials))
      .withEndpointConfiguration(new EndpointConfiguration(s"s3-$region.amazonaws.com", region))
      .build
  }

  // Normalizes a region string through the SDK v2 Region type.
  private def getRegion(region: String) = Region.of(region).toString
}

// Mapping of a region name to the deployment bucket that serves it.
case class LambdaDeploymentBucket(region: String, deploymentBucket: String)
Example 2
Source File: S3BrainTest.scala From sumobot with Apache License 2.0 | 5 votes |
package com.sumologic.sumobot.brain

import akka.actor.ActorSystem
import akka.pattern.ask
import akka.testkit.TestKit
import akka.util.Timeout
import com.amazonaws.auth.{AWSCredentials, AWSStaticCredentialsProvider}
import com.amazonaws.services.s3.AmazonS3ClientBuilder
import com.sumologic.sumobot.brain.Brain.ValueRetrieved
import com.sumologic.sumobot.core.aws.AWSAccounts
import com.sumologic.sumobot.test.annotated.SumoBotTestKit
import org.scalatest.{BeforeAndAfterAll, Matchers}

import scala.collection.JavaConverters._
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Random

// Integration test for S3Brain that talks to real S3. Tests are registered
// only when AWS credentials are found in the configuration; otherwise the
// suite is effectively empty.
class S3BrainTest
    extends SumoBotTestKit(ActorSystem("S3SingleObjectBrainTest"))
    with BeforeAndAfterAll
    with Matchers {

  lazy val credsOption = AWSAccounts.load(system.settings.config).values.headOption

  val bucketPrefix = "sumobot-s3-brain"

  // The tests here only run if there are valid AWS credentials in the configuration. Otherwise,
  // they're skipped.
  credsOption foreach { creds =>
    cleanupBuckets(creds)

    val bucket = bucketPrefix + randomString(5)

    "S3 brain" should {
      "persist the contents across reloads" in {
        implicit val timeout = Timeout(5.seconds)
        val s3Key = randomString(16)
        val firstBrain = system.actorOf(S3Brain.props(creds, bucket, s3Key))
        firstBrain ! Brain.Store("hello", "world")

        // Just wait for the next message to return.
        val firstRetrieval = firstBrain ? Brain.Retrieve("hello")
        val firstResult = Await.result(firstRetrieval, 5.seconds)
        firstResult match {
          case ValueRetrieved(k, v) =>
            k should be("hello")
            v should be("world")
          case wrongResult => fail(s"Did not get what we expected: $wrongResult")
        }

        // Since we wrote to S3, the 2nd brain should now have the value.
        val secondBrain = system.actorOf(S3Brain.props(creds, bucket, s3Key))
        val secondRetrieval = secondBrain ? Brain.Retrieve("hello")
        val secondResult = Await.result(secondRetrieval, 5.seconds)
        secondResult match {
          case ValueRetrieved(k, v) =>
            k should be("hello")
            v should be("world")
          case wrongResult => fail(s"Did not get what we expected: $wrongResult")
        }
      }
    }
  }

  // Random lowercase-alphanumeric string of the given length, used to build
  // unique bucket/key names per test run.
  private def randomString(length: Int): String = {
    val alphabet = ('a' to 'z').mkString + ('0' to '9').mkString
    (1 to length).
      map(_ => Random.nextInt(alphabet.length)).
      map(alphabet.charAt).mkString
  }

  override def afterAll() {
    TestKit.shutdownActorSystem(system)
    // Best-effort removal of any buckets this run created.
    credsOption.foreach(cleanupBuckets)
  }

  // Deletes every bucket (objects first, then the bucket) whose name starts
  // with bucketPrefix.
  def cleanupBuckets(creds: AWSCredentials): Unit = {
    val s3 = AmazonS3ClientBuilder.standard().withCredentials(new AWSStaticCredentialsProvider(creds)).build()
    s3.listBuckets().asScala.filter(_.getName.startsWith(bucketPrefix)).foreach { bucket =>
      println(s"Deleting S3 bucket ${bucket.getName}")
      val objects = s3.listObjects(bucket.getName).getObjectSummaries.asScala.map(_.getKey)
      objects.foreach { obj =>
        s3.deleteObject(bucket.getName, obj)
      }
      s3.deleteBucket(bucket.getName)
    }
  }
}
Example 3
Source File: AWSSupport.scala From sumobot with Apache License 2.0 | 5 votes |
package com.sumologic.sumobot.plugins.awssupport

import akka.actor.ActorLogging
import com.amazonaws.auth.{AWSCredentials, AWSStaticCredentialsProvider}
import com.amazonaws.services.support.AWSSupportClientBuilder
import com.amazonaws.services.support.model.{CaseDetails, DescribeCasesRequest}
import com.sumologic.sumobot.core.aws.AWSAccounts
import com.sumologic.sumobot.core.model.IncomingMessage
import com.sumologic.sumobot.plugins.BotPlugin

import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success, Try}

// Bot plugin that lists and shows AWS support cases across all configured
// AWS accounts.
class AWSSupport extends BotPlugin with ActorLogging {

  // A support case together with the id of the account it belongs to.
  case class CaseInAccount(account: String, caseDetails: CaseDetails)

  private val credentials: Map[String, AWSCredentials] =
    AWSAccounts.load(context.system.settings.config)

  // One AWSSupport client per configured account, keyed by account id.
  private val clients = credentials.map{case (id, credentials) => id -> AWSSupportClientBuilder.standard().withCredentials(new AWSStaticCredentialsProvider(credentials)).build()}

  override protected def help: String =
    s"""
       |I can tell you about AWS support tickets.
       |
       |list aws cases - List all AWS support tickets.
       |show aws case <case> - I'll show you more details about that case.
     """.stripMargin

  // NOTE: shadows the imported CaseDetails model class inside this body;
  // kept for compatibility with the existing command patterns.
  private val CaseDetails = matchText("show aws case (\\d+).*")
  private val ListCases = matchText("list aws cases")

  override protected def receiveIncomingMessage: ReceiveIncomingMessage = {
    case message@IncomingMessage(ListCases(), _, _, _, _, _, _) =>
      message.respondInFuture { msg =>
        val caseList = getAllCases.map(summary(_) + "\n").mkString("\n")
        msg.message(caseList)
      }

    case message@IncomingMessage(CaseDetails(caseId), _, _, _, _, _, _) =>
      message.respondInFuture { msg =>
        log.info(s"Looking for case $caseId")
        Try(getAllCases) match {
          case Success(cases) =>
            cases.find(_.caseDetails.getDisplayId == caseId) match {
              case None => msg.response("Not a known support case.")
              case Some(cse) => msg.message(details(cse))
            }
          // Guard against a null message before calling contains (would NPE).
          case Failure(e) if e.getMessage != null && e.getMessage.contains("Invalid case ID:") =>
            msg.response(s"Invalid case ID: $caseId")
          // FIX: the original match was non-exhaustive — any other failure
          // (throttling, auth, network) threw a MatchError inside the future.
          case Failure(e) =>
            log.error(e, s"Error while looking up case $caseId")
            msg.response(s"Error looking up case $caseId: ${e.getMessage}")
        }
      }
  }

  // Fetches unresolved and resolved cases from every account in parallel.
  // Note: the same case may appear in both lists (no de-duplication here).
  private def getAllCases: Seq[CaseInAccount] = {
    clients.toSeq.par.flatMap { tpl =>
      val client = tpl._2
      val unresolved = client.describeCases(new DescribeCasesRequest()).getCases.asScala.toList
      val resolved = client.describeCases(new DescribeCasesRequest().withIncludeResolvedCases(true)).getCases.asScala.toList
      (unresolved ++ resolved).map(CaseInAccount(tpl._1, _))
    }.seq
  }

  // One-line summary of a case for list output.
  private def summary(cia: CaseInAccount): String =
    s"*# ${cia.caseDetails.getDisplayId}:* ${cia.caseDetails.getSubject}\n" +
      s" - account: ${cia.account}, submitted by: ${cia.caseDetails.getSubmittedBy}, status: ${cia.caseDetails.getStatus}"

  // Summary plus the most recent communication on the case.
  private def details(cia: CaseInAccount): String = {
    val latest = cia.caseDetails.getRecentCommunications.getCommunications.asScala.head
    summary(cia) + "\n\n" +
      s"""
         |_${latest.getSubmittedBy} at ${latest.getTimeCreated}_
         |${latest.getBody}
     """.stripMargin
  }
}
Example 4
Source File: S3Brain.scala From sumobot with Apache License 2.0 | 5 votes |
package com.sumologic.sumobot.brain

import java.io.{ByteArrayInputStream, ByteArrayOutputStream}
import java.util.Properties

import akka.actor.{Actor, Props}
import com.amazonaws.auth.{AWSCredentials, AWSStaticCredentialsProvider}
import com.amazonaws.services.s3.{AmazonS3Client, AmazonS3ClientBuilder}
import com.amazonaws.services.s3.model.ObjectMetadata
import com.sumologic.sumobot.brain.Brain._

import scala.collection.JavaConverters._
import scala.collection.immutable

object S3Brain {
  def props(credentials: AWSCredentials,
            bucket: String,
            s3Key: String): Props = Props(classOf[S3Brain], credentials, bucket, s3Key)
}

// Key-value "brain" persisted as a java.util.Properties file in a single S3
// object. State is loaded once at actor start and rewritten on every change.
class S3Brain(credentials: AWSCredentials,
              bucket: String,
              s3Key: String) extends Actor {

  private val s3Client = AmazonS3ClientBuilder.standard()
    .withCredentials(new AWSStaticCredentialsProvider(credentials)).build

  private var brainContents: Map[String, String] = loadFromS3()

  override def receive: Receive = {
    case Store(key, value) =>
      brainContents += (key -> value)
      saveToS3(brainContents)

    case Remove(key) =>
      brainContents -= key
      saveToS3(brainContents)

    case Retrieve(key) => brainContents.get(key) match {
      case Some(value) => sender() ! ValueRetrieved(key, value)
      case None => sender() ! ValueMissing(key)
    }

    case ListValues(prefix) =>
      sender() ! ValueMap(brainContents.filter(_._1.startsWith(prefix)))
  }

  // Loads the properties object from S3, or returns an empty map when the
  // bucket does not exist yet.
  private def loadFromS3(): Map[String, String] = {
    if (s3Client.doesBucketExistV2(bucket)) {
      // FIX: the original never closed the S3Object, leaking the underlying
      // HTTP connection. Close it in a finally block.
      val s3Object = s3Client.getObject(bucket, s3Key)
      try {
        val props = new Properties()
        props.load(s3Object.getObjectContent)
        immutable.Map(props.asScala.toSeq: _*)
      } finally {
        s3Object.close()
      }
    } else {
      Map.empty
    }
  }

  // Serializes the whole map as a properties file and overwrites the S3
  // object, creating the bucket on first write.
  private def saveToS3(contents: Map[String, String]): Unit = {
    if (!s3Client.doesBucketExistV2(bucket)) {
      s3Client.createBucket(bucket)
    }

    val props = new Properties()
    props.putAll(contents.asJava)
    val out = new ByteArrayOutputStream()
    props.store(out, "")
    out.flush()
    out.close()
    val in = new ByteArrayInputStream(out.toByteArray)
    s3Client.putObject(bucket, s3Key, in, new ObjectMetadata())
  }
}
Example 5
Source File: S3Utils.scala From elastiknn with Apache License 2.0 | 5 votes |
package com.klibisz.elastiknn.benchmarks

import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.{AWSCredentials, AWSStaticCredentialsProvider}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}

// Factory helpers for the S3 clients used by the benchmarks.
object S3Utils {

  // Client wired for a local MinIO server: fixed "minioadmin" credentials,
  // V4 signing override, and path-style bucket addressing.
  def minioClient(): AmazonS3 = {
    val minioCredentials = new AWSCredentials {
      override def getAWSAccessKeyId: String = "minioadmin"
      override def getAWSSecretKey: String = "minioadmin"
    }
    val clientConfiguration = new ClientConfiguration()
    clientConfiguration.setSignerOverride("AWSS3V4SignerType")
    val localEndpoint = new EndpointConfiguration("http://localhost:9000", "us-east-1")
    AmazonS3ClientBuilder.standard
      .withCredentials(new AWSStaticCredentialsProvider(minioCredentials))
      .withClientConfiguration(clientConfiguration)
      .withEndpointConfiguration(localEndpoint)
      .withPathStyleAccessEnabled(true)
      .build()
  }

  // Stock client using the default AWS credential and region chain.
  def defaultClient(): AmazonS3 = AmazonS3ClientBuilder.defaultClient()
}
Example 6
Source File: DynamoDBClient.scala From vinyldns with Apache License 2.0 | 5 votes |
package vinyldns.dynamodb.repository

import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.dynamodbv2.{AmazonDynamoDBClient, AmazonDynamoDBClientBuilder}

// Factory for the DynamoDB client configured from data-store settings.
object DynamoDBClient {

  def apply(dynamoDBDataStoreSettings: DynamoDBDataStoreSettings): AmazonDynamoDBClient = {
    val dynamoAKID = dynamoDBDataStoreSettings.key
    val dynamoSecret = dynamoDBDataStoreSettings.secret
    val dynamoEndpoint = dynamoDBDataStoreSettings.endpoint
    val dynamoRegion = dynamoDBDataStoreSettings.region

    // NOTE(review): mutates global JVM system properties as a side effect —
    // any other SDK client in this JVM using the system-property credential
    // provider will pick these values up. Confirm this is intentional.
    System.getProperties.setProperty("aws.accessKeyId", dynamoAKID)
    System.getProperties.setProperty("aws.secretKey", dynamoSecret)

    val credentials = new BasicAWSCredentials(dynamoAKID, dynamoSecret)

    // The builder returns the AmazonDynamoDB interface; the cast assumes the
    // concrete implementation is AmazonDynamoDBClient.
    AmazonDynamoDBClientBuilder
      .standard()
      .withCredentials(new AWSStaticCredentialsProvider(credentials))
      .withEndpointConfiguration(new EndpointConfiguration(dynamoEndpoint, dynamoRegion))
      .build()
      .asInstanceOf[AmazonDynamoDBClient]
  }
}
Example 7
Source File: SqsMessageQueueProvider.scala From vinyldns with Apache License 2.0 | 5 votes |
package vinyldns.sqs.queue

import cats.effect.IO
import cats.implicits._
import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.sqs.model.QueueDoesNotExistException
import com.amazonaws.services.sqs.{AmazonSQSAsync, AmazonSQSAsyncClientBuilder}
import org.slf4j.LoggerFactory
import pureconfig._
import pureconfig.generic.auto._
import pureconfig.module.catseffect.syntax._
import cats.effect.Blocker
import vinyldns.core.queue.{MessageQueue, MessageQueueConfig, MessageQueueProvider}

import scala.util.matching.Regex
import cats.effect.ContextShift

// MessageQueueProvider backed by Amazon SQS. Validates the configured queue
// name, builds an async SQS client, and creates the queue if it is missing.
class SqsMessageQueueProvider extends MessageQueueProvider {
  import SqsMessageQueueProvider._

  private implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)

  def load(config: MessageQueueConfig): IO[MessageQueue] =
    for {
      settingsConfig <- Blocker[IO].use(
        ConfigSource.fromConfig(config.settings).loadF[IO, SqsMessageQueueSettings](_)
      )
      _ <- IO.fromEither(validateQueueName(settingsConfig.queueName))
      client <- setupClient(settingsConfig)
      queueUrl <- setupQueue(client, settingsConfig.queueName)
      // FIX: informational message was logged at ERROR level.
      _ <- IO(logger.info(s"Queue URL: $queueUrl\n"))
    } yield new SqsMessageQueue(queueUrl, client)

  // Returns the queue name when it is 1-80 word/hyphen characters, otherwise
  // an InvalidQueueName error. FIFO names (".fifo") are rejected by the regex.
  def validateQueueName(queueName: String): Either[InvalidQueueName, String] = {
    val validQueueNameRegex: Regex = """^([\w\-]{1,80})$""".r

    validQueueNameRegex
      .findFirstIn(queueName)
      .map(Right(_))
      .getOrElse(Left(InvalidQueueName(queueName)))
  }

  // Builds the async SQS client against the configured endpoint/region with
  // static credentials.
  def setupClient(sqsMessageQueueSettings: SqsMessageQueueSettings): IO[AmazonSQSAsync] =
    IO {
      // FIX: was logger.error for an informational message, and the
      // "signing region" field printed serviceEndpoint instead of signingRegion.
      logger.info(
        s"Setting up queue client with settings: " +
          s"service endpoint: ${sqsMessageQueueSettings.serviceEndpoint}; " +
          s"signing region: ${sqsMessageQueueSettings.signingRegion}; " +
          s"queue name: ${sqsMessageQueueSettings.queueName}"
      )
      AmazonSQSAsyncClientBuilder
        .standard()
        .withEndpointConfiguration(
          new EndpointConfiguration(
            sqsMessageQueueSettings.serviceEndpoint,
            sqsMessageQueueSettings.signingRegion
          )
        )
        .withCredentials(
          new AWSStaticCredentialsProvider(
            new BasicAWSCredentials(
              sqsMessageQueueSettings.accessKey,
              sqsMessageQueueSettings.secretKey
            )
          )
        )
        .build()
    }

  // Returns the queue URL, creating the queue when it does not exist yet.
  def setupQueue(client: AmazonSQSAsync, queueName: String): IO[String] =
    // Create queue if it doesn't exist
    IO {
      // FIX: informational message was logged at ERROR level.
      logger.info(s"Setting up queue with name [$queueName]")
      client.getQueueUrl(queueName).getQueueUrl
    }.recoverWith {
      case _: QueueDoesNotExistException => IO(client.createQueue(queueName).getQueueUrl)
    }
}

object SqsMessageQueueProvider {
  final case class InvalidQueueName(queueName: String)
      extends Throwable(
        s"Invalid queue name: $queueName. Must be 1-80 alphanumeric, hyphen or underscore characters. FIFO queues " +
          "(queue names ending in \".fifo\") are not supported."
      )

  private val logger = LoggerFactory.getLogger(classOf[SqsMessageQueueProvider])
}
Example 8
Source File: SnsNotifierProvider.scala From vinyldns with Apache License 2.0 | 5 votes |
package vinyldns.api.notifier.sns

import vinyldns.core.notifier.{Notifier, NotifierConfig, NotifierProvider}
import vinyldns.core.domain.membership.UserRepository
import pureconfig._
import pureconfig.generic.auto._
import pureconfig.module.catseffect.syntax._
import cats.effect.{Blocker, ContextShift, IO}
import com.amazonaws.services.sns.AmazonSNS
import org.slf4j.LoggerFactory
import com.amazonaws.services.sns.AmazonSNSClientBuilder
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.auth.AWSStaticCredentialsProvider
import com.amazonaws.auth.BasicAWSCredentials

// NotifierProvider backed by Amazon SNS: loads SnsNotifierConfig from the
// notifier settings and builds the SNS client for it.
class SnsNotifierProvider extends NotifierProvider {

  private implicit val cs: ContextShift[IO] =
    IO.contextShift(scala.concurrent.ExecutionContext.global)
  private val logger = LoggerFactory.getLogger(classOf[SnsNotifierProvider])

  def load(config: NotifierConfig, userRepository: UserRepository): IO[Notifier] =
    for {
      snsConfig <- Blocker[IO].use(
        ConfigSource.fromConfig(config.settings).loadF[IO, SnsNotifierConfig](_)
      )
      client <- createClient(snsConfig)
    } yield new SnsNotifier(snsConfig, client)

  // Builds an SNS client against the configured endpoint/region with static
  // credentials from the notifier config.
  def createClient(config: SnsNotifierConfig): IO[AmazonSNS] = IO {
    // FIX: this is an informational setup message; it was logged at ERROR
    // level, polluting error monitoring on every startup.
    logger.info(
      "Setting up sns notifier client with settings: " +
        s"service endpoint: ${config.serviceEndpoint}; " +
        s"signing region: ${config.signingRegion}; " +
        s"topic name: ${config.topicArn}"
    )
    AmazonSNSClientBuilder.standard
      .withEndpointConfiguration(
        new EndpointConfiguration(config.serviceEndpoint, config.signingRegion)
      )
      .withCredentials(
        new AWSStaticCredentialsProvider(
          new BasicAWSCredentials(config.accessKey, config.secretKey)
        )
      )
      .build()
  }
}
Example 9
Source File: DynamoDBContainerSpecSupport.scala From reactive-aws-clients with MIT License | 5 votes |
package com.github.j5ik2o.reactive.aws.dynamodb

import com.amazonaws.auth.{ AWSStaticCredentialsProvider, BasicAWSCredentials }
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.regions.Regions
import com.amazonaws.services.dynamodbv2.{ AmazonDynamoDB, AmazonDynamoDBClientBuilder }
import com.github.j5ik2o.reactive.aws.test.RandomPortSupport
import com.spotify.docker.client.{ DefaultDockerClient, DockerClient }
import com.whisk.docker.impl.spotify.SpotifyDockerFactory
import com.whisk.docker.scalatest.DockerTestKit
import com.whisk.docker.{
  DockerCommandExecutor,
  DockerContainer,
  DockerContainerState,
  DockerFactory,
  DockerReadyChecker
}
import org.scalatest.Suite

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }

// Test mixin that starts DynamoDB Local in a Docker container (on a random
// port) and exposes a client pointed at it. Mix into a Suite that uses
// DockerTestKit for container lifecycle management.
trait DynamoDBContainerSpecSupport extends DockerTestKit with RandomPortSupport {
  this: Suite =>

  protected val connectTimeout: FiniteDuration     = 3 seconds
  protected val readTimeout: FiniteDuration        = 3 seconds
  protected val readyCheckInterval: FiniteDuration = 1 seconds

  protected val dockerClient: DockerClient =
    DefaultDockerClient
      .fromEnv()
      .connectTimeoutMillis(connectTimeout.toMillis)
      .readTimeoutMillis(readTimeout.toMillis).build()

  // DynamoDB Local accepts any credentials; "x"/"x" are placeholders.
  protected lazy val accessKeyId     = "x"
  protected lazy val secretAccessKey = "x"
  protected lazy val endpoint        = s"http://127.0.0.1:$dynamoDBPort"

  protected def dynamoDbClient: AmazonDynamoDB =
    AmazonDynamoDBClientBuilder
      .standard().withCredentials(
        new AWSStaticCredentialsProvider(
          new BasicAWSCredentials(accessKeyId, secretAccessKey)
        )
      ).withEndpointConfiguration(
        new EndpointConfiguration(endpoint, Regions.AP_NORTHEAST_1.getName)
      ).build()

  override implicit def dockerFactory: DockerFactory =
    new SpotifyDockerFactory(dockerClient)

  // Ready checker that polls listTables until the container answers; sleeps
  // readyCheckInterval between attempts in both the success and failure paths.
  protected class DynamoDBDockerReadyChecker(dynamoDbClient: AmazonDynamoDB) extends DockerReadyChecker {
    override def apply(container: DockerContainerState)(
        implicit docker: DockerCommandExecutor,
        ec: ExecutionContext
    ): Future[Boolean] = Future.successful {
      try {
        dynamoDbClient.listTables(1)
        Thread.sleep(readyCheckInterval.toMillis)
        true
      } catch {
        case _: Exception =>
          Thread.sleep(readyCheckInterval.toMillis)
          false
      }
    }
  }

  // Lazily picked so each suite run binds a fresh host port.
  protected lazy val dynamoDBPort: Int = temporaryServerPort()

  protected lazy val dynamoDBContainer: DockerContainer =
    DockerContainer("amazon/dynamodb-local:1.12.0")
      .withPorts(8000 -> Some(dynamoDBPort))
      .withReadyChecker(new DynamoDBDockerReadyChecker(dynamoDbClient))

  abstract override def dockerContainers: List[DockerContainer] =
    dynamoDBContainer :: super.dockerContainers
}
Example 10
Source File: DynamoDBEmbeddedSpecSupport.scala From reactive-aws-clients with MIT License | 5 votes |
package com.github.j5ik2o.reactive.aws.dynamodb

import java.io.File
import java.util.logging.{ Level, Logger }

import com.amazonaws.auth.{ AWSStaticCredentialsProvider, BasicAWSCredentials }
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.regions.Regions
import com.amazonaws.services.dynamodbv2.local.server.{
  DynamoDBProxyServer,
  LocalDynamoDBRequestHandler,
  LocalDynamoDBServerHandler
}
import com.amazonaws.services.dynamodbv2.{ AmazonDynamoDB, AmazonDynamoDBClientBuilder }
import com.github.j5ik2o.reactive.aws.test.RandomPortSupport
import org.scalatest.{ BeforeAndAfterAll, Suite }
import org.seasar.util.io.ResourceUtil

import scala.concurrent.duration._

// Test mixin that runs an in-memory DynamoDB Local (embedded, no Docker) on a
// random port for the duration of the suite.
@SuppressWarnings(Array("org.wartremover.warts.Null", "org.wartremover.warts.Var", "org.wartremover.warts.While"))
trait DynamoDBEmbeddedSpecSupport extends BeforeAndAfterAll with RandomPortSupport {
  this: Suite =>

  protected val waitIntervalForDynamoDBLocal: FiniteDuration = 500 milliseconds

  // sqlite4java native libraries are resolved relative to the build dir.
  protected def sqlite4javaLibraryPath: File = new File(ResourceUtil.getBuildDir(getClass), "/../../../native-libs")

  protected val region: Regions = Regions.AP_NORTHEAST_1

  // DynamoDB Local accepts any credentials; "x"/"x" are placeholders.
  protected lazy val accessKeyId: String     = "x"
  protected lazy val secretAccessKey: String = "x"

  protected lazy val dynamoDBPort: Int          = temporaryServerPort()
  protected lazy val dynamoDBEndpoint: String   = s"http://127.0.0.1:$dynamoDBPort"

  protected lazy val dynamoDBProxyServer: DynamoDBProxyServer = {
    System.setProperty("sqlite4java.library.path", sqlite4javaLibraryPath.toString)
    val inMemory = true
    // scalastyle:off
    val dbPath     = null
    val sharedDb   = false
    val corsParams = null
    // scalastyle:on
    new DynamoDBProxyServer(
      dynamoDBPort,
      new LocalDynamoDBServerHandler(
        new LocalDynamoDBRequestHandler(0, inMemory, dbPath, sharedDb, false),
        corsParams
      )
    )
  }

  protected lazy val dynamoDBClient: AmazonDynamoDB = {
    AmazonDynamoDBClientBuilder
      .standard().withCredentials(
        new AWSStaticCredentialsProvider(
          new BasicAWSCredentials(accessKeyId, secretAccessKey)
        )
      )
      .withEndpointConfiguration(
        new EndpointConfiguration(dynamoDBEndpoint, region.getName)
      ).build()
  }

  // Polls listTables until the embedded server responds; blocks the calling
  // thread between attempts.
  protected def waitDynamoDBLocal(): Unit = {
    var isWaken: Boolean = false
    while (!isWaken) {
      try {
        dynamoDBClient.listTables()
        isWaken = true
      } catch {
        case _: Exception =>
          Thread.sleep(waitIntervalForDynamoDBLocal.toMillis)
      }
    }
  }

  protected def startDynamoDBLocal(): Unit = {
    // Silence sqlite4java's noisy java.util.logging output.
    Logger.getLogger("com.almworks.sqlite4java").setLevel(Level.OFF)
    dynamoDBProxyServer.start()
  }

  protected def shutdownDynamoDBLocal(): Unit = {
    dynamoDBProxyServer.stop()
  }

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    startDynamoDBLocal()
    waitDynamoDBLocal()
  }

  override protected def afterAll(): Unit = {
    shutdownDynamoDBLocal()
    super.afterAll()
  }
}
Example 11
Source File: AwsInitializers.scala From graphcool-framework with Apache License 2.0 | 5 votes |
package cool.graph.aws

import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.kinesis.{AmazonKinesis, AmazonKinesisClientBuilder}
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}
import com.amazonaws.services.sns.{AmazonSNS, AmazonSNSAsyncClientBuilder}

// Factories for the AWS clients used across the service. Endpoints come from
// environment variables; credentials default to AWS_ACCESS_KEY_ID /
// AWS_SECRET_ACCESS_KEY (empty strings when unset).
object AwsInitializers {
  lazy val accessKeyId = sys.env.getOrElse("AWS_ACCESS_KEY_ID", "")
  lazy val accessKey   = sys.env.getOrElse("AWS_SECRET_ACCESS_KEY", "")
  lazy val credentials = new BasicAWSCredentials(accessKeyId, accessKey)

  // Kinesis client against KINESIS_ENDPOINT in AWS_REGION.
  def createKinesis(): AmazonKinesis = {
    AmazonKinesisClientBuilder
      .standard()
      .withCredentials(new AWSStaticCredentialsProvider(credentials))
      .withEndpointConfiguration(new EndpointConfiguration(sys.env("KINESIS_ENDPOINT"), sys.env("AWS_REGION")))
      .build()
  }

  // Async SNS client against SNS_ENDPOINT in AWS_REGION.
  def createSns(): AmazonSNS = {
    AmazonSNSAsyncClientBuilder.standard
      .withCredentials(new AWSStaticCredentialsProvider(credentials))
      .withEndpointConfiguration(new EndpointConfiguration(sys.env("SNS_ENDPOINT"), sys.env("AWS_REGION")))
      .build
  }

  def createS3(): AmazonS3 =
    createS3ClientForEndpoint(sys.env("FILEUPLOAD_S3_ENDPOINT"), credentials)

  def createExportDataS3(): AmazonS3 =
    createS3ClientForEndpoint(sys.env("DATA_EXPORT_S3_ENDPOINT"), credentials)

  // This is still in the old SBS AWS account
  def createS3Fileupload(): AmazonS3 = {
    val credentials = new BasicAWSCredentials(
      sys.env("FILEUPLOAD_S3_AWS_ACCESS_KEY_ID"),
      sys.env("FILEUPLOAD_S3_AWS_SECRET_ACCESS_KEY")
    )
    createS3ClientForEndpoint(sys.env("FILEUPLOAD_S3_ENDPOINT"), credentials)
  }

  // Shared S3 builder: the three public S3 factories above were copy-paste
  // duplicates differing only in endpoint and credentials.
  private def createS3ClientForEndpoint(endpoint: String, s3Credentials: BasicAWSCredentials): AmazonS3 = {
    AmazonS3ClientBuilder.standard
      .withCredentials(new AWSStaticCredentialsProvider(s3Credentials))
      .withEndpointConfiguration(new EndpointConfiguration(endpoint, sys.env("AWS_REGION")))
      .build
  }
}
Example 12
Source File: V1DynamoDBClientBuilderUtils.scala From akka-persistence-dynamodb with Apache License 2.0 | 5 votes |
package com.github.j5ik2o.akka.persistence.dynamodb.utils

import akka.actor.DynamicAccess
import com.amazonaws.auth.{ AWSStaticCredentialsProvider, BasicAWSCredentials }
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.dynamodbv2.{ AmazonDynamoDBAsyncClientBuilder, AmazonDynamoDBClientBuilder }
import com.github.j5ik2o.akka.persistence.dynamodb.client.v1.{
  MonitoringListenerProvider,
  RequestHandlersProvider,
  RequestMetricCollectorProvider
}
import com.github.j5ik2o.akka.persistence.dynamodb.config.PluginConfig

// Builds AWS SDK v1 DynamoDB client builders (sync and async) from the
// plugin configuration.
object V1DynamoDBClientBuilderUtils {

  // Synchronous client builder: applies client configuration, optional
  // monitoring listener, request handlers, metrics collector, static
  // credentials (when both key parts are set), and endpoint/region.
  def setupSync(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig): AmazonDynamoDBClientBuilder = {
    val cc                             = V1ClientConfigurationUtils.setup(dynamicAccess, pluginConfig)
    val monitoringListenerProvider     = MonitoringListenerProvider.create(dynamicAccess, pluginConfig)
    val requestHandlersProvider        = RequestHandlersProvider.create(dynamicAccess, pluginConfig)
    val requestMetricCollectorProvider = RequestMetricCollectorProvider.create(dynamicAccess, pluginConfig)

    val builder = AmazonDynamoDBClientBuilder
      .standard().withClientConfiguration(cc)
    // builder.setClientSideMonitoringConfigurationProvider()
    monitoringListenerProvider.create.foreach { m => builder.setMonitoringListener(m) }
    builder.setRequestHandlers(requestHandlersProvider.create: _*)
    requestMetricCollectorProvider.create.foreach { r => builder.setMetricsCollector(r) }
    // Static credentials only when both access key id and secret are configured;
    // otherwise the SDK's default provider chain applies.
    (pluginConfig.clientConfig.accessKeyId, pluginConfig.clientConfig.secretAccessKey) match {
      case (Some(a), Some(s)) =>
        builder.setCredentials(
          new AWSStaticCredentialsProvider(new BasicAWSCredentials(a, s))
        )
      case _ =>
    }
    // Endpoint override takes precedence over a plain region setting.
    (pluginConfig.clientConfig.region, pluginConfig.clientConfig.endpoint) match {
      case (Some(r), Some(e)) =>
        builder.setEndpointConfiguration(new EndpointConfiguration(e, r))
      case (Some(r), _) =>
        builder.setRegion(r)
      case _ =>
    }
    builder
  }

  // Asynchronous client builder. NOTE(review): unlike setupSync, this does not
  // wire monitoring listener, request handlers, or metrics collector — confirm
  // whether that asymmetry is intentional or an omission.
  def setupAsync(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig): AmazonDynamoDBAsyncClientBuilder = {
    val cc      = V1ClientConfigurationUtils.setup(dynamicAccess, pluginConfig)
    val builder = AmazonDynamoDBAsyncClientBuilder.standard().withClientConfiguration(cc)
    (pluginConfig.clientConfig.accessKeyId, pluginConfig.clientConfig.secretAccessKey) match {
      case (Some(a), Some(s)) =>
        builder.setCredentials(
          new AWSStaticCredentialsProvider(new BasicAWSCredentials(a, s))
        )
      case _ =>
    }
    (pluginConfig.clientConfig.region, pluginConfig.clientConfig.endpoint) match {
      case (Some(r), Some(e)) =>
        builder.setEndpointConfiguration(new EndpointConfiguration(e, r))
      case (Some(r), _) =>
        builder.setRegion(r)
      case _ =>
    }
    builder
  }
}
Example 13
Source File: FileManagerS3Mock.scala From HAT2.0 with GNU Affero General Public License v3.0 | 5 votes |
package org.hatdex.hat.api.service

import com.amazonaws.auth.{ AWSStaticCredentialsProvider, BasicAWSCredentials }
import com.amazonaws.services.s3.model.ObjectMetadata
import com.amazonaws.services.s3.{ AmazonS3, AmazonS3ClientBuilder }
import org.specs2.mock.Mockito

import scala.concurrent.duration._

// Test fixture: a Mockito spy over a real S3 client with canned responses for
// getObjectMetadata and deleteObject, so file-manager tests run without AWS.
case class FileManagerS3Mock() extends Mockito {
  val s3Configuration = AwsS3Configuration("hat-storage-test", "testAwsAccessKey", "testAwsSecret", "eu-west-1", 5.minutes)
  private val awsCreds: BasicAWSCredentials = new BasicAWSCredentials(s3Configuration.accessKeyId, s3Configuration.secretKey)
  // Spy (not pure mock): unstubbed calls fall through to the real client.
  val mockS3client: AmazonS3 = spy(AmazonS3ClientBuilder.standard()
    .withRegion("eu-west-1")
    .withCredentials(new AWSStaticCredentialsProvider(awsCreds))
    .build())

  private val s3ObjectMetadata = new ObjectMetadata()
  s3ObjectMetadata.setContentLength(123456L)
  // Stub the two calls the tests exercise; doReturn/doNothing avoid invoking
  // the real methods on the spy while stubbing.
  doReturn(s3ObjectMetadata).when(mockS3client).getObjectMetadata("hat-storage-test", "hat.hubofallthings.net/testFile")
  doNothing.when(mockS3client).deleteObject("hat-storage-test", "hat.hubofallthings.net/deleteFile")
}
Example 14
Source File: FileManagerModule.scala From HAT2.0 with GNU Affero General Public License v3.0 | 5 votes |
package org.hatdex.hat.modules

import com.amazonaws.auth.{ AWSStaticCredentialsProvider, BasicAWSCredentials }
import com.amazonaws.services.s3.{ AmazonS3, AmazonS3ClientBuilder }
import com.google.inject.name.Named
import com.google.inject.{ AbstractModule, Provides }
import net.codingwell.scalaguice.ScalaModule
import org.hatdex.hat.api.service.{ AwsS3Configuration, FileManager, FileManagerS3 }
import play.api.Configuration
import play.api.libs.concurrent.AkkaGuiceSupport

// Guice module wiring the file-manager service and its S3 client.
class FileManagerModule extends AbstractModule with ScalaModule with AkkaGuiceSupport {

  // Bind the FileManager interface to its S3-backed implementation.
  override def configure() = {
    bind[FileManager].to[FileManagerS3]
    ()
  }

  // Reads the S3 configuration block from application configuration.
  @Provides
  def provideCookieAuthenticatorService(configuration: Configuration): AwsS3Configuration = {
    import AwsS3Configuration.configLoader
    configuration.get[AwsS3Configuration]("storage.s3Configuration")
  }

  // Provides the named S3 client built from the injected configuration.
  @Provides @Named("s3client-file-manager")
  def provides3Client(configuration: AwsS3Configuration): AmazonS3 = {
    val staticCredentials =
      new AWSStaticCredentialsProvider(
        new BasicAWSCredentials(configuration.accessKeyId, configuration.secretKey))
    val clientBuilder = AmazonS3ClientBuilder.standard()
    clientBuilder
      .withRegion(configuration.region)
      .withCredentials(staticCredentials)
      .build()
  }
}
Example 15
Source File: S3Minio.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.database.s3

import java.net.ServerSocket

import actionContainers.ActionContainer
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.s3.AmazonS3ClientBuilder
import com.typesafe.config.ConfigFactory
import common.{SimpleExec, StreamLogging}
import org.scalatest.{BeforeAndAfterAll, FlatSpec}
import org.apache.openwhisk.common.{Logging, TransactionId}
import org.apache.openwhisk.core.database.{AttachmentStore, DocumentSerializer}

import scala.concurrent.duration._
import scala.reflect.ClassTag

/**
 * Test mixin that runs a Minio container via docker and builds an
 * S3-backed AttachmentStore pointed at it.
 */
trait S3Minio extends FlatSpec with BeforeAndAfterAll with StreamLogging {

  /** Creates an AttachmentStore configured against the local Minio endpoint. */
  def makeS3Store[D <: DocumentSerializer: ClassTag]()(implicit actorSystem: ActorSystem,
                                                       logging: Logging,
                                                       materializer: ActorMaterializer): AttachmentStore = {
    // Static credentials and endpoint override so alpakka-s3 talks to Minio,
    // falling back to the default application config for everything else.
    val config = ConfigFactory.parseString(s"""
      |whisk {
      |  s3 {
      |    alpakka {
      |      aws {
      |        credentials {
      |          provider = static
      |          access-key-id = "$accessKey"
      |          secret-access-key = "$secretAccessKey"
      |        }
      |        region {
      |          provider = static
      |          default-region = us-west-2
      |        }
      |      }
      |      endpoint-url = "http://localhost:$port"
      |    }
      |    bucket = "$bucket"
      |    $prefixConfig
      |  }
      |}
      """.stripMargin).withFallback(ConfigFactory.load())
    S3AttachmentStoreProvider.makeStore[D](config)
  }

  private val accessKey = "TESTKEY"
  private val secretAccessKey = "TESTSECRET"
  private val port = freePort()
  private val bucket = "test-ow-travis"

  // Optional key prefix line spliced into the HOCON above; empty when unset.
  private def prefixConfig: String =
    if (bucketPrefix.isEmpty) "" else s"prefix = $bucketPrefix"

  protected def bucketPrefix: String = ""

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    dockerExec(
      s"run -d -e MINIO_ACCESS_KEY=$accessKey -e MINIO_SECRET_KEY=$secretAccessKey -p $port:9000 minio/minio server /data")
    println(s"Started minio on $port")
    createTestBucket()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    // Stop every container spawned from the minio image.
    val runningIds = dockerExec("ps -q --filter ancestor=minio/minio").split("\n").map(_.trim)
    runningIds.foreach(id => dockerExec(s"stop $id"))
    println("Stopped minio container")
  }

  /** Creates the test bucket, retrying while Minio finishes booting. */
  def createTestBucket(): Unit = {
    val endpoint = new EndpointConfiguration(s"http://localhost:$port", "us-west-2")
    val client = AmazonS3ClientBuilder.standard
      .withPathStyleAccessEnabled(true)
      .withEndpointConfiguration(endpoint)
      .withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretAccessKey)))
      .build

    org.apache.openwhisk.utils.retry(client.createBucket(bucket), 6, Some(1.minute))
    println(s"Created bucket $bucket")
  }

  // Runs a docker subcommand and returns its stdout; fails the test on a
  // non-zero exit code.
  private def dockerExec(cmd: String): String = {
    implicit val tid: TransactionId = TransactionId.testing
    val command = s"${ActionContainer.dockerCmd} $cmd"
    val argv = command.split(" ").map(_.trim).filter(_.nonEmpty)
    val (out, err, code) = SimpleExec.syncRunCmd(argv)
    assert(code == 0, s"Error occurred for command '$command'. Exit code: $code, Error: $err")
    out
  }

  // Asks the OS for an ephemeral port by binding and immediately releasing it.
  // NOTE(review): the port could in principle be reclaimed before Minio binds it.
  private def freePort(): Int = {
    val probe = new ServerSocket(0)
    try probe.getLocalPort
    finally probe.close()
  }
}
Example 16
Source File: StsSdkHelpers.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.testkit.awssdk

import akka.http.scaladsl.model.Uri
import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.regions.Regions
import com.amazonaws.services.securitytoken.{AWSSecurityTokenService, AWSSecurityTokenServiceClientBuilder}

/**
 * Test helper for constructing an STS SDK client against an arbitrary endpoint.
 */
trait StsSdkHelpers {

  /** Builds an STS client with fixed test credentials pointed at `uri`. */
  def getAmazonSTSSdk(uri: Uri): AWSSecurityTokenService = {
    val staticCredentials =
      new AWSStaticCredentialsProvider(new BasicAWSCredentials("accesskey", "secretkey"))
    val endpoint = new EndpointConfiguration(
      s"${uri.scheme}://${uri.authority.host.address}:${uri.authority.port}",
      Regions.DEFAULT_REGION.getName)

    AWSSecurityTokenServiceClientBuilder
      .standard()
      .withCredentials(staticCredentials)
      .withEndpointConfiguration(endpoint)
      .build()
  }
}
Example 17
Source File: S3SdkHelpers.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.testkit.awssdk

import java.io.File

import akka.http.scaladsl.model.Uri.Authority
import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.{AWSCredentials, AWSStaticCredentialsProvider, BasicSessionCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.s3.transfer.TransferManagerBuilder
import com.amazonaws.services.s3.transfer.model.UploadResult
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}
import com.typesafe.config.ConfigFactory

import scala.collection.JavaConverters._

/**
 * Test helpers for talking to a local S3-compatible endpoint with the AWS SDK.
 */
trait S3SdkHelpers {
  // Region read once from the rokku test configuration.
  val awsRegion = ConfigFactory.load().getString("rokku.storage.s3.region")

  /**
   * Builds a path-style S3 client against the proxy under test.
   * Retries are capped at one to keep failing tests fast.
   */
  def getAmazonS3(authority: Authority,
                  credentials: AWSCredentials = new BasicSessionCredentials("accesskey", "secretkey", "token")
                 ): AmazonS3 = {
    val clientConfig = new ClientConfiguration()
    clientConfig.setMaxErrorRetry(1)

    AmazonS3ClientBuilder
      .standard()
      .withClientConfiguration(clientConfig)
      .withCredentials(new AWSStaticCredentialsProvider(credentials))
      .withPathStyleAccessEnabled(true)
      .withEndpointConfiguration(new EndpointConfiguration(s"http://s3.localhost:${authority.port}", awsRegion))
      .build()
  }

  /** Lists all object keys currently present in `bucket`. */
  def getKeysInBucket(sdk: AmazonS3, bucket: String): List[String] = {
    val summaries = sdk.listObjectsV2(bucket).getObjectSummaries.asScala.toList
    summaries.map(_.getKey)
  }

  /** Uploads `file` under `key` via the TransferManager and waits for completion. */
  def doMultiPartUpload(sdk: AmazonS3, bucket: String, file: String, key: String): UploadResult = {
    val transferManager = TransferManagerBuilder
      .standard()
      .withS3Client(sdk)
      .build()
    val upload = transferManager.upload(bucket, key, new File(file))
    upload.waitForUploadResult()
  }
}
Example 18
Source File: RokkuS3ProxyVirtualHostedItTest.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy

import akka.http.scaladsl.model.Uri.Authority
import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.{AWSCredentials, AWSStaticCredentialsProvider, BasicSessionCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}

/**
 * Runs the proxy integration suite with virtual-hosted-style addressing:
 * identical to the base test except path-style access is disabled.
 */
class RokkuS3ProxyVirtualHostedItTest extends RokkuS3ProxyItTest {

  override def getAmazonS3(authority: Authority,
                           credentials: AWSCredentials = new BasicSessionCredentials("accesskey", "secretkey", "token")
                          ): AmazonS3 = {
    val clientConfig = new ClientConfiguration()
    clientConfig.setMaxErrorRetry(1)

    AmazonS3ClientBuilder
      .standard()
      .withClientConfiguration(clientConfig)
      .withCredentials(new AWSStaticCredentialsProvider(credentials))
      .withPathStyleAccessEnabled(false) // virtual-hosted-style requests
      .withEndpointConfiguration(new EndpointConfiguration(s"http://s3.localhost:${authority.port}", awsRegion))
      .build()
  }
}
Example 19
Source File: S3Client.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy.provider.aws

import com.amazonaws.auth.{ AWSStaticCredentialsProvider, BasicAWSCredentials }
import com.amazonaws.client.builder.AwsClientBuilder
import com.amazonaws.regions.Regions
import com.amazonaws.services.s3.model.{ AccessControlList, BucketPolicy, GroupGrantee, Permission }
import com.amazonaws.services.s3.{ AmazonS3, AmazonS3ClientBuilder }
import com.ing.wbaa.rokku.proxy.config.StorageS3Settings
import com.ing.wbaa.rokku.proxy.data.RequestId
import com.ing.wbaa.rokku.proxy.handler.LoggerHandlerWithId

import scala.concurrent.{ ExecutionContext, Future }
import scala.util.{ Failure, Success, Try }

/**
 * Admin-credentialed S3 client for managing bucket ACLs and policies on the
 * backing storage.
 */
trait S3Client {
  protected[this] implicit def executionContext: ExecutionContext

  private val logger = new LoggerHandlerWithId

  protected[this] def storageS3Settings: StorageS3Settings

  // Lazily built admin client; path-style access because the backing storage
  // is addressed by host:port rather than virtual-hosted buckets.
  protected[this] lazy val s3Client: AmazonS3 = {
    val adminCredentials = new BasicAWSCredentials(
      storageS3Settings.storageS3AdminAccesskey, storageS3Settings.storageS3AdminSecretkey)
    val endpointConfig = new AwsClientBuilder.EndpointConfiguration(
      s"http://${storageS3Settings.storageS3Authority.host.address()}:${storageS3Settings.storageS3Authority.port}",
      Regions.US_EAST_1.getName)

    AmazonS3ClientBuilder.standard()
      .withPathStyleAccessEnabled(true)
      .withCredentials(new AWSStaticCredentialsProvider(adminCredentials))
      .withEndpointConfiguration(endpointConfig)
      .build()
  }

  /**
   * Grants authenticated users read/write on the bucket and installs a
   * public-read policy. Errors are logged, not propagated: the Future
   * always succeeds with Unit.
   */
  protected[this] def setDefaultBucketAclAndPolicy(bucketName: String)(implicit id: RequestId): Future[Unit] =
    Future {
      val attempt = Try {
        logger.info("setting bucket acls and policies for bucket {}", bucketName)
        val bucketAcl = s3Client.getBucketAcl(bucketName)
        bucketAcl.revokeAllPermissions(GroupGrantee.AuthenticatedUsers)
        bucketAcl.grantPermission(GroupGrantee.AuthenticatedUsers, Permission.Read)
        bucketAcl.grantPermission(GroupGrantee.AuthenticatedUsers, Permission.Write)
        s3Client.setBucketAcl(bucketName, bucketAcl)
        s3Client.setBucketPolicy(bucketName, """{"Statement": [{"Action": ["s3:GetObject"],"Effect": "Allow","Principal": "*","Resource": ["arn:aws:s3:::*"]}],"Version": "2012-10-17"}""")
      }
      attempt match {
        case Success(_)         => logger.info("acls and policies for bucket {} done", bucketName)
        case Failure(exception) => logger.error("setting bucket acls and policies ex={}", exception.getMessage)
      }
    }

  def getBucketAcl(bucketName: String): Future[AccessControlList] = Future {
    s3Client.getBucketAcl(bucketName)
  }

  def getBucketPolicy(bucketName: String): Future[BucketPolicy] = Future {
    s3Client.getBucketPolicy(bucketName)
  }

  def listBucket: String =
    s3Client.listObjects(storageS3Settings.bucketName).getBucketName
}
Example 20
Source File: S3StoreTest.scala From fs2-blobstore with Apache License 2.0 | 5 votes |
package blobstore
package s3

import cats.effect.IO
import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder
import com.amazonaws.regions.Regions
import com.amazonaws.services.s3.transfer.{TransferManager, TransferManagerBuilder}
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}

/**
 * Runs the shared store test-suite against an S3Store backed by a local
 * Minio container (host/port configurable via environment variables).
 */
class S3StoreTest extends AbstractStoreTest {

  val credentials = new BasicAWSCredentials("my_access_key", "my_secret_key")

  // Minio requires the V4 signer to be forced on this SDK version.
  val clientConfiguration = new ClientConfiguration()
  clientConfiguration.setSignerOverride("AWSS3V4SignerType")

  val minioHost: String = Option(System.getenv("BLOBSTORE_MINIO_HOST")).getOrElse("minio-container")
  val minioPort: String = Option(System.getenv("BLOBSTORE_MINIO_PORT")).getOrElse("9000")

  private val client: AmazonS3 = {
    val endpoint = new AwsClientBuilder.EndpointConfiguration(
      s"http://$minioHost:$minioPort",
      Regions.US_EAST_1.name())
    AmazonS3ClientBuilder.standard()
      .withEndpointConfiguration(endpoint)
      .withPathStyleAccessEnabled(true)
      .withClientConfiguration(clientConfiguration)
      .withCredentials(new AWSStaticCredentialsProvider(credentials))
      .build()
  }

  private val transferManager: TransferManager =
    TransferManagerBuilder.standard()
      .withS3Client(client)
      .build()

  override val store: Store[IO] = new S3Store[IO](transferManager, blocker = blocker)
  override val root: String = "blobstore-test-bucket"

  override def beforeAll(): Unit = {
    super.beforeAll()
    try {
      client.createBucket(root)
    } catch {
      case e: com.amazonaws.services.s3.model.AmazonS3Exception if e.getMessage.contains("BucketAlreadyOwnedByYou") =>
      // bucket survives from a previous run — nothing to do
    }
    ()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    // Best-effort shutdown; never let cleanup fail the suite.
    try {
      client.shutdown()
    } catch {
      case _: Throwable =>
    }
  }
}
Example 21
Source File: V1DaxClientBuilderUtils.scala From akka-persistence-dynamodb with Apache License 2.0 | 5 votes |
package com.github.j5ik2o.akka.persistence.dynamodb.utils

import com.amazon.dax.client.dynamodbv2.{ AmazonDaxAsyncClientBuilder, AmazonDaxClientBuilder }
import com.amazonaws.auth.{ AWSStaticCredentialsProvider, BasicAWSCredentials }
import com.amazonaws.services.dynamodbv2.{ AmazonDynamoDB, AmazonDynamoDBAsync }
import com.github.j5ik2o.akka.persistence.dynamodb.config.client.DynamoDBClientConfig

/**
 * Builds DAX (sync and async) client builders from the plugin's
 * DynamoDB client configuration. Credentials, region, and endpoints are
 * applied only when present in the config.
 */
object V1DaxClientBuilderUtils {

  /** Prepares a synchronous DAX client builder from `dynamoDBClientConfig`. */
  def setupSync(dynamoDBClientConfig: DynamoDBClientConfig): AmazonDaxClientBuilder = {
    val clientConfiguration = V1DaxClientConfigUtils.setup(dynamoDBClientConfig)
    val builder = AmazonDaxClientBuilder.standard().withClientConfiguration(clientConfiguration)
    // Static credentials only when both halves of the key pair are configured.
    for {
      accessKey <- dynamoDBClientConfig.accessKeyId
      secretKey <- dynamoDBClientConfig.secretAccessKey
    } builder.setCredentials(
      new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey))
    )
    dynamoDBClientConfig.region.foreach(builder.setRegion)
    // Endpoint config accepts a comma-separated list of cluster endpoints.
    dynamoDBClientConfig.endpoint.foreach { endpoints =>
      builder.setEndpointConfiguration(endpoints.split(","): _*)
    }
    builder
  }

  /** Prepares an asynchronous DAX client builder from `dynamoDBClientConfig`. */
  def setupAsync(dynamoDBClientConfig: DynamoDBClientConfig): AmazonDaxAsyncClientBuilder = {
    val clientConfiguration = V1DaxClientConfigUtils.setup(dynamoDBClientConfig)
    val builder = AmazonDaxAsyncClientBuilder.standard().withClientConfiguration(clientConfiguration)
    // Same optional wiring as the synchronous variant.
    for {
      accessKey <- dynamoDBClientConfig.accessKeyId
      secretKey <- dynamoDBClientConfig.secretAccessKey
    } builder.setCredentials(
      new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey))
    )
    dynamoDBClientConfig.region.foreach(builder.setRegion)
    dynamoDBClientConfig.endpoint.foreach { endpoints =>
      builder.setEndpointConfiguration(endpoints.split(","): _*)
    }
    builder
  }
}