com.amazonaws.auth.DefaultAWSCredentialsProviderChain Scala Examples
The following examples show how to use com.amazonaws.auth.DefaultAWSCredentialsProviderChain.
The default chain resolves credentials by trying, in order: environment variables, Java system properties, the shared credentials profile file, and the ECS container or EC2 instance profile.
Each example is taken from an open-source project; the original project and source file are named above each snippet.
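As a minimal sketch before the project examples (this demo object is not from any of the projects below), the chain can be used directly to resolve whatever credentials are available in the environment:

import com.amazonaws.auth.DefaultAWSCredentialsProviderChain

object ChainDemo extends App {
  // Walks the default chain (env vars -> system properties -> profile ->
  // container/instance profile) and returns the first credentials found,
  // throwing if none are available.
  val credentials = new DefaultAWSCredentialsProviderChain().getCredentials
  println(s"Resolved access key id: ${credentials.getAWSAccessKeyId.take(4)}...")
}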
Example 1
Source File: Producer.scala From kinesis-stream with MIT License
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import akka.util.ByteString
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain
import com.amazonaws.services.kinesis.producer.KinesisProducerConfiguration
import com.contxt.kinesis.ScalaKinesisProducer

object Producer extends App {
  implicit val system = ActorSystem("kinesis-producer")
  implicit val ec = system.dispatcher
  implicit val mat = ActorMaterializer()

  val producer = ScalaKinesisProducer(
    "activity-test",
    new KinesisProducerConfiguration()
      .setRegion("us-east-1")
      .setCredentialsProvider(new DefaultAWSCredentialsProviderChain))

  Source(1 to 10)
    .map(i => (i.toString, ByteString(s"Data: $i")))
    .mapAsync(1) {
      case (key, data) => producer.send(key, data.toByteBuffer)
    }
    .runWith(Sink.foreach(r => println(s"${r.getShardId}-${r.getSequenceNumber.takeRight(10)}")))
    .onComplete { case _ => system.terminate() }
}
Example 2
Source File: S3.scala From teamcity-s3-plugin with Apache License 2.0
package com.gu.teamcity

import java.io.{InputStream, File}

import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.{AWSCredentialsProviderChain, DefaultAWSCredentialsProviderChain}
import com.amazonaws.services.s3.AmazonS3Client
import com.amazonaws.services.s3.model.{ObjectMetadata, PutObjectRequest, CannedAccessControlList}
import com.amazonaws.services.s3.transfer.TransferManager
import jetbrains.buildServer.serverSide.SBuild

import scala.util.{Success, Try}

class S3(config: S3ConfigManager) {
  val credentialsProvider = {
    val provider = new AWSCredentialsProviderChain(config, new DefaultAWSCredentialsProviderChain())
    provider.setReuseLastProvider(false)
    provider
  }

  val transferManager = new TransferManager(
    new AmazonS3Client(credentialsProvider, new ClientConfiguration().withMaxErrorRetry(2))
  )

  def upload(bucket: String, build: SBuild, fileName: String, contents: InputStream, fileSize: Long): Try[Unit] = Try {
    val uploadDirectory = s"${S3Plugin.cleanFullName(build)}/${build.getBuildNumber}"
    val metadata = {
      val md = new ObjectMetadata()
      md.setContentLength(fileSize)
      md
    }
    val req = new PutObjectRequest(bucket, s"$uploadDirectory/$fileName", contents, metadata)
    req.withCannedAcl(CannedAccessControlList.BucketOwnerFullControl)
    val upload = transferManager.upload(req)
    upload.waitForUploadResult()
  }

  def upload(bucket: String, build: SBuild, fileName: String, file: File): Try[Unit] = Try {
    val uploadDirectory = s"${S3Plugin.cleanFullName(build)}/${build.getBuildNumber}"
    val req = new PutObjectRequest(bucket, s"$uploadDirectory/$fileName", file)
    req.withCannedAcl(CannedAccessControlList.BucketOwnerFullControl)
    val upload = transferManager.upload(req)
    upload.waitForUploadResult()
  }
}
Example 3
Source File: KinesisProducerClient.scala From fs2-aws with MIT License
package fs2.aws.internal

import java.nio.ByteBuffer

import cats.effect.Sync
import com.amazonaws.auth.{AWSCredentialsProviderChain, DefaultAWSCredentialsProviderChain}
import com.amazonaws.services.kinesis.producer.{KinesisProducer, KinesisProducerConfiguration, UserRecordResult}
import com.google.common.util.concurrent.ListenableFuture

trait KinesisProducerClient[F[_]] {
  def putData(streamName: String, partitionKey: String, data: ByteBuffer)(
      implicit F: Sync[F]
  ): F[ListenableFuture[UserRecordResult]]
}

class KinesisProducerClientImpl[F[_]] extends KinesisProducerClient[F] {
  val credentials: AWSCredentialsProviderChain = new DefaultAWSCredentialsProviderChain()
  val region: Option[String] = None

  private lazy val config: KinesisProducerConfiguration = {
    val c = new KinesisProducerConfiguration()
      .setCredentialsProvider(credentials)

    region.map(r => c.setRegion(r))
    c
  }

  private lazy val client = new KinesisProducer(config)

  override def putData(streamName: String, partitionKey: String, data: ByteBuffer)(
      implicit F: Sync[F]
  ): F[ListenableFuture[UserRecordResult]] =
    F.delay(client.addUserRecord(streamName, partitionKey, data))
}
Example 4
Source File: KinesisStreamConsumerConfig.scala From gfc-aws-kinesis with Apache License 2.0
package com.gilt.gfc.aws.kinesis.akka

import com.amazonaws.auth.{AWSCredentialsProvider, DefaultAWSCredentialsProviderChain}
import com.amazonaws.services.dynamodbv2.streamsadapter.AmazonDynamoDBStreamsAdapterClient
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.{InitialPositionInStream, KinesisClientLibConfiguration}
import com.amazonaws.services.kinesis.metrics.impl.NullMetricsFactory
import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory
import com.gilt.gfc.aws.kinesis.client.KinesisClientEndpoints

import scala.concurrent.duration._

case class KinesisStreamConsumerConfig[T](
  streamName: String,
  applicationName: String,
  kinesisCredentialsProvider: AWSCredentialsProvider = new DefaultAWSCredentialsProviderChain(),
  dynamoCredentialsProvider: AWSCredentialsProvider = new DefaultAWSCredentialsProviderChain(),
  cloudWatchCredentialsProvider: AWSCredentialsProvider = new DefaultAWSCredentialsProviderChain(),
  metricsFactory: IMetricsFactory = new NullMetricsFactory(),
  checkPointInterval: Duration = 5.minutes,
  retryConfig: RetryConfig = RetryConfig(1.second, 1.second, 3),
  initialPositionInStream: InitialPositionInStream = InitialPositionInStream.LATEST,
  regionName: Option[String] = None,
  dynamoDBKinesisAdapterClient: Option[AmazonDynamoDBStreamsAdapterClient] = None,
  kinesisClientEndpoints: Option[KinesisClientEndpoints] = None,
  failoverTimeoutMillis: Long = KinesisClientLibConfiguration.DEFAULT_FAILOVER_TIME_MILLIS,
  maxRecordsPerBatch: Option[Int] = None,
  idleTimeBetweenReads: FiniteDuration = KinesisClientLibConfiguration.DEFAULT_IDLETIME_BETWEEN_READS_MILLIS.millis
) {
  def withCommonCredentialsProvider(credentialsProvider: AWSCredentialsProvider): KinesisStreamConsumerConfig[T] =
    this.copy(
      kinesisCredentialsProvider = credentialsProvider,
      dynamoCredentialsProvider = credentialsProvider,
      cloudWatchCredentialsProvider = credentialsProvider
    )
}
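A quick illustration of the helper above (stream and application names are hypothetical): withCommonCredentialsProvider collapses the three per-service providers into one.

val config = KinesisStreamConsumerConfig[String]("my-stream", "my-app")
  .withCommonCredentialsProvider(new DefaultAWSCredentialsProviderChain())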
Example 5
Source File: KCLConfiguration.scala From gfc-aws-kinesis with Apache License 2.0
package com.gilt.gfc.aws.kinesis.client

import java.util.UUID

import scala.concurrent.duration._

import com.amazonaws.auth.{AWSCredentialsProvider, DefaultAWSCredentialsProviderChain}
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.{InitialPositionInStream, KinesisClientLibConfiguration}

object KCLConfiguration {
  // The published snippet omits the enclosing object and this value; the KCL
  // worker id is assumed here to be derived from the local host name.
  private val HostName = java.net.InetAddress.getLocalHost.getHostName

  def apply(applicationName: String,
            streamName: String,
            kinesisCredentialsProvider: AWSCredentialsProvider = new DefaultAWSCredentialsProviderChain(),
            dynamoCredentialsProvider: AWSCredentialsProvider = new DefaultAWSCredentialsProviderChain(),
            cloudWatchCredentialsProvider: AWSCredentialsProvider = new DefaultAWSCredentialsProviderChain(),
            regionName: Option[String] = None,
            initialPositionInStream: InitialPositionInStream = InitialPositionInStream.LATEST,
            endpointConfiguration: Option[KinesisClientEndpoints] = None,
            failoverTimeoutMillis: Long = KinesisClientLibConfiguration.DEFAULT_FAILOVER_TIME_MILLIS,
            maxRecordsPerBatch: Int = KinesisClientLibConfiguration.DEFAULT_MAX_RECORDS,
            idleTimeBetweenReads: FiniteDuration = KinesisClientLibConfiguration.DEFAULT_IDLETIME_BETWEEN_READS_MILLIS.millis): KinesisClientLibConfiguration = {

    val dynamoTableName = s"${applicationName}.${streamName}"
      .replaceAll("[^a-zA-Z0-9_.-]", "-")

    val conf = new KinesisClientLibConfiguration(
      dynamoTableName,
      streamName,
      kinesisCredentialsProvider,
      dynamoCredentialsProvider,
      cloudWatchCredentialsProvider,
      s"${HostName}:${UUID.randomUUID()}"
    ).withRegionName(regionName.orNull)
      .withInitialPositionInStream(initialPositionInStream)
      .withFailoverTimeMillis(failoverTimeoutMillis)
      .withMaxRecords(maxRecordsPerBatch)
      .withIdleTimeBetweenReadsInMillis(idleTimeBetweenReads.toMillis)

    endpointConfiguration.fold(conf)(endpoints =>
      conf.withDynamoDBEndpoint(endpoints.dynamoDBEndpoint)
        .withKinesisEndpoint(endpoints.kinesisEndpoint)
    )
  }
}
Example 6
Source File: AWSSigningJestClientFactory.scala From haystack-traces with Apache License 2.0
package com.expedia.www.haystack.trace.commons.clients.es

import java.time.{LocalDateTime, ZoneId}

import com.expedia.www.haystack.trace.commons.config.entities.AWSRequestSigningConfiguration
import com.google.common.base.Supplier
import io.searchbox.client.JestClientFactory
import org.apache.http.impl.client.HttpClientBuilder
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder
import org.slf4j.LoggerFactory
import vc.inreach.aws.request.{AWSSigner, AWSSigningRequestInterceptor}
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.auth.BasicAWSCredentials
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain
import com.amazonaws.internal.StaticCredentialsProvider

class AWSSigningJestClientFactory(awsRequestSigningConfig: AWSRequestSigningConfiguration) extends JestClientFactory {
  private val LOGGER = LoggerFactory.getLogger(classOf[AWSSigningJestClientFactory])

  val awsSigner = new AWSSigner(getCredentialProvider, awsRequestSigningConfig.region, awsRequestSigningConfig.awsServiceName, new ClockSupplier)
  val requestInterceptor = new AWSSigningRequestInterceptor(awsSigner)

  override def configureHttpClient(builder: HttpClientBuilder): HttpClientBuilder = {
    builder.addInterceptorLast(requestInterceptor)
  }

  override def configureHttpClient(builder: HttpAsyncClientBuilder): HttpAsyncClientBuilder = {
    builder.addInterceptorLast(requestInterceptor)
  }

  def getCredentialProvider: AWSCredentialsProvider = {
    if (awsRequestSigningConfig.accessKey.isDefined) {
      LOGGER.info("using static aws credential provider with access and secret key for ES")
      new StaticCredentialsProvider(
        new BasicAWSCredentials(awsRequestSigningConfig.accessKey.get, awsRequestSigningConfig.secretKey.get))
    } else {
      LOGGER.info("using default credential provider chain for ES")
      new DefaultAWSCredentialsProviderChain
    }
  }
}

class ClockSupplier extends Supplier[LocalDateTime] {
  override def get(): LocalDateTime = {
    LocalDateTime.now(ZoneId.of("UTC"))
  }
}
Example 7
Source File: SQSConsumer.scala From sqs-kafka-connect with Apache License 2.0
package com.hivehome.kafka.connect.sqs

import javax.jms.{JMSException, MessageConsumer, Session}

import com.amazon.sqs.javamessaging.SQSConnectionFactory
import com.amazonaws.auth.{AWSCredentialsProviderChain, BasicAWSCredentials, DefaultAWSCredentialsProviderChain}
import com.amazonaws.internal.StaticCredentialsProvider
import com.amazonaws.regions.{Region, Regions}

object SQSConsumer {

  def apply(conf: Conf): MessageConsumer = {
    val chain = buildCredentialsProviderChain(conf)
    createSQSConsumer(conf, chain)
  }

  @throws(classOf[JMSException])
  private def createSQSConsumer(conf: Conf, chain: AWSCredentialsProviderChain): MessageConsumer = {
    val region = Regions.fromName(conf.awsRegion)
    val connectionFactory = SQSConnectionFactory.builder
      .withRegion(Region.getRegion(region))
      .withAWSCredentialsProvider(chain)
      .build

    val connection = connectionFactory.createConnection
    val session = connection.createSession(false, Session.CLIENT_ACKNOWLEDGE)
    val queue = session.createQueue(conf.queueName.get)
    val consumer = session.createConsumer(queue)
    connection.start()
    consumer
  }

  private def buildCredentialsProviderChain(conf: Conf): AWSCredentialsProviderChain = {
    (conf.awsKey, conf.awsSecret) match {
      case (Some(key), Some(secret)) =>
        val credentials = new BasicAWSCredentials(key, secret)
        new AWSCredentialsProviderChain(new StaticCredentialsProvider(credentials), new DefaultAWSCredentialsProviderChain)
      case _ => new DefaultAWSCredentialsProviderChain
    }
  }
}
Example 8
Source File: KibanaForwarder.scala From shield with MIT License
package shield.actors.listeners

import akka.actor.{Actor, ActorLogging}
import com.amazonaws.auth.{AWSCredentials, DefaultAWSCredentialsProviderChain}
import com.typesafe.config.Config
import shield.actors.RestartLogging
import org.joda.time.format.DateTimeFormat
import org.joda.time.{DateTimeZone, DateTime}
import shield.aws.AWSSigningConfig
import shield.metrics.Instrumented
import spray.client.pipelining._
import spray.http.HttpResponse
import shield.aws.AWSImplicits._
import spray.json.DefaultJsonProtocol._
import spray.json._

// todo: ensure useful mapping on the index
class KibanaForwarder(id: String, host: String, indexPrefix: String, ttype: String, maxOutstanding: Int, signingParams: AWSSigningConfig) extends Actor with ActorLogging with RestartLogging with Instrumented {
  implicit val ctx = context.dispatcher

  // todo: timeout?
  val awsSigningConfig = signingParams
  val pipeline = sendReceive
  val dayFormat = DateTimeFormat.forPattern("yyyy.MM.dd")

  val outstandingCounter = metrics.counter("outstandingPosts", id)
  val droppedMeter = metrics.meter("droppedAccessLogs", id)
  val postTimer = timing("postToKibana", id)

  def receive = {
    case LogsFlushed =>
      outstandingCounter -= 1

    case AccessLogs(buffer) =>
      if (buffer.nonEmpty) {
        if (outstandingCounter.count >= maxOutstanding) {
          droppedMeter.mark(buffer.length)
        } else postTimer {
          outstandingCounter += 1
          val date = DateTimeFormat.forPattern("yyyy.MM.dd").print(DateTime.now(DateTimeZone.UTC))

          // todo: CompactPrint is 1% cpu under load tests. Faster serialization library?
          val orderedCommands = buffer.flatMap { doc =>
            List(
              JsObject(
                "index" -> JsObject(
                  "_index" -> JsString(s"$indexPrefix-$date"),
                  "_type" -> JsString(ttype)
                )
              ).toJson.compactPrint,
              doc.toJson.compactPrint
            )
          }
          val req = Post(s"$host/_bulk", orderedCommands.mkString("\n") + "\n").withAWSSigning(awsSigningConfig)
          pipeline(req) andThen LogCollector.handleResults(self, droppedMeter, log, buffer.length)
        }
      }
  }
}
Example 9
Source File: ClientProvider.scala From reactive-nakadi with MIT License
package org.zalando.react.nakadi.commit.handlers.aws

import com.amazonaws.regions.Regions
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain
import com.amazonaws.services.dynamodbv2.document.DynamoDB
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient
import org.zalando.react.nakadi.properties.CommitProperties

trait Provider {
  def client: DynamoDB
  def leaseProperties: CommitProperties
}

class ClientProvider(override val leaseProperties: CommitProperties) extends Provider {
  private val credentialsProviderChain = new DefaultAWSCredentialsProviderChain()
  private val region = Regions.fromName(leaseProperties.awsCommitRegion)

  override val client: DynamoDB = {
    val c = new AmazonDynamoDBClient(credentialsProviderChain)
    c.configureRegion(region)
    new DynamoDB(c)
  }
}
Example 10
Source File: Runner.scala From spark-avro-compactor with Apache License 2.0
package ie.ianduffy.spark.avro.compactor

import com.amazonaws.auth.{AWSSessionCredentials, DefaultAWSCredentialsProviderChain}
import ie.ianduffy.spark.avro.compactor.Utils._
import io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient
import org.slf4j.LoggerFactory

import scala.util.{Failure, Success, Try}

object Runner extends App {

  private val log = LoggerFactory.getLogger(Runner.getClass.getName.replace("$", ""))

  private val config = JobConfig.parse(args)
  private val schemaRegistry = new CachedSchemaRegistryClient(config.schemaRegistryUrl, 10000)

  log.info(s"Running with application config $config")

  if (System.getenv("local") != null) {
    log.info(s"Running with embedded spark")
    runLocally(config)
  } else {
    log.info("Running with remote spark")
    run(config)
  }

  def runLocally(config: JobConfig) = {
    val credentials = new DefaultAWSCredentialsProviderChain().getCredentials.asInstanceOf[AWSSessionCredentials]
    System.setProperty("spark.master", "local[*]")
    System.setProperty("spark.app.name", "compactor")
    System.setProperty("spark.hadoop.fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem")
    System.setProperty("spark.hadoop.fs.s3a.endpoint", "s3-eu-central-1.amazonaws.com")
    System.setProperty("spark.hadoop.fs.s3a.access.key", credentials.getAWSAccessKeyId)
    System.setProperty("spark.hadoop.fs.s3a.secret.key", credentials.getAWSSecretKey)
    System.setProperty("spark.hadoop.fs.s3a.session.token", credentials.getSessionToken)
    System.setProperty("spark.hadoop.fs.s3a.aws.credentials.provider", "org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider")
    System.setProperty("com.amazonaws.services.s3.enforceV4", "true")

    val spark = createSparkSession
    log.info(s"Running with spark configuration: ${spark.conf.getAll}")

    Try {
      Job.run(spark, schemaRegistry, config)
    } match {
      case Success(_) =>
        spark.close()
        System.exit(0)
      case Failure(e) =>
        spark.close()
        e.printStackTrace()
        System.exit(1)
    }
  }

  def run(config: JobConfig) = {
    val spark = createSparkSession
    log.info(s"Running with configuration: ${spark.conf.getAll}")
    Job.run(spark, schemaRegistry, config)
  }
}
Example 11
Source File: Credentials.scala From spark-select with Apache License 2.0
package io.minio.spark.select

import java.net.URI

// For BasicAWSCredentials
import com.amazonaws.auth.AWSCredentials
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.auth.BasicAWSCredentials
import com.amazonaws.auth.BasicSessionCredentials
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain

import org.apache.hadoop.conf.Configuration

private[spark] object Credentials {
  private def staticCredentialsProvider(credentials: AWSCredentials): AWSCredentialsProvider = {
    new AWSCredentialsProvider {
      override def getCredentials: AWSCredentials = credentials
      override def refresh(): Unit = {}
    }
  }

  def load(location: Option[String], hadoopConfiguration: Configuration): AWSCredentialsProvider = {
    val uri = new URI(location.getOrElse(""))
    val uriScheme = uri.getScheme

    uriScheme match {
      case "s3" | "s3a" =>
        // This matches what S3A does, with one exception: we don't
        // support anonymous credentials. First, try to parse from URI:
        Option(uri.getUserInfo).flatMap { userInfo =>
          if (userInfo.contains(":")) {
            val Array(accessKey, secretKey) = userInfo.split(":")
            Some(staticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey)))
          } else {
            None
          }
        }.orElse {
          val accessKey = hadoopConfiguration.get(s"fs.s3a.access.key", null)
          val secretKey = hadoopConfiguration.get(s"fs.s3a.secret.key", null)
          val sessionToken = hadoopConfiguration.get(s"fs.s3a.session.token", null)
          if (accessKey != null && secretKey != null) {
            if (sessionToken != null) {
              Some(staticCredentialsProvider(new BasicSessionCredentials(accessKey, secretKey, sessionToken)))
            } else {
              Some(staticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey)))
            }
          } else {
            None
          }
        }.getOrElse {
          // Finally, fall back on the instance profile provider
          new DefaultAWSCredentialsProviderChain()
        }
      case other =>
        throw new IllegalArgumentException(s"Unrecognized scheme $other; expected s3, or s3a")
    }
  }
}
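A hypothetical call (the bucket name is invented for illustration) resolves a provider for an s3a path, falling back to the default chain when no keys are embedded in the URI or the Hadoop configuration:

import org.apache.hadoop.conf.Configuration

val provider = Credentials.load(Some("s3a://my-bucket/data"), new Configuration())
val creds = provider.getCredentials // static keys if configured, otherwise the default chain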
Example 12
Source File: SSH.scala From spark-deployer with Apache License 2.0
package sparkdeployer

import com.amazonaws.auth.DefaultAWSCredentialsProviderChain
import org.slf4s.Logging

import sys.process._

case class SSH(
  machine: Machine,
  remoteCommand: Option[String] = None,
  ttyAllocated: Boolean = false,
  retryEnabled: Boolean = false,
  includeAWSCredentials: Boolean = false
)(implicit conf: ClusterConf) extends Logging {
  def withRemoteCommand(cmd: String) = this.copy(remoteCommand = Some(cmd))
  def withTTY = this.copy(ttyAllocated = true)
  def withRetry = this.copy(retryEnabled = true)
  def withAWSCredentials = this.copy(includeAWSCredentials = true)

  def getCommandSeq(maskAWS: Boolean) = Seq(
    "ssh",
    "-o", "UserKnownHostsFile=/dev/null",
    "-o", "StrictHostKeyChecking=no"
  )
    .++(conf.pem.fold(Seq.empty[String])(pem => Seq("-i", pem)))
    .++(if (ttyAllocated) Some("-tt") else None)
    .:+(conf.user + "@" + machine.address)
    .++(remoteCommand.map { remoteCommand =>
      if (includeAWSCredentials) {
        //get aws credentials in formal way
        val credentials = new DefaultAWSCredentialsProviderChain().getCredentials
        Seq(
          "AWS_ACCESS_KEY_ID='" + (if (maskAWS) "*" else credentials.getAWSAccessKeyId) + "'",
          "AWS_SECRET_ACCESS_KEY='" + (if (maskAWS) "*" else credentials.getAWSSecretKey) + "'",
          remoteCommand
        ).mkString(" ")
      } else remoteCommand
    })

  def run(): Int = {
    val op = (attempt: Int) => {
      log.info(s"[${machine.name}] [attempt:${attempt}] ${getCommandSeq(true).mkString(" ")}")
      val exitValue = getCommandSeq(false).!
      if (exitValue != 0) {
        sys.error(s"[${machine.name}] Error when running '${getCommandSeq(true).mkString(" ")}'. exitValue = ${exitValue}.")
      } else exitValue
    }
    if (retryEnabled) Retry(op) else op(1)
  }
}
Example 13
Source File: package.scala From aws-kinesis-scala with Apache License 2.0
package jp.co.bizreach.kinesis

import com.amazonaws.auth.DefaultAWSCredentialsProviderChain
import com.amazonaws.regions.Regions
import org.apache.spark.rdd.RDD

package object spark {

  implicit class RichRDD[A <: AnyRef](rdd: RDD[A]) {
    def saveToKinesis(streamName: String, region: Regions,
                      credentials: SparkAWSCredentials = DefaultCredentials,
                      chunk: Int = recordsMaxCount,
                      endpoint: Option[String] = None): Unit =
      if (!rdd.isEmpty) rdd.sparkContext.runJob(rdd,
        new KinesisRDDWriter(streamName, region, credentials, chunk, endpoint).write _)
  }
}
Example 14
Source File: AmazonIdentityManagement.scala From sbt-aws-lambda with Apache License 2.0
package com.gilt.aws.lambda.wrapper

import com.amazonaws.auth.DefaultAWSCredentialsProviderChain
import com.amazonaws.services.identitymanagement.AmazonIdentityManagementClientBuilder
import com.amazonaws.services.identitymanagement.model._

import scala.util.Try

trait AmazonIdentityManagement {
  def listRoles(): Try[ListRolesResult]
  def createRole(req: CreateRoleRequest): Try[CreateRoleResult]
}

object AmazonIdentityManagement {
  def instance(): AmazonIdentityManagement = {
    val auth = new DefaultAWSCredentialsProviderChain()
    val client = AmazonIdentityManagementClientBuilder.standard()
      .withCredentials(auth)
      .build

    new AmazonIdentityManagement {
      def listRoles() = Try(client.listRoles)
      def createRole(req: CreateRoleRequest) = Try(client.createRole(req))
    }
  }
}
Example 15
Source File: AmazonS3.scala From sbt-aws-lambda with Apache License 2.0
package com.gilt.aws.lambda.wrapper

import com.amazonaws.auth.DefaultAWSCredentialsProviderChain
import com.amazonaws.services.s3.AmazonS3ClientBuilder
import com.amazonaws.services.s3.model.{Region => _, _}

import scala.util.Try

import com.gilt.aws.lambda.Region

trait AmazonS3 {
  def listBuckets(): Try[java.util.List[Bucket]]
  def createBucket(bucket: String): Try[Bucket]
  def putObject(req: PutObjectRequest): Try[PutObjectResult]
}

object AmazonS3 {
  def instance(region: Region): AmazonS3 = {
    val auth = new DefaultAWSCredentialsProviderChain()
    val client = AmazonS3ClientBuilder.standard()
      .withCredentials(auth)
      .withRegion(region.value)
      .build

    new AmazonS3 {
      def listBuckets() = Try(client.listBuckets)
      def createBucket(bucket: String) = Try(client.createBucket(bucket))
      def putObject(req: PutObjectRequest) = Try(client.putObject(req))
    }
  }
}
Example 16
Source File: AwsLambda.scala From sbt-aws-lambda with Apache License 2.0
package com.gilt.aws.lambda.wrapper

import com.amazonaws.auth.DefaultAWSCredentialsProviderChain
import com.amazonaws.services.lambda.AWSLambdaClientBuilder
import com.amazonaws.services.lambda.model._

import scala.util.Try

import com.gilt.aws.lambda.Region

trait AwsLambda {
  def createFunction(req: CreateFunctionRequest): Try[CreateFunctionResult]
  def updateFunctionCode(req: UpdateFunctionCodeRequest): Try[UpdateFunctionCodeResult]
  def getFunctionConfiguration(req: GetFunctionConfigurationRequest): Try[GetFunctionConfigurationResult]
  def updateFunctionConfiguration(req: UpdateFunctionConfigurationRequest): Try[UpdateFunctionConfigurationResult]
  def tagResource(req: TagResourceRequest): Try[TagResourceResult]
  def publishVersion(request: PublishVersionRequest): Try[PublishVersionResult]
}

object AwsLambda {
  def instance(region: Region): AwsLambda = {
    val auth = new DefaultAWSCredentialsProviderChain()
    val client = AWSLambdaClientBuilder.standard()
      .withCredentials(auth)
      .withRegion(region.value)
      .build

    new AwsLambda {
      def createFunction(req: CreateFunctionRequest) = Try(client.createFunction(req))
      def updateFunctionCode(req: UpdateFunctionCodeRequest) = Try(client.updateFunctionCode(req))
      def getFunctionConfiguration(req: GetFunctionConfigurationRequest) = Try(client.getFunctionConfiguration(req))
      def updateFunctionConfiguration(req: UpdateFunctionConfigurationRequest) = Try(client.updateFunctionConfiguration(req))
      def tagResource(req: TagResourceRequest) = Try(client.tagResource(req))
      def publishVersion(request: PublishVersionRequest) = Try(client.publishVersion(request))
    }
  }
}
Example 17
Source File: S3Client.scala From akka-persistence-s3 with MIT License
package akka.persistence.s3

import java.io.InputStream

import com.amazonaws.auth.{BasicAWSCredentials, DefaultAWSCredentialsProviderChain}
import com.amazonaws.services.s3.{S3ClientOptions, AmazonS3Client}
import com.amazonaws.services.s3.model._

import scala.concurrent.{Future, ExecutionContext}

trait S3Client {
  val s3ClientConfig: S3ClientConfig

  lazy val client: AmazonS3Client = {
    val client =
      if (s3ClientConfig.awsUseDefaultCredentialsProviderChain)
        new AmazonS3Client(new DefaultAWSCredentialsProviderChain).withRegion(s3ClientConfig.region)
      else
        new AmazonS3Client(new BasicAWSCredentials(s3ClientConfig.awsKey, s3ClientConfig.awsSecret))

    s3ClientConfig.endpoint.foreach { endpoint =>
      client.withEndpoint(endpoint)
      ()
    }
    client.setS3ClientOptions(new S3ClientOptions()
      .withPathStyleAccess(s3ClientConfig.options.pathStyleAccess)
      .withChunkedEncodingDisabled(s3ClientConfig.options.chunkedEncodingDisabled))
    client
  }

  def createBucket(bucketName: String)(implicit ec: ExecutionContext): Future[Bucket] = Future {
    client.createBucket(bucketName)
  }

  def deleteBucket(bucketName: String)(implicit ec: ExecutionContext): Future[Unit] = Future {
    client.deleteBucket(bucketName)
  }

  def putObject(bucketName: String, key: String, input: InputStream, metadata: ObjectMetadata)(implicit ec: ExecutionContext): Future[PutObjectResult] = Future {
    client.putObject(new PutObjectRequest(bucketName, key, input, metadata))
  }

  def getObject(bucketName: String, key: String)(implicit ec: ExecutionContext): Future[S3Object] = Future {
    client.getObject(new GetObjectRequest(bucketName, key))
  }

  def listObjects(request: ListObjectsRequest)(implicit ec: ExecutionContext): Future[ObjectListing] = Future {
    client.listObjects(request)
  }

  def deleteObject(bucketName: String, key: String)(implicit ec: ExecutionContext): Future[Unit] = Future {
    client.deleteObject(bucketName, key)
  }

  def deleteObjects(request: DeleteObjectsRequest)(implicit ec: ExecutionContext): Future[Unit] = Future {
    client.deleteObjects(request)
  }
}
Example 18
Source File: SqsSettings.scala From akka-stream-sqs with Apache License 2.0
package me.snov.akka.sqs.client

import akka.actor.ActorSystem
import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.{AWSCredentialsProvider, DefaultAWSCredentialsProviderChain}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.sqs.AmazonSQSAsync
import com.typesafe.config.Config

import collection.JavaConverters._

object SqsSettings {
  private val defaultAWSCredentialsProvider = new DefaultAWSCredentialsProviderChain()
  private val defaultAWSClientConfiguration = new ClientConfiguration()
  private val defaultMaxNumberOfMessages = 10
  private val defaultWaitTimeSeconds = 10
  private val configurationRoot = "akka-stream-sqs"

  def apply(
    queueUrl: String,
    maxNumberOfMessages: Int = defaultMaxNumberOfMessages,
    waitTimeSeconds: Int = defaultWaitTimeSeconds,
    awsCredentialsProvider: Option[AWSCredentialsProvider] = None,
    awsClientConfiguration: Option[ClientConfiguration] = None,
    awsClient: Option[AmazonSQSAsync] = None,
    endpoint: Option[EndpointConfiguration] = None,
    visibilityTimeout: Option[Int] = None,
    messageAttributes: Seq[String] = List()
  ): SqsSettings =
    new SqsSettings(
      queueUrl = queueUrl,
      maxNumberOfMessages = maxNumberOfMessages,
      waitTimeSeconds = waitTimeSeconds,
      awsClient = awsClient,
      endpoint = endpoint,
      awsCredentialsProvider = awsCredentialsProvider.getOrElse(defaultAWSCredentialsProvider),
      awsClientConfiguration = awsClientConfiguration.getOrElse(defaultAWSClientConfiguration),
      visibilityTimeout = visibilityTimeout,
      messageAttributes = messageAttributes
    )

  def apply(system: ActorSystem): SqsSettings = apply(system, None, None)

  def apply(
    system: ActorSystem,
    awsCredentialsProvider: Option[AWSCredentialsProvider],
    awsClientConfiguration: Option[ClientConfiguration]
  ): SqsSettings =
    apply(system.settings.config.getConfig(configurationRoot), awsCredentialsProvider, awsClientConfiguration)

  def apply(config: Config): SqsSettings = apply(config, None, None)

  def apply(
    config: Config,
    awsCredentialsProvider: Option[AWSCredentialsProvider],
    awsClientConfiguration: Option[ClientConfiguration]
  ): SqsSettings = {
    apply(
      queueUrl = config.getString("queue-url"),
      maxNumberOfMessages = if (config.hasPath("max-number-of-messages")) config.getInt("max-number-of-messages") else defaultMaxNumberOfMessages,
      waitTimeSeconds = if (config.hasPath("wait-time-seconds")) config.getInt("wait-time-seconds") else defaultWaitTimeSeconds,
      awsCredentialsProvider = awsCredentialsProvider,
      awsClientConfiguration = awsClientConfiguration,
      endpoint = if (config.hasPath("endpoint") && config.hasPath("region")) Some(new EndpointConfiguration(config.getString("endpoint"), config.getString("region"))) else None,
      visibilityTimeout = if (config.hasPath("visibility-timeout")) Some(config.getInt("visibility-timeout")) else None,
      messageAttributes = if (config.hasPath("message-attributes")) config.getStringList("message-attributes").asScala else List()
    )
  }
}

case class SqsSettings(queueUrl: String,
                       maxNumberOfMessages: Int,
                       waitTimeSeconds: Int,
                       awsClient: Option[AmazonSQSAsync],
                       endpoint: Option[EndpointConfiguration],
                       awsCredentialsProvider: AWSCredentialsProvider,
                       awsClientConfiguration: ClientConfiguration,
                       visibilityTimeout: Option[Int],
                       messageAttributes: Seq[String])
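A brief usage note (the queue URL is invented for illustration): the only required setting is the queue URL, so the default chain supplies credentials unless a provider is passed explicitly.

val settings = SqsSettings(
  queueUrl = "https://sqs.us-east-1.amazonaws.com/123456789012/my-queue"
)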
Example 19
Source File: EagerAWSDaleks.scala From aws-training-demo with Apache License 2.0
package aws.daleks.eager

import com.amazonaws.regions.Regions
import com.amazonaws.regions.Regions._
import com.amazonaws.auth.ClasspathPropertiesFileCredentialsProvider
import com.amazonaws.regions.Region
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain
import com.amazonaws.auth.profile.ProfileCredentialsProvider

object EagerAWSDaleks extends App {
  println("EXTERMINATE!")

  def findArg(arg: String): Option[String] = {
    val i = args.indexOf(s"-$arg")
    if (i >= 0) Option(args(i + 1)) else None
  }

  val profile = findArg("profile")

  implicit val credentials = profile match {
    case Some(prf) => new ProfileCredentialsProvider(prf)
    case None => new DefaultAWSCredentialsProviderChain
  }

  val excludedRegions = List(GovCloud, CN_NORTH_1)
  val regions = Regions.values diff excludedRegions

  println(s"Exterminating regions [${regions.mkString(",")}]")

  val globals = List(
    new EagerRoute53Dalek(),
    new EagerIAMDalek())

  val regionals = regions
    .map { Region.getRegion(_) }
    .par
    .foreach { implicit region =>
      println("Preparing extermination of region [" + region + "]")
      List(new EagerS3Dalek,
        new EagerBeanstalkDalek,
        new EagerCloudFormationDalek,
        new EagerDynamoDBDalek,
        new EagerElastiCacheDalek,
        new EagerEMRDalek,
        new EagerRDSDalek,
        new EagerS3Dalek,
        new EagerSNSDalek,
        new EagerSQSDalek,
        new EagerEC2Dalek) foreach { _.exterminate }
    }

  globals foreach { _.exterminate }

  println("EXTERMINATE!")
}