com.amazonaws.regions.Regions Scala Examples
The following examples show how to use com.amazonaws.regions.Regions.
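Regions is the AWS SDK for Java v1 enum of region identifiers. As a quick orientation before the examples, here is a minimal sketch of the three calls that recur throughout them (the region choice is arbitrary):

import com.amazonaws.regions.{Region, Regions}

object RegionsBasics extends App {
  val r: Regions = Regions.fromName("eu-west-1")            // parse a region name into the enum
  println(r.getName)                                        // "eu-west-1"
  val region: Region = Region.getRegion(Regions.EU_WEST_1)  // metadata-rich Region wrapper
  println(region.getServiceEndpoint("s3"))                  // region-specific service endpoint
}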
Example 1
Source File: AmazonKinesisFirehose.scala from aws-kinesis-scala with Apache License 2.0
package jp.co.bizreach.kinesisfirehose

import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.regions.Regions
import com.amazonaws.services.kinesisfirehose.{AmazonKinesisFirehose => AWSKinesisFirehose, AmazonKinesisFirehoseClientBuilder}
import jp.co.bizreach.kinesisfirehose.action.PutRecordAction

object AmazonKinesisFirehose {

  def apply()(implicit region: Regions): AmazonKinesisFirehose = {
    new AmazonKinesisFirehose(AmazonKinesisFirehoseClientBuilder.standard
      .withRegion(region)
      .build()) with PutRecordAction
  }

  def apply(credentials: AWSCredentialsProvider)(implicit region: Regions): AmazonKinesisFirehose = {
    new AmazonKinesisFirehose(AmazonKinesisFirehoseClientBuilder.standard
      .withCredentials(credentials)
      .withRegion(region)
      .build()) with PutRecordAction
  }

  def apply(config: ClientConfiguration)(implicit region: Regions): AmazonKinesisFirehose = {
    new AmazonKinesisFirehose(AmazonKinesisFirehoseClientBuilder.standard
      .withClientConfiguration(config)
      .withRegion(region)
      .build()) with PutRecordAction
  }

  def apply(credentials: AWSCredentialsProvider, config: ClientConfiguration)(implicit region: Regions): AmazonKinesisFirehose = {
    new AmazonKinesisFirehose(AmazonKinesisFirehoseClientBuilder.standard
      .withCredentials(credentials)
      .withClientConfiguration(config)
      .withRegion(region)
      .build()) with PutRecordAction
  }

  def apply(client: AWSKinesisFirehose): AmazonKinesisFirehose = {
    new AmazonKinesisFirehose(client) with PutRecordAction
  }
}

class AmazonKinesisFirehose(client: AWSKinesisFirehose) { self: PutRecordAction =>

  def putRecordBatchWithRetry(request: PutRecordBatchRequest): Seq[Either[PutRecordBatchResponseEntry, PutRecordBatchResponseEntry]] = {
    withPutBatchRetry(request.records){ entry =>
      client.putRecordBatch(PutRecordBatchRequest(request.deliveryStreamName, entry))
    }
  }

  def shutdown(): Unit = {
    client.shutdown()
  }
}
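Every overload above resolves the target region from an implicit Regions value, so callers declare it once. A minimal usage sketch, assuming a delivery stream named "my-stream" already exists:

import com.amazonaws.regions.Regions
import jp.co.bizreach.kinesisfirehose._

implicit val region: Regions = Regions.US_EAST_1  // picked up by apply()

val firehose = AmazonKinesisFirehose()
// ... build a PutRecordBatchRequest for "my-stream" and send it via putRecordBatchWithRetry ...
firehose.shutdown()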
Example 2
Source File: ImageURIProvider.scala from sagemaker-spark with Apache License 2.0
package com.amazonaws.services.sagemaker.sparksdk.algorithms

import com.amazonaws.regions.Regions

private[algorithms] object SageMakerImageURIProvider {

  def isChinaRegion(region: String): Boolean = {
    val chinaRegions = Set(
      Regions.CN_NORTH_1.getName,
      Regions.CN_NORTHWEST_1.getName
    )
    chinaRegions.contains(region)
  }

  def getImage(region: String, regionAccountMap: Map[String, String],
               algorithmName: String, algorithmTag: String): String = {
    val account = regionAccountMap.get(region)
    account match {
      case None => throw new RuntimeException(s"The region $region is not supported. " +
        s"Supported Regions: ${regionAccountMap.keys.mkString(", ")}")
      case _ if isChinaRegion(region) =>
        s"${account.get}.dkr.ecr.${region}.amazonaws.com.cn/${algorithmName}:${algorithmTag}"
      case _ =>
        s"${account.get}.dkr.ecr.${region}.amazonaws.com/${algorithmName}:${algorithmTag}"
    }
  }
}

private[algorithms] object SagerMakerRegionAccountMaps {
  // For KMeans, PCA, Linear Learner, FactorizationMachines
  val AlgorithmsAccountMap: Map[String, String] = Map(
    Regions.EU_WEST_1.getName -> "438346466558",
    Regions.US_EAST_1.getName -> "382416733822",
    Regions.US_EAST_2.getName -> "404615174143",
    Regions.US_WEST_2.getName -> "174872318107",
    Regions.AP_NORTHEAST_1.getName -> "351501993468",
    Regions.AP_NORTHEAST_2.getName -> "835164637446",
    Regions.EU_CENTRAL_1.getName -> "664544806723",
    Regions.AP_SOUTHEAST_2.getName -> "712309505854",
    Regions.GovCloud.getName -> "226302683700",
    Regions.AP_SOUTH_1.getName -> "991648021394",
    Regions.AP_SOUTHEAST_1.getName -> "475088953585",
    Regions.CA_CENTRAL_1.getName -> "469771592824",
    Regions.EU_WEST_2.getName -> "644912444149",
    Regions.US_WEST_1.getName -> "632365934929",
    Regions.AP_EAST_1.getName -> "286214385809",
    Regions.SA_EAST_1.getName -> "855470959533",
    Regions.EU_NORTH_1.getName -> "669576153137",
    Regions.EU_WEST_3.getName -> "749696950732",
    Regions.ME_SOUTH_1.getName -> "249704162688",
    Regions.CN_NORTH_1.getName -> "390948362332",
    Regions.CN_NORTHWEST_1.getName -> "387376663083"
  )

  // For LDA
  val LDAAccountMap: Map[String, String] = Map(
    Regions.EU_WEST_1.getName -> "999678624901",
    Regions.US_EAST_1.getName -> "766337827248",
    Regions.US_EAST_2.getName -> "999911452149",
    Regions.US_WEST_2.getName -> "266724342769",
    Regions.AP_NORTHEAST_1.getName -> "258307448986",
    Regions.AP_NORTHEAST_2.getName -> "293181348795",
    Regions.EU_CENTRAL_1.getName -> "353608530281",
    Regions.AP_SOUTHEAST_2.getName -> "297031611018",
    Regions.GovCloud.getName -> "226302683700",
    Regions.AP_SOUTH_1.getName -> "991648021394",
    Regions.AP_SOUTHEAST_1.getName -> "475088953585",
    Regions.CA_CENTRAL_1.getName -> "469771592824",
    Regions.EU_WEST_2.getName -> "644912444149",
    Regions.US_WEST_1.getName -> "632365934929"
  )

  // For XGBoost
  val ApplicationsAccountMap: Map[String, String] = Map(
    Regions.EU_WEST_1.getName -> "685385470294",
    Regions.US_EAST_1.getName -> "811284229777",
    Regions.US_EAST_2.getName -> "825641698319",
    Regions.US_WEST_2.getName -> "433757028032",
    Regions.AP_NORTHEAST_1.getName -> "501404015308",
    Regions.AP_NORTHEAST_2.getName -> "306986355934",
    Regions.EU_CENTRAL_1.getName -> "813361260812",
    Regions.AP_SOUTHEAST_2.getName -> "544295431143",
    Regions.GovCloud.getName -> "226302683700",
    Regions.AP_SOUTH_1.getName -> "991648021394",
    Regions.AP_SOUTHEAST_1.getName -> "475088953585",
    Regions.CA_CENTRAL_1.getName -> "469771592824",
    Regions.EU_WEST_2.getName -> "644912444149",
    Regions.US_WEST_1.getName -> "632365934929",
    Regions.AP_EAST_1.getName -> "286214385809",
    Regions.SA_EAST_1.getName -> "855470959533",
    Regions.EU_NORTH_1.getName -> "669576153137",
    Regions.EU_WEST_3.getName -> "749696950732",
    Regions.ME_SOUTH_1.getName -> "249704162688",
    Regions.CN_NORTH_1.getName -> "390948362332",
    Regions.CN_NORTHWEST_1.getName -> "387376663083"
  )
}
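For reference, resolving an image URI with these maps looks like the following sketch (the algorithm name and tag are hypothetical; the expected output follows from the us-east-1 entry in the map above):

val uri = SageMakerImageURIProvider.getImage(
  region = Regions.US_EAST_1.getName,
  regionAccountMap = SagerMakerRegionAccountMaps.AlgorithmsAccountMap,
  algorithmName = "kmeans",  // hypothetical algorithm name
  algorithmTag = "1"         // hypothetical image tag
)
// uri == "382416733822.dkr.ecr.us-east-1.amazonaws.com/kmeans:1"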
Example 3
Source File: AwsNodeDiscoverer.scala from haystack-traces with Apache License 2.0
package com.expedia.www.haystack.trace.storage.backends.cassandra.client

import java.util.Collections

import com.amazonaws.regions.{Region, Regions}
import com.amazonaws.services.ec2.AmazonEC2Client
import com.amazonaws.services.ec2.model.{DescribeInstancesRequest, Filter, Instance, InstanceStateName}
import org.slf4j.LoggerFactory

import scala.collection.JavaConverters._

object AwsNodeDiscoverer {
  private val LOGGER = LoggerFactory.getLogger(AwsNodeDiscoverer.getClass)

  private[haystack] def discover(client: AmazonEC2Client, tags: Map[String, String]): Seq[String] = {
    val filters = tags.map { case (key, value) => new Filter("tag:" + key, Collections.singletonList(value)) }
    val request = new DescribeInstancesRequest().withFilters(filters.asJavaCollection)

    val result = client.describeInstances(request)
    val nodes = result.getReservations
      .asScala
      .flatMap(_.getInstances.asScala)
      .filter(isValidInstance)
      .map(_.getPrivateIpAddress)
    LOGGER.info("EC2 nodes discovered [{}]", nodes.mkString(","))
    nodes
  }

  // check if an ec2 instance is in running state
  private def isValidInstance(instance: Instance): Boolean = {
    InstanceStateName.Running.toString.equals(instance.getState.getName)
  }
}
Example 4
Source File: SQSSupport.scala from sqs-kafka-connect with Apache License 2.0
package com.hivehome.kafka.connect.sqs

import com.amazonaws.regions.{Region, Regions}
import com.amazonaws.services.sqs.AmazonSQSClient
import com.amazonaws.services.sqs.model.{CreateQueueRequest, SendMessageRequest, SendMessageResult}
import org.scalacheck.Gen

import scala.collection.JavaConverters._

trait SQSSupport {
  val queueName = Gen.alphaStr
    .map(a => s"test-connect-${a.take(10)}")
    .sample.get
  var queueUrl: String = null

  val sqs = new AmazonSQSClient()
  sqs.setRegion(Region.getRegion(Regions.EU_WEST_1))

  def createQueue(): Unit = {
    val request = new CreateQueueRequest(queueName)
      .withAttributes(Map("VisibilityTimeout" -> "2").asJava)
    val result = sqs.createQueue(request)
    queueUrl = result.getQueueUrl
    println("Url for created Queue = " + queueUrl)
  }

  def deleteQueue(): Unit = {
    sqs.deleteQueue(queueUrl)
  }

  def sendMessage(msgText: String): SendMessageResult = {
    sqs.sendMessage(new SendMessageRequest()
      .withQueueUrl(queueUrl)
      .withMessageBody(msgText))
  }
}
Example 5
Source File: SQSConsumer.scala from sqs-kafka-connect with Apache License 2.0
package com.hivehome.kafka.connect.sqs

import javax.jms.{JMSException, MessageConsumer, Session}

import com.amazon.sqs.javamessaging.SQSConnectionFactory
import com.amazonaws.auth.{AWSCredentialsProviderChain, BasicAWSCredentials, DefaultAWSCredentialsProviderChain}
import com.amazonaws.internal.StaticCredentialsProvider
import com.amazonaws.regions.{Region, Regions}

object SQSConsumer {

  def apply(conf: Conf): MessageConsumer = {
    val chain = buildCredentialsProviderChain(conf)
    createSQSConsumer(conf, chain)
  }

  @throws(classOf[JMSException])
  private def createSQSConsumer(conf: Conf, chain: AWSCredentialsProviderChain): MessageConsumer = {
    val region = Regions.fromName(conf.awsRegion)
    val connectionFactory = SQSConnectionFactory.builder
      .withRegion(Region.getRegion(region))
      .withAWSCredentialsProvider(chain)
      .build

    val connection = connectionFactory.createConnection
    val session = connection.createSession(false, Session.CLIENT_ACKNOWLEDGE)
    val queue = session.createQueue(conf.queueName.get)
    val consumer = session.createConsumer(queue)
    connection.start()
    consumer
  }

  private def buildCredentialsProviderChain(conf: Conf): AWSCredentialsProviderChain = {
    (conf.awsKey, conf.awsSecret) match {
      case (Some(key), Some(secret)) =>
        val credentials = new BasicAWSCredentials(key, secret)
        new AWSCredentialsProviderChain(new StaticCredentialsProvider(credentials), new DefaultAWSCredentialsProviderChain)
      case _ => new DefaultAWSCredentialsProviderChain
    }
  }
}
Example 6
Source File: DynamoDBContainerSpecSupport.scala from reactive-aws-clients with MIT License
package com.github.j5ik2o.reactive.aws.dynamodb

import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.regions.Regions
import com.amazonaws.services.dynamodbv2.{AmazonDynamoDB, AmazonDynamoDBClientBuilder}
import com.github.j5ik2o.reactive.aws.test.RandomPortSupport
import com.spotify.docker.client.{DefaultDockerClient, DockerClient}
import com.whisk.docker.impl.spotify.SpotifyDockerFactory
import com.whisk.docker.scalatest.DockerTestKit
import com.whisk.docker.{DockerCommandExecutor, DockerContainer, DockerContainerState, DockerFactory, DockerReadyChecker}
import org.scalatest.Suite

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

trait DynamoDBContainerSpecSupport extends DockerTestKit with RandomPortSupport {
  this: Suite =>

  protected val connectTimeout: FiniteDuration = 3 seconds
  protected val readTimeout: FiniteDuration = 3 seconds
  protected val readyCheckInterval: FiniteDuration = 1 seconds

  protected val dockerClient: DockerClient =
    DefaultDockerClient
      .fromEnv()
      .connectTimeoutMillis(connectTimeout.toMillis)
      .readTimeoutMillis(readTimeout.toMillis).build()

  protected lazy val accessKeyId = "x"
  protected lazy val secretAccessKey = "x"
  protected lazy val endpoint = s"http://127.0.0.1:$dynamoDBPort"

  protected def dynamoDbClient: AmazonDynamoDB =
    AmazonDynamoDBClientBuilder
      .standard().withCredentials(
        new AWSStaticCredentialsProvider(
          new BasicAWSCredentials(accessKeyId, secretAccessKey)
        )
      ).withEndpointConfiguration(
        new EndpointConfiguration(endpoint, Regions.AP_NORTHEAST_1.getName)
      ).build()

  override implicit def dockerFactory: DockerFactory =
    new SpotifyDockerFactory(dockerClient)

  protected class DynamoDBDockerReadyChecker(dynamoDbClient: AmazonDynamoDB) extends DockerReadyChecker {
    override def apply(container: DockerContainerState)(
        implicit docker: DockerCommandExecutor,
        ec: ExecutionContext
    ): Future[Boolean] = Future.successful {
      try {
        dynamoDbClient.listTables(1)
        Thread.sleep(readyCheckInterval.toMillis)
        true
      } catch {
        case _: Exception =>
          Thread.sleep(readyCheckInterval.toMillis)
          false
      }
    }
  }

  protected lazy val dynamoDBPort: Int = temporaryServerPort()

  protected lazy val dynamoDBContainer: DockerContainer =
    DockerContainer("amazon/dynamodb-local:1.12.0")
      .withPorts(8000 -> Some(dynamoDBPort))
      .withReadyChecker(new DynamoDBDockerReadyChecker(dynamoDbClient))

  abstract override def dockerContainers: List[DockerContainer] =
    dynamoDBContainer :: super.dockerContainers
}
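Mixed into a suite, the trait boots the Dockerized DynamoDB before the tests and tears it down afterwards. A hypothetical spec showing the wiring (the suite class and ScalaTest style are assumptions, not part of the source):

import org.scalatest.{FreeSpec, Matchers}

// Hypothetical test suite; the mixed-in trait supplies the container-backed client.
class DynamoDBClientSpec extends FreeSpec with Matchers with DynamoDBContainerSpecSupport {
  "dynamoDbClient" - {
    "should answer listTables once the container is ready" in {
      dynamoDbClient.listTables().getTableNames.size() should be >= 0
    }
  }
}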
Example 7
Source File: DynamoDBEmbeddedSpecSupport.scala from reactive-aws-clients with MIT License
package com.github.j5ik2o.reactive.aws.dynamodb

import java.io.File
import java.util.logging.{Level, Logger}

import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.regions.Regions
import com.amazonaws.services.dynamodbv2.local.server.{DynamoDBProxyServer, LocalDynamoDBRequestHandler, LocalDynamoDBServerHandler}
import com.amazonaws.services.dynamodbv2.{AmazonDynamoDB, AmazonDynamoDBClientBuilder}
import com.github.j5ik2o.reactive.aws.test.RandomPortSupport
import org.scalatest.{BeforeAndAfterAll, Suite}
import org.seasar.util.io.ResourceUtil

import scala.concurrent.duration._

@SuppressWarnings(Array("org.wartremover.warts.Null", "org.wartremover.warts.Var", "org.wartremover.warts.While"))
trait DynamoDBEmbeddedSpecSupport extends BeforeAndAfterAll with RandomPortSupport { this: Suite =>

  protected val waitIntervalForDynamoDBLocal: FiniteDuration = 500 milliseconds

  protected def sqlite4javaLibraryPath: File = new File(ResourceUtil.getBuildDir(getClass), "/../../../native-libs")

  protected val region: Regions = Regions.AP_NORTHEAST_1

  protected lazy val accessKeyId: String = "x"

  protected lazy val secretAccessKey: String = "x"

  protected lazy val dynamoDBPort: Int = temporaryServerPort()

  protected lazy val dynamoDBEndpoint: String = s"http://127.0.0.1:$dynamoDBPort"

  protected lazy val dynamoDBProxyServer: DynamoDBProxyServer = {
    System.setProperty("sqlite4java.library.path", sqlite4javaLibraryPath.toString)
    val inMemory = true
    // scalastyle:off
    val dbPath = null
    val sharedDb = false
    val corsParams = null
    // scalastyle:on
    new DynamoDBProxyServer(
      dynamoDBPort,
      new LocalDynamoDBServerHandler(
        new LocalDynamoDBRequestHandler(0, inMemory, dbPath, sharedDb, false),
        corsParams
      )
    )
  }

  protected lazy val dynamoDBClient: AmazonDynamoDB = {
    AmazonDynamoDBClientBuilder
      .standard().withCredentials(
        new AWSStaticCredentialsProvider(
          new BasicAWSCredentials(accessKeyId, secretAccessKey)
        )
      )
      .withEndpointConfiguration(
        new EndpointConfiguration(dynamoDBEndpoint, region.getName)
      ).build()
  }

  protected def waitDynamoDBLocal(): Unit = {
    var isWaken: Boolean = false
    while (!isWaken) {
      try {
        dynamoDBClient.listTables()
        isWaken = true
      } catch {
        case _: Exception =>
          Thread.sleep(waitIntervalForDynamoDBLocal.toMillis)
      }
    }
  }

  protected def startDynamoDBLocal(): Unit = {
    Logger.getLogger("com.almworks.sqlite4java").setLevel(Level.OFF)
    dynamoDBProxyServer.start()
  }

  protected def shutdownDynamoDBLocal(): Unit = {
    dynamoDBProxyServer.stop()
  }

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    startDynamoDBLocal()
    waitDynamoDBLocal()
  }

  override protected def afterAll(): Unit = {
    shutdownDynamoDBLocal()
    super.afterAll()
  }
}
Example 8
Source File: ClientProvider.scala from reactive-nakadi with MIT License
package org.zalando.react.nakadi.commit.handlers.aws

import com.amazonaws.regions.Regions
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain
import com.amazonaws.services.dynamodbv2.document.DynamoDB
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient
import org.zalando.react.nakadi.properties.CommitProperties

trait Provider {
  def client: DynamoDB
  def leaseProperties: CommitProperties
}

class ClientProvider(override val leaseProperties: CommitProperties) extends Provider {

  private val credentialsProviderChain = new DefaultAWSCredentialsProviderChain()
  private val region = Regions.fromName(leaseProperties.awsCommitRegion)

  override val client: DynamoDB = {
    val c = new AmazonDynamoDBClient(credentialsProviderChain)
    c.configureRegion(region)
    new DynamoDB(c)
  }
}
Example 9
Source File: InvokeMigrationHandler.scala from flyway-awslambda with MIT License
package crossroad0201.aws.flywaylambda

import java.io.{BufferedOutputStream, InputStream, OutputStream, PrintWriter}

import com.amazonaws.regions.{Region, Regions}
import com.amazonaws.services.lambda.runtime.{Context, RequestStreamHandler}
import com.amazonaws.services.s3.{AmazonS3, AmazonS3Client}

import scala.io.{BufferedSource, Codec}
import scala.util.{Failure, Success, Try}

class InvokeMigrationHandler extends RequestStreamHandler with S3MigrationHandlerBase {
  type BucketName = String
  type Prefix = String
  type ConfFileName = String

  override def handleRequest(input: InputStream, output: OutputStream, context: Context): Unit = {
    def parseInput: Try[(BucketName, Prefix, ConfFileName)] = Try {
      import spray.json._
      import DefaultJsonProtocol._

      val json = new BufferedSource(input)(Codec("UTF-8")).mkString
      val jsObj = JsonParser(json).asJsObject
      jsObj.getFields("bucket_name", "prefix") match {
        case Seq(JsString(b), JsString(p)) =>
          jsObj.getFields("flyway_conf") match {
            case Seq(JsString(c)) => (b, p, c)
            case _                => (b, p, "flyway.conf")
          }
        case _ => throw new IllegalArgumentException(s"Missing required keys [bucket_name, prefix]. - $json")
      }
    }

    val logger = context.getLogger

    implicit val s3Client: AmazonS3 = new AmazonS3Client().withRegion(Region.getRegion(Regions.fromName(sys.env("AWS_REGION"))))

    (for {
      i <- parseInput
      _ = logger.log(s"Flyway migration start. by invoke lambda function(${i._1}, ${i._2}, ${i._3}).")
      r <- migrate(i._1, i._2, i._3)(context, s3Client)
    } yield r) match {
      case Success(r) =>
        logger.log(r)
        val b = r.getBytes("UTF-8")
        val bout = new BufferedOutputStream(output)
        bout.write(b)
        bout.flush()
      case Failure(e) =>
        e.printStackTrace()
        val w = new PrintWriter(output)
        w.write(e.toString)
        w.flush()
    }
  }
}
Example 10
Source File: S3EventMigrationHandler.scala from flyway-awslambda with MIT License
package crossroad0201.aws.flywaylambda

import com.amazonaws.regions.{Region, Regions}
import com.amazonaws.services.lambda.runtime.events.S3Event
import com.amazonaws.services.lambda.runtime.{Context, RequestHandler}
import com.amazonaws.services.s3.{AmazonS3, AmazonS3Client}

import scala.util.{Failure, Success}

class S3EventMigrationHandler extends RequestHandler[S3Event, Unit] with S3MigrationHandlerBase {

  override def handleRequest(event: S3Event, context: Context): Unit = {
    val logger = context.getLogger

    implicit val s3Client: AmazonS3 = new AmazonS3Client().withRegion(Region.getRegion(Regions.fromName(event.getRecords.get(0).getAwsRegion)))

    logger.log(s"Flyway migration start. by ${event.getRecords.get(0).getEventName} s3://${event.getRecords.get(0).getS3.getBucket.getName}/${event.getRecords.get(0).getS3.getObject.getKey}")

    val s3 = event.getRecords.get(0).getS3
    val migrationPrefix = {
      val objectKey = s3.getObject.getKey
      objectKey.substring(0, objectKey.lastIndexOf("/"))
    }

    migrate(s3.getBucket.getName, migrationPrefix)(context, s3Client) match {
      case Success(r) => logger.log(r)
      case Failure(e) => e.printStackTrace()
    }
  }
}
Example 11
Source File: SendSqsMessage.scala from hyperion with Apache License 2.0
package com.krux.hyperion.contrib.activity.notification

import scala.collection.JavaConverters._

import com.amazonaws.regions.Regions
import com.amazonaws.services.sqs.AmazonSQSClientBuilder
import com.amazonaws.services.sqs.model.{MessageAttributeValue, SendMessageRequest}
import scopt.OptionParser

object SendSqsMessage {

  case class Options(
    region: Option[String] = None,
    queueUrl: Option[String] = None,
    message: Option[String] = None,
    delaySeconds: Option[Int] = Option(0),
    attributes: Map[String, String] = Map.empty
  )

  def apply(options: Options): Boolean = try {
    // Set up the SQS client
    val sqsBuilder = AmazonSQSClientBuilder.standard()
    val sqs = options.region
      .map(regionName => sqsBuilder.withRegion(Regions.fromName(regionName)))
      .getOrElse(sqsBuilder)
      .build()

    // Create the request from the options specified
    val request = new SendMessageRequest()
    options.queueUrl.foreach(request.setQueueUrl)
    options.message.foreach(request.setMessageBody)
    options.delaySeconds.foreach(request.setDelaySeconds(_))

    // Add the message attributes if any
    if (options.attributes.nonEmpty) {
      request.setMessageAttributes(options.attributes.flatMap { case (k, v) =>
        k.split(":").toList match {
          case key :: dataType :: Nil => Option(key -> new MessageAttributeValue().withStringValue(v).withDataType(dataType))
          case key :: Nil             => Option(key -> new MessageAttributeValue().withStringValue(v).withDataType("String"))
          case _                      => None
        }
      }.asJava)
    }

    // Publish the message
    val response = sqs.sendMessage(request)

    // Print the message id to output
    println(response.getMessageId)
    true
  } catch {
    case e: Throwable =>
      System.err.println(e.getMessage)
      false
  }

  def main(args: Array[String]): Unit = {
    val parser = new OptionParser[Options]("hyperion-notification-sqs-activity") {
      override def showUsageOnError = false

      note("Sends a notification message to an SQS queue.")

      help("help").text("prints this usage text")

      opt[String]("region").valueName("REGION").optional().action((x, c) => c.copy(region = Option(x)))
        .text("Sets the region to REGION")

      opt[String]("queue").valueName("URL").required().action((x, c) => c.copy(queueUrl = Option(x)))
        .text("Sends the message to the given queue URL")

      opt[String]("message").valueName("TEXT").required().action((x, c) => c.copy(message = Option(x)))
        .text("Sends the given TEXT as the message")

      opt[Int]("delay").valueName("SECONDS").optional().action((x, c) => c.copy(delaySeconds = Option(x)))
        .text("Delays sending the message for SECONDS")

      opt[Map[String, String]]("attributes").valueName("k1=v1,k2:type=v2...").action((x, c) => c.copy(attributes = x))
        .text("Sets the message attributes")
    }

    if (!parser.parse(args, Options()).exists(apply)) {
      parser.showUsageAsError
      System.exit(3)
    }
  }
}
Example 12
Source File: SendSnsMessage.scala from hyperion with Apache License 2.0
package com.krux.hyperion.contrib.activity.notification

import scala.collection.JavaConverters._

import com.amazonaws.regions.Regions
import com.amazonaws.services.sns.AmazonSNSClientBuilder
import com.amazonaws.services.sns.model.{MessageAttributeValue, PublishRequest}
import scopt.OptionParser

object SendSnsMessage {

  case class Options(
    region: Option[String] = None,
    topicArn: Option[String] = None,
    message: Option[String] = None,
    subject: Option[String] = None,
    json: Boolean = false,
    attributes: Map[String, String] = Map.empty
  )

  def apply(options: Options): Boolean = try {
    // Set up the SNS client
    val snsClientBuilder: AmazonSNSClientBuilder = AmazonSNSClientBuilder.standard()
    val sns = options.region
      .map(regionName => snsClientBuilder.withRegion(Regions.fromName(regionName)))
      .getOrElse(snsClientBuilder)
      .build()

    // Create the request from the options specified
    val request = new PublishRequest()
    options.topicArn.foreach(request.setTopicArn)
    options.message.foreach(request.setMessage)
    options.subject.foreach(request.setSubject)
    if (options.json) request.setMessageStructure("json")

    // Add the message attributes if any
    if (options.attributes.nonEmpty) {
      request.setMessageAttributes(options.attributes.flatMap { case (k, v) =>
        k.split(":").toList match {
          case key :: dataType :: Nil => Option(key -> new MessageAttributeValue().withStringValue(v).withDataType(dataType))
          case key :: Nil             => Option(key -> new MessageAttributeValue().withStringValue(v).withDataType("String"))
          case _                      => None
        }
      }.asJava)
    }

    // Publish the message
    val response = sns.publish(request)

    // Print the message id to output
    println(response.getMessageId)
    true
  } catch {
    case e: Throwable =>
      System.err.println(s"${e.getMessage}\n")
      false
  }

  def main(args: Array[String]): Unit = {
    val parser = new OptionParser[Options]("hyperion-notification-sns-activity") {
      override def showUsageOnError = false

      note("Sends a notification message to an SNS topic.")

      help("help").text("prints this usage text")

      opt[String]("region").valueName("REGION").optional().action((x, c) => c.copy(region = Option(x)))
        .text("Sets the region to REGION")

      opt[String]("topic-arn").valueName("ARN").required().action((x, c) => c.copy(topicArn = Option(x)))
        .text("Sends the message to the given topic ARN")

      opt[Unit]("json").optional().action((_, c) => c.copy(json = true))
        .text("Interprets the message TEXT as a structured JSON message")

      opt[String]("message").valueName("TEXT").required().action((x, c) => c.copy(message = Option(x)))
        .text("Sends the given TEXT as the message")

      opt[String]("subject").valueName("TEXT").optional().action((x, c) => c.copy(subject = Option(x)))
        .text("Sends the given TEXT as the subject")

      opt[Map[String, String]]("attributes").valueName("k1=v1,k2:type=v2...").action((x, c) => c.copy(attributes = x))
        .text("Sets the message attributes")
    }

    if (!parser.parse(args, Options()).exists(apply)) {
      parser.showUsageAsError
      System.exit(3)
    }
  }
}
Example 13
Source File: AwsEndpointConfiguration.scala from embulk-output-s3_parquet with MIT License
package org.embulk.output.s3_parquet.aws

import java.util.Optional

import com.amazonaws.client.builder.AwsClientBuilder
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.regions.{DefaultAwsRegionProviderChain, Regions}
import org.embulk.config.{Config, ConfigDefault}
import org.embulk.output.s3_parquet.aws.AwsEndpointConfiguration.Task

import scala.util.Try

object AwsEndpointConfiguration {

  trait Task {

    @Config("endpoint")
    @ConfigDefault("null")
    def getEndpoint: Optional[String]

    @Config("region")
    @ConfigDefault("null")
    def getRegion: Optional[String]
  }

  def apply(task: Task): AwsEndpointConfiguration = {
    new AwsEndpointConfiguration(task)
  }
}

class AwsEndpointConfiguration(task: Task) {

  def configureAwsClientBuilder[S <: AwsClientBuilder[S, T], T](builder: AwsClientBuilder[S, T]): Unit = {
    if (task.getRegion.isPresent && task.getEndpoint.isPresent) {
      val ec = new EndpointConfiguration(task.getEndpoint.get, task.getRegion.get)
      builder.setEndpointConfiguration(ec)
    }
    else if (task.getRegion.isPresent && !task.getEndpoint.isPresent) {
      builder.setRegion(task.getRegion.get)
    }
    else if (!task.getRegion.isPresent && task.getEndpoint.isPresent) {
      val r: String = Try(new DefaultAwsRegionProviderChain().getRegion)
        .getOrElse(Regions.DEFAULT_REGION.getName)
      val e: String = task.getEndpoint.get
      val ec = new EndpointConfiguration(e, r)
      builder.setEndpointConfiguration(ec)
    }
  }
}
Example 14
Source File: AmazonKinesisAsync.scala from aws-kinesis-scala with Apache License 2.0
package jp.co.bizreach.kinesis

import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.handlers.AsyncHandler
import com.amazonaws.regions.Regions
import com.amazonaws.services.kinesis.{AmazonKinesisAsync => AWSKinesisAsync, AmazonKinesisAsyncClientBuilder}
import com.amazonaws.services.kinesis.model.{
  PutRecordRequest => AWSPutRecordRequest,
  PutRecordResult => AWSPutRecordResult,
  PutRecordsRequest => AWSPutRecordsRequest,
  PutRecordsResult => AWSPutRecordsResult}
import jp.co.bizreach.kinesis.action.PutRecordAction

import scala.concurrent._

object AmazonKinesisAsync {

  def apply()(implicit region: Regions): AmazonKinesisAsync = {
    new AmazonKinesisAsync(AmazonKinesisAsyncClientBuilder.standard
      .withRegion(region)
      .build()) with PutRecordAction
  }

  def apply(credentials: AWSCredentialsProvider)(implicit region: Regions): AmazonKinesisAsync = {
    new AmazonKinesisAsync(AmazonKinesisAsyncClientBuilder.standard
      .withCredentials(credentials)
      .withRegion(region)
      .build()) with PutRecordAction
  }

  def apply(config: ClientConfiguration)(implicit region: Regions): AmazonKinesisAsync = {
    new AmazonKinesisAsync(AmazonKinesisAsyncClientBuilder.standard
      .withClientConfiguration(config)
      .withRegion(region)
      .build()) with PutRecordAction
  }

  def apply(credentials: AWSCredentialsProvider, config: ClientConfiguration)(implicit region: Regions): AmazonKinesisAsync = {
    new AmazonKinesisAsync(AmazonKinesisAsyncClientBuilder.standard
      .withCredentials(credentials)
      .withClientConfiguration(config)
      .withRegion(region)
      .build()) with PutRecordAction
  }

  def apply(client: AWSKinesisAsync): AmazonKinesisAsync = {
    new AmazonKinesisAsync(client) with PutRecordAction
  }
}

class AmazonKinesisAsync(client: AWSKinesisAsync) { self: PutRecordAction =>

  def putRecordsAsyncWithRetry(request: PutRecordsRequest)
                              (implicit ec: ExecutionContext): Future[Seq[Either[PutRecordsResultEntry, PutRecordsResultEntry]]] = {
    withPutsAsyncRetry(request.records){ entry =>
      val p = Promise[PutRecordsResult]
      client.putRecordsAsync(PutRecordsRequest(request.streamName, entry),
        new AsyncHandler[AWSPutRecordsRequest, AWSPutRecordsResult]{
          override def onError(e: Exception): Unit = p.failure(e)
          override def onSuccess(req: AWSPutRecordsRequest, res: AWSPutRecordsResult): Unit = p.success(res)
        })
      p.future
    }
  }

  def shutdown(): Unit = {
    client.shutdown()
  }
}
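The async variant wraps the SDK's callback-based putRecordsAsync in a Scala Future. A usage sketch, assuming a stream named "my-stream"; the record payload is hypothetical, and the PutRecordsEntry field order (partition key, then data) follows its use in Example 18:

import scala.concurrent.ExecutionContext.Implicits.global
import com.amazonaws.regions.Regions
import jp.co.bizreach.kinesis._

implicit val region: Regions = Regions.US_EAST_1
val kinesis = AmazonKinesisAsync()

val entries = Seq(PutRecordsEntry("partition-key-1", "payload".getBytes("UTF-8")))
kinesis.putRecordsAsyncWithRetry(PutRecordsRequest("my-stream", entries))
  .foreach { results => println(s"failed records: ${results.count(_.isLeft)}") }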
Example 15
Source File: BufferedAmazonKinesis.scala from aws-kinesis-scala with Apache License 2.0
package jp.co.bizreach.kinesis

import java.util.concurrent.{Executors, TimeUnit}

import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.regions.Regions

object BufferedAmazonKinesis {

  def apply(amount: Int, interval: Long)(implicit region: Regions): BufferedAmazonKinesis = {
    new BufferedAmazonKinesis(AmazonKinesis(), amount, interval)
  }

  def apply(credentials: AWSCredentialsProvider, amount: Int, interval: Long)(implicit region: Regions): BufferedAmazonKinesis = {
    new BufferedAmazonKinesis(AmazonKinesis(credentials), amount, interval)
  }

  def apply(config: ClientConfiguration, amount: Int, interval: Long)(implicit region: Regions): BufferedAmazonKinesis = {
    new BufferedAmazonKinesis(AmazonKinesis(config), amount, interval)
  }

  def apply(credentials: AWSCredentialsProvider, config: ClientConfiguration, amount: Int, interval: Long)(implicit region: Regions): BufferedAmazonKinesis = {
    new BufferedAmazonKinesis(AmazonKinesis(credentials, config), amount, interval)
  }

  def apply(client: AmazonKinesis, amount: Int, interval: Long): BufferedAmazonKinesis = {
    new BufferedAmazonKinesis(client, amount, interval)
  }
}

// TODO Would like to provide DiskBufferClient also
class BufferedAmazonKinesis(client: AmazonKinesis, amount: Int, interval: Long) {

  private val queue = new java.util.concurrent.ConcurrentLinkedQueue[Any]

  private val scheduler = Executors.newSingleThreadScheduledExecutor()
  scheduler.scheduleAtFixedRate(new BufferedKinesisSendTask(), 0, interval, TimeUnit.MILLISECONDS)

  def putRecord(request: PutRecordRequest): Unit = queue.add(request)

  def putRecords(request: PutRecordsRequest): Unit = queue.add(request)

  def shutdown(): Unit = {
    scheduler.shutdownNow()
    client.shutdown()
  }

  def error(e: Exception): Unit = {
    e.printStackTrace()
  }

  private class BufferedKinesisSendTask extends Runnable {
    override def run(): Unit = {
      try {
        val requests = for (i <- 1 to amount if queue.size() != 0) yield queue.poll()
        requests.foreach {
          case r: PutRecordRequest  => client.putRecord(r)
          case r: PutRecordsRequest => client.putRecords(r)
        }
      } catch {
        case e: Exception => error(e)
      }
    }
  }
}
Example 16
Source File: AmazonKinesis.scala from aws-kinesis-scala with Apache License 2.0
package jp.co.bizreach.kinesis

import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.regions.Regions
import com.amazonaws.services.kinesis.{AmazonKinesisClientBuilder, AmazonKinesis => AWSKinesis}
import jp.co.bizreach.kinesis.action.PutRecordAction

object AmazonKinesis {

  def apply()(implicit region: Regions): AmazonKinesis = {
    new AmazonKinesis(AmazonKinesisClientBuilder.standard
      .withRegion(region)
      .build()) with PutRecordAction
  }

  def apply(credentials: AWSCredentialsProvider)(implicit region: Regions): AmazonKinesis = {
    new AmazonKinesis(AmazonKinesisClientBuilder.standard
      .withCredentials(credentials)
      .withRegion(region)
      .build()) with PutRecordAction
  }

  def apply(endpointConfiguration: EndpointConfiguration): AmazonKinesis = {
    new AmazonKinesis(AmazonKinesisClientBuilder.standard
      .withEndpointConfiguration(endpointConfiguration)
      .build()) with PutRecordAction
  }

  def apply(config: ClientConfiguration)(implicit region: Regions): AmazonKinesis = {
    new AmazonKinesis(AmazonKinesisClientBuilder.standard
      .withClientConfiguration(config)
      .withRegion(region)
      .build()) with PutRecordAction
  }

  def apply(config: ClientConfiguration, endpointConfiguration: EndpointConfiguration): AmazonKinesis = {
    new AmazonKinesis(AmazonKinesisClientBuilder.standard
      .withClientConfiguration(config)
      .withEndpointConfiguration(endpointConfiguration)
      .build()) with PutRecordAction
  }

  def apply(credentials: AWSCredentialsProvider, endpointConfiguration: EndpointConfiguration): AmazonKinesis = {
    new AmazonKinesis(AmazonKinesisClientBuilder.standard
      .withCredentials(credentials)
      .withEndpointConfiguration(endpointConfiguration)
      .build()) with PutRecordAction
  }

  def apply(credentials: AWSCredentialsProvider, config: ClientConfiguration)(implicit region: Regions): AmazonKinesis = {
    new AmazonKinesis(AmazonKinesisClientBuilder.standard
      .withCredentials(credentials)
      .withClientConfiguration(config)
      .withRegion(region)
      .build()) with PutRecordAction
  }

  def apply(credentials: AWSCredentialsProvider, config: ClientConfiguration, endpointConfiguration: EndpointConfiguration): AmazonKinesis = {
    new AmazonKinesis(AmazonKinesisClientBuilder.standard
      .withCredentials(credentials)
      .withClientConfiguration(config)
      .withEndpointConfiguration(endpointConfiguration)
      .build()) with PutRecordAction
  }

  def apply(client: AWSKinesis): AmazonKinesis = {
    new AmazonKinesis(client) with PutRecordAction
  }
}

class AmazonKinesis(client: AWSKinesis){ self: PutRecordAction =>

  def putRecordsWithRetry(request: PutRecordsRequest): Seq[Either[PutRecordsResultEntry, PutRecordsResultEntry]] = {
    withPutsRetry(request.records){ entry =>
      client.putRecords(PutRecordsRequest(request.streamName, entry))
    }
  }

  def shutdown(): Unit = {
    client.shutdown()
  }
}
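Besides the implicit-region overloads, the EndpointConfiguration overloads above make it easy to target a Kinesis-compatible emulator. A sketch, assuming a local endpoint (the URL is hypothetical):

import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.regions.Regions
import jp.co.bizreach.kinesis._

// Point the client at a local Kinesis-compatible endpoint instead of a real region.
val local: AmazonKinesis = AmazonKinesis(
  new EndpointConfiguration("http://localhost:4567", Regions.US_EAST_1.getName))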
Example 17
Source File: package.scala from aws-kinesis-scala with Apache License 2.0
package jp.co.bizreach.kinesis

import com.amazonaws.auth.DefaultAWSCredentialsProviderChain
import com.amazonaws.regions.Regions
import org.apache.spark.rdd.RDD

package object spark {

  implicit class RichRDD[A <: AnyRef](rdd: RDD[A]) {
    def saveToKinesis(streamName: String, region: Regions,
                      credentials: SparkAWSCredentials = DefaultCredentials,
                      chunk: Int = recordsMaxCount,
                      endpoint: Option[String] = None): Unit =
      if (!rdd.isEmpty)
        rdd.sparkContext.runJob(rdd, new KinesisRDDWriter(streamName, region, credentials, chunk, endpoint).write _)
  }
}
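With the implicit class in scope, any RDD of serializable objects gains saveToKinesis. A usage sketch (the Event case class and the stream name are hypothetical):

import com.amazonaws.regions.Regions
import jp.co.bizreach.kinesis.spark._
import org.apache.spark.{SparkConf, SparkContext}

case class Event(id: Int, body: String)  // hypothetical payload type

val sc = new SparkContext(new SparkConf().setAppName("kinesis-example"))
sc.parallelize(Seq(Event(1, "a"), Event(2, "b")))
  .saveToKinesis("my-stream", Regions.US_EAST_1)  // hypothetical stream name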
Example 18
Source File: KinesisRDDWriter.scala from aws-kinesis-scala with Apache License 2.0
package jp.co.bizreach.kinesis.spark

import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.regions.Regions
import jp.co.bizreach.kinesis._
import org.apache.commons.codec.digest.DigestUtils
import org.apache.spark.TaskContext
import org.json4s.jackson.JsonMethods
import org.json4s.{DefaultFormats, Extraction, Formats}
import org.slf4j.LoggerFactory

class KinesisRDDWriter[A <: AnyRef](streamName: String, region: Regions,
                                    credentials: SparkAWSCredentials,
                                    chunk: Int, endpoint: Option[String]) extends Serializable {

  private val logger = LoggerFactory.getLogger(getClass)

  def write(task: TaskContext, data: Iterator[A]): Unit = {
    // send data, including retry
    def put(a: Seq[PutRecordsEntry]) =
      endpoint.map(e => KinesisRDDWriter.endpointClient(credentials)(e)(region))
        .getOrElse(KinesisRDDWriter.client(credentials)(region))
        .putRecordsWithRetry(PutRecordsRequest(streamName, a))
        .zipWithIndex.collect { case (Left(e), i) => a(i) -> s"${e.errorCode}: ${e.errorMessage}" }

    val errors = data.foldLeft(
      (Nil: Seq[PutRecordsEntry], Nil: Seq[(PutRecordsEntry, String)])
    ){ (z, x) =>
      val (records, failed) = z
      val payload = serialize(x)
      val entry = PutRecordsEntry(DigestUtils.sha256Hex(payload), payload)

      // record exceeds max size
      if (entry.recordSize > recordMaxDataSize)
        records -> ((entry -> "per-record size limit") +: failed)

      // execute
      else if (records.size >= chunk || (records.map(_.recordSize).sum + entry.recordSize) >= recordsMaxDataSize)
        (entry +: Nil) -> (put(records) ++ failed)

      // buffering
      else
        (entry +: records) -> failed
    } match {
      case (Nil, e)  => e
      case (rest, e) => put(rest) ++ e
    }

    // failed records
    if (errors.nonEmpty) dump(errors)
  }

  protected def dump(errors: Seq[(PutRecordsEntry, String)]): Unit =
    logger.error(
      s"""Could not put record, count: ${errors.size}, following details:
         |${errors map { case (entry, message) => message + "\n" + new String(entry.data, "UTF-8") } mkString "\n"}
       """.stripMargin)

  protected def serialize(a: A)(implicit formats: Formats = DefaultFormats): Array[Byte] =
    JsonMethods.mapper.writeValueAsBytes(Extraction.decompose(a)(formats))
}

object KinesisRDDWriter {

  private val cache = collection.concurrent.TrieMap.empty[Regions, AmazonKinesis]

  private val client: SparkAWSCredentials => Regions => AmazonKinesis = {
    credentials => implicit region =>
      cache.getOrElseUpdate(region, AmazonKinesis(credentials.provider))
  }

  private val endpointClient: SparkAWSCredentials => String => Regions => AmazonKinesis = {
    credentials => endpoint => implicit region =>
      cache.getOrElseUpdate(region, AmazonKinesis(credentials.provider, new EndpointConfiguration(endpoint, region.getName)))
  }
}
Example 19
Source File: S3Config.scala from akka-persistence-s3 with MIT License
package akka.persistence.s3

import com.amazonaws.regions.{Regions, Region}
import com.typesafe.config.Config

private object AWSRegionNames {
  val GovCloud = Regions.GovCloud.getName
  val US_EAST_1 = Regions.US_EAST_1.getName
  val US_WEST_1 = Regions.US_WEST_1.getName
  val US_WEST_2 = Regions.US_WEST_2.getName
  val EU_WEST_1 = Regions.EU_WEST_1.getName
  val EU_CENTRAL_1 = Regions.EU_CENTRAL_1.getName
  val AP_SOUTHEAST_1 = Regions.AP_SOUTHEAST_1.getName
  val AP_SOUTHEAST_2 = Regions.AP_SOUTHEAST_2.getName
  val AP_NORTHEAST_1 = Regions.AP_NORTHEAST_1.getName
  val AP_NORTHEAST_2 = Regions.AP_NORTHEAST_2.getName
  val SA_EAST_1 = Regions.SA_EAST_1.getName
  val CN_NORTH_1 = Regions.CN_NORTH_1.getName
}

class S3ClientConfig(config: Config) {
  import AWSRegionNames._

  val awsKey = config getString "aws-access-key-id"
  val awsSecret = config getString "aws-secret-access-key"
  val awsUseDefaultCredentialsProviderChain = config getBoolean "aws-use-default-credentials-provider-chain"

  val region: Region = config getString "region" match {
    case GovCloud        => Region.getRegion(Regions.GovCloud)
    case US_EAST_1       => Region.getRegion(Regions.US_EAST_1)
    case US_WEST_1       => Region.getRegion(Regions.US_WEST_1)
    case US_WEST_2       => Region.getRegion(Regions.US_WEST_2)
    case EU_WEST_1       => Region.getRegion(Regions.EU_WEST_1)
    case EU_CENTRAL_1    => Region.getRegion(Regions.EU_CENTRAL_1)
    case AP_SOUTHEAST_1  => Region.getRegion(Regions.AP_SOUTHEAST_1)
    case AP_SOUTHEAST_2  => Region.getRegion(Regions.AP_SOUTHEAST_2)
    case AP_NORTHEAST_1  => Region.getRegion(Regions.AP_NORTHEAST_1)
    case AP_NORTHEAST_2  => Region.getRegion(Regions.AP_NORTHEAST_2)
    case SA_EAST_1       => Region.getRegion(Regions.SA_EAST_1)
    case CN_NORTH_1      => Region.getRegion(Regions.CN_NORTH_1)
  }

  val endpoint: Option[String] = {
    val e = config getString "endpoint"
    if (e == "default") None else Some(e)
  }

  object options {
    val pathStyleAccess = config getBoolean "options.path-style-access"
    val chunkedEncodingDisabled = config getBoolean "options.chunked-encoding-disabled"
  }
}
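Worth noting as a design choice: the hand-written match above only accepts the twelve whitelisted region names. If restricting the set is not a requirement, the SDK's own lookup collapses the whole match to a single line, as in this sketch:

val region: Region = Region.getRegion(Regions.fromName(config getString "region"))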
Example 20
Source File: StsSdkHelpers.scala from rokku with Apache License 2.0
package com.ing.wbaa.testkit.awssdk

import akka.http.scaladsl.model.Uri
import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.regions.Regions
import com.amazonaws.services.securitytoken.{AWSSecurityTokenService, AWSSecurityTokenServiceClientBuilder}

trait StsSdkHelpers {
  def getAmazonSTSSdk(uri: Uri): AWSSecurityTokenService = {
    AWSSecurityTokenServiceClientBuilder
      .standard()
      .withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials("accesskey", "secretkey")))
      .withEndpointConfiguration(new EndpointConfiguration(
        s"${uri.scheme}://${uri.authority.host.address}:${uri.authority.port}", Regions.DEFAULT_REGION.getName)
      )
      .build()
  }
}
Example 21
Source File: S3Client.scala from rokku with Apache License 2.0
package com.ing.wbaa.rokku.proxy.provider.aws

import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder
import com.amazonaws.regions.Regions
import com.amazonaws.services.s3.model.{AccessControlList, BucketPolicy, GroupGrantee, Permission}
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}
import com.ing.wbaa.rokku.proxy.config.StorageS3Settings
import com.ing.wbaa.rokku.proxy.data.RequestId
import com.ing.wbaa.rokku.proxy.handler.LoggerHandlerWithId

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}

trait S3Client {
  protected[this] implicit def executionContext: ExecutionContext

  private val logger = new LoggerHandlerWithId
  protected[this] def storageS3Settings: StorageS3Settings

  protected[this] lazy val s3Client: AmazonS3 = {
    val credentials = new BasicAWSCredentials(
      storageS3Settings.storageS3AdminAccesskey,
      storageS3Settings.storageS3AdminSecretkey)

    val endpointConfiguration = new AwsClientBuilder.EndpointConfiguration(
      s"http://${storageS3Settings.storageS3Authority.host.address()}:${storageS3Settings.storageS3Authority.port}",
      Regions.US_EAST_1.getName)

    AmazonS3ClientBuilder.standard()
      .withPathStyleAccessEnabled(true)
      .withCredentials(new AWSStaticCredentialsProvider(credentials))
      .withEndpointConfiguration(endpointConfiguration)
      .build()
  }

  protected[this] def setDefaultBucketAclAndPolicy(bucketName: String)(implicit id: RequestId): Future[Unit] = Future {
    Try {
      logger.info("setting bucket acls and policies for bucket {}", bucketName)
      val acl = s3Client.getBucketAcl(bucketName)
      acl.revokeAllPermissions(GroupGrantee.AuthenticatedUsers)
      acl.grantPermission(GroupGrantee.AuthenticatedUsers, Permission.Read)
      acl.grantPermission(GroupGrantee.AuthenticatedUsers, Permission.Write)
      s3Client.setBucketAcl(bucketName, acl)
      s3Client.setBucketPolicy(bucketName, """{"Statement": [{"Action": ["s3:GetObject"],"Effect": "Allow","Principal": "*","Resource": ["arn:aws:s3:::*"]}],"Version": "2012-10-17"}""")
    } match {
      case Failure(exception) => logger.error("setting bucket acls and policies ex={}", exception.getMessage)
      case Success(_)         => logger.info("acls and policies for bucket {} done", bucketName)
    }
  }

  def getBucketAcl(bucketName: String): Future[AccessControlList] = Future {
    s3Client.getBucketAcl(bucketName)
  }

  def getBucketPolicy(bucketName: String): Future[BucketPolicy] = Future {
    s3Client.getBucketPolicy(bucketName)
  }

  def listBucket: String = {
    s3Client.listObjects(storageS3Settings.bucketName).getBucketName
  }
}
Example 22
Source File: S3StoreTest.scala from fs2-blobstore with Apache License 2.0
package blobstore
package s3

import cats.effect.IO
import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder
import com.amazonaws.regions.Regions
import com.amazonaws.services.s3.transfer.{TransferManager, TransferManagerBuilder}
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}

class S3StoreTest extends AbstractStoreTest {

  val credentials = new BasicAWSCredentials("my_access_key", "my_secret_key")
  val clientConfiguration = new ClientConfiguration()
  clientConfiguration.setSignerOverride("AWSS3V4SignerType")
  val minioHost: String = Option(System.getenv("BLOBSTORE_MINIO_HOST")).getOrElse("minio-container")
  val minioPort: String = Option(System.getenv("BLOBSTORE_MINIO_PORT")).getOrElse("9000")

  private val client: AmazonS3 = AmazonS3ClientBuilder.standard()
    .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(
      s"http://$minioHost:$minioPort", Regions.US_EAST_1.name()))
    .withPathStyleAccessEnabled(true)
    .withClientConfiguration(clientConfiguration)
    .withCredentials(new AWSStaticCredentialsProvider(credentials))
    .build()

  private val transferManager: TransferManager = TransferManagerBuilder.standard()
    .withS3Client(client)
    .build()

  override val store: Store[IO] = new S3Store[IO](transferManager, blocker = blocker)

  override val root: String = "blobstore-test-bucket"

  override def beforeAll(): Unit = {
    super.beforeAll()
    try {
      client.createBucket(root)
    } catch {
      case e: com.amazonaws.services.s3.model.AmazonS3Exception if e.getMessage.contains("BucketAlreadyOwnedByYou") =>
        // noop
    }
    ()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    try {
      client.shutdown()
    } catch {
      case _: Throwable => // noop
    }
  }
}
Example 23
Source File: EagerAWSDaleks.scala from aws-training-demo with Apache License 2.0
package aws.daleks.eager

import com.amazonaws.auth.DefaultAWSCredentialsProviderChain
import com.amazonaws.auth.profile.ProfileCredentialsProvider
import com.amazonaws.regions.{Region, Regions}
import com.amazonaws.regions.Regions._

object EagerAWSDaleks extends App {
  println("EXTERMINATE!")

  def findArg(arg: String): Option[String] = {
    val i = args.indexOf(s"-$arg")
    if (i >= 0) Option(args(i + 1)) else None
  }

  val profile = findArg("profile")

  implicit val credentials = profile match {
    case Some(prf) => new ProfileCredentialsProvider(prf)
    case None      => new DefaultAWSCredentialsProviderChain
  }

  val excludedRegions = List(GovCloud, CN_NORTH_1)
  val regions = Regions.values diff excludedRegions
  println(s"Exterminating regions [${regions.mkString(",")}]")

  val globals = List(
    new EagerRoute53Dalek(),
    new EagerIAMDalek())

  regions
    .map { Region.getRegion(_) }
    .par
    .foreach { implicit region =>
      println("Preparing extermination of region [" + region + "]")
      List(new EagerS3Dalek,
        new EagerBeanstalkDalek,
        new EagerCloudFormationDalek,
        new EagerDynamoDBDalek,
        new EagerElastiCacheDalek,
        new EagerEMRDalek,
        new EagerRDSDalek,
        new EagerSNSDalek,
        new EagerSQSDalek,
        new EagerEC2Dalek) foreach { _.exterminate }
    }

  globals foreach { _.exterminate }

  println("EXTERMINATE!")
}