com.amazonaws.services.s3.AmazonS3ClientBuilder Scala Examples
The following examples show how to use com.amazonaws.services.s3.AmazonS3ClientBuilder.
The project, source file, and license for each snippet are listed in the header above it.
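Most of the snippets below share the same pattern: start from AmazonS3ClientBuilder.standard(), optionally set a region, a credentials provider, an endpoint, or a ClientConfiguration, and finish with build(). A minimal sketch of that pattern follows; the region and credential values are placeholders and do not come from any example below.

import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}

object MinimalS3ClientSketch {
  // Placeholder credentials and region; real code would load these from configuration or the environment.
  private val credentials = new BasicAWSCredentials("ACCESS_KEY_ID", "SECRET_ACCESS_KEY")

  val s3: AmazonS3 = AmazonS3ClientBuilder
    .standard()
    .withRegion("eu-west-1")
    .withCredentials(new AWSStaticCredentialsProvider(credentials))
    .build()
}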
Example 1
Source File: BootstrapAction.scala From sbt-lighter with Apache License 2.0
package sbtlighter

import com.amazonaws.services.elasticmapreduce.model.{
  BootstrapActionConfig,
  ScriptBootstrapActionConfig
}
import com.amazonaws.services.s3.AmazonS3ClientBuilder

import scala.collection.JavaConverters._

case class BootstrapAction(
    name: String,
    path: String,
    args: String*
) {
  def toAwsBootstrapActionConfig(): BootstrapActionConfig = {
    new BootstrapActionConfig()
      .withName(name)
      .withScriptBootstrapAction(
        new ScriptBootstrapActionConfig()
          .withPath(path)
          .withArgs(args.asJava))
  }
}
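A call site for this case class might look like the following sketch; the action name, script location, and argument are made up for illustration.

val action = BootstrapAction("install-deps", "s3://my-bucket/bootstrap/install.sh", "--verbose")
// Convert to the EMR SDK type when assembling the cluster request.
val emrConfig: BootstrapActionConfig = action.toAwsBootstrapActionConfig()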
Example 2
Source File: S3BrainTest.scala From sumobot with Apache License 2.0
package com.sumologic.sumobot.brain

import akka.actor.ActorSystem
import akka.pattern.ask
import akka.testkit.TestKit
import akka.util.Timeout
import com.amazonaws.auth.{AWSCredentials, AWSStaticCredentialsProvider}
import com.amazonaws.services.s3.AmazonS3ClientBuilder
import com.sumologic.sumobot.brain.Brain.ValueRetrieved
import com.sumologic.sumobot.core.aws.AWSAccounts
import com.sumologic.sumobot.test.annotated.SumoBotTestKit
import org.scalatest.{BeforeAndAfterAll, Matchers}

import scala.collection.JavaConverters._
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Random

class S3BrainTest
    extends SumoBotTestKit(ActorSystem("S3SingleObjectBrainTest"))
    with BeforeAndAfterAll
    with Matchers {

  lazy val credsOption = AWSAccounts.load(system.settings.config).values.headOption

  val bucketPrefix = "sumobot-s3-brain"

  // The tests here only run if there are valid AWS credentials in the configuration. Otherwise,
  // they're skipped.
  credsOption foreach { creds =>
    cleanupBuckets(creds)

    val bucket = bucketPrefix + randomString(5)

    "S3 brain" should {
      "persist the contents across reloads" in {
        implicit val timeout = Timeout(5.seconds)
        val s3Key = randomString(16)
        val firstBrain = system.actorOf(S3Brain.props(creds, bucket, s3Key))
        firstBrain ! Brain.Store("hello", "world")

        // Just wait for the next message to return.
        val firstRetrieval = firstBrain ? Brain.Retrieve("hello")
        val firstResult = Await.result(firstRetrieval, 5.seconds)
        firstResult match {
          case ValueRetrieved(k, v) =>
            k should be("hello")
            v should be("world")
          case wrongResult => fail(s"Did not get what we expected: $wrongResult")
        }

        // Since we wrote to S3, the 2nd brain should now have the value.
        val secondBrain = system.actorOf(S3Brain.props(creds, bucket, s3Key))
        val secondRetrieval = secondBrain ? Brain.Retrieve("hello")
        val secondResult = Await.result(secondRetrieval, 5.seconds)
        secondResult match {
          case ValueRetrieved(k, v) =>
            k should be("hello")
            v should be("world")
          case wrongResult => fail(s"Did not get what we expected: $wrongResult")
        }
      }
    }
  }

  private def randomString(length: Int): String = {
    val alphabet = ('a' to 'z').mkString + ('0' to '9').mkString
    (1 to length).
      map(_ => Random.nextInt(alphabet.length)).
      map(alphabet.charAt).mkString
  }

  override def afterAll() {
    TestKit.shutdownActorSystem(system)
    credsOption.foreach(cleanupBuckets)
  }

  def cleanupBuckets(creds: AWSCredentials): Unit = {
    val s3 = AmazonS3ClientBuilder.standard().withCredentials(new AWSStaticCredentialsProvider(creds)).build()
    s3.listBuckets().asScala.filter(_.getName.startsWith(bucketPrefix)).foreach { bucket =>
      println(s"Deleting S3 bucket ${bucket.getName}")
      val objects = s3.listObjects(bucket.getName).getObjectSummaries.asScala.map(_.getKey)
      objects.foreach { obj =>
        s3.deleteObject(bucket.getName, obj)
      }
      s3.deleteBucket(bucket.getName)
    }
  }
}
Example 3
Source File: S3Brain.scala From sumobot with Apache License 2.0
package com.sumologic.sumobot.brain

import java.io.{ByteArrayInputStream, ByteArrayOutputStream}
import java.util.Properties

import akka.actor.{Actor, Props}
import com.amazonaws.auth.{AWSCredentials, AWSStaticCredentialsProvider}
import com.amazonaws.services.s3.{AmazonS3Client, AmazonS3ClientBuilder}
import com.amazonaws.services.s3.model.ObjectMetadata
import com.sumologic.sumobot.brain.Brain._

import scala.collection.JavaConverters._
import scala.collection.immutable

object S3Brain {
  def props(credentials: AWSCredentials, bucket: String, s3Key: String): Props =
    Props(classOf[S3Brain], credentials, bucket, s3Key)
}

class S3Brain(credentials: AWSCredentials, bucket: String, s3Key: String) extends Actor {

  private val s3Client = AmazonS3ClientBuilder.standard()
    .withCredentials(new AWSStaticCredentialsProvider(credentials)).build

  private var brainContents: Map[String, String] = loadFromS3()

  override def receive: Receive = {
    case Store(key, value) =>
      brainContents += (key -> value)
      saveToS3(brainContents)

    case Remove(key) =>
      brainContents -= key
      saveToS3(brainContents)

    case Retrieve(key) =>
      brainContents.get(key) match {
        case Some(value) => sender() ! ValueRetrieved(key, value)
        case None => sender() ! ValueMissing(key)
      }

    case ListValues(prefix) =>
      sender() ! ValueMap(brainContents.filter(_._1.startsWith(prefix)))
  }

  private def loadFromS3(): Map[String, String] = {
    if (s3Client.doesBucketExistV2(bucket)) {
      val props = new Properties()
      props.load(s3Client.getObject(bucket, s3Key).getObjectContent)
      immutable.Map(props.asScala.toSeq: _*)
    } else {
      Map.empty
    }
  }

  private def saveToS3(contents: Map[String, String]): Unit = {
    if (!s3Client.doesBucketExistV2(bucket)) {
      s3Client.createBucket(bucket)
    }

    val props = new Properties()
    props.putAll(contents.asJava)
    val out = new ByteArrayOutputStream()
    props.store(out, "")
    out.flush()
    out.close()
    val in = new ByteArrayInputStream(out.toByteArray)
    s3Client.putObject(bucket, s3Key, in, new ObjectMetadata())
  }
}
Example 4
Source File: ArtifactS3Saver.scala From marvin-engine-executor with Apache License 2.0
package org.marvin.artifact.manager

import java.io.File

import akka.Done
import akka.actor.{Actor, ActorLogging}
import com.amazonaws.services.s3.model.GetObjectRequest
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}
import org.apache.hadoop.fs.Path
import org.marvin.artifact.manager.ArtifactSaver.{SaveToLocal, SaveToRemote}
import org.marvin.model.EngineMetadata

class ArtifactS3Saver(metadata: EngineMetadata) extends Actor with ActorLogging {
  var s3Client: AmazonS3 = _

  override def preStart() = {
    log.info(s"${this.getClass().getCanonicalName} actor initialized...")

    // Create the S3 client with the default credential chain (environment variables).
    s3Client = AmazonS3ClientBuilder.standard.withRegion(System.getenv("AWS_DEFAULT_REGION")).build
    log.info("Amazon S3 client initialized...")
  }

  def generatePaths(artifactName: String, protocol: String): Map[String, Path] = {
    // Strip a leading slash from the configured remote path; otherwise use it as-is.
    var artifactsRemotePath: String = metadata.artifactsRemotePath
    if (artifactsRemotePath.startsWith("/")) {
      artifactsRemotePath = artifactsRemotePath.substring(1)
    }
    Map(
      "localPath" -> new Path(s"${metadata.artifactsLocalPath}/${metadata.name}/$artifactName"),
      "remotePath" -> new Path(s"${artifactsRemotePath}/${metadata.name}/${metadata.version}/$artifactName/$protocol")
    )
  }

  def validatePath(path: Path, isRemote: Boolean): Boolean = {
    if (isRemote) {
      s3Client.doesObjectExist(metadata.s3BucketName, path.toString)
    } else {
      new java.io.File(path.toString).exists
    }
  }

  override def receive: Receive = {
    case SaveToLocal(artifactName, protocol) =>
      log.info("Received message, starting to work...")
      val uris = generatePaths(artifactName, protocol)
      val localToSave = new File(uris("localPath").toString)

      // Validate that the remote artifact exists for the requested protocol
      if (validatePath(uris("remotePath"), true)) {
        log.info(s"Copying files from ${metadata.s3BucketName}: ${uris("remotePath")} to ${uris("localPath")}")
        // Fetch the artifact stored under uris("remotePath") from the S3 bucket and save it locally
        s3Client.getObject(new GetObjectRequest(metadata.s3BucketName, uris("remotePath").toString), localToSave)
        log.info(s"File ${uris("localPath")} saved!")
      } else {
        log.error(s"Invalid protocol: ${protocol}, save process canceled!")
      }

      sender ! Done

    case SaveToRemote(artifactName, protocol) =>
      log.info("Received message, starting to work...")
      val uris = generatePaths(artifactName, protocol)
      val fileToUpload = new File(uris("localPath").toString)

      // Validate that the local artifact exists for the requested protocol
      if (validatePath(uris("localPath"), false)) {
        log.info(s"Copying files from ${uris("localPath")} to ${metadata.s3BucketName}: ${uris("remotePath")}")
        // Upload the local artifact to the S3 bucket under uris("remotePath")
        s3Client.putObject(metadata.s3BucketName, uris("remotePath").toString, fileToUpload)
        log.info(s"File ${uris("localPath")} saved!")
      } else {
        log.error(s"Invalid protocol: ${protocol}, save process canceled!")
      }

      sender ! Done

    case _ => log.warning("Received a message in an unexpected format...")
  }
}
Example 5
Source File: FileSystem.scala From lighthouse with Apache License 2.0
package be.dataminded.lighthouse.common

import better.files._
import com.amazonaws.services.s3.model.S3Object
import com.amazonaws.services.s3.{AmazonS3ClientBuilder, AmazonS3URI}

object FileSystem {
  def read(path: String): String = {
    if (path.startsWith("s3")) new S3FileSystem().read(path)
    else new LocalFileSystem().read(path)
  }
}

trait FileSystem {
  def read(path: String): String
}

class S3FileSystem extends FileSystem {
  override def read(path: String): String = {
    val s3Client = AmazonS3ClientBuilder.standard().build()
    val uri: AmazonS3URI = new AmazonS3URI(path)
    val s3Object: S3Object = s3Client.getObject(uri.getBucket, uri.getKey)
    s3Object.getObjectContent.asString()
  }
}

class LocalFileSystem extends FileSystem {
  override def read(path: String): String = file"$path".contentAsString
}
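A hypothetical call site for the abstraction above; the paths are illustrative, and the S3 branch relies on the default credential chain and region picked up by AmazonS3ClientBuilder.standard().

// Reads from S3 when the path starts with "s3", otherwise from the local filesystem.
val remoteContents = FileSystem.read("s3://my-bucket/config/application.conf")
val localContents  = FileSystem.read("/tmp/application.conf")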
Example 6
Source File: S3Utils.scala From elastiknn with Apache License 2.0
package com.klibisz.elastiknn.benchmarks

import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.{AWSCredentials, AWSStaticCredentialsProvider}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}

object S3Utils {

  def minioClient(): AmazonS3 = {
    val endpointConfig = new EndpointConfiguration("http://localhost:9000", "us-east-1")
    val clientConfig = new ClientConfiguration()
    clientConfig.setSignerOverride("AWSS3V4SignerType")
    AmazonS3ClientBuilder.standard
      .withPathStyleAccessEnabled(true)
      .withEndpointConfiguration(endpointConfig)
      .withClientConfiguration(clientConfig)
      .withCredentials(new AWSStaticCredentialsProvider(new AWSCredentials {
        override def getAWSAccessKeyId: String = "minioadmin"
        override def getAWSSecretKey: String = "minioadmin"
      }))
      .build()
  }

  def defaultClient(): AmazonS3 =
    AmazonS3ClientBuilder.defaultClient()
}
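A sketch of how the MinIO client might be exercised, assuming a local MinIO server is listening on port 9000 with the credentials hard-coded above; the bucket name is made up.

import scala.collection.JavaConverters._

val s3 = S3Utils.minioClient()
s3.createBucket("benchmarks")
s3.listBuckets().asScala.foreach(bucket => println(bucket.getName))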
Example 7
Source File: S3ObjectUploader.scala From CM-Well with Apache License 2.0
package cmwell.tools.neptune.export

import java.io._
import java.util
import java.util.concurrent.{Executors, TimeoutException}
import java.util.stream.Collectors
import java.util.{Collections, Vector}

import com.amazonaws.auth.profile.ProfileCredentialsProvider
import com.amazonaws.services.s3.AmazonS3ClientBuilder
import com.amazonaws.services.s3.model.{ObjectMetadata, PutObjectRequest}
import com.amazonaws.{AmazonServiceException, ClientConfiguration, Protocol, SdkClientException}
import org.apache.commons.io.{FileUtils, IOUtils}
import org.slf4j.LoggerFactory

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration.{FiniteDuration, _}

object S3ObjectUploader {

  val executor = Executors.newFixedThreadPool(1)
  implicit val ec: ExecutionContext = scala.concurrent.ExecutionContext.fromExecutor(executor)
  protected lazy val logger = LoggerFactory.getLogger("s3_uploader")

  def init(proxyHost: Option[String], proxyPort: Option[Int]) = {
    val clientRegion = "us-east-1"
    val config = new ClientConfiguration
    config.setProtocol(Protocol.HTTPS)
    proxyHost.foreach(host => config.setProxyHost(host))
    proxyPort.foreach(port => config.setProxyPort(port))
    val s3Client = AmazonS3ClientBuilder.standard()
      .withRegion(clientRegion)
      .withClientConfiguration(config)
      .withCredentials(new ProfileCredentialsProvider())
      .build()
    s3Client
  }

  def persistChunkToS3Bucket(chunkData: String, fileName: String, proxyHost: Option[String], proxyPort: Option[Int], s3Directory: String) = {
    try {
      init(proxyHost, proxyPort).putObject(s3Directory, fileName, chunkData)
    }
    catch {
      case e: AmazonServiceException =>
        e.printStackTrace()
        throw e
      case e: SdkClientException =>
        e.printStackTrace()
        throw e
    }
  }

  def persistChunkToS3Bucket(tmpFile: File, proxyHost: Option[String], proxyPort: Option[Int], s3Directory: String, retryCount: Int = 3): Unit = {
    try {
      val s3UploadTask = Future { init(proxyHost, proxyPort).putObject(s3Directory, tmpFile.getName, tmpFile) }(ec)
      Await.result(s3UploadTask, 5.minutes)
      tmpFile.delete()
    }
    catch {
      case e: TimeoutException =>
        if (retryCount > 0) {
          logger.error("S3 upload task ran more than 5 minutes. Going to retry")
          persistChunkToS3Bucket(tmpFile, proxyHost, proxyPort, s3Directory, retryCount - 1)
        }
        else {
          throw new Exception("S3 upload task duration was more than 5 minutes")
        }
      case e: AmazonServiceException =>
        e.printStackTrace()
        throw e
      case e: SdkClientException =>
        e.printStackTrace()
        throw e
    }
  }
}
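A hypothetical call to the String overload; the bucket name is illustrative, no proxy is used, and credentials are expected to come from the default AWS profile via ProfileCredentialsProvider.

S3ObjectUploader.persistChunkToS3Bucket(
  chunkData = "<subject> <predicate> <object> .",
  fileName = "chunk-000.nt",
  proxyHost = None,
  proxyPort = None,
  s3Directory = "neptune-export-bucket")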
Example 8
Source File: AwsInitializers.scala From graphcool-framework with Apache License 2.0
package cool.graph.aws

import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.kinesis.{AmazonKinesis, AmazonKinesisClientBuilder}
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}
import com.amazonaws.services.sns.{AmazonSNS, AmazonSNSAsyncClientBuilder}

object AwsInitializers {
  lazy val accessKeyId = sys.env.getOrElse("AWS_ACCESS_KEY_ID", "")
  lazy val accessKey   = sys.env.getOrElse("AWS_SECRET_ACCESS_KEY", "")
  lazy val credentials = new BasicAWSCredentials(accessKeyId, accessKey)

  def createKinesis(): AmazonKinesis = {
    AmazonKinesisClientBuilder
      .standard()
      .withCredentials(new AWSStaticCredentialsProvider(credentials))
      .withEndpointConfiguration(new EndpointConfiguration(sys.env("KINESIS_ENDPOINT"), sys.env("AWS_REGION")))
      .build()
  }

  def createSns(): AmazonSNS = {
    AmazonSNSAsyncClientBuilder.standard
      .withCredentials(new AWSStaticCredentialsProvider(credentials))
      .withEndpointConfiguration(new EndpointConfiguration(sys.env("SNS_ENDPOINT"), sys.env("AWS_REGION")))
      .build
  }

  def createS3(): AmazonS3 = {
    AmazonS3ClientBuilder.standard
      .withCredentials(new AWSStaticCredentialsProvider(credentials))
      .withEndpointConfiguration(new EndpointConfiguration(sys.env("FILEUPLOAD_S3_ENDPOINT"), sys.env("AWS_REGION")))
      .build
  }

  def createExportDataS3(): AmazonS3 = {
    AmazonS3ClientBuilder.standard
      .withCredentials(new AWSStaticCredentialsProvider(credentials))
      .withEndpointConfiguration(new EndpointConfiguration(sys.env("DATA_EXPORT_S3_ENDPOINT"), sys.env("AWS_REGION")))
      .build
  }

  // This is still in the old SBS AWS account
  def createS3Fileupload(): AmazonS3 = {
    val credentials = new BasicAWSCredentials(
      sys.env("FILEUPLOAD_S3_AWS_ACCESS_KEY_ID"),
      sys.env("FILEUPLOAD_S3_AWS_SECRET_ACCESS_KEY")
    )

    AmazonS3ClientBuilder.standard
      .withCredentials(new AWSStaticCredentialsProvider(credentials))
      .withEndpointConfiguration(new EndpointConfiguration(sys.env("FILEUPLOAD_S3_ENDPOINT"), sys.env("AWS_REGION")))
      .build
  }
}
Example 9
Source File: LambdaDeploymentAccount.scala From graphcool-framework with Apache License 2.0
package cool.graph.shared.functions.lambda

import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}
import cool.graph.shared.models.Project
import play.api.libs.json.Json
import software.amazon.awssdk.auth.{AwsCredentials, StaticCredentialsProvider}
import software.amazon.awssdk.regions.Region
import software.amazon.awssdk.services.lambda.LambdaAsyncClient

object LambdaDeploymentAccount {
  implicit val lambdaDeploymentBucket        = Json.format[LambdaDeploymentBucket]
  implicit val lambdaDeploymentAccountFormat = Json.format[LambdaDeploymentAccount]
}

case class LambdaDeploymentAccount(
    id: String,
    accessKeyID: String,
    accessKey: String,
    deployIamArn: String,
    deploymentEnabled: Boolean,
    deploymentBuckets: Vector[LambdaDeploymentBucket]
) {
  lazy val credentialsProvider = new StaticCredentialsProvider(new AwsCredentials(accessKeyID, accessKey))
  lazy val s3Credentials       = new BasicAWSCredentials(accessKeyID, accessKey)

  def bucket(project: Project): String = {
    val region = getRegion(project.region.toString)
    deploymentBuckets.find(_.region == region).getOrElse(sys.error("Region is not supported for lambda deployment")).deploymentBucket
  }

  def lambdaClient(project: Project): LambdaAsyncClient =
    LambdaAsyncClient
      .builder()
      .region(Region.of(project.region.toString))
      .credentialsProvider(credentialsProvider)
      .build()

  def s3Client(project: Project): AmazonS3 = {
    val region = getRegion(project.region.toString)
    AmazonS3ClientBuilder.standard
      .withCredentials(new AWSStaticCredentialsProvider(s3Credentials))
      .withEndpointConfiguration(new EndpointConfiguration(s"s3-$region.amazonaws.com", region))
      .build
  }

  private def getRegion(region: String) = Region.of(region).toString
}

case class LambdaDeploymentBucket(region: String, deploymentBucket: String)
Example 10
Source File: FileManagerS3Mock.scala From HAT2.0 with GNU Affero General Public License v3.0
package org.hatdex.hat.api.service

import com.amazonaws.auth.{ AWSStaticCredentialsProvider, BasicAWSCredentials }
import com.amazonaws.services.s3.model.ObjectMetadata
import com.amazonaws.services.s3.{ AmazonS3, AmazonS3ClientBuilder }
import org.specs2.mock.Mockito

import scala.concurrent.duration._

case class FileManagerS3Mock() extends Mockito {
  val s3Configuration = AwsS3Configuration("hat-storage-test", "testAwsAccessKey", "testAwsSecret", "eu-west-1", 5.minutes)
  private val awsCreds: BasicAWSCredentials = new BasicAWSCredentials(s3Configuration.accessKeyId, s3Configuration.secretKey)
  val mockS3client: AmazonS3 = spy(AmazonS3ClientBuilder.standard()
    .withRegion("eu-west-1")
    .withCredentials(new AWSStaticCredentialsProvider(awsCreds))
    .build())

  private val s3ObjectMetadata = new ObjectMetadata()
  s3ObjectMetadata.setContentLength(123456L)
  doReturn(s3ObjectMetadata).when(mockS3client).getObjectMetadata("hat-storage-test", "hat.hubofallthings.net/testFile")
  doNothing.when(mockS3client).deleteObject("hat-storage-test", "hat.hubofallthings.net/deleteFile")
}
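A sketch of how the mock might be used inside a specification; the bucket and key match the stubs set up above, so no network call is made.

val fileManagerMock = FileManagerS3Mock()
val metadata = fileManagerMock.mockS3client.getObjectMetadata("hat-storage-test", "hat.hubofallthings.net/testFile")
assert(metadata.getContentLength == 123456L) // returns the stubbed ObjectMetadata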
Example 11
Source File: FileManagerModule.scala From HAT2.0 with GNU Affero General Public License v3.0
package org.hatdex.hat.modules

import com.amazonaws.auth.{ AWSStaticCredentialsProvider, BasicAWSCredentials }
import com.amazonaws.services.s3.{ AmazonS3, AmazonS3ClientBuilder }
import com.google.inject.name.Named
import com.google.inject.{ AbstractModule, Provides }
import net.codingwell.scalaguice.ScalaModule
import org.hatdex.hat.api.service.{ AwsS3Configuration, FileManager, FileManagerS3 }
import play.api.Configuration
import play.api.libs.concurrent.AkkaGuiceSupport

class FileManagerModule extends AbstractModule with ScalaModule with AkkaGuiceSupport {

  override def configure() = {
    bind[FileManager].to[FileManagerS3]
    ()
  }

  @Provides
  def provideCookieAuthenticatorService(configuration: Configuration): AwsS3Configuration = {
    import AwsS3Configuration.configLoader
    configuration.get[AwsS3Configuration]("storage.s3Configuration")
  }

  @Provides @Named("s3client-file-manager")
  def provides3Client(configuration: AwsS3Configuration): AmazonS3 = {
    val awsCreds: BasicAWSCredentials = new BasicAWSCredentials(configuration.accessKeyId, configuration.secretKey)
    AmazonS3ClientBuilder.standard()
      .withRegion(configuration.region)
      .withCredentials(new AWSStaticCredentialsProvider(awsCreds))
      .build()
  }
}
Example 12
Source File: Aws.scala From embulk-output-s3_parquet with MIT License
package org.embulk.output.s3_parquet.aws

import com.amazonaws.client.builder.AwsClientBuilder
import com.amazonaws.services.glue.{AWSGlue, AWSGlueClientBuilder}
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}
import com.amazonaws.services.s3.transfer.{
  TransferManager,
  TransferManagerBuilder
}

object Aws {

  trait Task
      extends AwsCredentials.Task
      with AwsEndpointConfiguration.Task
      with AwsClientConfiguration.Task
      with AwsS3Configuration.Task

  def apply(task: Task): Aws = {
    new Aws(task)
  }

}

class Aws(task: Aws.Task) {

  def withS3[A](f: AmazonS3 => A): A = {
    val builder: AmazonS3ClientBuilder = AmazonS3ClientBuilder.standard()
    AwsS3Configuration(task).configureAmazonS3ClientBuilder(builder)
    val svc = createService(builder)
    try f(svc)
    finally svc.shutdown()
  }

  def withTransferManager[A](f: TransferManager => A): A = {
    withS3 { s3 =>
      val svc = TransferManagerBuilder.standard().withS3Client(s3).build()
      try f(svc)
      finally svc.shutdownNow(false)
    }
  }

  def withGlue[A](f: AWSGlue => A): A = {
    val builder: AWSGlueClientBuilder = AWSGlueClientBuilder.standard()
    val svc = createService(builder)
    try f(svc)
    finally svc.shutdown()
  }

  def createService[S <: AwsClientBuilder[S, T], T](
      builder: AwsClientBuilder[S, T]
  ): T = {
    AwsEndpointConfiguration(task).configureAwsClientBuilder(builder)
    AwsClientConfiguration(task).configureAwsClientBuilder(builder)
    builder.setCredentials(AwsCredentials(task).createAwsCredentialsProvider)

    builder.build()
  }
}
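A sketch of a caller, assuming task is an Aws.Task produced by the plugin's configuration machinery; the bucket name is illustrative.

import scala.collection.JavaConverters._

def listKeys(task: Aws.Task, bucket: String): Seq[String] =
  Aws(task).withS3 { s3 =>
    // The client is shut down by withS3 once this block returns.
    s3.listObjectsV2(bucket).getObjectSummaries.asScala.map(_.getKey)
  }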
Example 13
Source File: S3Manager.scala From project-matt with MIT License
package org.datafy.aws.app.matt.extras

import java.util.Date

import com.amazonaws.services.s3.AmazonS3ClientBuilder
import com.amazonaws.services.s3.model.ListObjectsV2Request

import scala.collection.JavaConverters._

case class S3KeySummary(
  bucketName: String,
  key: String,
  size: Int,
  lastModifiedDate: Option[Date] = None,
  continuationToken: Option[String] = None
)

object S3Manager {

  private val AWS_S3_CLIENT = AmazonS3ClientBuilder.defaultClient()

  val S3_MAX_SCAN_SIZE: Int = 3145728 * 1024 * 1024
  val S3_MAX_RESULTS: Int = 1000

  def getMyBucketsSummary() = {
    val allBuckets = AWS_S3_CLIENT.listBuckets()
    allBuckets.asScala.toList.map(_.getName)
  }

  def getBucketObjects(bucketName: String) = {
    val bucketObjects = AWS_S3_CLIENT.listObjectsV2(bucketName)
    val objectSummaries = bucketObjects.getObjectSummaries
    objectSummaries.asScala.toList.map { s3Object =>
      S3KeySummary(s3Object.getBucketName, s3Object.getKey,
        s3Object.getSize.toInt, Some(s3Object.getLastModified)
      )
    }
  }

  def getBucketObjects(bucketName: String, keyPrefix: String,
                       lastScannedObject: Option[String] = None) = {
    val objectsV2Request = new ListObjectsV2Request()
      .withBucketName(bucketName)
      .withPrefix(keyPrefix)
      .withMaxKeys(S3_MAX_RESULTS)
      .withStartAfter(lastScannedObject.getOrElse(""))

    val objectSummaries = AWS_S3_CLIENT.listObjectsV2(objectsV2Request).getObjectSummaries
    objectSummaries.asScala.toList
      .filter(_.getKey != keyPrefix)
      .map { s3Object =>
        S3KeySummary(s3Object.getBucketName, s3Object.getKey,
          s3Object.getSize.toInt, Some(s3Object.getLastModified)
        )
      }
  }

  def getObjectContentAsStream(bucketName: String, objectKey: String) = {
    val contentStream = AWS_S3_CLIENT.getObject(bucketName, objectKey)
    contentStream.getObjectContent
  }

  def computeTotalObjectSize(s3KeySummary: List[S3KeySummary]) = {
    val bucketSummaryTuple = s3KeySummary.map { s3Object =>
      (s3Object.bucketName, s3Object.size)
    }.groupBy(_._1).mapValues(_.map(_._2).sum).toList
    bucketSummaryTuple
  }

  def computeTotalScanCost(): Unit = {

  }
}
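A hypothetical call site; the bucket and prefix are made up, and the client relies on the default credential chain via AmazonS3ClientBuilder.defaultClient().

val summaries = S3Manager.getBucketObjects("my-bucket", "logs/2018/")
val totalsPerBucket = S3Manager.computeTotalObjectSize(summaries)
totalsPerBucket.foreach { case (bucket, bytes) => println(s"$bucket: $bytes bytes") }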
Example 14
Source File: S3Repository.scala From mleap with Apache License 2.0
package ml.combust.mleap.repository.s3

import java.net.URI
import java.nio.file.{Files, Path}
import java.util.concurrent.Executors

import akka.actor.ActorSystem
import com.amazonaws.services.s3.{AmazonS3ClientBuilder, AmazonS3URI}
import com.typesafe.config.Config
import ml.combust.mleap.executor.repository.{Repository, RepositoryProvider}

import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.TimeUnit
import scala.util.Try

class S3RepositoryConfig(config: Config) {
  val threads: Int = config.getInt("threads")
}

class S3Repository(config: S3RepositoryConfig) extends Repository {
  private val client = AmazonS3ClientBuilder.defaultClient()
  private val threadPool = Executors.newFixedThreadPool(config.threads)
  implicit val diskEc: ExecutionContext = ExecutionContext.fromExecutor(threadPool)

  override def downloadBundle(uri: URI): Future[Path] = Future {
    val s3Uri = new AmazonS3URI(uri)
    val bucket = s3Uri.getBucket
    val key = s3Uri.getKey

    val tmpFile = Files.createTempFile("mleap", ".bundle.zip")
    Files.copy(client.getObject(bucket, key).getObjectContent, tmpFile)
    tmpFile
  }

  override def canHandle(uri: URI): Boolean = Try(new AmazonS3URI(uri)).isSuccess

  override def shutdown(): Unit = threadPool.shutdown()

  override def awaitTermination(timeout: Long, unit: TimeUnit): Unit =
    threadPool.awaitTermination(timeout, unit)
}

class S3RepositoryProvider extends RepositoryProvider {
  override def create(config: Config)
                     (implicit system: ActorSystem): S3Repository = {
    new S3Repository(new S3RepositoryConfig(config))
  }
}
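A sketch of standalone usage, assuming valid default AWS credentials; the thread count and bundle URI are illustrative.

import java.net.URI
import com.typesafe.config.ConfigFactory

val repository = new S3Repository(new S3RepositoryConfig(ConfigFactory.parseString("threads = 4")))
val download = repository.downloadBundle(URI.create("s3://my-bucket/models/pipeline.bundle.zip")) // Future[Path]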
Example 15
Source File: S3Minio.scala From openwhisk with Apache License 2.0
package org.apache.openwhisk.core.database.s3

import java.net.ServerSocket

import actionContainers.ActionContainer
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.s3.AmazonS3ClientBuilder
import com.typesafe.config.ConfigFactory
import common.{SimpleExec, StreamLogging}
import org.scalatest.{BeforeAndAfterAll, FlatSpec}
import org.apache.openwhisk.common.{Logging, TransactionId}
import org.apache.openwhisk.core.database.{AttachmentStore, DocumentSerializer}

import scala.concurrent.duration._
import scala.reflect.ClassTag

trait S3Minio extends FlatSpec with BeforeAndAfterAll with StreamLogging {
  def makeS3Store[D <: DocumentSerializer: ClassTag]()(implicit actorSystem: ActorSystem,
                                                       logging: Logging,
                                                       materializer: ActorMaterializer): AttachmentStore = {
    val config = ConfigFactory.parseString(s"""
      |whisk {
      |  s3 {
      |    alpakka {
      |      aws {
      |        credentials {
      |          provider = static
      |          access-key-id = "$accessKey"
      |          secret-access-key = "$secretAccessKey"
      |        }
      |        region {
      |          provider = static
      |          default-region = us-west-2
      |        }
      |      }
      |      endpoint-url = "http://localhost:$port"
      |    }
      |    bucket = "$bucket"
      |    $prefixConfig
      |  }
      |}
      """.stripMargin).withFallback(ConfigFactory.load())
    S3AttachmentStoreProvider.makeStore[D](config)
  }

  private val accessKey = "TESTKEY"
  private val secretAccessKey = "TESTSECRET"
  private val port = freePort()
  private val bucket = "test-ow-travis"

  private def prefixConfig = {
    if (bucketPrefix.nonEmpty) s"prefix = $bucketPrefix" else ""
  }

  protected def bucketPrefix: String = ""

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    dockerExec(
      s"run -d -e MINIO_ACCESS_KEY=$accessKey -e MINIO_SECRET_KEY=$secretAccessKey -p $port:9000 minio/minio server /data")
    println(s"Started minio on $port")
    createTestBucket()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    val containerId = dockerExec("ps -q --filter ancestor=minio/minio")
    containerId.split("\n").map(_.trim).foreach(id => dockerExec(s"stop $id"))
    println(s"Stopped minio container")
  }

  def createTestBucket(): Unit = {
    val endpoint = new EndpointConfiguration(s"http://localhost:$port", "us-west-2")
    val client = AmazonS3ClientBuilder.standard
      .withPathStyleAccessEnabled(true)
      .withEndpointConfiguration(endpoint)
      .withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretAccessKey)))
      .build

    org.apache.openwhisk.utils.retry(client.createBucket(bucket), 6, Some(1.minute))
    println(s"Created bucket $bucket")
  }

  private def dockerExec(cmd: String): String = {
    implicit val tid: TransactionId = TransactionId.testing
    val command = s"${ActionContainer.dockerCmd} $cmd"
    val cmdSeq = command.split(" ").map(_.trim).filter(_.nonEmpty)
    val (out, err, code) = SimpleExec.syncRunCmd(cmdSeq)
    assert(code == 0, s"Error occurred for command '$command'. Exit code: $code, Error: $err")
    out
  }

  private def freePort(): Int = {
    val socket = new ServerSocket(0)
    try socket.getLocalPort
    finally if (socket != null) socket.close()
  }
}
Example 16
Source File: AmazonS3.scala From sbt-aws-lambda with Apache License 2.0
package com.gilt.aws.lambda.wrapper

import com.amazonaws.auth.DefaultAWSCredentialsProviderChain
import com.amazonaws.services.s3.AmazonS3ClientBuilder
import com.amazonaws.services.s3.model.{Region => _, _}

import scala.util.Try

import com.gilt.aws.lambda.Region

trait AmazonS3 {
  def listBuckets(): Try[java.util.List[Bucket]]
  def createBucket(bucket: String): Try[Bucket]
  def putObject(req: PutObjectRequest): Try[PutObjectResult]
}

object AmazonS3 {
  def instance(region: Region): AmazonS3 = {
    val auth = new DefaultAWSCredentialsProviderChain()
    val client = AmazonS3ClientBuilder.standard()
      .withCredentials(auth)
      .withRegion(region.value)
      .build

    new AmazonS3 {
      def listBuckets() = Try(client.listBuckets)
      def createBucket(bucket: String) = Try(client.createBucket(bucket))
      def putObject(req: PutObjectRequest) = Try(client.putObject(req))
    }
  }
}
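A sketch of a caller; it assumes Region is a simple wrapper around the region name (the code above only reads region.value), and credentials come from the default provider chain.

import scala.collection.JavaConverters._
import scala.util.{Failure, Success}

val s3 = AmazonS3.instance(Region("us-east-1"))
s3.listBuckets() match {
  case Success(buckets) => buckets.asScala.foreach(b => println(b.getName))
  case Failure(error)   => println(s"Could not list buckets: ${error.getMessage}")
}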
Example 17
Source File: S3SdkHelpers.scala From rokku with Apache License 2.0
package com.ing.wbaa.testkit.awssdk

import java.io.File

import akka.http.scaladsl.model.Uri.Authority
import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.{AWSCredentials, AWSStaticCredentialsProvider, BasicSessionCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.s3.transfer.TransferManagerBuilder
import com.amazonaws.services.s3.transfer.model.UploadResult
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}
import com.typesafe.config.ConfigFactory

import scala.collection.JavaConverters._

trait S3SdkHelpers {
  val awsRegion = ConfigFactory.load().getString("rokku.storage.s3.region")

  def getAmazonS3(authority: Authority,
                  credentials: AWSCredentials = new BasicSessionCredentials("accesskey", "secretkey", "token")
                 ): AmazonS3 = {
    val cliConf = new ClientConfiguration()
    cliConf.setMaxErrorRetry(1)

    AmazonS3ClientBuilder
      .standard()
      .withClientConfiguration(cliConf)
      .withCredentials(new AWSStaticCredentialsProvider(credentials))
      .withPathStyleAccessEnabled(true)
      .withEndpointConfiguration(new EndpointConfiguration(s"http://s3.localhost:${authority.port}", awsRegion))
      .build()
  }

  def getKeysInBucket(sdk: AmazonS3, bucket: String): List[String] =
    sdk
      .listObjectsV2(bucket)
      .getObjectSummaries
      .asScala.toList
      .map(_.getKey)

  def doMultiPartUpload(sdk: AmazonS3, bucket: String, file: String, key: String): UploadResult = {
    val upload = TransferManagerBuilder
      .standard()
      .withS3Client(sdk)
      .build()
      .upload(bucket, key, new File(file))

    upload.waitForUploadResult()
  }
}
Example 18
Source File: RokkuS3ProxyVirtualHostedItTest.scala From rokku with Apache License 2.0
package com.ing.wbaa.rokku.proxy

import akka.http.scaladsl.model.Uri.Authority
import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.{AWSCredentials, AWSStaticCredentialsProvider, BasicSessionCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}

class RokkuS3ProxyVirtualHostedItTest extends RokkuS3ProxyItTest {

  override def getAmazonS3(authority: Authority,
                           credentials: AWSCredentials = new BasicSessionCredentials("accesskey", "secretkey", "token")
                          ): AmazonS3 = {
    val cliConf = new ClientConfiguration()
    cliConf.setMaxErrorRetry(1)

    AmazonS3ClientBuilder
      .standard()
      .withClientConfiguration(cliConf)
      .withCredentials(new AWSStaticCredentialsProvider(credentials))
      .withPathStyleAccessEnabled(false)
      .withEndpointConfiguration(new EndpointConfiguration(s"http://s3.localhost:${authority.port}", awsRegion))
      .build()
  }
}
Example 19
Source File: S3Client.scala From rokku with Apache License 2.0
package com.ing.wbaa.rokku.proxy.provider.aws

import com.amazonaws.auth.{ AWSStaticCredentialsProvider, BasicAWSCredentials }
import com.amazonaws.client.builder.AwsClientBuilder
import com.amazonaws.regions.Regions
import com.amazonaws.services.s3.model.{ AccessControlList, BucketPolicy, GroupGrantee, Permission }
import com.amazonaws.services.s3.{ AmazonS3, AmazonS3ClientBuilder }
import com.ing.wbaa.rokku.proxy.config.StorageS3Settings
import com.ing.wbaa.rokku.proxy.data.RequestId
import com.ing.wbaa.rokku.proxy.handler.LoggerHandlerWithId

import scala.concurrent.{ ExecutionContext, Future }
import scala.util.{ Failure, Success, Try }

trait S3Client {
  protected[this] implicit def executionContext: ExecutionContext

  private val logger = new LoggerHandlerWithId
  protected[this] def storageS3Settings: StorageS3Settings

  protected[this] lazy val s3Client: AmazonS3 = {
    val credentials = new BasicAWSCredentials(
      storageS3Settings.storageS3AdminAccesskey,
      storageS3Settings.storageS3AdminSecretkey)

    val endpointConfiguration = new AwsClientBuilder.EndpointConfiguration(
      s"http://${storageS3Settings.storageS3Authority.host.address()}:${storageS3Settings.storageS3Authority.port}",
      Regions.US_EAST_1.getName)

    AmazonS3ClientBuilder.standard()
      .withPathStyleAccessEnabled(true)
      .withCredentials(new AWSStaticCredentialsProvider(credentials))
      .withEndpointConfiguration(endpointConfiguration)
      .build()
  }

  protected[this] def setDefaultBucketAclAndPolicy(bucketName: String)(implicit id: RequestId): Future[Unit] = Future {
    Try {
      logger.info("setting bucket acls and policies for bucket {}", bucketName)
      val acl = s3Client.getBucketAcl(bucketName)
      acl.revokeAllPermissions(GroupGrantee.AuthenticatedUsers)
      acl.grantPermission(GroupGrantee.AuthenticatedUsers, Permission.Read)
      acl.grantPermission(GroupGrantee.AuthenticatedUsers, Permission.Write)
      s3Client.setBucketAcl(bucketName, acl)
      s3Client.setBucketPolicy(bucketName, """{"Statement": [{"Action": ["s3:GetObject"],"Effect": "Allow","Principal": "*","Resource": ["arn:aws:s3:::*"]}],"Version": "2012-10-17"}""")
    } match {
      case Failure(exception) => logger.error("setting bucket acls and policies ex={}", exception.getMessage)
      case Success(_)         => logger.info("acls and policies for bucket {} done", bucketName)
    }
  }

  def getBucketAcl(bucketName: String): Future[AccessControlList] = Future {
    s3Client.getBucketAcl(bucketName)
  }

  def getBucketPolicy(bucketName: String): Future[BucketPolicy] = Future {
    s3Client.getBucketPolicy(bucketName)
  }

  def listBucket: String = {
    s3Client.listObjects(storageS3Settings.bucketName).getBucketName
  }
}
Example 20
Source File: S3StoreTest.scala From fs2-blobstore with Apache License 2.0
package blobstore
package s3

import cats.effect.IO
import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder
import com.amazonaws.regions.Regions
import com.amazonaws.services.s3.transfer.{TransferManager, TransferManagerBuilder}
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}

class S3StoreTest extends AbstractStoreTest {

  val credentials = new BasicAWSCredentials("my_access_key", "my_secret_key")
  val clientConfiguration = new ClientConfiguration()
  clientConfiguration.setSignerOverride("AWSS3V4SignerType")
  val minioHost: String = Option(System.getenv("BLOBSTORE_MINIO_HOST")).getOrElse("minio-container")
  val minioPort: String = Option(System.getenv("BLOBSTORE_MINIO_PORT")).getOrElse("9000")

  private val client: AmazonS3 = AmazonS3ClientBuilder.standard()
    .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(
      s"http://$minioHost:$minioPort", Regions.US_EAST_1.name()))
    .withPathStyleAccessEnabled(true)
    .withClientConfiguration(clientConfiguration)
    .withCredentials(new AWSStaticCredentialsProvider(credentials))
    .build()

  private val transferManager: TransferManager = TransferManagerBuilder.standard()
    .withS3Client(client)
    .build()

  override val store: Store[IO] = new S3Store[IO](transferManager, blocker = blocker)

  override val root: String = "blobstore-test-bucket"

  override def beforeAll(): Unit = {
    super.beforeAll()
    try {
      client.createBucket(root)
    } catch {
      case e: com.amazonaws.services.s3.model.AmazonS3Exception if e.getMessage.contains("BucketAlreadyOwnedByYou") =>
        // noop
    }
    ()
  }

  override def afterAll(): Unit = {
    super.afterAll()

    try {
      client.shutdown()
    } catch {
      case _: Throwable =>
    }
  }
}
Example 21
Source File: AwsS3Configuration.scala From embulk-output-s3_parquet with MIT License
package org.embulk.output.s3_parquet.aws

import java.util.Optional

import com.amazonaws.services.s3.AmazonS3ClientBuilder
import org.embulk.config.{Config, ConfigDefault}
import org.embulk.output.s3_parquet.aws.AwsS3Configuration.Task

object AwsS3Configuration {

  trait Task {

    @Config("accelerate_mode_enabled")
    @ConfigDefault("null")
    def getAccelerateModeEnabled: Optional[Boolean]

    @Config("chunked_encoding_disabled")
    @ConfigDefault("null")
    def getChunkedEncodingDisabled: Optional[Boolean]

    @Config("dualstack_enabled")
    @ConfigDefault("null")
    def getDualstackEnabled: Optional[Boolean]

    @Config("force_global_bucket_access_enabled")
    @ConfigDefault("null")
    def getForceGlobalBucketAccessEnabled: Optional[Boolean]

    @Config("path_style_access_enabled")
    @ConfigDefault("null")
    def getPathStyleAccessEnabled: Optional[Boolean]

    @Config("payload_signing_enabled")
    @ConfigDefault("null")
    def getPayloadSigningEnabled: Optional[Boolean]

  }

  def apply(task: Task): AwsS3Configuration = {
    new AwsS3Configuration(task)
  }
}

class AwsS3Configuration(task: Task) {

  def configureAmazonS3ClientBuilder(builder: AmazonS3ClientBuilder): Unit = {
    task.getAccelerateModeEnabled.ifPresent(v => builder.setAccelerateModeEnabled(v))
    task.getChunkedEncodingDisabled.ifPresent(v => builder.setChunkedEncodingDisabled(v))
    task.getDualstackEnabled.ifPresent(v => builder.setDualstackEnabled(v))
    task.getForceGlobalBucketAccessEnabled.ifPresent(v => builder.setForceGlobalBucketAccessEnabled(v))
    task.getPathStyleAccessEnabled.ifPresent(v => builder.setPathStyleAccessEnabled(v))
    task.getPayloadSigningEnabled.ifPresent(v => builder.setPayloadSigningEnabled(v))
  }
}