com.amazonaws.services.s3.model.StorageClass Java Examples
The following examples show how to use com.amazonaws.services.s3.model.StorageClass. They are drawn from open-source projects; the source file, originating project, and license are noted above each example.
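Before the examples, a minimal sketch of the two common ways a storage class is applied on upload: directly on the request, or through the raw metadata header (the pattern several Glacier-oriented examples below use). The bucket name, key, and local file here are hypothetical, and the client comes from the SDK's default builder.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.Headers;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.StorageClass;
import java.io.File;

public class StorageClassSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // Option 1: set the storage class directly on the upload request.
        PutObjectRequest request = new PutObjectRequest("my-bucket", "my-key", new File("data.bin"))
            .withStorageClass(StorageClass.StandardInfrequentAccess);
        s3.putObject(request);

        // Option 2: set it through the raw x-amz-storage-class metadata header,
        // then attach the metadata to a PutObjectRequest as in the examples below.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier.toString());
    }
}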
Example #1
Source File: S3DaoTest.java From herd with Apache License 2.0
@Test
public void testRestoreObjects() {
    // Put a 1 byte Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(false);
    s3Operations.putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY,
        new ByteArrayInputStream(new byte[1]), metadata), null);

    // Initiate a restore request for the test S3 file.
    S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
    params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
    params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
    s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS, ARCHIVE_RETRIEVAL_OPTION);

    // Validate that there is an ongoing restore request for this object.
    ObjectMetadata objectMetadata = s3Operations.getObjectMetadata(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, null);
    assertTrue(objectMetadata.getOngoingRestore());
}
Example #2
Source File: PutS3Object.java From localization_nifi with Apache License 2.0
public MultipartState(String buf) {
    // Deserialize persisted multipart state. Field order (cf. the serialized form in Example #20 below):
    // uploadId # filePosition # partETags # partSize # storageClass # contentLength # timestamp
    String[] fields = buf.split(SEPARATOR);
    _uploadId = fields[0];
    _filePosition = Long.parseLong(fields[1]);
    _partETags = new ArrayList<>();
    for (String part : fields[2].split(",")) {
        if (part != null && !part.isEmpty()) {
            // Each part is serialized as "partNumber/eTag".
            String[] partFields = part.split("/");
            _partETags.add(new PartETag(Integer.parseInt(partFields[0]), partFields[1]));
        }
    }
    _partSize = Long.parseLong(fields[3]);
    _storageClass = StorageClass.fromValue(fields[4]);
    _contentLength = Long.parseLong(fields[5]);
    _timestamp = Long.parseLong(fields[6]);
}
Example #3
Source File: S3BlobStore.java From crate with Apache License 2.0
public static StorageClass initStorageClass(String storageClass) {
    if ((storageClass == null) || storageClass.equals("")) {
        return StorageClass.Standard;
    }
    try {
        final StorageClass _storageClass = StorageClass.fromValue(storageClass.toUpperCase(Locale.ENGLISH));
        if (_storageClass.equals(StorageClass.Glacier)) {
            throw new BlobStoreException("Glacier storage class is not supported");
        }
        return _storageClass;
    } catch (final IllegalArgumentException illegalArgumentException) {
        throw new BlobStoreException("`" + storageClass + "` is not a valid S3 Storage Class.");
    }
}
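A few hedged call sketches of how this helper behaves, limited to inputs exercised by its own logic and by the tests in Example #18 below; these are fragments, not a standalone program:

// Hypothetical calls to S3BlobStore.initStorageClass.
StorageClass a = S3BlobStore.initStorageClass(null);          // StorageClass.Standard (default)
StorageClass b = S3BlobStore.initStorageClass("standard_ia"); // StorageClass.StandardInfrequentAccess
S3BlobStore.initStorageClass("glacier");                      // throws BlobStoreException (explicitly rejected)
S3BlobStore.initStorageClass("not-a-class");                  // throws BlobStoreException (invalid value)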
Example #4
Source File: S3DataOutputStream.java From stratosphere with Apache License 2.0
private String initiateMultipartUpload() throws IOException {
    boolean operationSuccessful = false;
    final InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(this.bucket, this.object);
    if (this.useRRS) {
        request.setStorageClass(StorageClass.ReducedRedundancy);
    } else {
        request.setStorageClass(StorageClass.Standard);
    }
    try {
        final InitiateMultipartUploadResult result = this.s3Client.initiateMultipartUpload(request);
        operationSuccessful = true;
        return result.getUploadId();
    } catch (AmazonServiceException e) {
        throw new IOException(StringUtils.stringifyException(e));
    } finally {
        if (!operationSuccessful) {
            abortUpload();
        }
    }
}
Example #5
Source File: AwsS3BuildCacheService.java From gradle-s3-build-cache with Apache License 2.0
@Override
public void store(BuildCacheKey key, BuildCacheEntryWriter writer) {
    final String bucketPath = getBucketPath(key);
    logger.info("Start storing cache entry '{}' in S3 bucket", bucketPath);
    ObjectMetadata meta = new ObjectMetadata();
    meta.setContentType(BUILD_CACHE_CONTENT_TYPE);
    try (ByteArrayOutputStream os = new ByteArrayOutputStream()) {
        writer.writeTo(os);
        meta.setContentLength(os.size());
        try (InputStream is = new ByteArrayInputStream(os.toByteArray())) {
            PutObjectRequest request = getPutObjectRequest(bucketPath, meta, is);
            if (this.reducedRedundancy) {
                request.withStorageClass(StorageClass.ReducedRedundancy);
            }
            s3.putObject(request);
        }
    } catch (IOException e) {
        throw new BuildCacheException("Error while storing cache object in S3 bucket", e);
    }
}
Example #6
Source File: S3Backuper.java From cassandra-backup with Apache License 2.0
@Override
public FreshenResult freshenRemoteObject(final RemoteObjectReference object) throws InterruptedException {
    final String canonicalPath = ((S3RemoteObjectReference) object).canonicalPath;

    final CopyObjectRequest copyRequest = new CopyObjectRequest(request.storageLocation.bucket, canonicalPath,
        request.storageLocation.bucket, canonicalPath).withStorageClass(StorageClass.Standard);

    try {
        // Attempt to refresh the existing object in the bucket via an in-place copy.
        transferManager.copy(copyRequest).waitForCompletion();
        return FreshenResult.FRESHENED;
    } catch (final AmazonServiceException e) {
        // Under certain access policies, AWS S3 can't return NoSuchKey (404);
        // instead it returns AccessDenied (403), so handle both the same way.
        if (e.getStatusCode() != 404 && e.getStatusCode() != 403) {
            throw e;
        }
        // The freshen failed because the file/key didn't exist.
        return FreshenResult.UPLOAD_REQUIRED;
    }
}
Example #7
Source File: ITPutS3Object.java From nifi with Apache License 2.0
@Test
public void testStorageClasses() throws IOException {
    final TestRunner runner = TestRunners.newTestRunner(new PutS3Object());
    runner.setProperty(PutS3Object.CREDENTIALS_FILE, CREDENTIALS_FILE);
    runner.setProperty(PutS3Object.REGION, REGION);
    runner.setProperty(PutS3Object.BUCKET, BUCKET_NAME);
    Assert.assertTrue(runner.setProperty("x-custom-prop", "hello").isValid());

    for (StorageClass storageClass : StorageClass.values()) {
        runner.setProperty(PutS3Object.STORAGE_CLASS, storageClass.name());

        final Map<String, String> attrs = new HashMap<>();
        attrs.put("filename", "testStorageClasses/small_" + storageClass.name() + ".txt");
        runner.enqueue(getResourcePath(SAMPLE_FILE_RESOURCE_NAME), attrs);
        runner.run();

        runner.assertAllFlowFilesTransferred(PutS3Object.REL_SUCCESS, 1);
        FlowFile file = runner.getFlowFilesForRelationship(PutS3Object.REL_SUCCESS).get(0);
        Assert.assertEquals(storageClass.toString(), file.getAttribute(PutS3Object.S3_STORAGECLASS_ATTR_KEY));

        runner.clearTransferState();
    }
}
Example #8
Source File: TestPutS3Object.java From nifi with Apache License 2.0
@Test
public void testStorageClasses() {
    for (StorageClass storageClass : StorageClass.values()) {
        runner.setProperty(PutS3Object.STORAGE_CLASS, storageClass.name());
        prepareTest();

        runner.run(1);

        ArgumentCaptor<PutObjectRequest> captureRequest = ArgumentCaptor.forClass(PutObjectRequest.class);
        Mockito.verify(mockS3Client, Mockito.times(1)).putObject(captureRequest.capture());
        PutObjectRequest request = captureRequest.getValue();
        assertEquals(storageClass.toString(), request.getStorageClass());

        Mockito.reset(mockS3Client);
    }
}
Example #9
Source File: PutS3Object.java From nifi with Apache License 2.0
public MultipartState(String buf) {
    // Deserialize persisted multipart state. Field order (cf. the serialized form in Example #20 below):
    // uploadId # filePosition # partETags # partSize # storageClass # contentLength # timestamp
    String[] fields = buf.split(SEPARATOR);
    _uploadId = fields[0];
    _filePosition = Long.parseLong(fields[1]);
    _partETags = new ArrayList<>();
    for (String part : fields[2].split(",")) {
        if (part != null && !part.isEmpty()) {
            // Each part is serialized as "partNumber/eTag".
            String[] partFields = part.split("/");
            _partETags.add(new PartETag(Integer.parseInt(partFields[0]), partFields[1]));
        }
    }
    _partSize = Long.parseLong(fields[3]);
    _storageClass = StorageClass.fromValue(fields[4]);
    _contentLength = Long.parseLong(fields[5]);
    _timestamp = Long.parseLong(fields[6]);
}
Example #10
Source File: S3MapReduceCpOptionsParserTest.java From circus-train with Apache License 2.0
@Before
public void init() {
    copierOptions.put(CREDENTIAL_PROVIDER, URI.create("localjceks://file/foo/bar.jceks"));
    copierOptions.put(MULTIPART_UPLOAD_CHUNK_SIZE, 4096);
    copierOptions.put(S3_SERVER_SIDE_ENCRYPTION, true);
    copierOptions.put(STORAGE_CLASS, StorageClass.Glacier.toString());
    copierOptions.put(TASK_BANDWIDTH, 1024);
    copierOptions.put(NUMBER_OF_WORKERS_PER_MAP, 12);
    copierOptions.put(MULTIPART_UPLOAD_THRESHOLD, 2048L);
    copierOptions.put(MAX_MAPS, 5);
    copierOptions.put(COPY_STRATEGY, "mycopystrategy");
    copierOptions.put(LOG_PATH, new Path("hdfs:///tmp/logs"));
    copierOptions.put(REGION, Regions.EU_WEST_1.getName());
    copierOptions.put(IGNORE_FAILURES, false);
    copierOptions.put(S3_ENDPOINT_URI, "http://s3.endpoint/");
    copierOptions.put(UPLOAD_RETRY_COUNT, 5);
    copierOptions.put(UPLOAD_RETRY_DELAY_MS, 520);
    copierOptions.put(UPLOAD_BUFFER_SIZE, 1024);
    copierOptions.put(CANNED_ACL, "bucket-owner-full-control");
    parser = new S3MapReduceCpOptionsParser(SOURCES, TARGET, DEFAULT_CREDS_PROVIDER);
}
Example #11
Source File: S3MapReduceCpOptionsParserTest.java From circus-train with Apache License 2.0
private void assertDefaults(S3MapReduceCpOptions options) {
    assertThat(options.getCredentialsProvider(), is(URI.create("localjceks://file/foo/bar.jceks")));
    assertThat(options.getMultipartUploadPartSize(), is(4096L));
    assertThat(options.isS3ServerSideEncryption(), is(true));
    assertThat(options.getStorageClass(), is(StorageClass.Glacier.toString()));
    assertThat(options.getMaxBandwidth(), is(1024L));
    assertThat(options.getNumberOfUploadWorkers(), is(12));
    assertThat(options.getMultipartUploadThreshold(), is(2048L));
    assertThat(options.getMaxMaps(), is(5));
    assertThat(options.getCopyStrategy(), is("mycopystrategy"));
    assertThat(options.getLogPath(), is(new Path("hdfs:///tmp/logs")));
    assertThat(options.getRegion(), is(Regions.EU_WEST_1.getName()));
    assertThat(options.isIgnoreFailures(), is(false));
    assertThat(options.getS3EndpointUri(), is(URI.create("http://s3.endpoint/")));
    assertThat(options.getUploadRetryCount(), is(5));
    assertThat(options.getUploadRetryDelayMs(), is(520L));
    assertThat(options.getUploadBufferSize(), is(1024));
    assertThat(options.getCannedAcl(), is("bucket-owner-full-control"));
}
Example #12
Source File: S3DaoTest.java From herd with Apache License 2.0
@Test
public void testValidateGlacierS3FilesRestoredGlacierObjectRestoreNotInitiated() {
    // Put a 1 byte Glacier storage class file in S3 that has no restore initiated (OngoingRestore flag is null).
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    s3Operations.putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY,
        new ByteArrayInputStream(new byte[1]), metadata), null);

    // Try to validate if the Glacier S3 file is already restored.
    try {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
        s3Dao.validateGlacierS3FilesRestored(params);
        fail("Should throw an IllegalArgumentException when Glacier S3 file is not restored.");
    } catch (IllegalArgumentException e) {
        assertEquals(String.format("Archived S3 file \"%s\" is not restored. StorageClass {GLACIER}, OngoingRestore flag {null}, S3 bucket name {%s}",
            TARGET_S3_KEY, storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
Example #13
Source File: S3MapReduceCpOptionsTest.java From circus-train with Apache License 2.0
@Test
public void defaultValues() {
    S3MapReduceCpOptions options = new S3MapReduceCpOptions();
    assertThat(options.isHelp(), is(false));
    assertThat(options.isBlocking(), is(true));
    assertThat(options.getSources(), is(nullValue()));
    assertThat(options.getTarget(), is(nullValue()));
    assertThat(options.getCredentialsProvider(), is(nullValue()));
    assertThat(options.getMultipartUploadPartSize(), is(5L * 1024 * 1024));
    assertThat(options.isS3ServerSideEncryption(), is(false));
    assertThat(options.getStorageClass(), is(StorageClass.Standard.toString()));
    assertThat(options.getMaxBandwidth(), is(100L));
    assertThat(options.getNumberOfUploadWorkers(), is(20));
    assertThat(options.getMultipartUploadThreshold(), is(16L * 1024 * 1024));
    assertThat(options.getMaxMaps(), is(20));
    assertThat(options.getCopyStrategy(), is("uniformsize"));
    assertThat(options.getLogPath(), is(nullValue()));
    assertThat(options.getRegion(), is(nullValue()));
    assertThat(options.isIgnoreFailures(), is(false));
    assertThat(options.getS3EndpointUri(), is(nullValue()));
    assertThat(options.getUploadRetryCount(), is(3));
    assertThat(options.getUploadRetryDelayMs(), is(300L));
    assertThat(options.getUploadBufferSize(), is(0));
    assertThat(options.getCannedAcl(), is(nullValue()));
    assertThat(options.getAssumeRole(), is(nullValue()));
}
Example #14
Source File: S3FileManagerImpl.java From entrada with GNU General Public License v3.0
private boolean uploadFile(File src, S3Details dstDetails, boolean archive) {
    PutObjectRequest request = new PutObjectRequest(dstDetails.getBucket(),
        FileUtil.appendPath(dstDetails.getKey(), src.getName()), src);

    ObjectMetadata meta = new ObjectMetadata();
    if (archive) {
        meta.setHeader(Headers.STORAGE_CLASS, StorageClass.fromValue(StringUtils.upperCase(archiveStorageClass)));
    }
    if (encrypt) {
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }
    request.setMetadata(meta);

    try {
        amazonS3.putObject(request);
        return true;
    } catch (Exception e) {
        log.error("Error while uploading file: {}", src, e);
    }
    return false;
}
Example #15
Source File: S3DaoTest.java From herd with Apache License 2.0
@Test
public void testValidateGlacierS3FilesRestored() {
    // Put a 1 byte already restored Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(false);
    s3Operations.putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY,
        new ByteArrayInputStream(new byte[1]), metadata), null);

    // Validate the file.
    S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
    params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
    params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
    s3Dao.validateGlacierS3FilesRestored(params);
}
Example #16
Source File: S3DaoTest.java From herd with Apache License 2.0
@Test
public void testRestoreObjectsGlacierObjectAlreadyBeingRestored() {
    // Put a 1 byte Glacier storage class file in S3 flagged as already being restored.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(true);
    s3Operations.putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY,
        new ByteArrayInputStream(new byte[1]), metadata), null);

    // Initiate a restore request for the test S3 file.
    S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
    params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
    params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
    s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS, ARCHIVE_RETRIEVAL_OPTION);

    // Validate that there is still an ongoing restore request for this object.
    ObjectMetadata objectMetadata = s3Operations.getObjectMetadata(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, null);
    assertTrue(objectMetadata.getOngoingRestore());
}
Example #17
Source File: AmazonKey.java From openbd-core with GNU General Public License v3.0
public StorageClass getAmazonStorageClass(String storage) {
    if (storage == null) {
        return StorageClass.Standard;
    } else if (storage.equalsIgnoreCase("standard")) {
        return StorageClass.Standard;
    } else if (storage.toLowerCase().startsWith("reduced")) {
        return StorageClass.ReducedRedundancy;
    } else {
        return StorageClass.Standard;
    }
}
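In effect, only values starting with "reduced" (case-insensitively) select ReducedRedundancy; null and any unrecognized string fall back to Standard. A hypothetical call sketch:

// Hypothetical calls to getAmazonStorageClass; the method never returns Glacier.
getAmazonStorageClass(null);                  // StorageClass.Standard
getAmazonStorageClass("REDUCED_REDUNDANCY");  // StorageClass.ReducedRedundancy
getAmazonStorageClass("glacier");             // StorageClass.Standard (unrecognized, falls back)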
Example #18
Source File: S3BlobStoreTests.java From crate with Apache License 2.0
@Test
public void testInitStorageClass() {
    // it should default to `standard`
    assertThat(S3BlobStore.initStorageClass(null), equalTo(StorageClass.Standard));
    assertThat(S3BlobStore.initStorageClass(""), equalTo(StorageClass.Standard));

    // it should accept [standard, standard_ia, reduced_redundancy]
    assertThat(S3BlobStore.initStorageClass("standard"), equalTo(StorageClass.Standard));
    assertThat(S3BlobStore.initStorageClass("standard_ia"), equalTo(StorageClass.StandardInfrequentAccess));
    assertThat(S3BlobStore.initStorageClass("reduced_redundancy"), equalTo(StorageClass.ReducedRedundancy));
}
Example #19
Source File: AmazonS3FileSystem.java From iaf with Apache License 2.0
public static List<String> getStorageClasses() {
    List<String> storageClasses = new ArrayList<String>(StorageClass.values().length);
    for (StorageClass storageClass : StorageClass.values()) {
        storageClasses.add(storageClass.toString());
    }
    return storageClasses;
}
Example #20
Source File: ITPutS3Object.java From nifi with Apache License 2.0
@Test
public void testStateToString() throws IOException, InitializationException {
    final String target = "UID-test1234567890#10001#1/PartETag-1,2/PartETag-2,3/PartETag-3,4/PartETag-4#20002#REDUCED_REDUNDANCY#30003#8675309";
    PutS3Object.MultipartState state2 = new PutS3Object.MultipartState();
    state2.setUploadId("UID-test1234567890");
    state2.setFilePosition(10001L);
    state2.setTimestamp(8675309L);
    for (Integer partNum = 1; partNum < 5; partNum++) {
        state2.addPartETag(new PartETag(partNum, "PartETag-" + partNum.toString()));
    }
    state2.setPartSize(20002L);
    state2.setStorageClass(StorageClass.ReducedRedundancy);
    state2.setContentLength(30003L);
    Assert.assertEquals(target, state2.toString());
}
Example #21
Source File: ITPutS3Object.java From nifi with Apache License 2.0
@Test
public void testStateDefaults() {
    PutS3Object.MultipartState state1 = new PutS3Object.MultipartState();
    Assert.assertEquals(state1.getUploadId(), "");
    Assert.assertEquals(state1.getFilePosition(), (Long) 0L);
    Assert.assertEquals(state1.getPartETags().size(), 0L);
    Assert.assertEquals(state1.getPartSize(), (Long) 0L);
    Assert.assertEquals(state1.getStorageClass().toString(), StorageClass.Standard.toString());
    Assert.assertEquals(state1.getContentLength(), (Long) 0L);
}
Example #22
Source File: S3DaoTest.java From herd with Apache License 2.0
@Test
public void testValidateGlacierS3FilesRestoredGlacierObjectRestoreInProgress() {
    // Put a 1 byte Glacier storage class file in S3 that is still being restored (OngoingRestore flag is true).
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(true);
    s3Operations.putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY,
        new ByteArrayInputStream(new byte[1]), metadata), null);

    // Try to validate if the Glacier S3 file is already restored.
    try {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
        s3Dao.validateGlacierS3FilesRestored(params);
        fail("Should throw an IllegalArgumentException when Glacier S3 file is not restored.");
    } catch (IllegalArgumentException e) {
        assertEquals(String.format("Archived S3 file \"%s\" is not restored. StorageClass {GLACIER}, OngoingRestore flag {true}, S3 bucket name {%s}",
            TARGET_S3_KEY, storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
Example #23
Source File: PutS3Object.java From nifi with Apache License 2.0
public MultipartState() {
    // Fresh state: no upload in progress, Standard storage class, timestamped now.
    _uploadId = "";
    _filePosition = 0L;
    _partETags = new ArrayList<>();
    _partSize = 0L;
    _storageClass = StorageClass.Standard;
    _contentLength = 0L;
    _timestamp = System.currentTimeMillis();
}
Example #24
Source File: S3DaoTest.java From herd with Apache License 2.0
@Test
public void testValidateGlacierS3FilesRestoredAmazonServiceException() {
    // Build a mock file path that triggers an Amazon service exception when we request S3 metadata for the object.
    String testKey = String.format("%s/%s", TEST_S3_KEY_PREFIX, MockS3OperationsImpl.MOCK_S3_FILE_NAME_SERVICE_EXCEPTION);

    // Put a 1 byte Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(false);
    s3Operations.putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), testKey,
        new ByteArrayInputStream(new byte[1]), metadata), null);

    // Try to validate if the Glacier S3 file is already restored for a mocked S3 file
    // that triggers an Amazon service exception when we request S3 metadata for the object.
    try {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(testKey)));
        s3Dao.validateGlacierS3FilesRestored(params);
        fail("Should throw an IllegalStateException when Glacier S3 object validation fails due to an Amazon service exception.");
    } catch (IllegalStateException e) {
        assertEquals(String.format("Fail to check restore status for \"%s\" key in \"%s\" bucket. " +
            "Reason: InternalError (Service: null; Status Code: 0; Error Code: InternalError; Request ID: null)",
            testKey, storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
Example #25
Source File: S3DaoTest.java From herd with Apache License 2.0
@Test
public void testRestoreObjectsNonGlacierNonDeepArchiveObject() {
    // Put a 1 byte non-Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Standard);
    metadata.setOngoingRestore(false);
    s3Operations.putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY,
        new ByteArrayInputStream(new byte[1]), metadata), null);

    // Try to initiate a restore request for a non-Glacier file.
    try {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
        s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS, ARCHIVE_RETRIEVAL_OPTION);
        fail("Should throw an IllegalStateException when file has a non-Glacier storage class.");
    } catch (IllegalStateException e) {
        assertEquals(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. " +
            "Reason: object is not in Glacier or DeepArchive (Service: null; Status Code: 0; Error Code: null; Request ID: null)",
            TARGET_S3_KEY, storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
Example #26
Source File: S3BlobStoreTests.java From crate with Apache License 2.0
/**
 * Creates a new {@link S3BlobStore} with random settings.
 * <p>
 * The blobstore uses a {@link MockAmazonS3} client.
 */
public static S3BlobStore randomMockS3BlobStore() {
    String bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT);
    ByteSizeValue bufferSize = new ByteSizeValue(randomIntBetween(5, 100), ByteSizeUnit.MB);
    boolean serverSideEncryption = randomBoolean();

    String cannedACL = null;
    if (randomBoolean()) {
        cannedACL = randomFrom(CannedAccessControlList.values()).toString();
    }

    String storageClass = null;
    if (randomBoolean()) {
        storageClass = randomValueOtherThan(StorageClass.Glacier, () -> randomFrom(StorageClass.values())).toString();
    }

    final AmazonS3 client = new MockAmazonS3(new ConcurrentHashMap<>(), bucket, serverSideEncryption, cannedACL, storageClass);
    final S3Service service = new S3Service() {
        @Override
        public synchronized AmazonS3Reference client(RepositoryMetaData metadata) {
            return new AmazonS3Reference(client);
        }
    };
    return new S3BlobStore(service, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass,
        new RepositoryMetaData(bucket, "s3", Settings.EMPTY));
}
Example #27
Source File: MockAmazonS3.java From presto with Apache License 2.0
@Override
public ListObjectsV2Result listObjectsV2(ListObjectsV2Request listObjectsV2Request) {
    final String continuationToken = "continue";
    ListObjectsV2Result listingV2 = new ListObjectsV2Result();

    if (continuationToken.equals(listObjectsV2Request.getContinuationToken())) {
        // Second page: return the remaining standard object, plus an optional Glacier object.
        S3ObjectSummary standardTwo = new S3ObjectSummary();
        standardTwo.setStorageClass(StorageClass.Standard.toString());
        standardTwo.setKey("test/standardTwo");
        standardTwo.setLastModified(new Date());
        listingV2.getObjectSummaries().add(standardTwo);
        if (hasGlacierObjects) {
            S3ObjectSummary glacier = new S3ObjectSummary();
            glacier.setStorageClass(StorageClass.Glacier.toString());
            glacier.setKey("test/glacier");
            glacier.setLastModified(new Date());
            listingV2.getObjectSummaries().add(glacier);
        }
    } else {
        // First page: return one standard object, truncated, with a continuation token to simulate pagination.
        S3ObjectSummary standardOne = new S3ObjectSummary();
        standardOne.setStorageClass(StorageClass.Standard.toString());
        standardOne.setKey("test/standardOne");
        standardOne.setLastModified(new Date());
        listingV2.getObjectSummaries().add(standardOne);
        listingV2.setTruncated(true);
        listingV2.setNextContinuationToken(continuationToken);
    }
    return listingV2;
}
Example #28
Source File: S3DaoTest.java From herd with Apache License 2.0
@Test
public void testRestoreObjectsAmazonServiceException() {
    // Build a mock file path that triggers an Amazon service exception when we request to restore an object.
    String testKey = String.format("%s/%s", TEST_S3_KEY_PREFIX, MockS3OperationsImpl.MOCK_S3_FILE_NAME_SERVICE_EXCEPTION);

    // Put a 1 byte Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(false);
    s3Operations.putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), testKey,
        new ByteArrayInputStream(new byte[1]), metadata), null);

    // Try to initiate a restore request for a mocked S3 file that would trigger an Amazon service exception when we request to restore an object.
    try {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(testKey)));
        s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS, ARCHIVE_RETRIEVAL_OPTION);
        fail("Should throw an IllegalStateException when an S3 restore object operation fails.");
    } catch (IllegalStateException e) {
        assertEquals(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. " +
            "Reason: InternalError (Service: null; Status Code: 0; Error Code: InternalError; Request ID: null)",
            testKey, storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
Example #29
Source File: ITPutS3Object.java From nifi with Apache License 2.0
@Test
public void testStorageClassesMultipart() throws IOException {
    final TestRunner runner = TestRunners.newTestRunner(new PutS3Object());
    runner.setProperty(PutS3Object.CREDENTIALS_FILE, CREDENTIALS_FILE);
    runner.setProperty(PutS3Object.REGION, REGION);
    runner.setProperty(PutS3Object.BUCKET, BUCKET_NAME);
    runner.setProperty(PutS3Object.MULTIPART_THRESHOLD, "50 MB");
    runner.setProperty(PutS3Object.MULTIPART_PART_SIZE, "50 MB");
    Assert.assertTrue(runner.setProperty("x-custom-prop", "hello").isValid());

    for (StorageClass storageClass : StorageClass.values()) {
        runner.setProperty(PutS3Object.STORAGE_CLASS, storageClass.name());

        final Map<String, String> attrs = new HashMap<>();
        attrs.put("filename", "testStorageClasses/large_" + storageClass.name() + ".dat");
        runner.enqueue(new byte[50 * 1024 * 1024 + 1], attrs);
        runner.run();

        runner.assertAllFlowFilesTransferred(PutS3Object.REL_SUCCESS, 1);
        FlowFile file = runner.getFlowFilesForRelationship(PutS3Object.REL_SUCCESS).get(0);
        Assert.assertEquals(storageClass.toString(), file.getAttribute(PutS3Object.S3_STORAGECLASS_ATTR_KEY));

        runner.clearTransferState();
    }
}
Example #30
Source File: S3DaoImpl.java From herd with Apache License 2.0
/**
 * Prepares the object metadata for server side encryption and reduced redundancy storage.
 *
 * @param params the parameters.
 * @param metadata the metadata to prepare.
 */
private void prepareMetadata(final S3FileTransferRequestParamsDto params, ObjectMetadata metadata) {
    // Set the server side encryption
    if (params.getKmsKeyId() != null) {
        /*
         * TODO Use proper way to set KMS once AWS provides a way.
         * We are modifying the raw headers directly since TransferManager's uploadFileList operation does not provide a way to set a KMS key ID.
         * This would normally cause some issues when uploading where an MD5 checksum validation exception will be thrown, even though the object is
         * correctly uploaded.
         * To get around this, a system property defined at
         * com.amazonaws.services.s3.internal.SkipMd5CheckStrategy.DISABLE_PUT_OBJECT_MD5_VALIDATION_PROPERTY must be set.
         */
        metadata.setSSEAlgorithm(SSEAlgorithm.KMS.getAlgorithm());
        metadata.setHeader(Headers.SERVER_SIDE_ENCRYPTION_AWS_KMS_KEYID, params.getKmsKeyId().trim());
    } else {
        metadata.setSSEAlgorithm(SSEAlgorithm.AES256.getAlgorithm());
    }

    // If specified, set the metadata to use RRS.
    if (Boolean.TRUE.equals(params.isUseRrs())) {
        // TODO: For upload File, we can set RRS on the putObjectRequest. For uploadDirectory, this is the only
        // way to do it. However, setHeader() is flagged as For Internal Use Only
        metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.ReducedRedundancy.toString());
    }
}
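As the TODO above notes, setting the KMS key ID through raw headers can trip the SDK's MD5 checksum validation on upload. A hedged sketch of the workaround the comment points to, using the constant it names; enabling it relaxes an integrity check, so verify the behavior against your SDK version:

// Sketch: disable PutObject MD5 validation via the property named in the comment above.
System.setProperty(
    com.amazonaws.services.s3.internal.SkipMd5CheckStrategy.DISABLE_PUT_OBJECT_MD5_VALIDATION_PROPERTY,
    "true");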