Java Code Examples for com.amazonaws.services.s3.model.ObjectMetadata#setHeader()
The following examples show how to use com.amazonaws.services.s3.model.ObjectMetadata#setHeader(). They are drawn from open source projects; the originating project and source file are noted above each example.
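Before the project examples, a minimal self-contained sketch of the call itself may help. setHeader(String, Object) writes a raw request header onto the metadata map (the SDK javadoc flags it as for internal use, as Example 9 below also notes), which makes it useful for headers that have no typed setter. The bucket name, key, and payload here are placeholders, not taken from any of the projects below:

import java.io.ByteArrayInputStream;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.Headers;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.StorageClass;

public class SetHeaderSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        byte[] payload = "hello".getBytes();

        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(payload.length);
        // Raw "x-amz-storage-class" header; Headers.STORAGE_CLASS is the SDK constant for it.
        metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.StandardInfrequentAccess.toString());

        // "my-bucket" and "my-key" are placeholder names.
        s3.putObject(new PutObjectRequest("my-bucket", "my-key", new ByteArrayInputStream(payload), metadata));
    }
}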
Example 1
Source File: S3DaoTest.java From herd with Apache License 2.0
@Test
public void testValidateGlacierS3FilesRestored()
{
    // Put a 1 byte already restored Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(false);
    s3Operations.putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY,
        new ByteArrayInputStream(new byte[1]), metadata), null);

    // Validate the file.
    S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
    params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
    params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
    s3Dao.validateGlacierS3FilesRestored(params);
}
Example 2
Source File: S3DaoTest.java From herd with Apache License 2.0
@Test
public void testRestoreObjectsGlacierObjectAlreadyBeingRestored()
{
    // Put a 1 byte Glacier storage class file in S3 flagged as already being restored.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(true);
    s3Operations.putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY,
        new ByteArrayInputStream(new byte[1]), metadata), null);

    // Initiate a restore request for the test S3 file.
    S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
    params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
    params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
    s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS, ARCHIVE_RETRIEVAL_OPTION);

    // Validate that there is still an ongoing restore request for this object.
    ObjectMetadata objectMetadata = s3Operations.getObjectMetadata(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, null);
    assertTrue(objectMetadata.getOngoingRestore());
}
Example 3
Source File: COSAPIClient.java From stocator with Apache License 2.0
/**
 * Start the multipart upload process.
 *
 * @return the upload result containing the ID
 * @throws IOException IO problem
 */
String initiateMultiPartUpload(Boolean atomicWrite, String etag) throws IOException {
    LOG.debug("Initiating Multipart upload");
    ObjectMetadata om = newObjectMetadata(-1);
    // if atomic write is enabled use the etag to ensure put request is atomic
    if (atomicWrite) {
        if (etag != null) {
            LOG.debug("Atomic write - setting If-Match header");
            om.setHeader("If-Match", etag);
        } else {
            LOG.debug("Atomic write - setting If-None-Match header");
            om.setHeader("If-None-Match", "*");
        }
    }
    final InitiateMultipartUploadRequest initiateMPURequest = new InitiateMultipartUploadRequest(mBucket, key, om);
    try {
        return mClient.initiateMultipartUpload(initiateMPURequest).getUploadId();
    } catch (AmazonClientException ace) {
        throw translateException("initiate MultiPartUpload", key, ace);
    }
}
Example 4
Source File: S3DaoTest.java From herd with Apache License 2.0
@Test
public void testValidateGlacierS3FilesRestoredGlacierObjectRestoreNotInitiated()
{
    // Put a 1 byte Glacier storage class file in S3 that has no restore initiated (OngoingRestore flag is null).
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    s3Operations.putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY,
        new ByteArrayInputStream(new byte[1]), metadata), null);

    // Try to validate if the Glacier S3 file is already restored.
    try
    {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
        s3Dao.validateGlacierS3FilesRestored(params);
        fail("Should throw an IllegalArgumentException when Glacier S3 file is not restored.");
    }
    catch (IllegalArgumentException e)
    {
        assertEquals(String.format("Archived S3 file \"%s\" is not restored. StorageClass {GLACIER}, OngoingRestore flag {null}, S3 bucket name {%s}",
            TARGET_S3_KEY, storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
Example 5
Source File: S3FileManagerImpl.java From entrada with GNU General Public License v3.0
private boolean uploadFile(File src, S3Details dstDetails, boolean archive) {
    PutObjectRequest request = new PutObjectRequest(dstDetails.getBucket(),
        FileUtil.appendPath(dstDetails.getKey(), src.getName()), src);

    ObjectMetadata meta = new ObjectMetadata();
    if (archive) {
        meta.setHeader(Headers.STORAGE_CLASS, StorageClass.fromValue(StringUtils.upperCase(archiveStorageClass)));
    }
    if (encrypt) {
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }
    request.setMetadata(meta);

    try {
        amazonS3.putObject(request);
        return true;
    } catch (Exception e) {
        log.error("Error while uploading file: {}", src, e);
    }
    return false;
}
Example 6
Source File: S3Publisher.java From hollow-reference-implementation with Apache License 2.0
public void publishSnapshot(Blob blob) {
    String objectName = getS3ObjectName(blobNamespace, "snapshot", blob.getToVersion());

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.addUserMetadata("to_state", String.valueOf(blob.getToVersion()));
    metadata.setHeader("Content-Length", blob.getFile().length());

    uploadFile(blob.getFile(), objectName, metadata);

    /// now we update the snapshot index
    updateSnapshotIndex(blob.getToVersion());
}
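Content-Length is a standard header, so the raw setHeader call above is interchangeable with the typed setter on ObjectMetadata; a one-line equivalent, assuming the same blob as in the example:

metadata.setContentLength(blob.getFile().length()); // typed equivalent of setHeader("Content-Length", ...)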
Example 7
Source File: S3DaoTest.java From herd with Apache License 2.0
@Test
public void testRestoreObjectsAmazonServiceException()
{
    // Build a mock file path that triggers an Amazon service exception when we request to restore an object.
    String testKey = String.format("%s/%s", TEST_S3_KEY_PREFIX, MockS3OperationsImpl.MOCK_S3_FILE_NAME_SERVICE_EXCEPTION);

    // Put a 1 byte Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(false);
    s3Operations.putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), testKey,
        new ByteArrayInputStream(new byte[1]), metadata), null);

    // Try to initiate a restore request for a mocked S3 file that would trigger an Amazon service exception when we request to restore an object.
    try
    {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(testKey)));
        s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS, ARCHIVE_RETRIEVAL_OPTION);
        fail("Should throw an IllegalStateException when an S3 restore object operation fails.");
    }
    catch (IllegalStateException e)
    {
        assertEquals(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. " +
            "Reason: InternalError (Service: null; Status Code: 0; Error Code: InternalError; Request ID: null)", testKey,
            storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
Example 8
Source File: S3DaoTest.java From herd with Apache License 2.0
@Test
public void testValidateGlacierS3FilesRestoredGlacierObjectRestoreInProgress()
{
    // Put a 1 byte Glacier storage class file in S3 that is still being restored (OngoingRestore flag is true).
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(true);
    s3Operations.putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY,
        new ByteArrayInputStream(new byte[1]), metadata), null);

    // Try to validate if the Glacier S3 file is already restored.
    try
    {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
        s3Dao.validateGlacierS3FilesRestored(params);
        fail("Should throw an IllegalArgumentException when Glacier S3 file is not restored.");
    }
    catch (IllegalArgumentException e)
    {
        assertEquals(String.format("Archived S3 file \"%s\" is not restored. StorageClass {GLACIER}, OngoingRestore flag {true}, S3 bucket name {%s}",
            TARGET_S3_KEY, storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
Example 9
Source File: S3DaoImpl.java From herd with Apache License 2.0
/**
 * Prepares the object metadata for server side encryption and reduced redundancy storage.
 *
 * @param params the parameters.
 * @param metadata the metadata to prepare.
 */
private void prepareMetadata(final S3FileTransferRequestParamsDto params, ObjectMetadata metadata)
{
    // Set the server side encryption
    if (params.getKmsKeyId() != null)
    {
        /*
         * TODO Use proper way to set KMS once AWS provides a way.
         * We are modifying the raw headers directly since TransferManager's uploadFileList operation does not provide a way to set a KMS key ID.
         * This would normally cause some issues when uploading where an MD5 checksum validation exception will be thrown, even though the object is
         * correctly uploaded.
         * To get around this, a system property defined at
         * com.amazonaws.services.s3.internal.SkipMd5CheckStrategy.DISABLE_PUT_OBJECT_MD5_VALIDATION_PROPERTY must be set.
         */
        metadata.setSSEAlgorithm(SSEAlgorithm.KMS.getAlgorithm());
        metadata.setHeader(Headers.SERVER_SIDE_ENCRYPTION_AWS_KMS_KEYID, params.getKmsKeyId().trim());
    }
    else
    {
        metadata.setSSEAlgorithm(SSEAlgorithm.AES256.getAlgorithm());
    }

    // If specified, set the metadata to use RRS.
    if (Boolean.TRUE.equals(params.isUseRrs()))
    {
        // TODO: For upload File, we can set RRS on the putObjectRequest. For uploadDirectory, this is the only
        // way to do it. However, setHeader() is flagged as For Internal Use Only
        metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.ReducedRedundancy.toString());
    }
}
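The TODO in this example refers to a JVM system property that disables the SDK's client-side MD5 check, which would otherwise fail when the KMS key ID header is injected this way. A minimal sketch of setting it at application startup; the property name is resolved from the SDK constant named in the comment above:

import com.amazonaws.services.s3.internal.SkipMd5CheckStrategy;

// Disables PutObject MD5 validation; the constant resolves to
// "com.amazonaws.services.s3.disablePutObjectMD5Validation".
System.setProperty(SkipMd5CheckStrategy.DISABLE_PUT_OBJECT_MD5_VALIDATION_PROPERTY, "true");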
Example 10
Source File: S3DaoTest.java From herd with Apache License 2.0
@Test
public void testValidateGlacierS3FilesRestoredAmazonServiceException()
{
    // Build a mock file path that triggers an Amazon service exception when we request S3 metadata for the object.
    String testKey = String.format("%s/%s", TEST_S3_KEY_PREFIX, MockS3OperationsImpl.MOCK_S3_FILE_NAME_SERVICE_EXCEPTION);

    // Put a 1 byte Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(false);
    s3Operations.putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), testKey,
        new ByteArrayInputStream(new byte[1]), metadata), null);

    // Try to validate if the Glacier S3 file is already restored for a mocked S3 file
    // that triggers an Amazon service exception when we request S3 metadata for the object.
    try
    {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(testKey)));
        s3Dao.validateGlacierS3FilesRestored(params);
        fail("Should throw an IllegalStateException when Glacier S3 object validation fails due to an Amazon service exception.");
    }
    catch (IllegalStateException e)
    {
        assertEquals(String.format("Fail to check restore status for \"%s\" key in \"%s\" bucket. " +
            "Reason: InternalError (Service: null; Status Code: 0; Error Code: InternalError; Request ID: null)", testKey,
            storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
Example 11
Source File: S3Publisher.java From hollow-reference-implementation with Apache License 2.0
public void publishReverseDelta(Blob blob) {
    String objectName = getS3ObjectName(blobNamespace, "reversedelta", blob.getFromVersion());

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.addUserMetadata("from_state", String.valueOf(blob.getFromVersion()));
    metadata.addUserMetadata("to_state", String.valueOf(blob.getToVersion()));
    metadata.setHeader("Content-Length", blob.getFile().length());

    uploadFile(blob.getFile(), objectName, metadata);
}
Example 12
Source File: S3Publisher.java From hollow-reference-implementation with Apache License 2.0
public void publishDelta(Blob blob) {
    String objectName = getS3ObjectName(blobNamespace, "delta", blob.getFromVersion());

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.addUserMetadata("from_state", String.valueOf(blob.getFromVersion()));
    metadata.addUserMetadata("to_state", String.valueOf(blob.getToVersion()));
    metadata.setHeader("Content-Length", blob.getFile().length());

    uploadFile(blob.getFile(), objectName, metadata);
}
Example 13
Source File: S3BaseUploadCallable.java From jobcacher-plugin with MIT License
protected ObjectMetadata buildMetadata(File file) throws IOException {
    final ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentType(Mimetypes.getInstance().getMimetype(file.getName()));
    metadata.setContentLength(file.length());
    metadata.setLastModified(new Date(file.lastModified()));

    if (storageClass != null && !storageClass.isEmpty()) {
        metadata.setHeader("x-amz-storage-class", storageClass);
    }
    if (useServerSideEncryption) {
        metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }

    // Map well-known user-supplied keys onto their typed setters; everything else becomes user metadata.
    for (Map.Entry<String, String> entry : userMetadata.entrySet()) {
        final String key = entry.getKey().toLowerCase();
        switch (key) {
            case "cache-control":
                metadata.setCacheControl(entry.getValue());
                break;
            case "expires":
                try {
                    final Date expires = new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss z").parse(entry.getValue());
                    metadata.setHttpExpiresDate(expires);
                } catch (ParseException e) {
                    metadata.addUserMetadata(entry.getKey(), entry.getValue());
                }
                break;
            case "content-encoding":
                metadata.setContentEncoding(entry.getValue());
                break;
            case "content-type":
                metadata.setContentType(entry.getValue());
                break;
            default:
                metadata.addUserMetadata(entry.getKey(), entry.getValue());
                break;
        }
    }
    return metadata;
}
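This example passes the storage class via the literal header name "x-amz-storage-class"; the herd examples above use the SDK constant Headers.STORAGE_CLASS, which holds the same string, so the two calls are interchangeable. A one-line equivalent, assuming the same storageClass field as in the example:

import com.amazonaws.services.s3.Headers;

metadata.setHeader(Headers.STORAGE_CLASS, storageClass); // Headers.STORAGE_CLASS == "x-amz-storage-class"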
Example 14
Source File: BusinessObjectDataServiceRestoreBusinessObjectDataTest.java From herd with Apache License 2.0
@Test
public void testRestoreBusinessObjectDataAmazonServiceException() throws Exception
{
    // Create S3FileTransferRequestParamsDto to access the S3 bucket location.
    // Since test S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = S3FileTransferRequestParamsDto.builder().withS3BucketName(S3_BUCKET_NAME)
        .withS3KeyPrefix(S3_BUCKET_NAME + "/" + TEST_S3_KEY_PREFIX + "/").build();

    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey =
        new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_VALUE,
            NO_SUBPARTITION_VALUES, DATA_VERSION);

    // Create database entities required for testing.
    BusinessObjectDataEntity businessObjectDataEntity =
        businessObjectDataServiceTestHelper.createDatabaseEntitiesForInitiateRestoreTesting(businessObjectDataKey);

    // Get the storage unit entity.
    StorageUnitEntity storageUnitEntity = storageUnitDaoHelper.getStorageUnitEntity(STORAGE_NAME, businessObjectDataEntity);

    // Get the expected S3 key prefix for the business object data key.
    String s3KeyPrefix = AbstractServiceTest
        .getExpectedS3KeyPrefix(businessObjectDataKey, AbstractServiceTest.DATA_PROVIDER_NAME, AbstractServiceTest.PARTITION_KEY,
            AbstractServiceTest.NO_SUB_PARTITION_KEYS);

    // Add a mocked S3 file name to the storage unit that would trigger an Amazon service exception when we request to restore objects.
    storageFileDaoTestHelper
        .createStorageFileEntity(storageUnitEntity, String.format("%s/%s", s3KeyPrefix, MockS3OperationsImpl.MOCK_S3_FILE_NAME_SERVICE_EXCEPTION),
            FILE_SIZE_1_KB, ROW_COUNT);

    try
    {
        // Put relative Glacier storage class files into the Glacier S3 bucket flagged as not being currently restored.
        for (StorageFileEntity storageFileEntity : storageUnitEntity.getStorageFiles())
        {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
            metadata.setOngoingRestore(false);
            s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, storageFileEntity.getPath(),
                new ByteArrayInputStream(new byte[storageFileEntity.getFileSizeBytes().intValue()]), metadata), NO_S3_CLIENT);
        }

        // Try to initiate a restore request for the business object data when S3 restore object operation fails with an Amazon service exception.
        try
        {
            businessObjectDataService.restoreBusinessObjectData(businessObjectDataKey, EXPIRATION_IN_DAYS, ARCHIVE_RETRIEVAL_OPTION);
            fail();
        }
        catch (IllegalStateException e)
        {
            assertEquals(String.format("Failed to initiate a restore request for \"%s/%s\" key in \"%s\" bucket. " +
                "Reason: InternalError (Service: null; Status Code: 0; Error Code: InternalError; Request ID: null)", s3KeyPrefix,
                MockS3OperationsImpl.MOCK_S3_FILE_NAME_SERVICE_EXCEPTION, S3_BUCKET_NAME), e.getMessage());
        }

        // Validate that the storage unit status is still ARCHIVED.
        assertEquals(StorageUnitStatusEntity.ARCHIVED, storageUnitEntity.getStatus().getCode());
    }
    finally
    {
        // Delete test files from S3 storage.
        if (!s3Dao.listDirectory(s3FileTransferRequestParamsDto).isEmpty())
        {
            s3Dao.deleteDirectory(s3FileTransferRequestParamsDto);
        }
        s3Operations.rollback();
    }
}
Example 15
Source File: BusinessObjectDataServiceRestoreBusinessObjectDataTest.java From herd with Apache License 2.0
@Test
public void testRestoreBusinessObjectDataNonGlacierNonDeepArchiveStorageClass() throws Exception
{
    // Create S3FileTransferRequestParamsDto to access the S3 bucket.
    // Since test S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto glacierS3FileTransferRequestParamsDto = S3FileTransferRequestParamsDto.builder().withS3BucketName(S3_BUCKET_NAME)
        .withS3KeyPrefix(S3_BUCKET_NAME + "/" + TEST_S3_KEY_PREFIX + "/").build();

    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey =
        new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_VALUE,
            NO_SUBPARTITION_VALUES, DATA_VERSION);

    // Create database entities required for testing.
    BusinessObjectDataEntity businessObjectDataEntity = businessObjectDataServiceTestHelper
        .createDatabaseEntitiesForInitiateRestoreTesting(businessObjectDataKey, AbstractServiceTest.STORAGE_NAME, AbstractServiceTest.S3_BUCKET_NAME,
            StorageUnitStatusEntity.ARCHIVED, Collections.singletonList(LOCAL_FILE));

    // Get the storage unit entity.
    StorageUnitEntity storageUnitEntity = storageUnitDaoHelper.getStorageUnitEntity(STORAGE_NAME, businessObjectDataEntity);

    try
    {
        // Put relative non-Glacier non-DeepArchive storage class files into the S3 bucket.
        for (StorageFileEntity storageFileEntity : storageUnitEntity.getStorageFiles())
        {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Standard);
            metadata.setOngoingRestore(false);
            s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, storageFileEntity.getPath(),
                new ByteArrayInputStream(new byte[storageFileEntity.getFileSizeBytes().intValue()]), metadata), NO_S3_CLIENT);
        }

        // Try to initiate a restore request for the business object data.
        try
        {
            businessObjectDataService.restoreBusinessObjectData(businessObjectDataKey, EXPIRATION_IN_DAYS, ARCHIVE_RETRIEVAL_OPTION);
            fail();
        }
        catch (IllegalArgumentException e)
        {
            assertEquals(String.format("S3 file \"%s\" is not archived (found %s storage class when expecting %s or %s). S3 Bucket Name: \"%s\"",
                Iterables.get(storageUnitEntity.getStorageFiles(), 0).getPath(), StorageClass.Standard.toString(), StorageClass.Glacier.toString(),
                StorageClass.DeepArchive.toString(), S3_BUCKET_NAME), e.getMessage());
        }

        // Validate that the storage unit status is still ARCHIVED.
        assertEquals(StorageUnitStatusEntity.ARCHIVED, storageUnitEntity.getStatus().getCode());
    }
    finally
    {
        // Delete test files from S3 storage.
        if (!s3Dao.listDirectory(glacierS3FileTransferRequestParamsDto).isEmpty())
        {
            s3Dao.deleteDirectory(glacierS3FileTransferRequestParamsDto);
        }
        s3Operations.rollback();
    }
}
Example 16
Source File: BusinessObjectDataFinalizeRestoreServiceTest.java From herd with Apache License 2.0
@Test
public void testFinalizeRestoreAmazonServiceException() throws Exception
{
    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey =
        new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_VALUE,
            NO_SUBPARTITION_VALUES, DATA_VERSION);

    // Get the expected S3 key prefix for the business object data key.
    String s3KeyPrefix = AbstractServiceTest
        .getExpectedS3KeyPrefix(businessObjectDataKey, AbstractServiceTest.DATA_PROVIDER_NAME, AbstractServiceTest.PARTITION_KEY,
            AbstractServiceTest.NO_SUB_PARTITION_KEYS);

    // Create S3FileTransferRequestParamsDto to access the S3 bucket.
    // Since test S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto =
        S3FileTransferRequestParamsDto.builder().withS3BucketName(S3_BUCKET_NAME).withS3KeyPrefix(s3KeyPrefix + "/").build();

    // Create database entities required for testing.
    BusinessObjectDataEntity businessObjectDataEntity =
        businessObjectDataServiceTestHelper.createDatabaseEntitiesForFinalizeRestoreTesting(businessObjectDataKey);

    // Get the storage unit entity.
    StorageUnitEntity storageUnitEntity = storageUnitDaoHelper.getStorageUnitEntity(STORAGE_NAME, businessObjectDataEntity);

    // Add a mocked S3 file name to the storage unit that would trigger an Amazon service exception when we try to get metadata for the object.
    storageFileDaoTestHelper
        .createStorageFileEntity(storageUnitEntity, String.format("%s/%s", s3KeyPrefix, MockS3OperationsImpl.MOCK_S3_FILE_NAME_SERVICE_EXCEPTION),
            FILE_SIZE_1_KB, ROW_COUNT);

    // Create a storage unit key.
    BusinessObjectDataStorageUnitKey storageUnitKey = storageUnitHelper.createStorageUnitKey(businessObjectDataKey, STORAGE_NAME);

    try
    {
        // Put relative "already restored" Glacier storage class S3 files in the S3 bucket.
        for (StorageFileEntity storageFileEntity : storageUnitEntity.getStorageFiles())
        {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
            metadata.setOngoingRestore(false);
            s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, storageFileEntity.getPath(),
                new ByteArrayInputStream(new byte[storageFileEntity.getFileSizeBytes().intValue()]), metadata), null);
        }

        // Try to finalize a restore for the storage unit when get S3 object metadata operation fails with an Amazon service exception.
        try
        {
            businessObjectDataFinalizeRestoreService.finalizeRestore(storageUnitKey);
            fail("Should throw an IllegalStateException when a get S3 object metadata operation fails.");
        }
        catch (IllegalStateException e)
        {
            assertEquals(String.format("Fail to check restore status for \"%s/%s\" key in \"%s\" bucket. " +
                "Reason: InternalError (Service: null; Status Code: 0; Error Code: InternalError; Request ID: null)", s3KeyPrefix,
                MockS3OperationsImpl.MOCK_S3_FILE_NAME_SERVICE_EXCEPTION, S3_BUCKET_NAME), e.getMessage());
        }

        // Validate that the storage unit status is still in RESTORING state.
        assertEquals(StorageUnitStatusEntity.RESTORING, storageUnitEntity.getStatus().getCode());

        // Validate that we have the S3 files at the expected S3 location.
        assertEquals(storageUnitEntity.getStorageFiles().size(), s3Dao.listDirectory(s3FileTransferRequestParamsDto).size());
    }
    finally
    {
        // Delete test files from S3 storage.
        if (!s3Dao.listDirectory(s3FileTransferRequestParamsDto).isEmpty())
        {
            s3Dao.deleteDirectory(s3FileTransferRequestParamsDto);
        }
        s3Operations.rollback();
    }
}
Example 17
Source File: COSBlockOutputStream.java From stocator with Apache License 2.0
/**
 * Upload the current block as a single PUT request; if the buffer is empty a
 * 0-byte PUT will be invoked, as it is needed to create an entry at the far
 * end.
 *
 * @throws IOException any problem
 */
private void putObject() throws IOException {
    LOG.debug("Executing regular upload for {}", writeOperationHelper);
    final COSDataBlocks.DataBlock block = getActiveBlock();
    int size = block.dataSize();
    final COSDataBlocks.BlockUploadData uploadData = block.startUpload();
    final PutObjectRequest putObjectRequest = uploadData.hasFile()
        ? writeOperationHelper.newPutRequest(uploadData.getFile())
        : writeOperationHelper.newPutRequest(uploadData.getUploadStream(), size);

    final ObjectMetadata om = new ObjectMetadata();
    om.setUserMetadata(mMetadata);
    if (contentType != null && !contentType.isEmpty()) {
        om.setContentType(contentType);
    } else {
        om.setContentType("application/octet-stream");
    }
    // if atomic write is enabled use the etag to ensure put request is atomic
    if (mAtomicWriteEnabled) {
        if (mEtag != null) {
            LOG.debug("Atomic write - setting If-Match header");
            om.setHeader("If-Match", mEtag);
        } else {
            LOG.debug("Atomic write - setting If-None-Match header");
            om.setHeader("If-None-Match", "*");
        }
    }
    putObjectRequest.setMetadata(om);

    ListenableFuture<PutObjectResult> putObjectResult = executorService.submit(new Callable<PutObjectResult>() {
        @Override
        public PutObjectResult call() throws Exception {
            PutObjectResult result;
            try {
                // the putObject call automatically closes the input
                // stream afterwards.
                result = writeOperationHelper.putObject(putObjectRequest);
            } finally {
                closeAll(LOG, uploadData, block);
            }
            return result;
        }
    });
    clearActiveBlock();

    // wait for completion
    try {
        putObjectResult.get();
    } catch (InterruptedException ie) {
        LOG.warn("Interrupted object upload", ie);
        Thread.currentThread().interrupt();
    } catch (ExecutionException ee) {
        throw extractException("regular upload", key, ee);
    }
}
Example 18
Source File: S3DaoImplTest.java From herd with Apache License 2.0
private void testRestoreObjectsWithS3Exception(String exceptionMessage, int statusCode)
{
    List<File> files = Collections.singletonList(new File(TEST_FILE));

    // Create an S3 file transfer request parameters DTO to access S3 objects.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);
    s3FileTransferRequestParamsDto.setS3KeyPrefix(S3_KEY_PREFIX);
    s3FileTransferRequestParamsDto.setFiles(files);

    // Create a retry policy.
    RetryPolicy retryPolicy =
        new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);

    // Create an Object Metadata
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setOngoingRestore(false);
    objectMetadata.setHeader(Headers.STORAGE_CLASS, StorageClass.DeepArchive);

    ArgumentCaptor<AmazonS3Client> s3ClientCaptor = ArgumentCaptor.forClass(AmazonS3Client.class);
    ArgumentCaptor<String> s3BucketNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> keyCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<RestoreObjectRequest> requestStoreCaptor = ArgumentCaptor.forClass(RestoreObjectRequest.class);

    // Create an Amazon S3 Exception
    AmazonS3Exception amazonS3Exception = new AmazonS3Exception(exceptionMessage);
    amazonS3Exception.setStatusCode(statusCode);

    // Mock the external calls.
    when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
    when(s3Operations.getObjectMetadata(s3BucketNameCaptor.capture(), keyCaptor.capture(), s3ClientCaptor.capture())).thenReturn(objectMetadata);
    doThrow(amazonS3Exception).when(s3Operations).restoreObject(requestStoreCaptor.capture(), s3ClientCaptor.capture());

    try
    {
        // Call the method under test.
        s3DaoImpl.restoreObjects(s3FileTransferRequestParamsDto, EXPIRATION_IN_DAYS, Tier.Standard.toString());

        // If this is not a restore already in progress exception message (409) then we should have caught an exception.
        // Else if this is a restore already in progress message (409) then continue as usual.
        if (!exceptionMessage.equals(RESTORE_ALREADY_IN_PROGRESS_EXCEPTION_MESSAGE))
        {
            // Should not be here. Fail!
            fail();
        }
        else
        {
            RestoreObjectRequest requestStore = requestStoreCaptor.getValue();
            assertEquals(S3_BUCKET_NAME, s3BucketNameCaptor.getValue());
            assertEquals(TEST_FILE, keyCaptor.getValue());

            // Verify Bulk option is used when the option is not provided
            assertEquals(StringUtils.isNotEmpty(Tier.Standard.toString()) ? Tier.Standard.toString() : Tier.Bulk.toString(),
                requestStore.getGlacierJobParameters().getTier());
        }
    }
    catch (IllegalStateException illegalStateException)
    {
        assertEquals(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. " +
            "Reason: com.amazonaws.services.s3.model.AmazonS3Exception: %s " +
            "(Service: null; Status Code: %s; Error Code: null; Request ID: null; S3 Extended Request ID: null), S3 Extended Request ID: null",
            TEST_FILE, S3_BUCKET_NAME, exceptionMessage, statusCode), illegalStateException.getMessage());
    }

    // Verify the external calls
    verify(retryPolicyFactory).getRetryPolicy();
    verify(s3Operations).getObjectMetadata(anyString(), anyString(), any(AmazonS3Client.class));
    verify(s3Operations).restoreObject(any(RestoreObjectRequest.class), any(AmazonS3Client.class));
    verifyNoMoreInteractionsHelper();
}
Example 19
Source File: S3DaoImplTest.java From herd with Apache License 2.0
@Test
public void testRestoreObjectsInDeepArchiveWithExpeditedArchiveRetrievalOption()
{
    List<File> files = Collections.singletonList(new File(TEST_FILE));

    // Create an S3 file transfer request parameters DTO to access S3 objects.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);
    s3FileTransferRequestParamsDto.setS3KeyPrefix(S3_KEY_PREFIX);
    s3FileTransferRequestParamsDto.setFiles(files);

    // Create a retry policy.
    RetryPolicy retryPolicy =
        new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);

    // Create an Object Metadata with DeepArchive storage class.
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setOngoingRestore(false);
    objectMetadata.setHeader(Headers.STORAGE_CLASS, StorageClass.DeepArchive);

    ArgumentCaptor<AmazonS3Client> s3ClientCaptor = ArgumentCaptor.forClass(AmazonS3Client.class);
    ArgumentCaptor<String> s3BucketNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> keyCaptor = ArgumentCaptor.forClass(String.class);

    // Mock the external calls.
    when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
    when(s3Operations.getObjectMetadata(s3BucketNameCaptor.capture(), keyCaptor.capture(), s3ClientCaptor.capture())).thenReturn(objectMetadata);
    doThrow(new AmazonServiceException("Retrieval option is not supported by this storage class")).when(s3Operations)
        .restoreObject(any(RestoreObjectRequest.class), any(AmazonS3.class));

    try
    {
        s3DaoImpl.restoreObjects(s3FileTransferRequestParamsDto, EXPIRATION_IN_DAYS, Tier.Expedited.toString());
        fail();
    }
    catch (IllegalArgumentException e)
    {
        assertEquals(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. " +
            "Reason: Retrieval option is not supported by this storage class (Service: null; Status Code: 0; Error Code: null; Request ID: null)",
            TEST_FILE, S3_BUCKET_NAME), e.getMessage());
    }
}
Example 20
Source File: S3DaoImplTest.java From herd with Apache License 2.0
/**
 * Runs the restore objects method.
 *
 * @param archiveRetrievalOption the archive retrieval option
 * @param storageClass the storage class of the archived object
 */
private void runRestoreObjects(String archiveRetrievalOption, StorageClass storageClass)
{
    List<File> files = Collections.singletonList(new File(TEST_FILE));

    // Create an S3 file transfer request parameters DTO to access S3 objects.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);
    s3FileTransferRequestParamsDto.setS3KeyPrefix(S3_KEY_PREFIX);
    s3FileTransferRequestParamsDto.setFiles(files);

    // Create a retry policy.
    RetryPolicy retryPolicy =
        new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);

    // Create an Object Metadata
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setOngoingRestore(false);
    objectMetadata.setHeader(Headers.STORAGE_CLASS, storageClass);

    ArgumentCaptor<AmazonS3Client> s3ClientCaptor = ArgumentCaptor.forClass(AmazonS3Client.class);
    ArgumentCaptor<String> s3BucketNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> keyCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<RestoreObjectRequest> requestStoreCaptor = ArgumentCaptor.forClass(RestoreObjectRequest.class);

    // Mock the external calls.
    when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
    when(s3Operations.getObjectMetadata(s3BucketNameCaptor.capture(), keyCaptor.capture(), s3ClientCaptor.capture())).thenReturn(objectMetadata);
    doNothing().when(s3Operations).restoreObject(requestStoreCaptor.capture(), s3ClientCaptor.capture());

    s3DaoImpl.restoreObjects(s3FileTransferRequestParamsDto, EXPIRATION_IN_DAYS, archiveRetrievalOption);

    RestoreObjectRequest requestStore = requestStoreCaptor.getValue();
    assertEquals(S3_BUCKET_NAME, s3BucketNameCaptor.getValue());
    assertEquals(TEST_FILE, keyCaptor.getValue());

    // Verify Bulk option is used when the option is not provided
    assertEquals(StringUtils.isNotEmpty(archiveRetrievalOption) ? archiveRetrievalOption : Tier.Bulk.toString(),
        requestStore.getGlacierJobParameters().getTier());

    // Verify the external calls
    verify(retryPolicyFactory).getRetryPolicy();
    verify(s3Operations).getObjectMetadata(anyString(), anyString(), any(AmazonS3Client.class));
    verify(s3Operations).restoreObject(any(RestoreObjectRequest.class), any(AmazonS3Client.class));
    verifyNoMoreInteractionsHelper();
}
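The S3DaoImplTest examples above exercise restoreObject() through mocks. For reference, a minimal direct sketch of the underlying SDK call that such code ultimately issues; the bucket name, key, and 7-day expiration are placeholders, not taken from the tests:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.GlacierJobParameters;
import com.amazonaws.services.s3.model.RestoreObjectRequest;
import com.amazonaws.services.s3.model.Tier;

public class RestoreSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // Restore a placeholder archived object for 7 days using the Bulk retrieval tier.
        RestoreObjectRequest request = new RestoreObjectRequest("my-bucket", "my-key", 7)
            .withGlacierJobParameters(new GlacierJobParameters().withTier(Tier.Bulk));
        s3.restoreObjectV2(request); // older 1.11.x SDKs expose restoreObject(request) instead
    }
}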