com.amazonaws.services.s3.model.Tier Java Examples
The following examples show how to use com.amazonaws.services.s3.model.Tier, the AWS SDK enum of Glacier retrieval tiers (Standard, Bulk, and Expedited) used when restoring archived S3 objects. The original project and source file are listed above each example.
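Before the project examples, here is a minimal, self-contained sketch of how Tier is typically passed to an S3 restore request through GlacierJobParameters. The bucket name and object key are placeholders chosen for illustration, and the sketch assumes a reasonably recent 1.11.x+ version of the AWS SDK for Java v1 (older versions expose restoreObject instead of restoreObjectV2).

import java.util.Arrays;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.GlacierJobParameters;
import com.amazonaws.services.s3.model.RestoreObjectRequest;
import com.amazonaws.services.s3.model.Tier;

public class TierRestoreSketch {
    public static void main(String[] args) {
        // Placeholder bucket and key, used only for illustration.
        String bucketName = "my-archive-bucket";
        String key = "data/2020/01/file.gz";

        // Prints the supported tiers: Standard, Bulk, and Expedited.
        System.out.println("Available tiers: " + Arrays.toString(Tier.values()));

        // Parse a user-supplied option into the enum; Tier.fromValue throws
        // IllegalArgumentException for unknown values.
        Tier tier = Tier.fromValue("Bulk");

        // Build a restore request that keeps the restored copy available for 5 days
        // and uses the selected Glacier retrieval tier.
        RestoreObjectRequest restoreRequest = new RestoreObjectRequest(bucketName, key, 5);
        restoreRequest.setGlacierJobParameters(new GlacierJobParameters().withTier(tier.toString()));

        AmazonS3 s3Client = AmazonS3ClientBuilder.defaultClient();
        s3Client.restoreObjectV2(restoreRequest);
    }
}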
Example #1
Source File: BusinessObjectDataInitiateRestoreHelperServiceImplTest.java From herd with Apache License 2.0
@Test
public void testPrepareToInitiateRestoreInvalidArchiveRetrievalOption() {
    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey =
        new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_VALUE,
            SUBPARTITION_VALUES, DATA_VERSION);

    // Specify the expected exception.
    expectedException.expect(IllegalArgumentException.class);
    expectedException.expectMessage(is(String.format(String.format(
        "The archive retrieval option value \"%s\" is invalid. " + "Valid archive retrieval option values are:%s", INVALID_ARCHIVE_RETRIEVAL_OPTION,
        Stream.of(Tier.values()).map(Enum::name).collect(Collectors.toList())))));

    businessObjectDataInitiateRestoreHelperServiceImpl
        .prepareToInitiateRestore(businessObjectDataKey, EXPIRATION_IN_DAYS, INVALID_ARCHIVE_RETRIEVAL_OPTION);

    // Verify the external calls
    verify(businessObjectDataHelper).validateBusinessObjectDataKey(businessObjectDataKey, true, true);
    verifyNoMoreInteractionsHelper();
}
Example #2
Source File: AmazonS3FileSystem.java From iaf with Apache License 2.0
public static List<String> getTiers() {
    List<String> tiers = new ArrayList<String>(Tier.values().length);
    for (Tier tier : Tier.values()) {
        tiers.add(tier.toString());
    }
    return tiers;
}
Example #3
Source File: BusinessObjectDataInitiateRestoreHelperServiceImpl.java From herd with Apache License 2.0
/**
 * Prepares to initiate a business object data restore request by validating the business object data along with other related database entities.
 * The method also creates and returns a business object data restore DTO.
 *
 * @param businessObjectDataKey the business object data key
 * @param expirationInDays the time, in days, between when the business object data is restored to the S3 bucket and when it expires
 * @param archiveRetrievalOption the archive retrieval option when restoring an archived object
 *
 * @return the DTO that holds various parameters needed to perform a business object data restore
 */
protected BusinessObjectDataRestoreDto prepareToInitiateRestoreImpl(BusinessObjectDataKey businessObjectDataKey, Integer expirationInDays,
    String archiveRetrievalOption) {
    // Validate and trim the business object data key.
    businessObjectDataHelper.validateBusinessObjectDataKey(businessObjectDataKey, true, true);

    // If expiration time is not specified, use the configured default value.
    int localExpirationInDays = expirationInDays != null ? expirationInDays :
        herdStringHelper.getConfigurationValueAsInteger(ConfigurationValue.BDATA_RESTORE_EXPIRATION_IN_DAYS_DEFAULT);

    // Validate the expiration time.
    Assert.isTrue(localExpirationInDays > 0, "Expiration in days value must be a positive integer.");

    // Trim the whitespaces.
    if (archiveRetrievalOption != null) {
        archiveRetrievalOption = archiveRetrievalOption.trim();
    }

    // Validate the archive retrieval option.
    if (StringUtils.isNotEmpty(archiveRetrievalOption)) {
        try {
            Tier.fromValue(archiveRetrievalOption);
        }
        catch (IllegalArgumentException ex) {
            throw new IllegalArgumentException(String.format(
                "The archive retrieval option value \"%s\" is invalid. " + "Valid archive retrieval option values are:%s", archiveRetrievalOption,
                Stream.of(Tier.values()).map(Enum::name).collect(Collectors.toList())));
        }
    }

    // Retrieve the business object data and ensure it exists.
    BusinessObjectDataEntity businessObjectDataEntity = businessObjectDataDaoHelper.getBusinessObjectDataEntity(businessObjectDataKey);

    // Retrieve and validate a Glacier storage unit for this business object data.
    StorageUnitEntity storageUnitEntity = getStorageUnit(businessObjectDataEntity);

    // Get the storage name.
    String storageName = storageUnitEntity.getStorage().getName();

    // Validate that S3 storage has S3 bucket name configured.
    // Please note that since S3 bucket name attribute value is required we pass a "true" flag.
    String s3BucketName = storageHelper.getStorageAttributeValueByName(
        configurationHelper.getProperty(ConfigurationValue.S3_ATTRIBUTE_NAME_BUCKET_NAME), storageUnitEntity.getStorage(), true);

    // Get storage specific S3 key prefix for this business object data.
    String s3KeyPrefix =
        s3KeyPrefixHelper.buildS3KeyPrefix(storageUnitEntity.getStorage(), businessObjectDataEntity.getBusinessObjectFormat(), businessObjectDataKey);

    // Retrieve and validate storage files registered with the storage unit.
    List<StorageFile> storageFiles = storageFileHelper.getAndValidateStorageFiles(storageUnitEntity, s3KeyPrefix, storageName, businessObjectDataKey);

    // Validate that this storage does not have any other registered storage files that
    // start with the S3 key prefix, but belong to other business object data instances.
    storageFileDaoHelper.validateStorageFilesCount(storageName, businessObjectDataKey, s3KeyPrefix, storageFiles.size());

    // Set the expiration time for the restored storage unit.
    Timestamp currentTime = new Timestamp(System.currentTimeMillis());
    storageUnitEntity.setRestoreExpirationOn(HerdDateUtils.addDays(currentTime, localExpirationInDays));

    // Retrieve and ensure the RESTORING storage unit status entity exists.
    StorageUnitStatusEntity newStorageUnitStatusEntity = storageUnitStatusDaoHelper.getStorageUnitStatusEntity(StorageUnitStatusEntity.RESTORING);

    // Save the old storage unit status value.
    String oldOriginStorageUnitStatus = storageUnitEntity.getStatus().getCode();

    // Update the S3 storage unit status to RESTORING.
    storageUnitDaoHelper.updateStorageUnitStatus(storageUnitEntity, newStorageUnitStatusEntity, StorageUnitStatusEntity.RESTORING);

    // Build the business object data restore parameters DTO.
    BusinessObjectDataRestoreDto businessObjectDataRestoreDto = new BusinessObjectDataRestoreDto();
    businessObjectDataRestoreDto.setBusinessObjectDataKey(businessObjectDataHelper.getBusinessObjectDataKey(businessObjectDataEntity));
    businessObjectDataRestoreDto.setStorageName(storageName);
    businessObjectDataRestoreDto.setS3Endpoint(configurationHelper.getProperty(ConfigurationValue.S3_ENDPOINT));
    businessObjectDataRestoreDto.setS3BucketName(s3BucketName);
    businessObjectDataRestoreDto.setS3KeyPrefix(s3KeyPrefix);
    businessObjectDataRestoreDto.setStorageFiles(storageFiles);
    businessObjectDataRestoreDto.setArchiveRetrievalOption(archiveRetrievalOption);
    businessObjectDataRestoreDto.setNewStorageUnitStatus(newStorageUnitStatusEntity.getCode());
    businessObjectDataRestoreDto.setOldStorageUnitStatus(oldOriginStorageUnitStatus);

    // Return the parameters DTO.
    return businessObjectDataRestoreDto;
}
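The try/catch around Tier.fromValue above is the heart of Example #3's option handling. As a rough, standalone sketch of the same idea, assuming nothing beyond the SDK's Tier enum (the class name, method name, and message wording below are illustrative, not taken from herd):

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import com.amazonaws.services.s3.model.Tier;

public final class ArchiveRetrievalOptionValidator {

    private ArchiveRetrievalOptionValidator() {
    }

    /**
     * Returns the canonical archive retrieval option, or null when the input is null or blank,
     * and rejects values that do not match a Tier enum constant.
     */
    public static String validate(String archiveRetrievalOption) {
        if (archiveRetrievalOption == null || archiveRetrievalOption.trim().isEmpty()) {
            return null;
        }
        String trimmed = archiveRetrievalOption.trim();
        try {
            // Tier.fromValue throws IllegalArgumentException for unknown values.
            return Tier.fromValue(trimmed).toString();
        }
        catch (IllegalArgumentException ex) {
            List<String> validValues = Stream.of(Tier.values()).map(Enum::name).collect(Collectors.toList());
            throw new IllegalArgumentException(
                String.format("The archive retrieval option value \"%s\" is invalid. Valid values are: %s", trimmed, validValues), ex);
        }
    }
}

Note that Example #3 keeps the trimmed raw string and uses fromValue purely as a validity check, whereas this sketch normalizes the result to the enum's canonical string.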
Example #4
Source File: BusinessObjectDataInitiateRestoreHelperServiceImplTest.java From herd with Apache License 2.0
@Test
public void testPrepareToInitiateRestoreBulkArchiveRetrievalOption() {
    validatePrepareToInitiateRestoreWithValidArchiveRetrievalOption(Tier.Bulk.toString());
}
Example #5
Source File: BusinessObjectDataInitiateRestoreHelperServiceImplTest.java From herd with Apache License 2.0
@Test
public void testPrepareToInitiateRestoreStandardArchiveRetrievalOption() {
    validatePrepareToInitiateRestoreWithValidArchiveRetrievalOption(Tier.Standard.toString());
}
Example #6
Source File: BusinessObjectDataInitiateRestoreHelperServiceImplTest.java From herd with Apache License 2.0
@Test
public void testPrepareToInitiateRestoreExpeditedArchiveRetrievalOption() {
    validatePrepareToInitiateRestoreWithValidArchiveRetrievalOption(Tier.Expedited.toString());
}
Example #7
Source File: BusinessObjectDataInitiateRestoreHelperServiceImplTest.java From herd with Apache License 2.0
@Test
public void testPrepareToInitiateRestoreWhitespacesArchiveRetrievalOption() {
    // Create an archive retrieval option with leading and trailing whitespace.
    String archiveRetrievalOption = " " + Tier.Expedited.toString() + " ";

    // Create a business object data key.
    BusinessObjectDataKey businessObjectDataKey =
        new BusinessObjectDataKey(BDEF_NAMESPACE, BDEF_NAME, FORMAT_USAGE_CODE, FORMAT_FILE_TYPE_CODE, FORMAT_VERSION, PARTITION_VALUE,
            SUBPARTITION_VALUES, DATA_VERSION);

    BusinessObjectDataEntity businessObjectDataEntity = new BusinessObjectDataEntity();

    // Create a single storage unit.
    StorageEntity storageEntity = new StorageEntity();
    storageEntity.setName(STORAGE_NAME);

    StorageUnitStatusEntity storageUnitStatusEntity = new StorageUnitStatusEntity();
    storageUnitStatusEntity.setCode(StorageUnitStatusEntity.ARCHIVED);

    StorageUnitEntity storageUnitEntity = new StorageUnitEntity();
    storageUnitEntity.setStorage(storageEntity);
    storageUnitEntity.setStatus(storageUnitStatusEntity);

    List<StorageUnitEntity> storageUnitEntities = Collections.singletonList(storageUnitEntity);
    List<StorageFile> storageFiles = Collections.singletonList(new StorageFile(S3_KEY, FILE_SIZE, ROW_COUNT));

    StorageUnitStatusEntity newStorageUnitStatusEntity = new StorageUnitStatusEntity();
    newStorageUnitStatusEntity.setCode(StorageUnitStatusEntity.RESTORING);

    // Mock the external calls.
    when(businessObjectDataDaoHelper.getBusinessObjectDataEntity(businessObjectDataKey)).thenReturn(businessObjectDataEntity);
    when(storageUnitDao.getStorageUnitsByStoragePlatformAndBusinessObjectData(StoragePlatformEntity.S3, businessObjectDataEntity))
        .thenReturn(storageUnitEntities);
    when(configurationHelper.getProperty(ConfigurationValue.S3_ATTRIBUTE_NAME_BUCKET_NAME))
        .thenReturn((String) ConfigurationValue.S3_ATTRIBUTE_NAME_BUCKET_NAME.getDefaultValue());
    when(storageHelper.getStorageAttributeValueByName((String) ConfigurationValue.S3_ATTRIBUTE_NAME_BUCKET_NAME.getDefaultValue(), storageEntity, true))
        .thenReturn(S3_BUCKET_NAME);
    when(s3KeyPrefixHelper.buildS3KeyPrefix(storageEntity, businessObjectDataEntity.getBusinessObjectFormat(), businessObjectDataKey))
        .thenReturn(S3_KEY_PREFIX);
    when(storageFileHelper.getAndValidateStorageFiles(storageUnitEntity, S3_KEY_PREFIX, STORAGE_NAME, businessObjectDataKey)).thenReturn(storageFiles);
    when(storageUnitStatusDaoHelper.getStorageUnitStatusEntity(StorageUnitStatusEntity.RESTORING)).thenReturn(newStorageUnitStatusEntity);
    when(businessObjectDataHelper.getBusinessObjectDataKey(businessObjectDataEntity)).thenReturn(businessObjectDataKey);
    when(configurationHelper.getProperty(ConfigurationValue.S3_ENDPOINT)).thenReturn(S3_ENDPOINT);

    // Call the method under test with the whitespace-padded archive retrieval option.
    BusinessObjectDataRestoreDto businessObjectDataRestoreDto =
        businessObjectDataInitiateRestoreHelperServiceImpl.prepareToInitiateRestore(businessObjectDataKey, EXPIRATION_IN_DAYS, archiveRetrievalOption);

    // Validate the businessObjectDataRestoreDto.
    assertEquals(businessObjectDataKey, businessObjectDataRestoreDto.getBusinessObjectDataKey());
    assertEquals(STORAGE_NAME, businessObjectDataRestoreDto.getStorageName());
    assertEquals(S3_ENDPOINT, businessObjectDataRestoreDto.getS3Endpoint());
    assertEquals(S3_BUCKET_NAME, businessObjectDataRestoreDto.getS3BucketName());
    assertEquals(S3_KEY_PREFIX, businessObjectDataRestoreDto.getS3KeyPrefix());
    assertEquals(storageFiles, businessObjectDataRestoreDto.getStorageFiles());

    // Verify the whitespaces are trimmed.
    assertEquals(Tier.Expedited.toString(), businessObjectDataRestoreDto.getArchiveRetrievalOption());

    assertEquals(StorageUnitStatusEntity.RESTORING, businessObjectDataRestoreDto.getNewStorageUnitStatus());
    assertEquals(StorageUnitStatusEntity.ARCHIVED, businessObjectDataRestoreDto.getOldStorageUnitStatus());
}
Example #8
Source File: S3DaoImpl.java From herd with Apache License 2.0
@Override
public void restoreObjects(final S3FileTransferRequestParamsDto params, int expirationInDays, String archiveRetrievalOption) {
    LOGGER.info("Restoring a list of objects in S3... s3KeyPrefix=\"{}\" s3BucketName=\"{}\" s3KeyCount={}", params.getS3KeyPrefix(),
        params.getS3BucketName(), params.getFiles().size());

    if (!CollectionUtils.isEmpty(params.getFiles())) {
        // Initialize a key value pair for the error message in the catch block.
        String key = params.getFiles().get(0).getPath().replaceAll("\\\\", "/");

        try {
            // Create an S3 client.
            AmazonS3Client s3Client = getAmazonS3(params);

            // Create a restore object request.
            RestoreObjectRequest requestRestore = new RestoreObjectRequest(params.getS3BucketName(), null, expirationInDays);

            // Make Bulk the default archive retrieval option if the option is not provided.
            requestRestore.setGlacierJobParameters(new GlacierJobParameters().withTier(
                StringUtils.isNotEmpty(archiveRetrievalOption) ? archiveRetrievalOption : Tier.Bulk.toString()));

            try {
                for (File file : params.getFiles()) {
                    key = file.getPath().replaceAll("\\\\", "/");
                    ObjectMetadata objectMetadata = s3Operations.getObjectMetadata(params.getS3BucketName(), key, s3Client);

                    // Request a restore for objects that are not already being restored.
                    if (BooleanUtils.isNotTrue(objectMetadata.getOngoingRestore())) {
                        requestRestore.setKey(key);
                        try {
                            // Try the S3 restore operation on this file.
                            s3Operations.restoreObject(requestRestore, s3Client);
                        }
                        catch (AmazonS3Exception amazonS3Exception) {
                            // If this exception has a status code of 409, log the information and continue to the next file.
                            if (amazonS3Exception.getStatusCode() == HttpStatus.SC_CONFLICT) {
                                LOGGER.info("Restore already in progress for file with s3Key=\"{}\".", key);
                            }
                            // Else, we need to propagate the exception to the next level of try/catch block.
                            else {
                                throw new Exception(amazonS3Exception);
                            }
                        }
                    }
                }
            }
            finally {
                s3Client.shutdown();
            }
        }
        catch (Exception e) {
            if (StringUtils.contains(e.getMessage(), "Retrieval option is not supported by this storage class")) {
                throw new IllegalArgumentException(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. Reason: %s",
                    key, params.getS3BucketName(), e.getMessage()), e);
            }
            else {
                throw new IllegalStateException(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. Reason: %s",
                    key, params.getS3BucketName(), e.getMessage()), e);
            }
        }
    }
}
Example #9
Source File: S3DaoImplTest.java From herd with Apache License 2.0
@Test
public void testRestoreObjectsInDeepArchiveWithExpeditedArchiveRetrievalOption() {
    List<File> files = Collections.singletonList(new File(TEST_FILE));

    // Create an S3 file transfer request parameters DTO to access S3 objects.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);
    s3FileTransferRequestParamsDto.setS3KeyPrefix(S3_KEY_PREFIX);
    s3FileTransferRequestParamsDto.setFiles(files);

    // Create a retry policy.
    RetryPolicy retryPolicy =
        new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);

    // Create an object metadata with the DeepArchive storage class.
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setOngoingRestore(false);
    objectMetadata.setHeader(Headers.STORAGE_CLASS, StorageClass.DeepArchive);

    ArgumentCaptor<AmazonS3Client> s3ClientCaptor = ArgumentCaptor.forClass(AmazonS3Client.class);
    ArgumentCaptor<String> s3BucketNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> keyCaptor = ArgumentCaptor.forClass(String.class);

    // Mock the external calls.
    when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
    when(s3Operations.getObjectMetadata(s3BucketNameCaptor.capture(), keyCaptor.capture(), s3ClientCaptor.capture())).thenReturn(objectMetadata);
    doThrow(new AmazonServiceException("Retrieval option is not supported by this storage class")).when(s3Operations)
        .restoreObject(any(RestoreObjectRequest.class), any(AmazonS3.class));

    try {
        s3DaoImpl.restoreObjects(s3FileTransferRequestParamsDto, EXPIRATION_IN_DAYS, Tier.Expedited.toString());
        fail();
    }
    catch (IllegalArgumentException e) {
        assertEquals(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. " +
            "Reason: Retrieval option is not supported by this storage class (Service: null; Status Code: 0; Error Code: null; Request ID: null)",
            TEST_FILE, S3_BUCKET_NAME), e.getMessage());
    }
}
Example #10
Source File: S3DaoImplTest.java From herd with Apache License 2.0
@Test
public void testRestoreObjectsBulkArchiveRetrievalOption() {
    runRestoreObjects(Tier.Bulk.toString(), null);
}
Example #11
Source File: S3DaoImplTest.java From herd with Apache License 2.0
@Test
public void testRestoreObjectsStandardArchiveRetrievalOption() {
    runRestoreObjects(Tier.Standard.toString(), null);
}
Example #12
Source File: S3DaoImplTest.java From herd with Apache License 2.0
@Test
public void testRestoreObjectsExpeditedArchiveRetrievalOption() {
    runRestoreObjects(Tier.Expedited.toString(), null);
}
Example #13
Source File: S3DaoImplTest.java From herd with Apache License 2.0
@Test
public void testRestoreObjectsInDeepArchiveBulkArchiveRetrievalOption() {
    runRestoreObjects(Tier.Bulk.toString(), StorageClass.DeepArchive);
}
Example #14
Source File: S3DaoImplTest.java From herd with Apache License 2.0
private void testRestoreObjectsWithS3Exception(String exceptionMessage, int statusCode) {
    List<File> files = Collections.singletonList(new File(TEST_FILE));

    // Create an S3 file transfer request parameters DTO to access S3 objects.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);
    s3FileTransferRequestParamsDto.setS3KeyPrefix(S3_KEY_PREFIX);
    s3FileTransferRequestParamsDto.setFiles(files);

    // Create a retry policy.
    RetryPolicy retryPolicy =
        new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);

    // Create an object metadata.
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setOngoingRestore(false);
    objectMetadata.setHeader(Headers.STORAGE_CLASS, StorageClass.DeepArchive);

    ArgumentCaptor<AmazonS3Client> s3ClientCaptor = ArgumentCaptor.forClass(AmazonS3Client.class);
    ArgumentCaptor<String> s3BucketNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> keyCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<RestoreObjectRequest> requestStoreCaptor = ArgumentCaptor.forClass(RestoreObjectRequest.class);

    // Create an Amazon S3 exception.
    AmazonS3Exception amazonS3Exception = new AmazonS3Exception(exceptionMessage);
    amazonS3Exception.setStatusCode(statusCode);

    // Mock the external calls.
    when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
    when(s3Operations.getObjectMetadata(s3BucketNameCaptor.capture(), keyCaptor.capture(), s3ClientCaptor.capture())).thenReturn(objectMetadata);
    doThrow(amazonS3Exception).when(s3Operations).restoreObject(requestStoreCaptor.capture(), s3ClientCaptor.capture());

    try {
        // Call the method under test.
        s3DaoImpl.restoreObjects(s3FileTransferRequestParamsDto, EXPIRATION_IN_DAYS, Tier.Standard.toString());

        // If this is not a restore already in progress exception message (409) then we should have caught an exception.
        // Else if this is a restore already in progress message (409) then continue as usual.
        if (!exceptionMessage.equals(RESTORE_ALREADY_IN_PROGRESS_EXCEPTION_MESSAGE)) {
            // Should not be here. Fail!
            fail();
        }
        else {
            RestoreObjectRequest requestStore = requestStoreCaptor.getValue();
            assertEquals(S3_BUCKET_NAME, s3BucketNameCaptor.getValue());
            assertEquals(TEST_FILE, keyCaptor.getValue());

            // Verify Bulk option is used when the option is not provided.
            assertEquals(StringUtils.isNotEmpty(Tier.Standard.toString()) ? Tier.Standard.toString() : Tier.Bulk.toString(),
                requestStore.getGlacierJobParameters().getTier());
        }
    }
    catch (IllegalStateException illegalStateException) {
        assertEquals(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. " +
            "Reason: com.amazonaws.services.s3.model.AmazonS3Exception: %s " +
            "(Service: null; Status Code: %s; Error Code: null; Request ID: null; S3 Extended Request ID: null), S3 Extended Request ID: null",
            TEST_FILE, S3_BUCKET_NAME, exceptionMessage, statusCode), illegalStateException.getMessage());
    }

    // Verify the external calls.
    verify(retryPolicyFactory).getRetryPolicy();
    verify(s3Operations).getObjectMetadata(anyString(), anyString(), any(AmazonS3Client.class));
    verify(s3Operations).restoreObject(any(RestoreObjectRequest.class), any(AmazonS3Client.class));
    verifyNoMoreInteractionsHelper();
}
Example #15
Source File: S3DaoImplTest.java From herd with Apache License 2.0
/**
 * Runs the restore objects method.
 *
 * @param archiveRetrievalOption the archive retrieval option
 * @param storageClass the storage class to set on the object metadata
 */
private void runRestoreObjects(String archiveRetrievalOption, StorageClass storageClass) {
    List<File> files = Collections.singletonList(new File(TEST_FILE));

    // Create an S3 file transfer request parameters DTO to access S3 objects.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);
    s3FileTransferRequestParamsDto.setS3KeyPrefix(S3_KEY_PREFIX);
    s3FileTransferRequestParamsDto.setFiles(files);

    // Create a retry policy.
    RetryPolicy retryPolicy =
        new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);

    // Create an object metadata.
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setOngoingRestore(false);
    objectMetadata.setHeader(Headers.STORAGE_CLASS, storageClass);

    ArgumentCaptor<AmazonS3Client> s3ClientCaptor = ArgumentCaptor.forClass(AmazonS3Client.class);
    ArgumentCaptor<String> s3BucketNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> keyCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<RestoreObjectRequest> requestStoreCaptor = ArgumentCaptor.forClass(RestoreObjectRequest.class);

    // Mock the external calls.
    when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
    when(s3Operations.getObjectMetadata(s3BucketNameCaptor.capture(), keyCaptor.capture(), s3ClientCaptor.capture())).thenReturn(objectMetadata);
    doNothing().when(s3Operations).restoreObject(requestStoreCaptor.capture(), s3ClientCaptor.capture());

    s3DaoImpl.restoreObjects(s3FileTransferRequestParamsDto, EXPIRATION_IN_DAYS, archiveRetrievalOption);

    RestoreObjectRequest requestStore = requestStoreCaptor.getValue();
    assertEquals(S3_BUCKET_NAME, s3BucketNameCaptor.getValue());
    assertEquals(TEST_FILE, keyCaptor.getValue());

    // Verify Bulk option is used when the option is not provided.
    assertEquals(StringUtils.isNotEmpty(archiveRetrievalOption) ? archiveRetrievalOption : Tier.Bulk.toString(),
        requestStore.getGlacierJobParameters().getTier());

    // Verify the external calls.
    verify(retryPolicyFactory).getRetryPolicy();
    verify(s3Operations).getObjectMetadata(anyString(), anyString(), any(AmazonS3Client.class));
    verify(s3Operations).restoreObject(any(RestoreObjectRequest.class), any(AmazonS3Client.class));
    verifyNoMoreInteractionsHelper();
}
Example #16
Source File: S3RestoreObjectRequest.java From super-cloudops with Apache License 2.0
/**
 * Sets the glacier retrieval tier.
 *
 * @param tier New value for tier.
 *
 * @return This object for method chaining.
 */
public RestoreObjectRequest withTier(Tier tier) {
    this.tier = tier == null ? null : tier.toString();
    return this;
}