com.amazonaws.retry.RetryPolicy Java Examples
The following examples show how to use
com.amazonaws.retry.RetryPolicy.
You can vote up the examples you like or vote down the ones you don't like, and you can go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example #1
Source File: GlueTestClientFactory.java From aws-glue-data-catalog-client-for-apache-hive-metastore with Apache License 2.0 | 7 votes |
/**
 * Builds a {@link ClientConfiguration} whose retry policy treats HTTP 504
 * (gateway timeout) responses as retryable, in addition to everything the
 * SDK's default retry condition already retries.
 *
 * @return a client configuration with the gateway-timeout-aware retry policy installed
 */
private static ClientConfiguration createGatewayTimeoutRetryableConfiguration() {
  ClientConfiguration retryableConfig = new ClientConfiguration();
  RetryPolicy.RetryCondition retryCondition = new PredefinedRetryPolicies.SDKDefaultRetryCondition() {
    @Override
    public boolean shouldRetry(AmazonWebServiceRequest originalRequest, AmazonClientException exception,
        int retriesAttempted) {
      // Defer to the SDK's default conditions first.
      if (super.shouldRetry(originalRequest, exception, retriesAttempted)) {
        return true;
      }
      // `instanceof` is false for null, so the former explicit null check was redundant.
      if (exception instanceof AmazonServiceException) {
        AmazonServiceException ase = (AmazonServiceException) exception;
        if (ase.getStatusCode() == SC_GATEWAY_TIMEOUT) {
          return true;
        }
      }
      return false;
    }
  };
  // Keep the SDK's default backoff strategy and max retry count; only the condition is customized.
  RetryPolicy retryPolicy = new RetryPolicy(retryCondition, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY,
      PredefinedRetryPolicies.DEFAULT_MAX_ERROR_RETRY, true);
  retryableConfig.setRetryPolicy(retryPolicy);
  return retryableConfig;
}
Example #2
Source File: AWSClientFactory.java From aws-codebuild-jenkins-plugin with Apache License 2.0 | 6 votes |
/**
 * Builds the {@link ClientConfiguration} used by the CodeBuild API client: a
 * versioned user-agent prefix, proxy settings, and a custom retry policy with
 * exponential backoff (10s base delay, 30s cap, up to 10 retries).
 *
 * @return the configured client configuration
 */
private ClientConfiguration getClientConfiguration() {
    String projectVersion = "";
    try (InputStream stream = this.getClass().getResourceAsStream(POM_PROPERTIES)) {
        // getResourceAsStream returns null when the resource is missing; the original
        // code would then NPE inside Properties.load (an NPE the IOException catch does
        // not cover). Guard it so a missing pom.properties just leaves the suffix empty.
        if (stream != null) {
            properties.load(stream);
            projectVersion = "/" + properties.getProperty("version");
        }
    } catch (IOException e) {
        // Best-effort: the version is only used for the user-agent string, so a
        // read failure is deliberately ignored.
    }
    ClientConfiguration clientConfig = new ClientConfiguration()
            .withUserAgentPrefix("CodeBuild-Jenkins-Plugin" + projectVersion)
            .withProxyHost(proxyHost)
            .withRetryPolicy(new RetryPolicy(
                    new CodeBuildClientRetryCondition(),
                    new PredefinedBackoffStrategies.ExponentialBackoffStrategy(10000, 30000),
                    10,
                    true));
    if (proxyPort != null) {
        clientConfig.setProxyPort(proxyPort);
    }
    return clientConfig;
}
Example #3
Source File: AwsS3ClientFactory.java From circus-train with Apache License 2.0 | 5 votes |
/**
 * Creates a new Amazon S3 client configured from the given Hadoop configuration:
 * retry count and exponential-backoff base delay come from the upload retry
 * settings, credentials from the Hadoop AWS credential provider chain, and the
 * endpoint/region from the configuration.
 *
 * @param conf the Hadoop configuration to read client settings from
 * @return a fully built Amazon S3 client
 */
public AmazonS3 newInstance(Configuration conf) {
  // Retry count and base backoff delay, with defaults from the configuration variables.
  int maxErrorRetry = conf.getInt(ConfigurationVariable.UPLOAD_RETRY_COUNT.getName(),
      ConfigurationVariable.UPLOAD_RETRY_COUNT.defaultIntValue());
  long errorRetryDelay = conf.getLong(ConfigurationVariable.UPLOAD_RETRY_DELAY_MS.getName(),
      ConfigurationVariable.UPLOAD_RETRY_DELAY_MS.defaultLongValue());
  LOG.info("Creating AWS S3 client with a retry policy of {} retries and {} ms of exponential backoff delay",
      maxErrorRetry, errorRetryDelay);
  // maxErrorRetry is used both in the retry condition and as the policy's retry cap.
  RetryPolicy retryPolicy = new RetryPolicy(new CounterBasedRetryCondition(maxErrorRetry),
      new ExponentialBackoffStrategy(errorRetryDelay), maxErrorRetry, true);
  ClientConfiguration clientConfiguration = new ClientConfiguration();
  clientConfiguration.setRetryPolicy(retryPolicy);
  clientConfiguration.setMaxErrorRetry(maxErrorRetry);
  AmazonS3ClientBuilder builder = AmazonS3ClientBuilder
      .standard()
      .withCredentials(new HadoopAWSCredentialProviderChain(conf))
      .withClientConfiguration(clientConfiguration);
  // Prefer an explicitly configured endpoint; otherwise fall back to the configured region.
  EndpointConfiguration endpointConfiguration = getEndpointConfiguration(conf);
  if (endpointConfiguration != null) {
    builder.withEndpointConfiguration(endpointConfiguration);
  } else {
    builder.withRegion(getRegion(conf));
  }
  return builder.build();
}
Example #4
Source File: AwsEc2ServiceImpl.java From crate with Apache License 2.0 | 5 votes |
/**
 * Builds the EC2 {@link ClientConfiguration} from the given client settings:
 * protocol, optional proxy, socket timeout, and an always-retry policy (up to
 * 10 attempts) with a jittered exponential backoff.
 *
 * @param logger         logger used to report retried request failures
 * @param clientSettings the resolved EC2 client settings
 * @return the configured client configuration
 */
static ClientConfiguration buildConfiguration(Logger logger, Ec2ClientSettings clientSettings) {
    final ClientConfiguration clientConfiguration = new ClientConfiguration();
    // the response metadata cache is only there for diagnostics purposes,
    // but can force objects from every response to the old generation.
    clientConfiguration.setResponseMetadataCacheSize(0);
    clientConfiguration.setProtocol(clientSettings.protocol);
    if (Strings.hasText(clientSettings.proxyHost)) {
        // TODO: remove this leniency, these settings should exist together and be validated
        clientConfiguration.setProxyHost(clientSettings.proxyHost);
        clientConfiguration.setProxyPort(clientSettings.proxyPort);
        clientConfiguration.setProxyUsername(clientSettings.proxyUsername);
        clientConfiguration.setProxyPassword(clientSettings.proxyPassword);
    }
    // Increase the number of retries in case of 5xx API responses
    final Random rand = Randomness.get();
    final RetryPolicy retryPolicy = new RetryPolicy(
        // NO_RETRY_CONDITION here means the SDK's default condition is bypassed; the
        // backoff strategy below is what controls the delay between attempts.
        RetryPolicy.RetryCondition.NO_RETRY_CONDITION,
        (originalRequest, exception, retriesAttempted) -> {
            // with 10 retries the max delay time is 320s/320000ms (10 * 2^5 * 1 * 1000)
            logger.warn("EC2 API request failed, retry again. Reason was:", exception);
            // Exponential growth (doubling every two attempts) with random jitter in [1, 2).
            return 1000L * (long) (10d * Math.pow(2, retriesAttempted / 2.0d) * (1.0d + rand.nextDouble()));
        },
        10,
        false);
    clientConfiguration.setRetryPolicy(retryPolicy);
    clientConfiguration.setSocketTimeout(clientSettings.readTimeoutMillis);
    return clientConfiguration;
}
Example #5
Source File: AWSAutoConfigurationTest.java From genie with Apache License 2.0 | 5 votes |
/**
 * Verifies that the expected SNS-related beans are registered in the context
 * and that the retry policy bean is configured with the expected retry count.
 */
@Test
void testExpectedContext() {
    this.contextRunner.run(
        ctx -> {
            Assertions.assertThat(ctx).hasBean(AWSAutoConfiguration.SNS_CLIENT_BEAN_NAME);
            Assertions.assertThat(ctx).hasBean("SNSClientRetryPolicy");
            Assertions.assertThat(ctx).hasBean("SNSClientConfiguration");
            final int actualMaxErrorRetry =
                ctx.getBean("SNSClientRetryPolicy", RetryPolicy.class).getMaxErrorRetry();
            Assertions.assertThat(actualMaxErrorRetry).isEqualTo(3);
        }
    );
}
Example #6
Source File: AWSAutoConfiguration.java From genie with Apache License 2.0 | 5 votes |
/**
 * Creates the named {@link ClientConfiguration} bean used by the {@link AmazonSNS}
 * client, unless a bean with that name is already defined in the context.
 *
 * @param retryPolicy the retry policy to install on the configuration
 * @return the named {@link ClientConfiguration}
 */
@Bean(name = SNS_CLIENT_CONFIGURATION_BEAN_NAME)
@ConditionalOnMissingBean(name = SNS_CLIENT_CONFIGURATION_BEAN_NAME)
public ClientConfiguration jobNotificationsSNSClientConfiguration(
    @Qualifier(SNS_CLIENT_RETRY_POLICY_BEAN_NAME) final RetryPolicy retryPolicy
) {
    // Start from the SDK's default configuration and only override the retry policy.
    final ClientConfiguration snsClientConfiguration = new ClientConfigurationFactory().getConfig();
    snsClientConfiguration.setRetryPolicy(retryPolicy);
    return snsClientConfiguration;
}
Example #7
Source File: AWSAutoConfiguration.java From genie with Apache License 2.0 | 5 votes |
/**
 * Creates the named {@link RetryPolicy} bean used by the {@link AmazonSNS}
 * client, unless a bean with that name is already defined in the context.
 *
 * @param retryProperties the retry properties supplying the maximum retry count
 * @return the named {@link RetryPolicy}
 */
@Bean(name = SNS_CLIENT_RETRY_POLICY_BEAN_NAME)
@ConditionalOnMissingBean(name = SNS_CLIENT_RETRY_POLICY_BEAN_NAME)
public RetryPolicy jobNotificationsSNSClientRetryPolicy(
    final RetryProperties retryProperties
) {
    final int maxRetries = retryProperties.getSns().getNoOfRetries();
    return PredefinedRetryPolicies.getDefaultRetryPolicyWithCustomMaxRetries(maxRetries);
}
Example #8
Source File: ApiImporterDefaultModule.java From aws-apigateway-importer with Apache License 2.0 | 5 votes |
/**
 * Provides the backoff strategy used when retrying API Gateway requests.
 * Throttling errors back off from a larger, jittered base than general errors;
 * the delay doubles per attempt and is capped at {@code maxBackoffInMilliseconds}.
 */
@Provides
protected RetryPolicy.BackoffStrategy provideBackoffStrategy() {
    // tune these parameters to handle throttling errors
    final int maxBackoffInMilliseconds = 50 * 1000; // maximum exponential back-off time before retrying a request
    final int throttlingScaleFactor = 800; // base sleep time for throttling exceptions
    final int maxRetriesBeforeBackoff = 10; // log2(maxBackoffInMilliseconds/throttlingScaleFactor)
    final int baseScaleFactor = 600; // base sleep time for general exceptions
    final int throttlingScaleFactorRandomRange = throttlingScaleFactor / 4;
    final Random random = new Random();
    return (originalRequest, exception, retriesAttempted) -> {
        LOG.debug("Caught error from service. Retry attempt: " + retriesAttempted, exception);
        // Defensive guard: a negative attempt count yields no delay.
        if (retriesAttempted < 0) return 0;
        // Beyond this point the shift below would exceed the cap anyway, so clamp early.
        if (retriesAttempted > maxRetriesBeforeBackoff) return maxBackoffInMilliseconds;
        int scaleFactor;
        if (exception instanceof AmazonServiceException
                && RetryUtils.isThrottlingException((AmazonServiceException) exception)) {
            // Throttling: larger base plus random jitter so concurrent clients spread out.
            scaleFactor = throttlingScaleFactor + random.nextInt(throttlingScaleFactorRandomRange);
        } else {
            scaleFactor = baseScaleFactor;
        }
        // Exponential delay: 2^retriesAttempted * scaleFactor, capped at the maximum.
        long delay = (1L << retriesAttempted) * scaleFactor;
        delay = Math.min(delay, maxBackoffInMilliseconds);
        LOG.info("Client backing off for " + delay + "ms");
        return delay;
    };
}
Example #9
Source File: ApiImporterDefaultModule.java From aws-apigateway-importer with Apache License 2.0 | 5 votes |
/**
 * Provides an {@link ApiGateway} client wired with the shared credentials
 * provider, the injected backoff strategy (up to 5 retries with the SDK's
 * default retry condition), and the importer's user agent.
 *
 * @param credsProvider   supplies AWS credentials for request signing
 * @param backoffStrategy backoff strategy applied between retry attempts
 * @param region          AWS region name used to resolve the service endpoint
 * @return a configured API Gateway client
 */
@Provides
protected ApiGateway provideAmazonApiGateway(AWSCredentialsProvider credsProvider,
                                             RetryPolicy.BackoffStrategy backoffStrategy,
                                             @Named("region") String region) {
    final RetryPolicy retryPolicy =
            new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, backoffStrategy, 5, true);
    final ClientConfiguration configuration = new ClientConfiguration()
            .withUserAgent(USER_AGENT)
            .withRetryPolicy(retryPolicy);
    return new AmazonApiGateway(getEndpoint(region))
            .with(credsProvider)
            .with(configuration)
            .getApiGateway();
}
Example #10
Source File: S3DaoImplTest.java From herd with Apache License 2.0 | 5 votes |
/**
 * Verifies that deleting a directory is a no-op on S3 when the version listing
 * for the key prefix is empty: only the version listing call is made.
 */
@Test
public void testDeleteDirectoryNoS3VersionsExist() {
    // Create an S3 file transfer request parameters DTO to access S3 objects.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);
    s3FileTransferRequestParamsDto.setS3KeyPrefix(S3_KEY_PREFIX);

    // Create a retry policy.
    RetryPolicy retryPolicy = new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION,
        PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);

    // Create an empty version listing.
    VersionListing versionListing = new VersionListing();

    // Mock the external calls.
    when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
    when(s3Operations.listVersions(any(ListVersionsRequest.class), any(AmazonS3Client.class))).thenReturn(versionListing);

    // Call the method under test.
    s3DaoImpl.deleteDirectory(s3FileTransferRequestParamsDto);

    // Verify the external calls.
    verify(retryPolicyFactory).getRetryPolicy();
    verify(s3Operations).listVersions(any(ListVersionsRequest.class), any(AmazonS3Client.class));
    verifyNoMoreInteractionsHelper();
}
Example #11
Source File: ApiGateway.java From lambadaframework with MIT License | 5 votes |
/**
 * Lazily builds (and caches in {@code apiGatewayClient}) the API Gateway client.
 * The retry policy retries on {@link TooManyRequestsException} in addition to
 * the SDK's default retry condition, for up to 10 attempts with the default
 * backoff strategy.
 *
 * @return the shared API Gateway client
 */
protected AmazonApiGateway getApiGatewayClient() {
    // Return the cached client if it has already been built.
    if (apiGatewayClient != null) {
        return apiGatewayClient;
    }
    RetryPolicy.RetryCondition retryCondition = new RetryPolicy.RetryCondition() {
        @Override
        public boolean shouldRetry(AmazonWebServiceRequest amazonWebServiceRequest,
                                   AmazonClientException amazonClientException,
                                   int i) {
            // API Gateway throttling responses are always worth retrying.
            if (amazonClientException instanceof TooManyRequestsException) {
                return true;
            }
            // Otherwise fall back to the SDK's default retry condition.
            return PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION.shouldRetry(amazonWebServiceRequest, amazonClientException, i);
        }
    };
    RetryPolicy retryPolicy = new RetryPolicy(retryCondition, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, 10, true);
    ClientConfiguration clientConfig = new ClientConfiguration()
        .withRetryPolicy(retryPolicy);
    apiGatewayClient = new AmazonApiGatewayClient(getAWSCredentialsProvideChain(), clientConfig)
        .withRegion(Region.getRegion(Regions.fromName(deployment.getRegion())));
    return apiGatewayClient;
}
Example #12
Source File: AWSClientFactory.java From pipeline-aws-plugin with Apache License 2.0 | 5 votes |
private static ClientConfiguration getClientConfiguration(EnvVars vars) { ClientConfiguration clientConfiguration = new ClientConfiguration(); //the default max retry is 3. Increasing this to be more resilient to upstream errors clientConfiguration.setRetryPolicy(new RetryPolicy(null, null, 10, false)); ProxyConfiguration.configure(vars, clientConfiguration); return clientConfiguration; }
Example #13
Source File: RetryPolicyFactoryTest.java From herd with Apache License 2.0 | 5 votes |
/**
 * Asserts that the retry policy produced by the factory uses the default retry
 * condition, the injected backoff strategy, and the configured maximum error
 * retry count.
 */
@Test
public void assertResultRetryPolicyConfiguredCorrectly() {
    final int configuredMaxErrorRetry = 12345;
    when(configurationHelper.getProperty(any(), eq(Integer.class))).thenReturn(configuredMaxErrorRetry);

    final RetryPolicy result = retryPolicyFactory.getRetryPolicy();

    assertEquals(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, result.getRetryCondition());
    assertEquals(backoffStrategy, result.getBackoffStrategy());
    assertEquals(configuredMaxErrorRetry, result.getMaxErrorRetry());
}
Example #14
Source File: StsDaoTest.java From herd with Apache License 2.0 | 5 votes |
/**
 * Verifies that temporary security credentials can be obtained when the
 * optional parameters (proxy settings and IAM policy) are absent: the assume
 * role request is built without a policy and the STS credentials are returned.
 */
@Test
public void testGetTemporarySecurityCredentialsMissingOptionalParameters() {
    // Create an AWS parameters DTO without proxy settings.
    AwsParamsDto awsParamsDto = new AwsParamsDto();

    // Specify the duration, in seconds, of the role session.
    int awsRoleDurationSeconds = INTEGER_VALUE;

    // Create a retry policy.
    RetryPolicy retryPolicy = new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION,
        PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);

    // Create the expected assume role request.
    AssumeRoleRequest assumeRoleRequest =
        new AssumeRoleRequest().withRoleArn(AWS_ROLE_ARN).withRoleSessionName(SESSION_NAME).withDurationSeconds(awsRoleDurationSeconds);

    // Create AWS credentials for API authentication.
    Credentials credentials = new Credentials();
    credentials.setAccessKeyId(AWS_ASSUMED_ROLE_ACCESS_KEY);
    credentials.setSecretAccessKey(AWS_ASSUMED_ROLE_SECRET_KEY);
    credentials.setSessionToken(AWS_ASSUMED_ROLE_SESSION_TOKEN);

    // Create an assume role result.
    AssumeRoleResult assumeRoleResult = new AssumeRoleResult();
    assumeRoleResult.setCredentials(credentials);

    // Mock the external calls.
    when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
    when(stsOperations.assumeRole(any(AWSSecurityTokenServiceClient.class), eq(assumeRoleRequest))).thenReturn(assumeRoleResult);

    // Call the method under test. Please note that we do not specify an IAM policy.
    Credentials result = stsDaoImpl.getTemporarySecurityCredentials(awsParamsDto, SESSION_NAME, AWS_ROLE_ARN, awsRoleDurationSeconds, null);

    // Verify the external calls.
    verify(retryPolicyFactory).getRetryPolicy();
    verify(stsOperations).assumeRole(any(AWSSecurityTokenServiceClient.class), eq(assumeRoleRequest));
    verifyNoMoreInteractionsHelper();

    // Validate the returned object.
    assertEquals(credentials, result);
}
Example #15
Source File: StsDaoTest.java From herd with Apache License 2.0 | 5 votes |
/**
 * Verifies the happy path for obtaining temporary security credentials: with
 * proxy settings and an IAM policy supplied, the assume role request includes
 * the policy JSON and the STS credentials are returned.
 */
@Test
public void testGetTemporarySecurityCredentials() {
    // Create an AWS parameters DTO with proxy settings.
    AwsParamsDto awsParamsDto = new AwsParamsDto();
    awsParamsDto.setHttpProxyHost(HTTP_PROXY_HOST);
    awsParamsDto.setHttpProxyPort(HTTP_PROXY_PORT);

    // Specify the duration, in seconds, of the role session.
    int awsRoleDurationSeconds = INTEGER_VALUE;

    // Create an IAM policy.
    Policy policy = new Policy(STRING_VALUE);

    // Create a retry policy.
    RetryPolicy retryPolicy = new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION,
        PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);

    // Create the expected assume role request.
    AssumeRoleRequest assumeRoleRequest =
        new AssumeRoleRequest().withRoleArn(AWS_ROLE_ARN).withRoleSessionName(SESSION_NAME).withPolicy(policy.toJson())
            .withDurationSeconds(awsRoleDurationSeconds);

    // Create AWS credentials for API authentication.
    Credentials credentials = new Credentials();
    credentials.setAccessKeyId(AWS_ASSUMED_ROLE_ACCESS_KEY);
    credentials.setSecretAccessKey(AWS_ASSUMED_ROLE_SECRET_KEY);
    credentials.setSessionToken(AWS_ASSUMED_ROLE_SESSION_TOKEN);

    // Create an assume role result.
    AssumeRoleResult assumeRoleResult = new AssumeRoleResult();
    assumeRoleResult.setCredentials(credentials);

    // Mock the external calls.
    when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
    when(stsOperations.assumeRole(any(AWSSecurityTokenServiceClient.class), eq(assumeRoleRequest))).thenReturn(assumeRoleResult);

    // Call the method under test.
    Credentials result = stsDaoImpl.getTemporarySecurityCredentials(awsParamsDto, SESSION_NAME, AWS_ROLE_ARN, awsRoleDurationSeconds, policy);

    // Verify the external calls.
    verify(retryPolicyFactory).getRetryPolicy();
    verify(stsOperations).assumeRole(any(AWSSecurityTokenServiceClient.class), eq(assumeRoleRequest));
    verifyNoMoreInteractionsHelper();

    // Validate the returned object.
    assertEquals(credentials, result);
}
Example #16
Source File: AwsComponents.java From kork with Apache License 2.0 | 4 votes |
/**
 * Registers a {@link RetryPolicy.BackoffStrategy} bean that reports backoff
 * metrics to the given registry.
 *
 * @param registry the metrics registry the strategy records into
 * @return the instrumented backoff strategy
 */
// NOTE(review): this uses a single-argument constructor, while
// InstrumentedBackoffStrategy elsewhere also declares a (registry, delegate)
// constructor — presumably the one-arg form supplies a default delegate; confirm.
@Bean
RetryPolicy.BackoffStrategy instrumentedBackoffStrategy(Registry registry) {
    return new InstrumentedBackoffStrategy(registry);
}
Example #17
Source File: AWSDatabaseHolder.java From billow with Apache License 2.0 | 4 votes |
/**
 * Builds per-region AWS service clients (EC2, RDS, DynamoDB, SQS, ElastiCache,
 * Elasticsearch — each gated by its own "...Enabled" config flag) plus a single
 * IAM client, then runs the initial {@code rebuild()}. A bootstrap EC2 client is
 * used only to enumerate the available regions; per-service endpoints are derived
 * from each region's EC2 endpoint by hostname substitution.
 *
 * @param config application configuration supplying retry, timeout, enablement
 *               flags, and optional account number / ARN partition overrides
 */
public AWSDatabaseHolder(Config config) {
    maxAgeInMs = config.getDuration("maxAge", TimeUnit.MILLISECONDS);

    final DefaultAWSCredentialsProviderChain awsCredentialsProviderChain = new DefaultAWSCredentialsProviderChain();

    // Shared client configuration: configured retry count (SDK-default condition and
    // backoff via the null arguments) and socket timeout in seconds from config.
    final ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.setRetryPolicy(new RetryPolicy(null, null, config.getInt("maxErrorRetry"), true));
    clientConfig.setSocketTimeout(config.getInt("socketTimeout") * 1000);

    // Bootstrap client used only to discover the region list.
    final AmazonEC2 bootstrapEC2Client = AmazonEC2ClientBuilder.standard().withCredentials(awsCredentialsProviderChain).build();

    ec2Clients = Maps.newHashMap();
    rdsClients = Maps.newHashMap();
    sqsClients = Maps.newHashMap();
    dynamoDBClients = Maps.newHashMap();
    elasticacheClients = Maps.newHashMap();
    elasticsearchClients = Maps.newHashMap();

    final List<Region> ec2Regions = bootstrapEC2Client.describeRegions().getRegions();
    for (Region region : ec2Regions) {
        final String regionName = region.getRegionName();
        final String endpoint = region.getEndpoint();
        log.debug("Adding ec2 region {}", region);

        if (config.getBoolean("ec2Enabled")) {
            final AmazonEC2Client ec2Client = new AmazonEC2Client(awsCredentialsProviderChain, clientConfig);
            ec2Client.setEndpoint(endpoint);
            ec2Clients.put(regionName, ec2Client);
        }

        // For the remaining services the endpoint is derived by replacing the
        // leading "ec2." of the region's EC2 endpoint with the service prefix.
        if (config.getBoolean("rdsEnabled")) {
            final AmazonRDSClient rdsClient = new AmazonRDSClient(awsCredentialsProviderChain, clientConfig);
            rdsClient.setEndpoint(endpoint.replaceFirst("ec2\\.", "rds."));
            rdsClients.put(regionName, rdsClient);
        }

        if (config.getBoolean("dynamodbEnabled")) {
            final AmazonDynamoDBClient dynamoDBClient = new AmazonDynamoDBClient(awsCredentialsProviderChain, clientConfig);
            dynamoDBClient.setEndpoint(endpoint.replaceFirst("ec2\\.", "dynamodb."));
            dynamoDBClients.put(regionName, dynamoDBClient);
        }

        if (config.getBoolean("sqsEnabled")) {
            final AmazonSQSClient sqsClient = new AmazonSQSClient(awsCredentialsProviderChain, clientConfig);
            sqsClient.setEndpoint(endpoint.replaceFirst("ec2\\.", "sqs."));
            sqsClients.put(regionName, sqsClient);
        }

        if (config.getBoolean("elasticacheEnabled")) {
            final AmazonElastiCacheClient elastiCacheClient = new AmazonElastiCacheClient
                (awsCredentialsProviderChain, clientConfig);
            elastiCacheClient.setEndpoint(endpoint.replaceFirst("ec2\\.", "elasticache."));
            elasticacheClients.put(regionName, elastiCacheClient);
        }

        if (config.getBoolean("elasticsearchEnabled")) {
            final AWSElasticsearchClient elasticsearchClient = new AWSElasticsearchClient
                (awsCredentialsProviderChain, clientConfig);
            elasticsearchClient.setEndpoint(endpoint.replaceFirst("ec2\\.", "es."));
            elasticsearchClients.put(regionName, elasticsearchClient);
        }
    }

    // IAM is a global service: one client, built via the builder rather than per region.
    this.iamClient = AmazonIdentityManagementClientBuilder.standard()
        .withCredentials(awsCredentialsProviderChain)
        .withClientConfiguration(clientConfig)
        .build();

    // Optional overrides; account number defaults to null, ARN partition to "aws".
    if (config.hasPath("accountNumber")) {
        this.awsAccountNumber = config.getString("accountNumber");
    } else {
        this.awsAccountNumber = null;
    }
    if (config.hasPath("arnPartition")) {
        this.awsARNPartition = config.getString("arnPartition");
    } else {
        this.awsARNPartition = "aws";
    }

    rebuild();
}
Example #18
Source File: KinesisAppender.java From kinesis-log4j-appender with Apache License 2.0 | 4 votes |
/**
 * Configures this appender instance and makes it ready for use by the
 * consumers. It validates mandatory parameters and confirms if the configured
 * stream is ready for publishing data yet.
 *
 * Error details are made available through the fallback handler for this
 * appender
 *
 * @throws IllegalStateException
 *           if we encounter issues configuring this appender instance
 */
@Override
public void activateOptions() {
    // Mandatory-parameter validation: both failures are recorded via the error
    // handler rather than thrown, and flag the appender as failed.
    if (streamName == null) {
        initializationFailed = true;
        error("Invalid configuration - streamName cannot be null for appender: " + name);
    }
    if (layout == null) {
        initializationFailed = true;
        error("Invalid configuration - No layout for appender: " + name);
    }

    // Client configuration: proxy from system properties, maxRetries applied both as
    // the legacy max-error-retry setting and inside the explicit retry policy.
    ClientConfiguration clientConfiguration = new ClientConfiguration();
    clientConfiguration = setProxySettingsFromSystemProperties(clientConfiguration);
    clientConfiguration.setMaxErrorRetry(maxRetries);
    clientConfiguration.setRetryPolicy(new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION,
        PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, maxRetries, true));
    clientConfiguration.setUserAgent(AppenderConstants.USER_AGENT_STRING);

    // Bounded work queue plus a fixed-size thread pool; the rejection policy blocks
    // the producing thread instead of dropping log records.
    BlockingQueue<Runnable> taskBuffer = new LinkedBlockingDeque<Runnable>(bufferSize);
    ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(threadCount, threadCount,
        AppenderConstants.DEFAULT_THREAD_KEEP_ALIVE_SEC, TimeUnit.SECONDS, taskBuffer, new BlockFastProducerPolicy());
    threadPoolExecutor.prestartAllCoreThreads();
    kinesisClient = new AmazonKinesisAsyncClient(new CustomCredentialsProviderChain(), clientConfiguration,
        threadPoolExecutor);

    // Endpoint takes precedence over region when both are configured.
    boolean regionProvided = !Validator.isBlank(region);
    if (!regionProvided) {
        region = AppenderConstants.DEFAULT_REGION;
    }
    if (!Validator.isBlank(endpoint)) {
        if (regionProvided) {
            LOGGER
                .warn("Received configuration for both region as well as Amazon Kinesis endpoint. (" + endpoint
                    + ") will be used as endpoint instead of default endpoint for region (" + region + ")");
        }
        kinesisClient.setEndpoint(endpoint, AppenderConstants.DEFAULT_SERVICE_NAME, region);
    } else {
        kinesisClient.setRegion(Region.getRegion(Regions.fromName(region)));
    }

    // Confirm the stream exists and is usable (active or updating) before accepting events.
    DescribeStreamResult describeResult = null;
    try {
        describeResult = kinesisClient.describeStream(streamName);
        String streamStatus = describeResult.getStreamDescription().getStreamStatus();
        if (!StreamStatus.ACTIVE.name().equals(streamStatus) && !StreamStatus.UPDATING.name().equals(streamStatus)) {
            initializationFailed = true;
            error("Stream " + streamName + " is not ready (in active/updating status) for appender: " + name);
        }
    } catch (ResourceNotFoundException rnfe) {
        initializationFailed = true;
        error("Stream " + streamName + " doesn't exist for appender: " + name, rnfe);
    }

    asyncCallHander = new AsyncPutCallStatsReporter(name);
}
Example #19
Source File: InstrumentedBackoffStrategy.java From kork with Apache License 2.0 | 4 votes |
/**
 * Creates a backoff strategy that delegates to {@code delegate} while
 * recording metrics to {@code registry}.
 *
 * @param registry the metrics registry; must not be null
 * @param delegate the underlying backoff strategy to wrap; must not be null
 */
public InstrumentedBackoffStrategy(Registry registry, RetryPolicy.BackoffStrategy delegate) {
    this.registry = Objects.requireNonNull(registry, "registry");
    this.delegate = Objects.requireNonNull(delegate, "delegate");
}
Example #20
Source File: InstrumentedRetryCondition.java From kork with Apache License 2.0 | 4 votes |
/**
 * Creates a retry condition that delegates to {@code delegate} while
 * recording metrics to {@code registry}.
 *
 * @param registry the metrics registry; must not be null
 * @param delegate the underlying retry condition to wrap; must not be null
 */
public InstrumentedRetryCondition(Registry registry, RetryPolicy.RetryCondition delegate) {
    this.registry = Objects.requireNonNull(registry, "registry");
    this.delegate = Objects.requireNonNull(delegate, "delegate");
}
Example #21
Source File: S3DaoImplTest.java From herd with Apache License 2.0 | 4 votes |
/**
 * Verifies that requesting an Expedited restore for an object stored in the
 * DeepArchive storage class surfaces the S3 service error as an
 * {@link IllegalArgumentException} with the expected message.
 */
@Test
public void testRestoreObjectsInDeepArchiveWithExpeditedArchiveRetrievalOption() {
    List<File> files = Collections.singletonList(new File(TEST_FILE));

    // Create an S3 file transfer request parameters DTO to access S3 objects.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);
    s3FileTransferRequestParamsDto.setS3KeyPrefix(S3_KEY_PREFIX);
    s3FileTransferRequestParamsDto.setFiles(files);

    // Create a retry policy.
    RetryPolicy retryPolicy = new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION,
        PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);

    // Create an Object Metadata with DeepArchive storage class.
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setOngoingRestore(false);
    objectMetadata.setHeader(Headers.STORAGE_CLASS, StorageClass.DeepArchive);

    ArgumentCaptor<AmazonS3Client> s3ClientCaptor = ArgumentCaptor.forClass(AmazonS3Client.class);
    ArgumentCaptor<String> s3BucketNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> keyCaptor = ArgumentCaptor.forClass(String.class);

    // Mock the external calls; restoreObject fails the way S3 does for an
    // unsupported retrieval option.
    when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
    when(s3Operations.getObjectMetadata(s3BucketNameCaptor.capture(), keyCaptor.capture(), s3ClientCaptor.capture())).thenReturn(objectMetadata);
    doThrow(new AmazonServiceException("Retrieval option is not supported by this storage class")).when(s3Operations)
        .restoreObject(any(RestoreObjectRequest.class), any(AmazonS3.class));

    try {
        s3DaoImpl.restoreObjects(s3FileTransferRequestParamsDto, EXPIRATION_IN_DAYS, Tier.Expedited.toString());
        fail();
    } catch (IllegalArgumentException e) {
        assertEquals(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. " +
            "Reason: Retrieval option is not supported by this storage class (Service: null; Status Code: 0; Error Code: null; Request ID: null)",
            TEST_FILE, S3_BUCKET_NAME), e.getMessage());
    }
}
Example #22
Source File: AwsComponents.java From kork with Apache License 2.0 | 4 votes |
/**
 * Registers a {@link RetryPolicy.RetryCondition} bean that reports retry
 * metrics to the given registry.
 *
 * @param registry the metrics registry the condition records into
 * @return the instrumented retry condition
 */
// NOTE(review): this uses a single-argument constructor, while
// InstrumentedRetryCondition elsewhere also declares a (registry, delegate)
// constructor — presumably the one-arg form supplies a default delegate; confirm.
@Bean
RetryPolicy.RetryCondition instrumentedRetryCondition(Registry registry) {
    return new InstrumentedRetryCondition(registry);
}
Example #23
Source File: S3DaoImplTest.java From herd with Apache License 2.0 | 4 votes |
/**
 * Helper that drives the tag-versions flow end to end against mocked S3
 * operations: one object version is tagged, and the retry policy factory is
 * consulted twice (once by the lister credentials and once by the tagger).
 */
private void runTagVersionsTest() {
    // Create an S3 file transfer request parameters DTO to access S3 objects.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);

    // Create an S3 file transfer request parameters DTO to tag S3 objects.
    S3FileTransferRequestParamsDto s3ObjectTaggerParamsDto = new S3FileTransferRequestParamsDto();
    s3ObjectTaggerParamsDto.setAwsAccessKeyId(AWS_ASSUMED_ROLE_ACCESS_KEY);
    s3ObjectTaggerParamsDto.setAwsSecretKey(AWS_ASSUMED_ROLE_SECRET_KEY);
    s3ObjectTaggerParamsDto.setSessionToken(AWS_ASSUMED_ROLE_SESSION_TOKEN);

    // Create an S3 version summary.
    S3VersionSummary s3VersionSummary = new S3VersionSummary();
    s3VersionSummary.setKey(S3_KEY);
    s3VersionSummary.setVersionId(S3_VERSION_ID);

    // Create an S3 object tag.
    Tag tag = new Tag(S3_OBJECT_TAG_KEY, S3_OBJECT_TAG_VALUE);

    // Create a retry policy.
    RetryPolicy retryPolicy = new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION,
        PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);

    // Create a get object tagging result.
    GetObjectTaggingResult getObjectTaggingResult = new GetObjectTaggingResult(null);

    // Create a set object tagging result.
    SetObjectTaggingResult setObjectTaggingResult = new SetObjectTaggingResult();

    // Mock the external calls.
    when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
    when(s3Operations.getObjectTagging(any(GetObjectTaggingRequest.class), any(AmazonS3Client.class))).thenReturn(getObjectTaggingResult);
    when(s3Operations.setObjectTagging(any(SetObjectTaggingRequest.class), any(AmazonS3Client.class))).thenReturn(setObjectTaggingResult);

    // Call the method under test.
    s3DaoImpl.tagVersions(s3FileTransferRequestParamsDto, s3ObjectTaggerParamsDto, Collections.singletonList(s3VersionSummary), tag);

    // Verify the external calls.
    verify(retryPolicyFactory, times(2)).getRetryPolicy();
    verify(s3Operations).getObjectTagging(any(GetObjectTaggingRequest.class), any(AmazonS3Client.class));
    verify(s3Operations).setObjectTagging(any(SetObjectTaggingRequest.class), any(AmazonS3Client.class));
    verifyNoMoreInteractionsHelper();
}
Example #24
Source File: S3DaoImplTest.java From herd with Apache License 2.0 | 4 votes |
/**
 * Helper that drives the tag-objects flow end to end against mocked S3
 * operations: one object is tagged, and the retry policy factory is consulted
 * twice (once by the lister credentials and once by the tagger).
 */
private void runTagObjectsTest() {
    // Create an S3 file transfer request parameters DTO to access S3 objects.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);

    // Create an S3 file transfer request parameters DTO to tag S3 objects.
    S3FileTransferRequestParamsDto s3ObjectTaggerParamsDto = new S3FileTransferRequestParamsDto();
    s3ObjectTaggerParamsDto.setAwsAccessKeyId(AWS_ASSUMED_ROLE_ACCESS_KEY);
    s3ObjectTaggerParamsDto.setAwsSecretKey(AWS_ASSUMED_ROLE_SECRET_KEY);
    s3ObjectTaggerParamsDto.setSessionToken(AWS_ASSUMED_ROLE_SESSION_TOKEN);

    // Create an S3 object summary.
    S3ObjectSummary s3ObjectSummary = new S3ObjectSummary();
    s3ObjectSummary.setKey(S3_KEY);

    // Create an S3 object tag.
    Tag tag = new Tag(S3_OBJECT_TAG_KEY, S3_OBJECT_TAG_VALUE);

    // Create a retry policy.
    RetryPolicy retryPolicy = new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION,
        PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);

    // Create a get object tagging result.
    GetObjectTaggingResult getObjectTaggingResult = new GetObjectTaggingResult(null);

    // Create a set object tagging result.
    SetObjectTaggingResult setObjectTaggingResult = new SetObjectTaggingResult();

    // Mock the external calls.
    when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
    when(s3Operations.getObjectTagging(any(GetObjectTaggingRequest.class), any(AmazonS3Client.class))).thenReturn(getObjectTaggingResult);
    when(s3Operations.setObjectTagging(any(SetObjectTaggingRequest.class), any(AmazonS3Client.class))).thenReturn(setObjectTaggingResult);

    // Call the method under test.
    s3DaoImpl.tagObjects(s3FileTransferRequestParamsDto, s3ObjectTaggerParamsDto, Collections.singletonList(s3ObjectSummary), tag);

    // Verify the external calls.
    verify(retryPolicyFactory, times(2)).getRetryPolicy();
    verify(s3Operations).getObjectTagging(any(GetObjectTaggingRequest.class), any(AmazonS3Client.class));
    verify(s3Operations).setObjectTagging(any(SetObjectTaggingRequest.class), any(AmazonS3Client.class));
    verifyNoMoreInteractionsHelper();
}
Example #25
Source File: S3DaoImplTest.java From herd with Apache License 2.0 | 4 votes |
/**
 * Run restore objects method
 *
 * Drives the restore flow against mocked S3 operations for an object in the
 * given storage class and asserts the restore request carries the expected
 * bucket, key, and retrieval tier (defaulting to Bulk when none is given).
 *
 * @param archiveRetrievalOption the archive retrieval option
 * @param storageClass           the storage class set on the mocked object metadata
 */
private void runRestoreObjects(String archiveRetrievalOption, StorageClass storageClass) {
    List<File> files = Collections.singletonList(new File(TEST_FILE));

    // Create an S3 file transfer request parameters DTO to access S3 objects.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME);
    s3FileTransferRequestParamsDto.setS3KeyPrefix(S3_KEY_PREFIX);
    s3FileTransferRequestParamsDto.setFiles(files);

    // Create a retry policy.
    RetryPolicy retryPolicy = new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION,
        PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true);

    // Create an Object Metadata
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setOngoingRestore(false);
    objectMetadata.setHeader(Headers.STORAGE_CLASS, storageClass);

    ArgumentCaptor<AmazonS3Client> s3ClientCaptor = ArgumentCaptor.forClass(AmazonS3Client.class);
    ArgumentCaptor<String> s3BucketNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> keyCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<RestoreObjectRequest> requestStoreCaptor = ArgumentCaptor.forClass(RestoreObjectRequest.class);

    // Mock the external calls.
    when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy);
    when(s3Operations.getObjectMetadata(s3BucketNameCaptor.capture(), keyCaptor.capture(), s3ClientCaptor.capture())).thenReturn(objectMetadata);
    doNothing().when(s3Operations).restoreObject(requestStoreCaptor.capture(), s3ClientCaptor.capture());

    s3DaoImpl.restoreObjects(s3FileTransferRequestParamsDto, EXPIRATION_IN_DAYS, archiveRetrievalOption);

    RestoreObjectRequest requestStore = requestStoreCaptor.getValue();
    assertEquals(S3_BUCKET_NAME, s3BucketNameCaptor.getValue());
    assertEquals(TEST_FILE, keyCaptor.getValue());

    // Verify Bulk option is used when the option is not provided
    assertEquals(StringUtils.isNotEmpty(archiveRetrievalOption) ? archiveRetrievalOption : Tier.Bulk.toString(),
        requestStore.getGlacierJobParameters().getTier());

    // Verify the external calls
    verify(retryPolicyFactory).getRetryPolicy();
    verify(s3Operations).getObjectMetadata(anyString(), anyString(), any(AmazonS3Client.class));
    verify(s3Operations).restoreObject(any(RestoreObjectRequest.class), any(AmazonS3Client.class));
    verifyNoMoreInteractionsHelper();
}
Example #26
Source File: S3DaoImplTest.java From herd with Apache License 2.0 | 4 votes |
private void testRestoreObjectsWithS3Exception(String exceptionMessage, int statusCode) { List<File> files = Collections.singletonList(new File(TEST_FILE)); // Create an S3 file transfer request parameters DTO to access S3 objects. S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto(); s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME); s3FileTransferRequestParamsDto.setS3KeyPrefix(S3_KEY_PREFIX); s3FileTransferRequestParamsDto.setFiles(files); // Create a retry policy. RetryPolicy retryPolicy = new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true); // Create an Object Metadata ObjectMetadata objectMetadata = new ObjectMetadata(); objectMetadata.setOngoingRestore(false); objectMetadata.setHeader(Headers.STORAGE_CLASS, StorageClass.DeepArchive); ArgumentCaptor<AmazonS3Client> s3ClientCaptor = ArgumentCaptor.forClass(AmazonS3Client.class); ArgumentCaptor<String> s3BucketNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor<String> keyCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor<RestoreObjectRequest> requestStoreCaptor = ArgumentCaptor.forClass(RestoreObjectRequest.class); // Create an Amazon S3 Exception AmazonS3Exception amazonS3Exception = new AmazonS3Exception(exceptionMessage); amazonS3Exception.setStatusCode(statusCode); // Mock the external calls. when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy); when(s3Operations.getObjectMetadata(s3BucketNameCaptor.capture(), keyCaptor.capture(), s3ClientCaptor.capture())).thenReturn(objectMetadata); doThrow(amazonS3Exception).when(s3Operations).restoreObject(requestStoreCaptor.capture(), s3ClientCaptor.capture()); try { // Call the method under test. 
s3DaoImpl.restoreObjects(s3FileTransferRequestParamsDto, EXPIRATION_IN_DAYS, Tier.Standard.toString()); // If this is not a restore already in progress exception message (409) then we should have caught an exception. // Else if this is a restore already in progress message (409) then continue as usual. if (!exceptionMessage.equals(RESTORE_ALREADY_IN_PROGRESS_EXCEPTION_MESSAGE)) { // Should not be here. Fail! fail(); } else { RestoreObjectRequest requestStore = requestStoreCaptor.getValue(); assertEquals(S3_BUCKET_NAME, s3BucketNameCaptor.getValue()); assertEquals(TEST_FILE, keyCaptor.getValue()); // Verify Bulk option is used when the option is not provided assertEquals(StringUtils.isNotEmpty(Tier.Standard.toString()) ? Tier.Standard.toString() : Tier.Bulk.toString(), requestStore.getGlacierJobParameters().getTier()); } } catch (IllegalStateException illegalStateException) { assertEquals(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. " + "Reason: com.amazonaws.services.s3.model.AmazonS3Exception: %s " + "(Service: null; Status Code: %s; Error Code: null; Request ID: null; S3 Extended Request ID: null), S3 Extended Request ID: null", TEST_FILE, S3_BUCKET_NAME, exceptionMessage, statusCode), illegalStateException.getMessage()); } // Verify the external calls verify(retryPolicyFactory).getRetryPolicy(); verify(s3Operations).getObjectMetadata(anyString(), anyString(), any(AmazonS3Client.class)); verify(s3Operations).restoreObject(any(RestoreObjectRequest.class), any(AmazonS3Client.class)); verifyNoMoreInteractionsHelper(); }
Example #27
Source File: S3DaoImplTest.java From herd with Apache License 2.0 | 4 votes |
@Test public void testDeleteDirectoryMultiObjectDeleteException() { // Create an S3 file transfer request parameters DTO to access S3 objects. S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto(); s3FileTransferRequestParamsDto.setS3BucketName(S3_BUCKET_NAME); s3FileTransferRequestParamsDto.setS3KeyPrefix(S3_KEY_PREFIX); // Create a retry policy. RetryPolicy retryPolicy = new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, INTEGER_VALUE, true); // Create an S3 version summary. S3VersionSummary s3VersionSummary = new S3VersionSummary(); s3VersionSummary.setKey(S3_KEY); s3VersionSummary.setVersionId(S3_VERSION_ID); // Create a version listing. VersionListing versionListing = new VersionListing(); versionListing.setVersionSummaries(Collections.singletonList(s3VersionSummary)); // Create a delete error. MultiObjectDeleteException.DeleteError deleteError = new MultiObjectDeleteException.DeleteError(); deleteError.setKey(S3_KEY); deleteError.setVersionId(S3_VERSION_ID); deleteError.setCode(ERROR_CODE); deleteError.setMessage(ERROR_MESSAGE); // Create a multi object delete exception. MultiObjectDeleteException multiObjectDeleteException = new MultiObjectDeleteException(Collections.singletonList(deleteError), new ArrayList<>()); // Mock the external calls. when(retryPolicyFactory.getRetryPolicy()).thenReturn(retryPolicy); when(s3Operations.listVersions(any(ListVersionsRequest.class), any(AmazonS3Client.class))).thenReturn(versionListing); when(s3Operations.deleteObjects(any(DeleteObjectsRequest.class), any(AmazonS3Client.class))).thenThrow(multiObjectDeleteException); // Try to call the method under test. try { s3DaoImpl.deleteDirectory(s3FileTransferRequestParamsDto); } catch (IllegalStateException e) { assertEquals(String.format( "Failed to delete keys/key versions with prefix \"%s\" from bucket \"%s\". 
Reason: One or more objects could not be deleted " + "(Service: null; Status Code: 0; Error Code: null; Request ID: null; S3 Extended Request ID: null)", S3_KEY_PREFIX, S3_BUCKET_NAME), e.getMessage()); } // Verify the external calls. verify(retryPolicyFactory, times(2)).getRetryPolicy(); verify(s3Operations).listVersions(any(ListVersionsRequest.class), any(AmazonS3Client.class)); verify(s3Operations).deleteObjects(any(DeleteObjectsRequest.class), any(AmazonS3Client.class)); verifyNoMoreInteractionsHelper(); }
Example #28
Source File: RetryPolicyFactory.java From herd with Apache License 2.0 | 2 votes |
/** * Gets the application's default retry policy. The policy uses PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, a SimpleExponentialBackoffStrategy, and the * maximum number of attempts is dynamically configurable through AWS_MAX_RETRY_ATTEMPT. * * @return RetryPolicy */ public RetryPolicy getRetryPolicy() { return new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION, backoffStrategy, configurationHelper.getProperty(ConfigurationValue.AWS_MAX_RETRY_ATTEMPT, Integer.class), true); }