com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClientBuilder Java Examples
The following examples show how to use
com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClientBuilder.
Each example is drawn from an open-source project; the source file and its license are noted above the code.
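As a baseline before the project examples, the simplest construction lets the builder resolve region and credentials from the default provider chains. A minimal sketch (the listClusters call is only an illustration of using the resulting client, not part of any project below):

import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduce;
import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClientBuilder;
import com.amazonaws.services.elasticmapreduce.model.ListClustersRequest;

public class EmrClientQuickStart {
    public static void main(String[] args) {
        // standard() picks up credentials and region from the default provider chains.
        AmazonElasticMapReduce emr = AmazonElasticMapReduceClientBuilder.standard().build();

        // Illustrative call: print the clusters visible to these credentials.
        emr.listClusters(new ListClustersRequest()).getClusters()
            .forEach(c -> System.out.println(c.getId() + " " + c.getName()));
    }
}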
Example #1
Source File: AwsClientFactory.java From herd with Apache License 2.0
/**
 * Creates a client for accessing Amazon EMR service.
 *
 * @param awsParamsDto the AWS related parameters DTO that includes optional AWS credentials and proxy information
 *
 * @return the Amazon EMR client
 */
@Cacheable(DaoSpringModuleConfig.HERD_CACHE_NAME)
public AmazonElasticMapReduce getEmrClient(AwsParamsDto awsParamsDto)
{
    // Get client configuration.
    ClientConfiguration clientConfiguration = awsHelper.getClientConfiguration(awsParamsDto);

    // If specified, use the AWS credentials passed in.
    if (StringUtils.isNotBlank(awsParamsDto.getAwsAccessKeyId()))
    {
        return AmazonElasticMapReduceClientBuilder.standard().withCredentials(new AWSStaticCredentialsProvider(
            new BasicSessionCredentials(awsParamsDto.getAwsAccessKeyId(), awsParamsDto.getAwsSecretKey(), awsParamsDto.getSessionToken())))
            .withClientConfiguration(clientConfiguration).withRegion(awsParamsDto.getAwsRegionName()).build();
    }
    // Otherwise, use the default AWS credentials provider chain.
    else
    {
        return AmazonElasticMapReduceClientBuilder.standard().withClientConfiguration(clientConfiguration).withRegion(awsParamsDto.getAwsRegionName())
            .build();
    }
}
Example #2
Source File: TableProviderFactory.java From aws-athena-query-federation with Apache License 2.0
public TableProviderFactory()
{
    this(AmazonEC2ClientBuilder.standard().build(),
        AmazonElasticMapReduceClientBuilder.standard().build(),
        AmazonRDSClientBuilder.standard().build(),
        AmazonS3ClientBuilder.standard().build());
}
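This no-arg constructor simply wires default clients, built with no explicit credentials or region, into an overloaded constructor; presumably the overload exists so tests can inject mocked clients instead.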
Example #3
Source File: InventoryUtil.java From pacbot with Apache License 2.0
/**
 * Fetch EMR info.
 *
 * @param temporaryCredentials the temporary credentials
 * @param skipRegions the skip regions
 * @param accountId the accountId
 * @param accountName the account name
 * @return the map
 */
public static Map<String, List<Cluster>> fetchEMRInfo(BasicSessionCredentials temporaryCredentials, String skipRegions, String accountId,
        String accountName) {
    Map<String, List<Cluster>> clusterList = new LinkedHashMap<>();
    String expPrefix = InventoryConstants.ERROR_PREFIX_CODE + accountId
            + "\",\"Message\": \"Exception in fetching info for resource in specific region\" ,\"type\": \"EMR\" , \"region\":\"";
    for (Region region : RegionUtils.getRegions()) {
        try {
            if (!skipRegions.contains(region.getName())) {
                AmazonElasticMapReduce emrClient = AmazonElasticMapReduceClientBuilder.standard()
                        .withCredentials(new AWSStaticCredentialsProvider(temporaryCredentials)).withRegion(region.getName()).build();
                List<ClusterSummary> clusters = new ArrayList<>();
                String marker = null;
                ListClustersResult clusterResult;
                do {
                    clusterResult = emrClient.listClusters(new ListClustersRequest().withMarker(marker));
                    clusters.addAll(clusterResult.getClusters());
                    marker = clusterResult.getMarker();
                } while (marker != null);
                List<Cluster> clustersList = new ArrayList<>();
                clusters.forEach(cluster -> {
                    DescribeClusterResult descClstrRslt = emrClient.describeCluster(new DescribeClusterRequest().withClusterId(cluster.getId()));
                    clustersList.add(descClstrRslt.getCluster());
                });
                if (!clustersList.isEmpty()) {
                    log.debug(InventoryConstants.ACCOUNT + accountId + " Type : EMR " + region.getName() + " >> " + clustersList.size());
                    clusterList.put(accountId + delimiter + accountName + delimiter + region.getName(), clustersList);
                }
            }
        } catch (Exception e) {
            if (region.isServiceSupported(AmazonElasticMapReduce.ENDPOINT_PREFIX)) {
                log.warn(expPrefix + region.getName() + InventoryConstants.ERROR_CAUSE + e.getMessage() + "\"}");
                ErrorManageUtil.uploadError(accountId, region.getName(), "emr", e.getMessage());
            }
        }
    }
    return clusterList;
}
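The do/while loop is the standard marker-based pagination idiom for listClusters: each page returns a marker for the next call, and a null marker signals the last page.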
Example #4
Source File: InventoryUtilTest.java From pacbot with Apache License 2.0
/**
 * Fetch EMR info test.
 *
 * @throws Exception the exception
 */
@SuppressWarnings("static-access")
@Test
public void fetchEMRInfoTest() throws Exception {
    mockStatic(AmazonElasticMapReduceClientBuilder.class);
    AmazonElasticMapReduce emrClient = PowerMockito.mock(AmazonElasticMapReduce.class);
    // Note: the original source names this mock "amazonElasticFileSystemClientBuilder"; renamed here since it mocks the EMR builder.
    AmazonElasticMapReduceClientBuilder amazonElasticMapReduceClientBuilder = PowerMockito.mock(AmazonElasticMapReduceClientBuilder.class);
    AWSStaticCredentialsProvider awsStaticCredentialsProvider = PowerMockito.mock(AWSStaticCredentialsProvider.class);
    PowerMockito.whenNew(AWSStaticCredentialsProvider.class).withAnyArguments().thenReturn(awsStaticCredentialsProvider);
    when(amazonElasticMapReduceClientBuilder.standard()).thenReturn(amazonElasticMapReduceClientBuilder);
    when(amazonElasticMapReduceClientBuilder.withCredentials(anyObject())).thenReturn(amazonElasticMapReduceClientBuilder);
    when(amazonElasticMapReduceClientBuilder.withRegion(anyString())).thenReturn(amazonElasticMapReduceClientBuilder);
    when(amazonElasticMapReduceClientBuilder.build()).thenReturn(emrClient);

    ListClustersResult listClustersResult = new ListClustersResult();
    List<ClusterSummary> clusters = new ArrayList<>();
    ClusterSummary clusterSummary = new ClusterSummary();
    clusterSummary.setId("id");
    clusters.add(clusterSummary);
    listClustersResult.setClusters(clusters);
    when(emrClient.listClusters(anyObject())).thenReturn(listClustersResult);

    DescribeClusterResult describeClusterResult = new DescribeClusterResult();
    describeClusterResult.setCluster(new Cluster());
    when(emrClient.describeCluster(anyObject())).thenReturn(describeClusterResult);

    assertThat(inventoryUtil.fetchEMRInfo(new BasicSessionCredentials("awsAccessKey", "awsSecretKey", "sessionToken"),
            "skipRegions", "account", "accountName").size(), is(1));
}
Example #5
Source File: ClusterManager.java From herd-mdl with Apache License 2.0
AmazonElasticMapReduce createEmrClient() {
    DefaultAWSCredentialsProviderChain defaultAWSCredentialsProviderChain = new DefaultAWSCredentialsProviderChain();
    AWSCredentials credentials = defaultAWSCredentialsProviderChain.getCredentials();
    emrClient = AmazonElasticMapReduceClientBuilder.standard()
        .withCredentials(new AWSStaticCredentialsProvider(credentials))
        .build();
    return emrClient;
}
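Resolving credentials once and wrapping them in AWSStaticCredentialsProvider freezes that snapshot, so temporary credentials will not refresh over the life of the client. A variant that keeps automatic refresh (a sketch, not taken from the project):

AmazonElasticMapReduce createEmrClient() {
    // Passing the provider chain itself lets the SDK re-resolve credentials as needed.
    return AmazonElasticMapReduceClientBuilder.standard()
        .withCredentials(new DefaultAWSCredentialsProviderChain())
        .build();
}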
Example #6
Source File: emr-add-steps.java From aws-doc-sdk-examples with Apache License 2.0
public static void main(String[] args) {
    AWSCredentials credentials_profile = null;
    try {
        credentials_profile = new ProfileCredentialsProvider("default").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException(
            "Cannot load credentials from .aws/credentials file. " +
            "Make sure that the credentials file exists and the profile name is specified within it.",
            e);
    }

    AmazonElasticMapReduce emr = AmazonElasticMapReduceClientBuilder.standard()
        .withCredentials(new AWSStaticCredentialsProvider(credentials_profile))
        .withRegion(Regions.US_WEST_1)
        .build();

    // Run a bash script using a predefined step in the StepFactory helper class
    StepFactory stepFactory = new StepFactory();
    StepConfig runBashScript = new StepConfig()
        .withName("Run a bash script")
        .withHadoopJarStep(stepFactory.newScriptRunnerStep("s3://jeffgoll/emr-scripts/create_users.sh"))
        .withActionOnFailure("CONTINUE");

    // Run a custom jar file as a step
    HadoopJarStepConfig hadoopConfig1 = new HadoopJarStepConfig()
        .withJar("s3://path/to/my/jarfolder") // replace with the location of the jar to run as a step
        .withMainClass("com.my.Main1") // optional main class; can be omitted if the jar above has a manifest
        .withArgs("--verbose"); // optional list of arguments to pass to the jar
    StepConfig myCustomJarStep = new StepConfig("RunHadoopJar", hadoopConfig1);

    AddJobFlowStepsResult result = emr.addJobFlowSteps(new AddJobFlowStepsRequest()
        .withJobFlowId("j-xxxxxxxxxxxx") // replace with the cluster ID to run the steps on
        .withSteps(runBashScript, myCustomJarStep));

    System.out.println(result.getStepIds());
}
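The steps submitted by addJobFlowSteps run asynchronously, so the returned step IDs are typically polled for completion. A hedged sketch of such a check, reusing the emr client and result from above (the cluster ID is the same placeholder as in the request):

// Check the state of the first submitted step.
DescribeStepResult stepResult = emr.describeStep(new DescribeStepRequest()
    .withClusterId("j-xxxxxxxxxxxx")
    .withStepId(result.getStepIds().get(0)));
System.out.println("Step state: " + stepResult.getStep().getStatus().getState());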
Example #7
Source File: EmrDaoImplTest.java From herd with Apache License 2.0
@Test
public void testCreateEmrClusterNoNscdBootstrapScript()
{
    // Create an AWS parameters DTO.
    final AwsParamsDto awsParamsDto =
        new AwsParamsDto(AWS_ASSUMED_ROLE_ACCESS_KEY, AWS_ASSUMED_ROLE_SECRET_KEY, AWS_ASSUMED_ROLE_SESSION_TOKEN, HTTP_PROXY_HOST, HTTP_PROXY_PORT,
            AWS_REGION_NAME_US_EAST_1);

    EmrClusterDefinition emrClusterDefinition = new EmrClusterDefinition();
    final InstanceDefinitions instanceDefinitions =
        new InstanceDefinitions(new MasterInstanceDefinition(), new InstanceDefinition(), new InstanceDefinition());
    emrClusterDefinition.setInstanceDefinitions(instanceDefinitions);
    emrClusterDefinition.setNodeTags(Collections.emptyList());

    AmazonElasticMapReduce amazonElasticMapReduce =
        AmazonElasticMapReduceClientBuilder.standard().withRegion(awsParamsDto.getAwsRegionName()).build();
    when(awsClientFactory.getEmrClient(awsParamsDto)).thenReturn(amazonElasticMapReduce);
    when(emrOperations.runEmrJobFlow(amazonElasticMapReduceClientArgumentCaptor.capture(), runJobFlowRequestArgumentCaptor.capture()))
        .thenReturn(EMR_CLUSTER_ID);

    // Create the cluster without NSCD script configuration
    String clusterId = emrDaoImpl.createEmrCluster(EMR_CLUSTER_NAME, emrClusterDefinition, awsParamsDto);

    // Verifications
    assertEquals(clusterId, EMR_CLUSTER_ID);
    verify(configurationHelper).getProperty(ConfigurationValue.EMR_NSCD_SCRIPT);
    verify(awsClientFactory).getEmrClient(awsParamsDto);
    verify(emrOperations).runEmrJobFlow(any(), any());

    RunJobFlowRequest runJobFlowRequest = runJobFlowRequestArgumentCaptor.getValue();
    List<BootstrapActionConfig> bootstrapActionConfigs = runJobFlowRequest.getBootstrapActions();

    // There should be no bootstrap action
    assertTrue(bootstrapActionConfigs.isEmpty());
}
Example #8
Source File: EmrClusterJob.java From datacollector with Apache License 2.0
@VisibleForTesting
AmazonElasticMapReduce getEmrClient(EmrClusterConfig emrClusterConfig) {
    if (emrClient == null) {
        emrClient = AmazonElasticMapReduceClientBuilder.standard().withCredentials(
            new AWSStaticCredentialsProvider(new BasicAWSCredentials(
                emrClusterConfig.getAccessKey(),
                emrClusterConfig.getSecretKey()
            ))).withRegion(Regions.fromName(emrClusterConfig.getUserRegion())).build();
    }
    return emrClient;
}
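Note that the lazy null-check is not synchronized; if getEmrClient can be called from multiple threads, more than one client may be built. That is wasteful rather than incorrect, since SDK clients are thread-safe and intended to be shared.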
Example #9
Source File: custom-emrfs-materials.java From aws-doc-sdk-examples with Apache License 2.0
public static void main(String[] args) {
    AWSCredentials credentials_profile = null;
    try {
        credentials_profile = new ProfileCredentialsProvider("default").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException(
            "Cannot load credentials from .aws/credentials file. " +
            "Make sure that the credentials file exists and the profile name is specified within it.",
            e);
    }

    AmazonElasticMapReduce emr = AmazonElasticMapReduceClientBuilder.standard()
        .withCredentials(new AWSStaticCredentialsProvider(credentials_profile))
        .withRegion(Regions.US_WEST_1)
        .build();

    Map<String, String> emrfsProperties = new HashMap<String, String>();
    emrfsProperties.put("fs.s3.cse.encryptionMaterialsProvider.uri", "s3://mybucket/MyCustomEncryptionMaterialsProvider.jar");
    emrfsProperties.put("fs.s3.cse.enabled", "true");
    emrfsProperties.put("fs.s3.consistent", "true");
    emrfsProperties.put("fs.s3.cse.encryptionMaterialsProvider", "full.class.name.of.EncryptionMaterialsProvider");

    Configuration myEmrfsConfig = new Configuration()
        .withClassification("emrfs-site")
        .withProperties(emrfsProperties);

    Application hive = new Application().withName("Hive");
    Application spark = new Application().withName("Spark");
    Application ganglia = new Application().withName("Ganglia");
    Application zeppelin = new Application().withName("Zeppelin");

    RunJobFlowRequest request = new RunJobFlowRequest()
        .withName("ClusterWithCustomEMRFSEncryptionMaterialsProvider")
        .withReleaseLabel("emr-5.20.0")
        .withApplications(hive, spark, ganglia, zeppelin)
        .withConfigurations(myEmrfsConfig)
        .withServiceRole("EMR_DefaultRole")
        .withJobFlowRole("EMR_EC2_DefaultRole")
        .withLogUri("s3://path/to/emr/logs")
        .withInstances(new JobFlowInstancesConfig()
            .withEc2KeyName("myEc2Key")
            .withInstanceCount(3)
            .withKeepJobFlowAliveWhenNoSteps(true)
            .withMasterInstanceType("m4.large")
            .withSlaveInstanceType("m4.large"));

    RunJobFlowResult result = emr.runJobFlow(request);
    System.out.println("The cluster ID is " + result.getJobFlowId()); // getJobFlowId() returns the ID itself, unlike toString()
}
Example #10
Source File: emr-flink-cluster-transient-step.java From aws-doc-sdk-examples with Apache License 2.0
public static void main(String[] args) {
    AWSCredentials credentials_profile = null;
    try {
        credentials_profile = new ProfileCredentialsProvider("default").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException(
            "Cannot load credentials from .aws/credentials file. " +
            "Make sure that the credentials file exists and the profile name is specified within it.",
            e);
    }

    AmazonElasticMapReduce emr = AmazonElasticMapReduceClientBuilder.standard()
        .withCredentials(new AWSStaticCredentialsProvider(credentials_profile))
        .withRegion(Regions.US_WEST_1)
        .build();

    List<StepConfig> stepConfigs = new ArrayList<StepConfig>();

    // bash -c expects the full flink command as a single string; splitting it into
    // separate arguments (as in the original sample) would leave everything after "flink" unused.
    HadoopJarStepConfig flinkWordCountConf = new HadoopJarStepConfig()
        .withJar("command-runner.jar")
        .withArgs("bash", "-c",
            "flink run -m yarn-cluster -yn 2 /usr/lib/flink/examples/streaming/WordCount.jar "
                + "--input s3://path/to/input-file.txt --output s3://path/to/output/");

    StepConfig flinkRunWordCountStep = new StepConfig()
        .withName("Flink add a wordcount step and terminate")
        .withActionOnFailure("CONTINUE")
        .withHadoopJarStep(flinkWordCountConf);
    stepConfigs.add(flinkRunWordCountStep);

    Application flink = new Application().withName("Flink");

    RunJobFlowRequest request = new RunJobFlowRequest()
        .withName("flink-transient")
        .withReleaseLabel("emr-5.20.0")
        .withApplications(flink)
        .withServiceRole("EMR_DefaultRole")
        .withJobFlowRole("EMR_EC2_DefaultRole")
        .withLogUri("s3://path/to/my/logfiles")
        .withInstances(new JobFlowInstancesConfig()
            .withEc2KeyName("myEc2Key")
            .withEc2SubnetId("subnet-12ab3c45")
            .withInstanceCount(3)
            .withKeepJobFlowAliveWhenNoSteps(false)
            .withMasterInstanceType("m4.large")
            .withSlaveInstanceType("m4.large"))
        .withSteps(stepConfigs);

    RunJobFlowResult result = emr.runJobFlow(request);
    System.out.println("The cluster ID is " + result.getJobFlowId());
}
Example #11
Source File: create_cluster.java From aws-doc-sdk-examples with Apache License 2.0
public static void main(String[] args) {
    AWSCredentials credentials_profile = null;
    try {
        // specifies any named profile in .aws/credentials as the credentials provider
        credentials_profile = new ProfileCredentialsProvider("default").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException(
            "Cannot load credentials from .aws/credentials file. " +
            "Make sure that the credentials file exists and that the profile name is defined within it.",
            e);
    }

    // create an EMR client using the specified credentials and region
    AmazonElasticMapReduce emr = AmazonElasticMapReduceClientBuilder.standard()
        .withCredentials(new AWSStaticCredentialsProvider(credentials_profile))
        .withRegion(Regions.US_WEST_1)
        .build();

    // create a step to enable debugging in the AWS Management Console
    StepFactory stepFactory = new StepFactory();
    StepConfig enabledebugging = new StepConfig()
        .withName("Enable debugging")
        .withActionOnFailure("TERMINATE_JOB_FLOW")
        .withHadoopJarStep(stepFactory.newEnableDebuggingStep());

    // specify applications to be installed and configured when EMR creates the cluster
    Application hive = new Application().withName("Hive");
    Application spark = new Application().withName("Spark");
    Application ganglia = new Application().withName("Ganglia");
    Application zeppelin = new Application().withName("Zeppelin");

    // create the cluster
    RunJobFlowRequest request = new RunJobFlowRequest()
        .withName("MyClusterCreatedFromJava")
        .withReleaseLabel("emr-5.20.0") // specifies the EMR release label; the latest release is recommended
        .withSteps(enabledebugging)
        .withApplications(hive, spark, ganglia, zeppelin)
        .withLogUri("s3://path/to/my/emr/logs") // an S3 URI for log files is required when debugging is enabled
        .withServiceRole("EMR_DefaultRole") // replace the default with a custom IAM service role if one is used
        .withJobFlowRole("EMR_EC2_DefaultRole") // replace the default with a custom EMR role for the EC2 instance profile if one is used
        .withInstances(new JobFlowInstancesConfig()
            .withEc2SubnetId("subnet-12ab34c56")
            .withEc2KeyName("myEc2Key")
            .withInstanceCount(3)
            .withKeepJobFlowAliveWhenNoSteps(true)
            .withMasterInstanceType("m4.large")
            .withSlaveInstanceType("m4.large"));

    RunJobFlowResult result = emr.runJobFlow(request);
    System.out.println("The cluster ID is " + result.getJobFlowId());
}
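runJobFlow returns as soon as the request is accepted; provisioning the cluster takes several minutes. One way to block until it is usable is the SDK's built-in waiters, a sketch of which follows (assuming the emr client and result from above):

// Block until the cluster reaches a running state, or the waiter times out.
emr.waiters().clusterRunning().run(new WaiterParameters<>(
    new DescribeClusterRequest().withClusterId(result.getJobFlowId())));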
Example #12
Source File: create-spark-cluster.java From aws-doc-sdk-examples with Apache License 2.0
public static void main(String[] args) {
    AWSCredentials credentials_profile = null;
    try {
        credentials_profile = new ProfileCredentialsProvider("default").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException(
            "Cannot load credentials from .aws/credentials file. " +
            "Make sure that the credentials file exists and the profile name is specified within it.",
            e);
    }

    AmazonElasticMapReduce emr = AmazonElasticMapReduceClientBuilder.standard()
        .withCredentials(new AWSStaticCredentialsProvider(credentials_profile))
        .withRegion(Regions.US_WEST_1)
        .build();

    // create a step to enable debugging in the AWS Management Console
    StepFactory stepFactory = new StepFactory();
    StepConfig enabledebugging = new StepConfig()
        .withName("Enable debugging")
        .withActionOnFailure("TERMINATE_JOB_FLOW")
        .withHadoopJarStep(stepFactory.newEnableDebuggingStep());

    Application spark = new Application().withName("Spark");

    RunJobFlowRequest request = new RunJobFlowRequest()
        .withName("Spark Cluster")
        .withReleaseLabel("emr-5.20.0")
        .withSteps(enabledebugging)
        .withApplications(spark)
        .withLogUri("s3://path/to/my/logs/")
        .withServiceRole("EMR_DefaultRole")
        .withJobFlowRole("EMR_EC2_DefaultRole")
        .withInstances(new JobFlowInstancesConfig()
            .withEc2SubnetId("subnet-12ab3c45")
            .withEc2KeyName("myEc2Key")
            .withInstanceCount(3)
            .withKeepJobFlowAliveWhenNoSteps(true)
            .withMasterInstanceType("m4.large")
            .withSlaveInstanceType("m4.large"));

    RunJobFlowResult result = emr.runJobFlow(request);
    System.out.println("The cluster ID is " + result.getJobFlowId());
}
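Because keepJobFlowAliveWhenNoSteps is true, this cluster keeps running (and accruing cost) after the debugging step finishes. A clean-up sketch using the same client and result:

// Terminate the cluster once it is no longer needed.
emr.terminateJobFlows(new TerminateJobFlowsRequest()
    .withJobFlowIds(result.getJobFlowId()));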
Example #13
Source File: EmrDaoImplTest.java From herd with Apache License 2.0
@Test
public void testCreateEmrClusterWithNscdBootstrapScript()
{
    // Create an AWS parameters DTO.
    final AwsParamsDto awsParamsDto =
        new AwsParamsDto(AWS_ASSUMED_ROLE_ACCESS_KEY, AWS_ASSUMED_ROLE_SECRET_KEY, AWS_ASSUMED_ROLE_SESSION_TOKEN, HTTP_PROXY_HOST, HTTP_PROXY_PORT,
            AWS_REGION_NAME_US_EAST_1);

    EmrClusterDefinition emrClusterDefinition = new EmrClusterDefinition();
    final InstanceDefinitions instanceDefinitions =
        new InstanceDefinitions(new MasterInstanceDefinition(), new InstanceDefinition(), new InstanceDefinition());
    emrClusterDefinition.setInstanceDefinitions(instanceDefinitions);
    emrClusterDefinition.setNodeTags(Collections.emptyList());

    when(configurationHelper.getProperty(ConfigurationValue.EMR_NSCD_SCRIPT)).thenReturn(EMR_NSCD_SCRIPT);
    when(configurationHelper.getProperty(ConfigurationValue.S3_URL_PROTOCOL)).thenReturn(S3_URL_PROTOCOL);
    when(configurationHelper.getProperty(ConfigurationValue.S3_STAGING_BUCKET_NAME)).thenReturn(S3_BUCKET_NAME);
    when(configurationHelper.getProperty(ConfigurationValue.S3_STAGING_RESOURCE_BASE)).thenReturn(S3_STAGING_RESOURCE_BASE);
    when(configurationHelper.getProperty(ConfigurationValue.S3_URL_PATH_DELIMITER)).thenReturn(S3_URL_PATH_DELIMITER);
    when(configurationHelper.getProperty(ConfigurationValue.EMR_CONFIGURE_DAEMON)).thenReturn(EMR_CONFIGURE_DAEMON);

    List<Parameter> daemonConfigs = new ArrayList<>();
    Parameter daemonConfig = new Parameter();
    daemonConfig.setName(EMR_CLUSTER_DAEMON_CONFIG_NAME);
    daemonConfig.setValue(EMR_CLUSTER_DAEMON_CONFIG_VALUE);
    daemonConfigs.add(daemonConfig);
    emrClusterDefinition.setDaemonConfigurations(daemonConfigs);

    AmazonElasticMapReduce amazonElasticMapReduce =
        AmazonElasticMapReduceClientBuilder.standard().withRegion(awsParamsDto.getAwsRegionName()).build();
    when(awsClientFactory.getEmrClient(awsParamsDto)).thenReturn(amazonElasticMapReduce);
    when(emrOperations.runEmrJobFlow(amazonElasticMapReduceClientArgumentCaptor.capture(), runJobFlowRequestArgumentCaptor.capture()))
        .thenReturn(EMR_CLUSTER_ID);

    // Create the cluster
    String clusterId = emrDaoImpl.createEmrCluster(EMR_CLUSTER_NAME, emrClusterDefinition, awsParamsDto);

    // Verifications
    RunJobFlowRequest runJobFlowRequest = runJobFlowRequestArgumentCaptor.getValue();
    assertEquals(clusterId, EMR_CLUSTER_ID);
    verify(configurationHelper).getProperty(ConfigurationValue.EMR_NSCD_SCRIPT);
    verify(configurationHelper).getProperty(ConfigurationValue.S3_URL_PROTOCOL);
    verify(configurationHelper).getProperty(ConfigurationValue.S3_STAGING_BUCKET_NAME);
    verify(configurationHelper).getProperty(ConfigurationValue.S3_STAGING_RESOURCE_BASE);
    verify(configurationHelper).getProperty(ConfigurationValue.EMR_CONFIGURE_DAEMON);
    verify(awsClientFactory).getEmrClient(awsParamsDto);
    verify(emrOperations).runEmrJobFlow((AmazonElasticMapReduceClient) amazonElasticMapReduce, runJobFlowRequest);

    List<BootstrapActionConfig> bootstrapActionConfigs = runJobFlowRequest.getBootstrapActions();

    // There should be two bootstrap actions: the NSCD script and the EMR daemon configuration
    assertEquals(2, bootstrapActionConfigs.size());

    // Verify NSCD bootstrap action
    assertEquals(ConfigurationValue.EMR_NSCD_SCRIPT.getKey(), bootstrapActionConfigs.get(0).getName());
    assertEquals(String.format("%s%s%s%s%s%s", S3_URL_PROTOCOL, S3_BUCKET_NAME, S3_URL_PATH_DELIMITER, S3_STAGING_RESOURCE_BASE, S3_URL_PATH_DELIMITER,
        EMR_NSCD_SCRIPT), bootstrapActionConfigs.get(0).getScriptBootstrapAction().getPath());

    // Verify EMR configure daemon bootstrap action
    assertEquals(ConfigurationValue.EMR_CONFIGURE_DAEMON.getKey(), bootstrapActionConfigs.get(1).getName());
    assertEquals(EMR_CONFIGURE_DAEMON, bootstrapActionConfigs.get(1).getScriptBootstrapAction().getPath());
    assertEquals(String.format("%s=%s", EMR_CLUSTER_DAEMON_CONFIG_NAME, EMR_CLUSTER_DAEMON_CONFIG_VALUE),
        bootstrapActionConfigs.get(1).getScriptBootstrapAction().getArgs().get(0));
}