com.amazonaws.services.elasticmapreduce.model.RunJobFlowResult Java Examples
The following examples show how to use com.amazonaws.services.elasticmapreduce.model.RunJobFlowResult, the response object that AmazonElasticMapReduce.runJobFlow returns when a new EMR job flow (cluster) is submitted.
You can go to the original project or source file noted above each example.
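As a quick orientation before the full examples, here is a minimal sketch of the pattern they all share: build a RunJobFlowRequest, submit it with AmazonElasticMapReduce.runJobFlow, and read the new cluster's job flow id off the RunJobFlowResult. The client construction and all cluster settings below are placeholder assumptions, not values taken from the examples:

import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduce;
import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClientBuilder;
import com.amazonaws.services.elasticmapreduce.model.JobFlowInstancesConfig;
import com.amazonaws.services.elasticmapreduce.model.RunJobFlowRequest;
import com.amazonaws.services.elasticmapreduce.model.RunJobFlowResult;

public class RunJobFlowExample {
    public static void main(String[] args) {
        // Client picks up credentials and region from the default provider chains
        AmazonElasticMapReduce emr = AmazonElasticMapReduceClientBuilder.defaultClient();

        // Placeholder cluster settings
        RunJobFlowRequest request = new RunJobFlowRequest()
                .withName("example-cluster")
                .withReleaseLabel("emr-5.2.0")
                .withServiceRole("EMR_DefaultRole")
                .withJobFlowRole("EMR_EC2_DefaultRole")
                .withInstances(new JobFlowInstancesConfig()
                        .withInstanceCount(1)
                        .withMasterInstanceType("m3.xlarge")
                        .withKeepJobFlowAliveWhenNoSteps(true));

        // The RunJobFlowResult carries the id of the job flow (cluster) just created
        RunJobFlowResult result = emr.runJobFlow(request);
        System.out.println("Started cluster: " + result.getJobFlowId());
    }
}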
Example #1
Source File: EmrIT.java From digdag with Apache License 2.0
@Test
public void test()
        throws Exception
{
    // Launch a small single-node test cluster that stays alive between steps
    RunJobFlowRequest request = new RunJobFlowRequest()
            .withName("Digdag Test")
            .withReleaseLabel("emr-5.2.0")
            .withApplications(Stream.of("Hadoop", "Hive", "Spark", "Flink")
                    .map(s -> new Application().withName(s))
                    .collect(toList()))
            .withJobFlowRole("EMR_EC2_DefaultRole")
            .withServiceRole("EMR_DefaultRole")
            .withVisibleToAllUsers(true)
            .withLogUri(tmpS3FolderUri + "/logs/")
            .withInstances(new JobFlowInstancesConfig()
                    .withEc2KeyName("digdag-test")
                    .withInstanceCount(1)
                    .withKeepJobFlowAliveWhenNoSteps(true)
                    .withMasterInstanceType("m3.xlarge")
                    .withSlaveInstanceType("m3.xlarge"));

    RunJobFlowResult result = emr.runJobFlow(request);
    String clusterId = result.getJobFlowId();
    clusterIds.add(clusterId);

    // Run the digdag "emr" workflow against the newly created cluster
    Id attemptId = pushAndStart(server.endpoint(), projectDir, "emr", ImmutableMap.of(
            "test_s3_folder", tmpS3FolderUri.toString(),
            "test_cluster", clusterId,
            "outfile", outfile.toString()));
    expect(Duration.ofMinutes(30), attemptSuccess(server.endpoint(), attemptId));

    validateTdSparkQueryOutput();

    assertThat(Files.exists(outfile), is(true));
}
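The test above waits for the workflow with digdag's own helpers. If you only have the job flow id, the SDK's built-in waiters can do the blocking instead. A minimal sketch, assuming the same emr client and clusterId as above and an SDK version that ships EMR waiters:

import com.amazonaws.services.elasticmapreduce.model.DescribeClusterRequest;
import com.amazonaws.waiters.WaiterParameters;

// Block until EMR reports the cluster as running (or waiting for steps)
emr.waiters().clusterRunning().run(
        new WaiterParameters<>(new DescribeClusterRequest().withClusterId(clusterId)));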
Example #2
Source File: EmrClusterJob.java From datacollector with Apache License 2.0
@Override
public String createCluster(String clusterName) {
    RunJobFlowRequest request = new RunJobFlowRequest()
        .withName(clusterName)
        .withReleaseLabel(EmrInfo.getVersion())
        .withServiceRole(emrClusterConfig.getServiceRole())
        .withJobFlowRole(emrClusterConfig.getJobFlowRole())
        .withVisibleToAllUsers(emrClusterConfig.isVisibleToAllUsers())
        .withInstances(new JobFlowInstancesConfig()
            .withEc2SubnetId(emrClusterConfig.getEc2SubnetId())
            .withEmrManagedMasterSecurityGroup(emrClusterConfig.getMasterSecurityGroup())
            .withEmrManagedSlaveSecurityGroup(emrClusterConfig.getSlaveSecurityGroup())
            .withInstanceCount(emrClusterConfig.getInstanceCount())
            .withKeepJobFlowAliveWhenNoSteps(true)
            .withMasterInstanceType(emrClusterConfig.getMasterInstanceType())
            .withSlaveInstanceType(emrClusterConfig.getSlaveInstanceType()));

    // Optionally enable S3 logging and, on top of that, the EMR debugging tool
    if (emrClusterConfig.isLoggingEnabled()) {
        request.withLogUri(emrClusterConfig.getS3LogUri());

        if (emrClusterConfig.isEnableEmrDebugging()) {
            String COMMAND_RUNNER = "command-runner.jar";
            String DEBUGGING_COMMAND = "state-pusher-script";
            String DEBUGGING_NAME = "Setup Hadoop Debugging";
            StepConfig enableDebugging = new StepConfig()
                .withName(DEBUGGING_NAME)
                .withActionOnFailure(ActionOnFailure.CONTINUE)
                .withHadoopJarStep(new HadoopJarStepConfig()
                    .withJar(COMMAND_RUNNER)
                    .withArgs(DEBUGGING_COMMAND));
            request.withSteps(enableDebugging);
        }
    }

    // Submit the job flow and return the new cluster's id
    RunJobFlowResult result = getEmrClient(emrClusterConfig).runJobFlow(request);
    return result.getJobFlowId();
}
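The returned job flow id is typically stored and used later to check on the cluster. A minimal sketch of such a status lookup; emrClient and jobFlowId here are hypothetical stand-ins for the client and id used above:

import com.amazonaws.services.elasticmapreduce.model.DescribeClusterRequest;
import com.amazonaws.services.elasticmapreduce.model.DescribeClusterResult;

// Look up the current state of the cluster that createCluster() started
DescribeClusterResult describeResult = emrClient.describeCluster(
        new DescribeClusterRequest().withClusterId(jobFlowId));
String state = describeResult.getCluster().getStatus().getState(); // e.g. STARTING, RUNNING, WAITING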
Example #3
Source File: TestEmrClusterJob.java From datacollector with Apache License 2.0
@Test
public void testCreateCluster() {
    Properties properties = new Properties();
    properties.setProperty("instanceCount", "1");
    EmrClusterJob emrClusterJob = new EmrClusterJob();
    EmrClusterJob.Client client = Mockito.spy(emrClusterJob.getClient(properties));

    // Mock out the EMR client so no real cluster is launched
    AmazonElasticMapReduce emr = Mockito.mock(AmazonElasticMapReduce.class);
    Mockito.doReturn(Mockito.mock(RunJobFlowResult.class)).when(emr)
        .runJobFlow(Mockito.any(RunJobFlowRequest.class));
    Mockito.doReturn(emr).when(client).getEmrClient(Mockito.any(EmrClusterConfig.class));

    client.createCluster("foo");

    // createCluster() is expected to call runJobFlow exactly once
    Mockito.verify(emr, Mockito.times(1)).runJobFlow(Mockito.any(RunJobFlowRequest.class));
    Mockito.verify(client, Mockito.times(1)).getEmrClient(Mockito.any(EmrClusterConfig.class));
}
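Because RunJobFlowResult is a plain model object, a test that needs to assert on the returned id does not even need a mock; a real instance can be stubbed in. A small sketch with a made-up id:

// A concrete result carrying a fixed, fake job flow id
RunJobFlowResult stubResult = new RunJobFlowResult().withJobFlowId("j-TESTCLUSTER");
Mockito.doReturn(stubResult).when(emr).runJobFlow(Mockito.any(RunJobFlowRequest.class));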
Example #4
Source File: EmrOperatorFactory.java From digdag with Apache License 2.0
private NewCluster submitNewClusterRequest(AmazonElasticMapReduce emr, String tag, StepCompiler stepCompiler,
        Config cluster, Filer filer, ParameterCompiler parameterCompiler)
        throws IOException
{
    RemoteFile runner = prepareRunner(filer, tag);

    // Compile steps
    stepCompiler.compile(runner);

    List<StepConfig> stepConfigs = stepCompiler.stepConfigs();

    Config ec2 = cluster.getNested("ec2");
    Config master = ec2.getNestedOrGetEmpty("master");
    List<Config> core = ec2.getOptional("core", Config.class).transform(ImmutableList::of).or(ImmutableList.of());
    List<Config> task = ec2.getListOrEmpty("task", Config.class);

    List<String> applications = cluster.getListOrEmpty("applications", String.class);
    if (applications.isEmpty()) {
        applications = ImmutableList.of("Hadoop", "Hive", "Spark", "Flink");
    }

    // TODO: allow configuring additional application parameters
    List<Application> applicationConfigs = applications.stream()
            .map(application -> new Application().withName(application))
            .collect(toList());

    // TODO: merge configurations with the same classification?
    List<Configuration> configurations = cluster.getListOrEmpty("configurations", JsonNode.class).stream()
            .map(this::configurations)
            .flatMap(Collection::stream)
            .collect(toList());

    List<JsonNode> bootstrap = cluster.getListOrEmpty("bootstrap", JsonNode.class);
    List<BootstrapActionConfig> bootstrapActions = new ArrayList<>();
    for (int i = 0; i < bootstrap.size(); i++) {
        bootstrapActions.add(bootstrapAction(i + 1, bootstrap.get(i), tag, filer, runner, parameterCompiler));
    }

    // Stage files to S3
    filer.stageFiles();

    Optional<String> subnetId = ec2.getOptional("subnet_id", String.class);

    String defaultMasterInstanceType;
    String defaultCoreInstanceType;
    String defaultTaskInstanceType;

    if (subnetId.isPresent()) {
        // m4 requires VPC (subnet id)
        defaultMasterInstanceType = "m4.2xlarge";
        defaultCoreInstanceType = "m4.xlarge";
        defaultTaskInstanceType = "m4.xlarge";
    }
    else {
        defaultMasterInstanceType = "m3.2xlarge";
        defaultCoreInstanceType = "m3.xlarge";
        defaultTaskInstanceType = "m3.xlarge";
    }

    RunJobFlowRequest request = new RunJobFlowRequest()
            .withName(cluster.get("name", String.class, "Digdag") + " (" + tag + ")")
            .withReleaseLabel(cluster.get("release", String.class, "emr-5.2.0"))
            .withSteps(stepConfigs)
            .withBootstrapActions(bootstrapActions)
            .withApplications(applicationConfigs)
            .withLogUri(cluster.get("logs", String.class, null))
            .withJobFlowRole(cluster.get("cluster_role", String.class, "EMR_EC2_DefaultRole"))
            .withServiceRole(cluster.get("service_role", String.class, "EMR_DefaultRole"))
            .withTags(new Tag().withKey("DIGDAG_CLUSTER_ID").withValue(tag))
            .withVisibleToAllUsers(cluster.get("visible", boolean.class, true))
            .withConfigurations(configurations)
            .withInstances(new JobFlowInstancesConfig()
                    .withInstanceGroups(ImmutableList.<InstanceGroupConfig>builder()
                            // Master Node
                            .add(instanceGroupConfig("Master", master, "MASTER", defaultMasterInstanceType, 1))
                            // Core Group
                            .addAll(instanceGroupConfigs("Core", core, "CORE", defaultCoreInstanceType))
                            // Task Groups
                            .addAll(instanceGroupConfigs("Task %d", task, "TASK", defaultTaskInstanceType))
                            .build())
                    .withAdditionalMasterSecurityGroups(ec2.getListOrEmpty("additional_master_security_groups", String.class))
                    .withAdditionalSlaveSecurityGroups(ec2.getListOrEmpty("additional_slave_security_groups", String.class))
                    .withEmrManagedMasterSecurityGroup(ec2.get("emr_managed_master_security_group", String.class, null))
                    .withEmrManagedSlaveSecurityGroup(ec2.get("emr_managed_slave_security_group", String.class, null))
                    .withServiceAccessSecurityGroup(ec2.get("service_access_security_group", String.class, null))
                    .withTerminationProtected(cluster.get("termination_protected", boolean.class, false))
                    .withPlacement(cluster.getOptional("availability_zone", String.class)
                            .transform(zone -> new PlacementType().withAvailabilityZone(zone)).orNull())
                    .withEc2SubnetId(subnetId.orNull())
                    .withEc2KeyName(ec2.get("key", String.class))
                    .withKeepJobFlowAliveWhenNoSteps(!cluster.get("auto_terminate", boolean.class, true)));

    logger.info("Submitting EMR job with {} step(s)", request.getSteps().size());
    RunJobFlowResult result = emr.runJobFlow(request);
    logger.info("Submitted EMR job with {} step(s): {}", request.getSteps().size(), result.getJobFlowId());

    return NewCluster.of(result.getJobFlowId(), request.getSteps().size());
}
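When auto_terminate is disabled, the cluster keeps running after its steps finish, so the caller eventually has to shut it down. A minimal cleanup sketch, assuming the same emr client and the job flow id carried in the returned NewCluster:

import com.amazonaws.services.elasticmapreduce.model.TerminateJobFlowsRequest;

// Terminate the cluster once all work has finished
emr.terminateJobFlows(new TerminateJobFlowsRequest()
        .withJobFlowIds(result.getJobFlowId()));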
Example #5
Source File: EMRUtils.java From aws-big-data-blog with Apache License 2.0
/**
 * This method uses the AWS SDK for Java to launch an Apache HBase cluster on Amazon EMR.
 *
 * @param client - AmazonElasticMapReduce client that interfaces directly with the Amazon EMR Web Service
 * @param clusterIdentifier - identifier of an existing cluster
 * @param amiVersion - AMI to use for launching this cluster
 * @param keypair - A keypair for SSHing into the Amazon EMR master node
 * @param masterInstanceType - Master node Amazon EC2 instance type
 * @param coreInstanceType - core nodes Amazon EC2 instance type
 * @param logUri - An Amazon S3 bucket for your logs
 * @param numberOfNodes - total number of nodes in this cluster including master node
 * @return the job flow id of the new or existing cluster
 */
public static String createCluster(AmazonElasticMapReduce client,
        String clusterIdentifier,
        String amiVersion,
        String keypair,
        String masterInstanceType,
        String coreInstanceType,
        String logUri,
        int numberOfNodes) {

    if (clusterExists(client, clusterIdentifier)) {
        LOG.info("Cluster " + clusterIdentifier + " is available");
        return clusterIdentifier;
    }

    // Error checking
    if (amiVersion == null || amiVersion.isEmpty()) throw new RuntimeException("ERROR: Please specify an AMI Version");
    if (keypair == null || keypair.isEmpty()) throw new RuntimeException("ERROR: Please specify a valid Amazon Key Pair");
    if (masterInstanceType == null || masterInstanceType.isEmpty()) throw new RuntimeException("ERROR: Please specify a Master Instance Type");
    if (logUri == null || logUri.isEmpty()) throw new RuntimeException("ERROR: Please specify a valid Amazon S3 bucket for your logs.");
    if (numberOfNodes < 1) throw new RuntimeException("ERROR: Please specify at least 1 node");

    RunJobFlowRequest request = new RunJobFlowRequest()
        .withAmiVersion(amiVersion)
        .withBootstrapActions(new BootstrapActionConfig()
            .withName("Install HBase")
            .withScriptBootstrapAction(new ScriptBootstrapActionConfig()
                .withPath("s3://elasticmapreduce/bootstrap-actions/setup-hbase")))
        .withName("Job Flow With HBase Actions")
        .withSteps(
            // Enable debugging step
            new StepConfig()
                .withName("Enable debugging")
                .withActionOnFailure("TERMINATE_CLUSTER")
                .withHadoopJarStep(new StepFactory().newEnableDebuggingStep()),
            // Start HBase step - after installing it with a bootstrap action
            createStepConfig("Start HBase", "TERMINATE_CLUSTER", "/home/hadoop/lib/hbase.jar", getHBaseArgs()),
            // Add HBase backup step
            createStepConfig("Modify backup schedule", "TERMINATE_JOB_FLOW", "/home/hadoop/lib/hbase.jar", getHBaseBackupArgs()))
        .withLogUri(logUri)
        .withInstances(new JobFlowInstancesConfig()
            .withEc2KeyName(keypair)
            .withInstanceCount(numberOfNodes)
            .withKeepJobFlowAliveWhenNoSteps(true)
            .withMasterInstanceType(masterInstanceType)
            .withSlaveInstanceType(coreInstanceType));

    RunJobFlowResult result = client.runJobFlow(request);

    // Poll until the new cluster reaches the WAITING state
    String state = null;
    while (!(state = clusterState(client, result.getJobFlowId())).equalsIgnoreCase("waiting")) {
        try {
            Thread.sleep(10 * 1000);
            LOG.info(result.getJobFlowId() + " is " + state + ". Waiting for cluster to become available.");
        } catch (InterruptedException e) {
        }
        if (state.equalsIgnoreCase("TERMINATED_WITH_ERRORS")) {
            LOG.error("Could not create EMR Cluster");
            System.exit(-1);
        }
    }
    LOG.info("Created cluster " + result.getJobFlowId());
    LOG.info("Cluster " + clusterIdentifier + " is available");
    return result.getJobFlowId();
}
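A hypothetical call site for this helper might look like the following; every argument value is a placeholder, not something prescribed by the original code:

AmazonElasticMapReduce client = AmazonElasticMapReduceClientBuilder.defaultClient();

// Launch (or reuse) a three-node HBase cluster and block until it is available
String jobFlowId = EMRUtils.createCluster(
        client,
        "my-hbase-cluster",          // clusterIdentifier
        "3.1.0",                     // amiVersion
        "my-keypair",                // keypair
        "m1.large",                  // masterInstanceType
        "m1.large",                  // coreInstanceType
        "s3://my-bucket/emr-logs/",  // logUri
        3);                          // numberOfNodes, including the master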