Java Code Examples for org.apache.beam.runners.dataflow.options.DataflowPipelineOptions#setMaxNumWorkers()

The following examples show how to use org.apache.beam.runners.dataflow.options.DataflowPipelineOptions#setMaxNumWorkers(). You can go to the original project or source file by following the links above each example.
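For orientation, here is a minimal sketch of the call in context; the project ID and Cloud Storage path below are placeholders, not values taken from the examples.

DataflowPipelineOptions options = PipelineOptionsFactory.create().as(DataflowPipelineOptions.class);
options.setRunner(DataflowRunner.class);
options.setProject("my-project");               // placeholder project ID
options.setTempLocation("gs://my-bucket/temp"); // placeholder temp/staging path
options.setMaxNumWorkers(10);                   // upper bound on autoscaled workers
Pipeline pipeline = Pipeline.create(options);
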
Example 1
Source File: DataflowPipelineTranslatorTest.java    From beam with Apache License 2.0
@Test
public void testMaxNumWorkersIsPassedWhenNoAlgorithmIsSet() throws IOException {
  final DataflowPipelineWorkerPoolOptions.AutoscalingAlgorithmType noScaling = null;
  DataflowPipelineOptions options = buildPipelineOptions();
  options.setMaxNumWorkers(42);
  options.setAutoscalingAlgorithm(noScaling);

  Pipeline p = buildPipeline(options);
  p.traverseTopologically(new RecordingPipelineVisitor());
  SdkComponents sdkComponents = createSdkComponents(options);
  RunnerApi.Pipeline pipelineProto = PipelineTranslation.toProto(p, sdkComponents, true);
  Job job =
      DataflowPipelineTranslator.fromOptions(options)
          .translate(
              p,
              pipelineProto,
              sdkComponents,
              DataflowRunner.fromOptions(options),
              Collections.emptyList())
          .getJob();

  assertEquals(1, job.getEnvironment().getWorkerPools().size());
  assertNull(
      job.getEnvironment().getWorkerPools().get(0).getAutoscalingSettings().getAlgorithm());
  assertEquals(
      42,
      job.getEnvironment()
          .getWorkerPools()
          .get(0)
          .getAutoscalingSettings()
          .getMaxNumWorkers()
          .intValue());
}
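The test above confirms that the value passed to setMaxNumWorkers() (42 here) is carried through job translation into the worker pool's autoscaling settings even when no autoscaling algorithm is configured.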
 
Example 2
Source File: PubSubToBQPipeline.java    From pubsub-to-bigquery with Apache License 2.0
public static void main(String[] args) throws GeneralSecurityException, IOException, ParseException, ParserConfigurationException, SAXException {
	String params = null;
	for (int i = 0; i < args.length; i++) {
		if (args[i].startsWith("--params=")) {
			params = args[i].replaceFirst("--params=", "");
		}
	}

	System.out.println(params);
	init(params);

	GoogleCredentials credentials = ServiceAccountCredentials.fromStream(new FileInputStream(keyFile))
	        .createScoped(Arrays.asList("https://www.googleapis.com/auth/cloud-platform"));

	DataflowPipelineOptions options = PipelineOptionsFactory.create().as(DataflowPipelineOptions.class);
	
	options.setRunner(DataflowRunner.class);
	// Your project ID is required in order to run your pipeline on the Google Cloud.
	options.setProject(projectId);
	// Your Google Cloud Storage path is required for staging local files.
	options.setStagingLocation(workingBucket);
	options.setTempLocation(workingBucket + "/temp");
	options.setGcpCredential(credentials);
	options.setServiceAccount(accountEmail);
	options.setMaxNumWorkers(maxNumWorkers);
	options.setDiskSizeGb(diskSizeGb);
	options.setWorkerMachineType(machineType);
	options.setAutoscalingAlgorithm(AutoscalingAlgorithmType.THROUGHPUT_BASED);
	options.setZone(zone);
	options.setStreaming(isStreaming);
	options.setJobName(pipelineName);
	Pipeline pipeline = Pipeline.create(options);
	
	Gson gson = new Gson();
	TableSchema schema = gson.fromJson(schemaStr, TableSchema.class);
	
	PCollection<String> streamData = null;
	if (pubSubTopicSub != null && !StringUtils.isEmpty(pubSubTopicSub)) {
		streamData = pipeline.apply("ReadPubSub", PubsubIO.readStrings()
				.fromSubscription(String.format("projects/%1$s/subscriptions/%2$s", projectId, pubSubTopicSub)));
	} else if (pubSubTopic != null && !StringUtils.isEmpty(pubSubTopic)) {
		streamData = pipeline.apply("ReadPubSub", PubsubIO.readStrings()
				.fromTopic(String.format("projects/%1$s/topics/%2$s", projectId, pubSubTopic)));
	}
	
	// Note: streamData is still null here if neither pubSubTopicSub nor pubSubTopic was set;
	// the pipeline assumes exactly one of them is configured.
	PCollection<TableRow> tableRow = streamData.apply("ToTableRow", ParDo.of(new PrepData.ToTableRow(owTimestamp, debugMode)));

	tableRow.apply("WriteToBQ",
			BigQueryIO.writeTableRows()
			.to(String.format("%1$s.%2$s", bqDataSet, bqTable))
			.withSchema(schema)
			.withWriteDisposition(BigQueryIO.Write.WriteDisposition.WRITE_APPEND));

	System.out.println("Starting pipeline " + pipelineName);
	pipeline.run();
}
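
The maxNumWorkers value (along with the other settings above) can also be supplied as command-line flags rather than hard-coded; a minimal sketch, assuming a placeholder project ID:

DataflowPipelineOptions options = PipelineOptionsFactory.fromArgs(args)
		.withValidation()
		.as(DataflowPipelineOptions.class);
// Invoked with, for example:
//   --runner=DataflowRunner --project=my-project --maxNumWorkers=10 \
//   --autoscalingAlgorithm=THROUGHPUT_BASED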