Java Code Examples for com.google.api.services.bigquery.model.TableReference#setProjectId()
The following examples show how to use com.google.api.services.bigquery.model.TableReference#setProjectId().
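Before the project examples, a minimal standalone sketch (an editor's illustration, not taken from any of the projects below) may help orient; the project, dataset, and table IDs are placeholder values. Like the other setters on this generated model class, setProjectId() returns the TableReference itself, so the calls can also be chained.

import com.google.api.services.bigquery.model.TableReference;

public class TableReferenceExample {
  public static void main(String[] args) {
    // Placeholder IDs; substitute a real project, dataset, and table.
    TableReference tableRef = new TableReference();
    tableRef.setProjectId("my-project");
    tableRef.setDatasetId("my_dataset");
    tableRef.setTableId("my_table");

    // The setters are fluent, so the same reference can be built in one chain.
    TableReference chained =
        new TableReference()
            .setProjectId("my-project")
            .setDatasetId("my_dataset")
            .setTableId("my_table");

    System.out.println(tableRef.equals(chained)); // true
  }
}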
Example 1
Source File: BigQueryStorageTableSource.java From beam with Apache License 2.0
@Override
protected Table getTargetTable(BigQueryOptions options) throws Exception {
  if (cachedTable.get() == null) {
    TableReference tableReference = tableReferenceProvider.get();
    if (Strings.isNullOrEmpty(tableReference.getProjectId())) {
      checkState(
          !Strings.isNullOrEmpty(options.getProject()),
          "No project ID set in %s or %s, cannot construct a complete %s",
          TableReference.class.getSimpleName(),
          BigQueryOptions.class.getSimpleName(),
          TableReference.class.getSimpleName());
      LOG.info(
          "Project ID not set in {}. Using default project from {}.",
          TableReference.class.getSimpleName(),
          BigQueryOptions.class.getSimpleName());
      tableReference.setProjectId(options.getProject());
    }
    Table table = bqServices.getDatasetService(options).getTable(tableReference);
    cachedTable.compareAndSet(null, table);
  }
  return cachedTable.get();
}
Example 2
Source File: BigQueryTableSourceDef.java From beam with Apache License 2.0
/**
 * Sets the {@link TableReference#projectId} of the provided table reference to the id of the
 * default project if the table reference does not have a project ID specified.
 */
private TableReference setDefaultProjectIfAbsent(
    BigQueryOptions bqOptions, TableReference tableReference) {
  if (Strings.isNullOrEmpty(tableReference.getProjectId())) {
    checkState(
        !Strings.isNullOrEmpty(bqOptions.getProject()),
        "No project ID set in %s or %s, cannot construct a complete %s",
        TableReference.class.getSimpleName(),
        BigQueryOptions.class.getSimpleName(),
        TableReference.class.getSimpleName());
    LOG.info(
        "Project ID not set in {}. Using default project from {}.",
        TableReference.class.getSimpleName(),
        BigQueryOptions.class.getSimpleName());
    tableReference.setProjectId(bqOptions.getProject());
  }
  return tableReference;
}
Example 3
Source File: BigQueryIO.java From beam with Apache License 2.0
/**
 * Returns the table to write, or {@code null} if writing with {@code tableFunction}.
 *
 * <p>If the table's project is not specified, use the executing project.
 */
@Nullable
ValueProvider<TableReference> getTableWithDefaultProject(BigQueryOptions bqOptions) {
  ValueProvider<TableReference> table = getTable();
  if (table == null) {
    return table;
  }

  if (!table.isAccessible()) {
    LOG.info(
        "Using a dynamic value for table input. This must contain a project"
            + " in the table reference: {}",
        table);
    return table;
  }
  if (Strings.isNullOrEmpty(table.get().getProjectId())) {
    // If user does not specify a project we assume the table to be located in
    // the default project.
    TableReference tableRef = table.get();
    tableRef.setProjectId(bqOptions.getProject());
    return NestedValueProvider.of(
        StaticValueProvider.of(BigQueryHelpers.toJsonString(tableRef)),
        new JsonTableRefToTableRef());
  }
  return table;
}
Example 4
Source File: PartitionedTableRef.java From dataflow-opinion-analysis with Apache License 2.0
/**
 * input - a tuple that contains the data element (TableRow), the window, the timestamp, and the pane
 */
@Override
public TableDestination apply(ValueInSingleWindow<TableRow> input) {
  String partition;

  if (this.isTimeField) {
    String sTime = (String) input.getValue().get(this.fieldName);
    Instant time = Instant.parse(sTime);
    partition = time.toString(partitionFormatter);
  } else {
    partition = ((Integer) input.getValue().get(this.fieldName)).toString();
  }

  TableReference reference = new TableReference();
  reference.setProjectId(this.projectId);
  reference.setDatasetId(this.datasetId);
  reference.setTableId(this.partitionPrefix + partition);
  return new TableDestination(reference, null);
}
Example 5
Source File: TrafficRoutes.java From beam with Apache License 2.0
public static void runTrafficRoutes(TrafficRoutesOptions options) throws IOException {
  // Using ExampleUtils to set up required resources.
  ExampleUtils exampleUtils = new ExampleUtils(options);
  exampleUtils.setup();

  Pipeline pipeline = Pipeline.create(options);
  TableReference tableRef = new TableReference();
  tableRef.setProjectId(options.getProject());
  tableRef.setDatasetId(options.getBigQueryDataset());
  tableRef.setTableId(options.getBigQueryTable());

  pipeline
      .apply("ReadLines", new ReadFileAndExtractTimestamps(options.getInputFile()))
      // row... => <station route, station speed> ...
      .apply(ParDo.of(new ExtractStationSpeedFn()))
      // map the incoming data stream into sliding windows.
      .apply(
          Window.into(
              SlidingWindows.of(Duration.standardMinutes(options.getWindowDuration()))
                  .every(Duration.standardMinutes(options.getWindowSlideEvery()))))
      .apply(new TrackSpeed())
      .apply(BigQueryIO.writeTableRows().to(tableRef).withSchema(FormatStatsFn.getSchema()));

  // Run the pipeline.
  PipelineResult result = pipeline.run();

  // ExampleUtils will try to cancel the pipeline and the injector before the program exits.
  exampleUtils.waitToFinish(result);
}
Example 6
Source File: BigQueryOutputRuntime.java From components with Apache License 2.0
@Override
public PDone expand(PCollection<IndexedRecord> in) {
  TableReference table = new TableReference();
  table.setProjectId(datastore.projectName.getValue());
  table.setDatasetId(dataset.bqDataset.getValue());
  table.setTableId(dataset.tableName.getValue());

  BigQueryIO.Write bigQueryIOPTransform = BigQueryIO.writeTableRows().to(table);

  bigQueryIOPTransform = setTableOperation(bigQueryIOPTransform);
  bigQueryIOPTransform = setWriteOperation(bigQueryIOPTransform);

  in.apply(ParDo.of(new IndexedRecordToTableRowFn())).apply(bigQueryIOPTransform);
  return PDone.in(in.getPipeline());
}
Example 7
Source File: BigQueryStringsTest.java From hadoop-connectors with Apache License 2.0
@Test
public void testTableReferenceToStringWithNoProjectId() {
  TableReference tableRef = new TableReference()
      .setDatasetId("foo")
      .setTableId("bar");
  assertThat(BigQueryStrings.toString(tableRef)).isEqualTo("foo.bar");

  // Empty string doesn't cause a leading ':'.
  tableRef.setProjectId("");
  assertThat(BigQueryStrings.toString(tableRef)).isEqualTo("foo.bar");
}
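The test above covers only the missing-project case. A hypothetical companion test (an editor's sketch, not part of BigQueryStringsTest.java) would assert the fully qualified form, assuming BigQueryStrings renders a non-empty project ID in the standard [projectId]:[datasetId].[tableId] table-spec format:

@Test
public void testTableReferenceToStringWithProjectId() {
  // Assumed behavior: a non-empty project ID is rendered with a ':' separator.
  TableReference tableRef = new TableReference()
      .setProjectId("proj")
      .setDatasetId("foo")
      .setTableId("bar");
  assertThat(BigQueryStrings.toString(tableRef)).isEqualTo("proj:foo.bar");
}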
Example 8
Source File: TriggerExample.java From beam with Apache License 2.0
/** Constructs the table reference. */
private static TableReference getTableReference(String project, String dataset, String table) {
  TableReference tableRef = new TableReference();
  tableRef.setProjectId(project);
  tableRef.setDatasetId(dataset);
  tableRef.setTableId(table);
  return tableRef;
}
Example 9
Source File: TrafficMaxLaneFlow.java From beam with Apache License 2.0
public static void runTrafficMaxLaneFlow(TrafficMaxLaneFlowOptions options) throws IOException {
  // Using ExampleUtils to set up required resources.
  ExampleUtils exampleUtils = new ExampleUtils(options);
  exampleUtils.setup();

  Pipeline pipeline = Pipeline.create(options);
  TableReference tableRef = new TableReference();
  tableRef.setProjectId(options.getProject());
  tableRef.setDatasetId(options.getBigQueryDataset());
  tableRef.setTableId(options.getBigQueryTable());

  pipeline
      .apply("ReadLines", new ReadFileAndExtractTimestamps(options.getInputFile()))
      // row... => <station route, station speed> ...
      .apply(ParDo.of(new ExtractFlowInfoFn()))
      // map the incoming data stream into sliding windows.
      .apply(
          Window.into(
              SlidingWindows.of(Duration.standardMinutes(options.getWindowDuration()))
                  .every(Duration.standardMinutes(options.getWindowSlideEvery()))))
      .apply(new MaxLaneFlow())
      .apply(BigQueryIO.writeTableRows().to(tableRef).withSchema(FormatMaxesFn.getSchema()));

  // Run the pipeline.
  PipelineResult result = pipeline.run();

  // ExampleUtils will try to cancel the pipeline and the injector before the program exits.
  exampleUtils.waitToFinish(result);
}
Example 10
Source File: WriteToBigQuery.java From beam with Apache License 2.0
/** Utility to construct an output table reference. */
static TableReference getTable(String projectId, String datasetId, String tableName) {
  TableReference table = new TableReference();
  table.setDatasetId(datasetId);
  table.setProjectId(projectId);
  table.setTableId(tableName);
  return table;
}
Example 11
Source File: OpinionAnalysisPipeline.java From dataflow-opinion-analysis with Apache License 2.0
private static TableReference getSentimentTableReference(IndexerPipelineOptions options) {
  TableReference tableRef = new TableReference();
  tableRef.setProjectId(options.getProject());
  tableRef.setDatasetId(options.getBigQueryDataset());
  tableRef.setTableId(IndexerPipelineUtils.SENTIMENT_TABLE);
  return tableRef;
}
Example 12
Source File: OpinionAnalysisPipeline.java From dataflow-opinion-analysis with Apache License 2.0
private static TableReference getDocumentTableReference(IndexerPipelineOptions options) {
  TableReference tableRef = new TableReference();
  tableRef.setProjectId(options.getProject());
  tableRef.setDatasetId(options.getBigQueryDataset());
  tableRef.setTableId(IndexerPipelineUtils.DOCUMENT_TABLE);
  return tableRef;
}
Example 13
Source File: OpinionAnalysisPipeline.java From dataflow-opinion-analysis with Apache License 2.0
private static TableReference getWebResourceTableReference(IndexerPipelineOptions options) {
  TableReference tableRef = new TableReference();
  tableRef.setProjectId(options.getProject());
  tableRef.setDatasetId(options.getBigQueryDataset());
  tableRef.setTableId(IndexerPipelineUtils.WEBRESOURCE_TABLE);
  return tableRef;
}
Example 14
Source File: WriteToBigQuery.java From deployment-examples with MIT License
/** Utility to construct an output table reference. */
static TableReference getTable(String projectId, String datasetId, String tableName) {
  TableReference table = new TableReference();
  table.setDatasetId(datasetId);
  table.setProjectId(projectId);
  table.setTableId(tableName);
  return table;
}
Example 15
Source File: BeamBQInputTransform.java From hop with Apache License 2.0
@Override
public PCollection<HopRow> expand( PBegin input ) {
  try {
    // Only initialize once on this node/vm
    //
    BeamHop.init(transformPluginClasses, xpPluginClasses);

    // Function to convert from Avro to Hop rows
    //
    BQSchemaAndRecordToHopFn toHopFn =
        new BQSchemaAndRecordToHopFn( transformName, rowMetaJson, transformPluginClasses, xpPluginClasses );

    TableReference tableReference = new TableReference();
    if (StringUtils.isNotEmpty( projectId )) {
      tableReference.setProjectId( projectId );
    }
    tableReference.setDatasetId( datasetId );
    tableReference.setTableId( tableId );

    BigQueryIO.TypedRead<HopRow> bqTypedRead;

    if (StringUtils.isEmpty( query )) {
      bqTypedRead = BigQueryIO
          .read( toHopFn )
          .from( tableReference );
    } else {
      bqTypedRead = BigQueryIO
          .read( toHopFn )
          .fromQuery( query );
    }

    // Apply the function
    //
    PCollection<HopRow> output = input.apply( bqTypedRead );

    return output;
  } catch ( Exception e ) {
    numErrors.inc();
    LOG.error( "Error in beam input transform", e );
    throw new RuntimeException( "Error in beam input transform", e );
  }
}
Example 16
Source File: GsonBigQueryInputFormatTest.java From hadoop-connectors with Apache License 2.0
/**
 * Creates an in-memory GHFS.
 *
 * @throws IOException on IOError.
 */
@Before
public void setUp() throws IOException {
  MockitoAnnotations.initMocks(this);
  LoggerConfig.getConfig(GsonBigQueryInputFormat.class).setLevel(Level.FINE);

  // Set the Hadoop job configuration.
  config = new JobConf(InMemoryGoogleHadoopFileSystem.getSampleConfiguration());
  config.set(BigQueryConfiguration.PROJECT_ID.getKey(), jobProjectId);
  config.set(BigQueryConfiguration.INPUT_PROJECT_ID.getKey(), dataProjectId);
  config.set(BigQueryConfiguration.INPUT_DATASET_ID.getKey(), intermediateDataset);
  config.set(BigQueryConfiguration.INPUT_TABLE_ID.getKey(), intermediateTable);
  config.set(BigQueryConfiguration.TEMP_GCS_PATH.getKey(), "gs://test_bucket/other_path");
  config.setClass(
      INPUT_FORMAT_CLASS.getKey(),
      GsonBigQueryInputFormat.class,
      AbstractBigQueryInputFormat.class);
  config.setBoolean(BigQueryConfiguration.DELETE_EXPORT_FILES_FROM_GCS.getKey(), true);

  CredentialConfigurationUtil.addTestConfigurationSettings(config);

  // Create a GoogleHadoopFileSystem to use to initialize and write to
  // the in-memory GcsFs.
  ghfs = new InMemoryGoogleHadoopFileSystem();

  JobReference fakeJobReference =
      new JobReference()
          .setProjectId(jobProjectId)
          .setJobId("bigquery-job-1234")
          .setLocation("test-job-location");

  // Create the job result.
  jobStatus = new JobStatus();
  jobStatus.setState("DONE");
  jobStatus.setErrorResult(null);

  jobHandle = new Job();
  jobHandle.setStatus(jobStatus);
  jobHandle.setJobReference(fakeJobReference);

  // Create table reference.
  tableRef = new TableReference();
  tableRef.setProjectId(dataProjectId);
  tableRef.setDatasetId("test_dataset");
  tableRef.setTableId("test_table");

  table = new Table().setTableReference(tableRef).setLocation("test_location");

  when(mockBigQueryHelper.getRawBigquery()).thenReturn(mockBigquery);

  // Mocks for Bigquery jobs.
  when(mockBigquery.jobs()).thenReturn(mockBigqueryJobs);

  // Mock getting Bigquery job.
  when(mockBigqueryJobs.get(any(String.class), any(String.class)))
      .thenReturn(mockBigqueryJobsGet);
  when(mockBigqueryJobsGet.setLocation(any(String.class))).thenReturn(mockBigqueryJobsGet);
  when(mockBigqueryJobsGet.execute()).thenReturn(jobHandle);

  // Mock inserting Bigquery job.
  when(mockBigqueryJobs.insert(any(String.class), any(Job.class)))
      .thenReturn(mockBigqueryJobsInsert);
  when(mockBigqueryJobsInsert.execute()).thenReturn(jobHandle);

  // Mocks for Bigquery tables.
  when(mockBigquery.tables()).thenReturn(mockBigqueryTables);

  // Mocks for getting Bigquery table.
  when(mockBigqueryTables.get(any(String.class), any(String.class), any(String.class)))
      .thenReturn(mockBigqueryTablesGet);
  when(mockBigqueryTablesGet.execute()).thenReturn(table);

  when(mockBigQueryHelper.getTable(any(TableReference.class))).thenReturn(table);

  when(mockBigQueryHelper.createJobReference(
          any(String.class), any(String.class), any(String.class)))
      .thenReturn(fakeJobReference);
  when(mockBigQueryHelper.insertJobOrFetchDuplicate(any(String.class), any(Job.class)))
      .thenReturn(jobHandle);
}
Example 17
Source File: WriteTables.java From beam with Apache License 2.0
@ProcessElement
public void processElement(ProcessContext c, BoundedWindow window) throws Exception {
  dynamicDestinations.setSideInputAccessorFromProcessContext(c);
  DestinationT destination = c.element().getKey().getKey();
  TableSchema tableSchema;
  if (firstPaneCreateDisposition == CreateDisposition.CREATE_NEVER) {
    tableSchema = null;
  } else if (jsonSchemas.containsKey(destination)) {
    tableSchema =
        BigQueryHelpers.fromJsonString(jsonSchemas.get(destination), TableSchema.class);
  } else {
    tableSchema = dynamicDestinations.getSchema(destination);
    checkArgument(
        tableSchema != null,
        "Unless create disposition is %s, a schema must be specified, i.e. "
            + "DynamicDestinations.getSchema() may not return null. "
            + "However, create disposition is %s, and %s returned null for destination %s",
        CreateDisposition.CREATE_NEVER,
        firstPaneCreateDisposition,
        dynamicDestinations,
        destination);
    jsonSchemas.put(destination, BigQueryHelpers.toJsonString(tableSchema));
  }

  TableDestination tableDestination = dynamicDestinations.getTable(destination);
  checkArgument(
      tableDestination != null,
      "DynamicDestinations.getTable() may not return null, "
          + "but %s returned null for destination %s",
      dynamicDestinations,
      destination);
  boolean destinationCoderSupportsClustering =
      !(dynamicDestinations.getDestinationCoder() instanceof TableDestinationCoderV2);
  checkArgument(
      tableDestination.getClustering() == null || destinationCoderSupportsClustering,
      "DynamicDestinations.getTable() may only return destinations with clustering configured"
          + " if a destination coder is supplied that supports clustering, but %s is configured"
          + " to use TableDestinationCoderV2. Set withClustering() on BigQueryIO.write() and, "
          + " if you provided a custom DynamicDestinations instance, override"
          + " getDestinationCoder() to return TableDestinationCoderV3.",
      dynamicDestinations);
  TableReference tableReference = tableDestination.getTableReference();
  if (Strings.isNullOrEmpty(tableReference.getProjectId())) {
    tableReference.setProjectId(c.getPipelineOptions().as(BigQueryOptions.class).getProject());
    tableDestination = tableDestination.withTableReference(tableReference);
  }

  Integer partition = c.element().getKey().getShardNumber();
  List<String> partitionFiles = Lists.newArrayList(c.element().getValue());
  String jobIdPrefix =
      BigQueryHelpers.createJobId(
          c.sideInput(loadJobIdPrefixView), tableDestination, partition, c.pane().getIndex());

  if (tempTable) {
    // This is a temp table. Create a new one for each partition and each pane.
    tableReference.setTableId(jobIdPrefix);
  }

  WriteDisposition writeDisposition = firstPaneWriteDisposition;
  CreateDisposition createDisposition = firstPaneCreateDisposition;
  if (c.pane().getIndex() > 0 && !tempTable) {
    // If writing directly to the destination, then the table is created on the first write
    // and we should change the disposition for subsequent writes.
    writeDisposition = WriteDisposition.WRITE_APPEND;
    createDisposition = CreateDisposition.CREATE_NEVER;
  } else if (tempTable) {
    // In this case, we are writing to a temp table and always need to create it.
    // WRITE_TRUNCATE is set so that we properly handle retries of this pane.
    writeDisposition = WriteDisposition.WRITE_TRUNCATE;
    createDisposition = CreateDisposition.CREATE_IF_NEEDED;
  }

  BigQueryHelpers.PendingJob retryJob =
      startLoad(
          bqServices.getJobService(c.getPipelineOptions().as(BigQueryOptions.class)),
          bqServices.getDatasetService(c.getPipelineOptions().as(BigQueryOptions.class)),
          jobIdPrefix,
          tableReference,
          tableDestination.getTimePartitioning(),
          tableDestination.getClustering(),
          tableSchema,
          partitionFiles,
          writeDisposition,
          createDisposition,
          schemaUpdateOptions);
  pendingJobs.add(
      new PendingJobData(window, retryJob, partitionFiles, tableDestination, tableReference));
}
Example 18
Source File: BeamBQInputTransform.java From kettle-beam with Apache License 2.0
@Override
public PCollection<KettleRow> expand( PBegin input ) {
  try {
    // Only initialize once on this node/vm
    //
    BeamKettle.init(stepPluginClasses, xpPluginClasses);

    // Function to convert from Avro to Kettle rows
    //
    BQSchemaAndRecordToKettleFn toKettleFn =
        new BQSchemaAndRecordToKettleFn( stepname, rowMetaJson, stepPluginClasses, xpPluginClasses );

    TableReference tableReference = new TableReference();
    if (StringUtils.isNotEmpty( projectId )) {
      tableReference.setProjectId( projectId );
    }
    tableReference.setDatasetId( datasetId );
    tableReference.setTableId( tableId );

    BigQueryIO.TypedRead<KettleRow> bqTypedRead;

    if (StringUtils.isEmpty( query )) {
      bqTypedRead = BigQueryIO
          .read( toKettleFn )
          .from( tableReference );
    } else {
      bqTypedRead = BigQueryIO
          .read( toKettleFn )
          .fromQuery( query );
    }

    // Apply the function
    //
    PCollection<KettleRow> output = input.apply( bqTypedRead );

    return output;
  } catch ( Exception e ) {
    numErrors.inc();
    LOG.error( "Error in beam input transform", e );
    throw new RuntimeException( "Error in beam input transform", e );
  }
}
Example 19
Source File: CreateTables.java From beam with Apache License 2.0
private TableDestination getTableDestination(ProcessContext context, DestinationT destination) {
  TableDestination tableDestination = dynamicDestinations.getTable(destination);
  checkArgument(
      tableDestination != null,
      "DynamicDestinations.getTable() may not return null, "
          + "but %s returned null for destination %s",
      dynamicDestinations,
      destination);
  checkArgument(
      tableDestination.getTableSpec() != null,
      "DynamicDestinations.getTable() must return a TableDestination "
          + "with a non-null table spec, but %s returned %s for destination %s,"
          + "which has a null table spec",
      dynamicDestinations,
      tableDestination,
      destination);
  boolean destinationCoderSupportsClustering =
      !(dynamicDestinations.getDestinationCoder() instanceof TableDestinationCoderV2);
  checkArgument(
      tableDestination.getClustering() == null || destinationCoderSupportsClustering,
      "DynamicDestinations.getTable() may only return destinations with clustering configured"
          + " if a destination coder is supplied that supports clustering, but %s is configured"
          + " to use TableDestinationCoderV2. Set withClustering() on BigQueryIO.write() and, "
          + " if you provided a custom DynamicDestinations instance, override"
          + " getDestinationCoder() to return TableDestinationCoderV3.",
      dynamicDestinations);
  TableReference tableReference = tableDestination.getTableReference().clone();
  if (Strings.isNullOrEmpty(tableReference.getProjectId())) {
    tableReference.setProjectId(
        context.getPipelineOptions().as(BigQueryOptions.class).getProject());
    tableDestination = tableDestination.withTableReference(tableReference);
  }
  if (createDisposition == CreateDisposition.CREATE_NEVER) {
    return tableDestination;
  }

  String tableSpec = BigQueryHelpers.stripPartitionDecorator(tableDestination.getTableSpec());
  if (!createdTables.contains(tableSpec)) {
    // Another thread may have succeeded in creating the table in the meanwhile, so
    // check again. This check isn't needed for correctness, but we add it to prevent
    // every thread from attempting a create and overwhelming our BigQuery quota.
    synchronized (createdTables) {
      if (!createdTables.contains(tableSpec)) {
        tryCreateTable(context, destination, tableDestination, tableSpec, kmsKey);
      }
    }
  }
  return tableDestination;
}
Example 20
Source File: BeamBQOutputTransform.java From hop with Apache License 2.0
@Override
public PDone expand( PCollection<HopRow> input ) {
  try {
    // Only initialize once on this node/vm
    //
    BeamHop.init( transformPluginClasses, xpPluginClasses );

    // Inflate the metadata on the node where this is running...
    //
    IRowMeta rowMeta = JsonRowMeta.fromJson( rowMetaJson );

    // Which table do we write to?
    //
    TableReference tableReference = new TableReference();
    if ( StringUtils.isNotEmpty( projectId ) ) {
      tableReference.setProjectId( projectId );
    }
    tableReference.setDatasetId( datasetId );
    tableReference.setTableId( tableId );

    TableSchema tableSchema = new TableSchema();
    List<TableFieldSchema> schemaFields = new ArrayList<>();
    for ( IValueMeta valueMeta : rowMeta.getValueMetaList() ) {
      TableFieldSchema schemaField = new TableFieldSchema();
      schemaField.setName( valueMeta.getName() );
      switch ( valueMeta.getType() ) {
        case IValueMeta.TYPE_STRING:
          schemaField.setType( "STRING" );
          break;
        case IValueMeta.TYPE_INTEGER:
          schemaField.setType( "INTEGER" );
          break;
        case IValueMeta.TYPE_DATE:
          schemaField.setType( "DATETIME" );
          break;
        case IValueMeta.TYPE_BOOLEAN:
          schemaField.setType( "BOOLEAN" );
          break;
        case IValueMeta.TYPE_NUMBER:
          schemaField.setType( "FLOAT" );
          break;
        default:
          throw new RuntimeException( "Conversion from Hop value " + valueMeta.toString()
              + " to BigQuery TableRow isn't supported yet" );
      }
      schemaFields.add( schemaField );
    }
    tableSchema.setFields( schemaFields );

    SerializableFunction<HopRow, TableRow> formatFunction =
        new HopToBQTableRowFn( transformName, rowMetaJson, transformPluginClasses, xpPluginClasses );

    BigQueryIO.Write.CreateDisposition createDisposition;
    if ( createIfNeeded ) {
      createDisposition = BigQueryIO.Write.CreateDisposition.CREATE_IF_NEEDED;
    } else {
      createDisposition = BigQueryIO.Write.CreateDisposition.CREATE_NEVER;
    }

    BigQueryIO.Write.WriteDisposition writeDisposition;
    if ( truncateTable ) {
      writeDisposition = BigQueryIO.Write.WriteDisposition.WRITE_TRUNCATE;
    } else {
      if ( failIfNotEmpty ) {
        writeDisposition = BigQueryIO.Write.WriteDisposition.WRITE_EMPTY;
      } else {
        writeDisposition = BigQueryIO.Write.WriteDisposition.WRITE_APPEND;
      }
    }

    BigQueryIO.Write<HopRow> bigQueryWrite = BigQueryIO
        .<HopRow>write()
        .to( tableReference )
        .withSchema( tableSchema )
        .withCreateDisposition( createDisposition )
        .withWriteDisposition( writeDisposition )
        .withFormatFunction( formatFunction );

    // TODO: pass the results along the way at some point
    //
    input.apply( transformName, bigQueryWrite );

    // End of the line
    //
    return PDone.in( input.getPipeline() );
  } catch ( Exception e ) {
    numErrors.inc();
    LOG.error( "Error in Beam BigQuery output transform", e );
    throw new RuntimeException( "Error in Beam BigQuery output transform", e );
  }
}