com.google.api.services.bigquery.model.GetQueryResultsResponse Java Examples
The following examples show how to use
com.google.api.services.bigquery.model.GetQueryResultsResponse.
You can vote up the examples you like or vote down the ones you don't like,
and you can go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example #1
Source File: BigQueryInterpreter.java From zeppelin with Apache License 2.0 | 6 votes |
/**
 * Renders a BigQuery query response as tab-separated text: one header line with the
 * schema field names, followed by one line per result row.
 *
 * @param response a completed query result; must contain a schema and rows
 * @return the tab/newline-delimited table as a single string
 * @throws NullPointerException if the response lacks a schema, rows, or cell values
 *     (e.g. the SQL execution returned an error); the original NPE is preserved as the cause
 */
public static String printRows(final GetQueryResultsResponse response) {
  StringBuilder msg = new StringBuilder();
  try {
    // Header line: the schema's field names.
    List<String> schemNames = new ArrayList<String>();
    for (TableFieldSchema schem : response.getSchema().getFields()) {
      schemNames.add(schem.getName());
    }
    msg.append(Joiner.on(TAB).join(schemNames));
    msg.append(NEWLINE);
    // One line per row, cells joined by tabs.
    for (TableRow row : response.getRows()) {
      List<String> fieldValues = new ArrayList<String>();
      for (TableCell field : row.getF()) {
        fieldValues.add(field.getV().toString());
      }
      msg.append(Joiner.on(TAB).join(fieldValues));
      msg.append(NEWLINE);
    }
    return msg.toString();
  } catch (NullPointerException ex) {
    // Keep the thrown type for caller compatibility, but attach the original
    // exception so the real failure point is not lost from the stack trace.
    NullPointerException wrapped = new NullPointerException("SQL Execution returned an error!");
    wrapped.initCause(ex);
    throw wrapped;
  }
}
Example #2
Source File: BigQueryInterpreter.java From zeppelin with Apache License 2.0 | 6 votes |
/**
 * Submits the given SQL to BigQuery and returns an iterator over the paged results.
 *
 * <p>Side effects: stores the created job's id and project id into the {@code jobId}
 * and {@code projectId} fields for later use (e.g. cancellation).
 *
 * @param queryString the SQL text to execute
 * @param projId the Google Cloud project to bill/run the query under
 * @param wTime maximum time in milliseconds to wait for the query to complete
 * @param maxRows maximum number of rows to return per page
 * @param useLegacySql whether to use BigQuery legacy SQL instead of standard SQL
 * @return an iterator over the result pages
 * @throws IOException if the BigQuery API call fails
 */
public static Iterator<GetQueryResultsResponse> run(final String queryString,
    final String projId, final long wTime, final long maxRows, Boolean useLegacySql)
    throws IOException {
  // NOTE: the original wrapped this in a try/catch that only rethrew the
  // IOException unchanged; that no-op handler has been removed.
  logger.info("Use legacy sql: {}", useLegacySql);
  QueryResponse query =
      service
          .jobs()
          .query(
              projId,
              new QueryRequest()
                  .setTimeoutMs(wTime)
                  .setUseLegacySql(useLegacySql)
                  .setQuery(queryString)
                  .setMaxResults(maxRows))
          .execute();
  jobId = query.getJobReference().getJobId();
  projectId = query.getJobReference().getProjectId();
  GetQueryResults getRequest = service.jobs().getQueryResults(projectId, jobId);
  return getPages(getRequest);
}
Example #3
Source File: BigqueryConnection.java From nomulus with Apache License 2.0 | 5 votes |
/**
 * Returns the query results for the given job as an ImmutableTable, row-keyed by row number
 * (indexed from 1), column-keyed by the TableFieldSchema for that field, and with the value
 * object as the cell value. Note that null values will not actually be null (since we're using
 * ImmutableTable) but they can be checked for using Data.isNull().
 *
 * <p>This table is fully materialized in memory (not lazily loaded), so it should not be used
 * with queries expected to return large results.
 */
private ImmutableTable<Integer, TableFieldSchema, Object> getQueryResults(Job job) {
  try {
    ImmutableTable.Builder<Integer, TableFieldSchema, Object> builder =
        new ImmutableTable.Builder<>();
    // pageToken == null means "first page" on the initial request; after a page has been
    // consumed it holds the token for the next page, and becomes null again on the last page.
    String pageToken = null;
    int rowNumber = 1; // row keys are 1-indexed, per the class contract above
    while (true) {
      GetQueryResultsResponse queryResults =
          bigquery
              .jobs()
              .getQueryResults(getProjectId(), job.getJobReference().getJobId())
              .setPageToken(pageToken)
              .execute();
      // If the job isn't complete yet, retry; getQueryResults() waits for up to 10 seconds on
      // each invocation so this will effectively poll for completion.
      if (queryResults.getJobComplete()) {
        // Pair each cell with its schema field positionally; rows are assumed to have at most
        // as many cells as the schema has fields (extras on either side are ignored).
        List<TableFieldSchema> schemaFields = queryResults.getSchema().getFields();
        for (TableRow row : queryResults.getRows()) {
          Iterator<TableFieldSchema> fieldIterator = schemaFields.iterator();
          Iterator<TableCell> cellIterator = row.getF().iterator();
          while (fieldIterator.hasNext() && cellIterator.hasNext()) {
            builder.put(rowNumber, fieldIterator.next(), cellIterator.next().getV());
          }
          rowNumber++;
        }
        // Advance to the next page; a null token signals this was the last page.
        pageToken = queryResults.getPageToken();
        if (pageToken == null) {
          break;
        }
      }
    }
    return builder.build();
  } catch (IOException e) {
    // Translate the transport-level failure into the project's job-failure exception.
    throw BigqueryJobFailureException.create(e);
  }
}
Example #4
Source File: BigqueryClient.java From beam with Apache License 2.0 | 4 votes |
/**
 * Performs a query without flattening results, staging them in a randomly named
 * temporary dataset/table that is deleted before returning.
 *
 * @param query the SQL text to execute
 * @param projectId the project to create the temporary dataset in and run the job under
 * @param typed if true, rows are converted via {@code getTypedTableRow} using the result
 *     schema; if false, the raw {@code TableRow}s are returned
 * @return the query result rows
 * @throws IOException if any BigQuery API call fails
 * @throws InterruptedException if interrupted while polling for job completion
 */
@Nonnull
public List<TableRow> queryUnflattened(String query, String projectId, boolean typed)
    throws IOException, InterruptedException {
  Random rnd = new Random(System.currentTimeMillis());
  String temporaryDatasetId = "_dataflow_temporary_dataset_" + rnd.nextInt(1000000);
  String temporaryTableId = "dataflow_temporary_table_" + rnd.nextInt(1000000);
  TableReference tempTableReference =
      new TableReference()
          .setProjectId(projectId)
          .setDatasetId(temporaryDatasetId)
          .setTableId(temporaryTableId);

  createNewDataset(projectId, temporaryDatasetId);
  // From here on the temporary dataset exists; ensure it is cleaned up even if the
  // query job fails (the original leaked it on any exception below).
  try {
    createNewTable(
        projectId, temporaryDatasetId, new Table().setTableReference(tempTableReference));

    JobConfigurationQuery jcQuery =
        new JobConfigurationQuery()
            .setFlattenResults(false)
            .setAllowLargeResults(true)
            .setDestinationTable(tempTableReference)
            .setQuery(query);
    JobConfiguration jc = new JobConfiguration().setQuery(jcQuery);
    Job job = new Job().setConfiguration(jc);
    Job insertedJob = bqClient.jobs().insert(projectId, job).execute();

    // Poll until the job completes; getQueryResults() itself blocks server-side for a
    // while per call, so this loop does not hammer the API.
    GetQueryResultsResponse qResponse;
    do {
      qResponse =
          bqClient
              .jobs()
              .getQueryResults(projectId, insertedJob.getJobReference().getJobId())
              .execute();
    } while (!qResponse.getJobComplete());

    final TableSchema schema = qResponse.getSchema();
    final List<TableRow> rows = qResponse.getRows();
    return !typed
        ? rows
        : rows.stream()
            .map(r -> getTypedTableRow(schema.getFields(), r))
            .collect(Collectors.toList());
  } finally {
    deleteDataset(projectId, temporaryDatasetId);
  }
}