Python google.cloud.bigquery.ScalarQueryParameter() Examples

The following are 16 code examples of google.cloud.bigquery.ScalarQueryParameter(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module google.cloud.bigquery, or try the search function.
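
Before the individual examples, here is a minimal sketch of the pattern nearly all of them share: build one ScalarQueryParameter per placeholder (name, BigQuery type, Python value), attach the list to a QueryJobConfig, and pass that config to Client.query(). The query and parameter names below are illustrative, not taken from any one example.

from google.cloud import bigquery

client = bigquery.Client()

# One ScalarQueryParameter per @placeholder: (name, BigQuery type, value).
job_config = bigquery.QueryJobConfig(
    query_parameters=[
        bigquery.ScalarQueryParameter("min_count", "INT64", 10),
    ]
)
query = (
    "SELECT word FROM `bigquery-public-data.samples.shakespeare` "
    "WHERE word_count >= @min_count"
)
for row in client.query(query, job_config=job_config):
    print(row.word)
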
Example #1
Source File: client_query_w_struct_params.py From python-bigquery with Apache License 2.0 | 6 votes |
def client_query_w_struct_params():

    # [START bigquery_query_params_structs]
    from google.cloud import bigquery

    # Construct a BigQuery client object.
    client = bigquery.Client()

    query = "SELECT @struct_value AS s;"
    job_config = bigquery.QueryJobConfig(
        query_parameters=[
            bigquery.StructQueryParameter(
                "struct_value",
                bigquery.ScalarQueryParameter("x", "INT64", 1),
                bigquery.ScalarQueryParameter("y", "STRING", "foo"),
            )
        ]
    )
    query_job = client.query(query, job_config=job_config)  # Make an API request.

    for row in query_job:
        print(row.s)
    # [END bigquery_query_params_structs]

Example #2
Source File: samples_test.py From python-docs-samples with Apache License 2.0 | 6 votes |
def test_client_library_query_with_parameters():
    # [START bigquery_migration_client_library_query_parameters]
    from google.cloud import bigquery

    client = bigquery.Client()
    sql = """
        SELECT name
        FROM `bigquery-public-data.usa_names.usa_1910_current`
        WHERE state = @state
        LIMIT @limit
    """
    query_config = bigquery.QueryJobConfig(
        query_parameters=[
            bigquery.ScalarQueryParameter('state', 'STRING', 'TX'),
            bigquery.ScalarQueryParameter('limit', 'INTEGER', 100)
        ]
    )

    df = client.query(sql, job_config=query_config).to_dataframe()
    # [END bigquery_migration_client_library_query_parameters]
    assert len(df) > 0

Example #3
Source File: client_query_w_array_params.py From python-bigquery with Apache License 2.0 | 5 votes |

def client_query_w_array_params():

    # [START bigquery_query_params_arrays]
    from google.cloud import bigquery

    # Construct a BigQuery client object.
    client = bigquery.Client()

    query = """
        SELECT name, sum(number) as count
        FROM `bigquery-public-data.usa_names.usa_1910_2013`
        WHERE gender = @gender
        AND state IN UNNEST(@states)
        GROUP BY name
        ORDER BY count DESC
        LIMIT 10;
    """
    job_config = bigquery.QueryJobConfig(
        query_parameters=[
            bigquery.ScalarQueryParameter("gender", "STRING", "M"),
            bigquery.ArrayQueryParameter("states", "STRING", ["WA", "WI", "WV", "WY"]),
        ]
    )
    query_job = client.query(query, job_config=job_config)  # Make an API request.

    for row in query_job:
        print("{}: \t{}".format(row.name, row.count))
    # [END bigquery_query_params_arrays]

Example #4
Source File: STARRTreatmentTeamConversion.py From CDSS with GNU General Public License v3.0 | 5 votes |

def querySourceItems(self, convOptions):
    """Query the database for list of all source clinical items (medications, etc.)
    and yield the results one at a time. If startDate provided, only return items
    whose occurrence date is on or after that date.
    """
    # TODO need to figure out how to pass date to query in BQ using SQLQuery object
    query = "SELECT {} FROM {}".format(', '.join(self.HEADERS), SOURCE_TABLE)
    if convOptions.startDate is not None:
        query += ' WHERE trtmnt_tm_begin_dt_jittered >= @startDate '
    if convOptions.endDate is not None:
        query += ' WHERE ' if convOptions.startDate is None else 'AND'
        query += ' trtmnt_tm_begin_dt_jittered < @endDate'
    query += ' ORDER BY trtmnt_tm_begin_dt_jittered'
    query += ';'

    query_params = [
        bigquery.ScalarQueryParameter(
            'startDate',
            'TIMESTAMP',
            convOptions.startDate,
        ),
        bigquery.ScalarQueryParameter(
            'endDate',
            'TIMESTAMP',
            convOptions.endDate,
        )
    ]

    query_job = self.bqClient.queryBQ(str(query), query_params=query_params,
                                      location='US', batch_mode=False, verbose=True)

    for row in query_job:  # API request - fetches results
        rowModel = RowItemModel(list(row.values()), self.HEADERS)
        log.debug("rowModel: {}".format(rowModel))
        # Yield one row worth of data at a time to avoid having to keep
        # the whole result set in memory.
        yield self.normalizeRowModel(rowModel, convOptions)

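One behavior worth knowing when reading the example above: convOptions.startDate and convOptions.endDate may be None, and a ScalarQueryParameter built with a None value is sent to BigQuery as a NULL of the declared type. A small sketch (the printed shape is approximate):

from google.cloud import bigquery

# A None value produces a typed NULL parameter on the BigQuery side.
param = bigquery.ScalarQueryParameter("endDate", "TIMESTAMP", None)
print(param.to_api_repr())
# Roughly: {'parameterType': {'type': 'TIMESTAMP'},
#           'parameterValue': {'value': None}, 'name': 'endDate'}
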
Example #5
Source File: bqclient.py From bigquery_fdw with MIT License | 5 votes |

def setParameter(self, type_, value):
    """
    Prepare a parameter for a parameterized query.

    As documented by Google, only standard SQL syntax supports
    parameters in queries.

    See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/bigquery/cloud-client/query_params.py
    """
    return bigquery.ScalarQueryParameter(
        # Set the name to None to use positional parameters (? symbol
        # in the query). Note that you cannot mix named and positional
        # parameters.
        None,
        type_,
        self.varToString(value))

Example #6
Source File: client_query_w_named_params.py From python-bigquery with Apache License 2.0 | 5 votes |

def client_query_w_named_params():

    # [START bigquery_query_params_named]
    from google.cloud import bigquery

    # Construct a BigQuery client object.
    client = bigquery.Client()

    query = """
        SELECT word, word_count
        FROM `bigquery-public-data.samples.shakespeare`
        WHERE corpus = @corpus
        AND word_count >= @min_word_count
        ORDER BY word_count DESC;
    """
    job_config = bigquery.QueryJobConfig(
        query_parameters=[
            bigquery.ScalarQueryParameter("corpus", "STRING", "romeoandjuliet"),
            bigquery.ScalarQueryParameter("min_word_count", "INT64", 250),
        ]
    )
    query_job = client.query(query, job_config=job_config)  # Make an API request.

    for row in query_job:
        print("{}: \t{}".format(row.word, row.word_count))
    # [END bigquery_query_params_named]

Example #7
Source File: client_query_w_positional_params.py From python-bigquery with Apache License 2.0 | 5 votes |

def client_query_w_positional_params():

    # [START bigquery_query_params_positional]
    from google.cloud import bigquery

    # Construct a BigQuery client object.
    client = bigquery.Client()

    query = """
        SELECT word, word_count
        FROM `bigquery-public-data.samples.shakespeare`
        WHERE corpus = ?
        AND word_count >= ?
        ORDER BY word_count DESC;
    """
    # Set the name to None to use positional parameters.
    # Note that you cannot mix named and positional parameters.
    job_config = bigquery.QueryJobConfig(
        query_parameters=[
            bigquery.ScalarQueryParameter(None, "STRING", "romeoandjuliet"),
            bigquery.ScalarQueryParameter(None, "INT64", 250),
        ]
    )
    query_job = client.query(query, job_config=job_config)  # Make an API request.

    for row in query_job:
        print("{}: \t{}".format(row.word, row.word_count))
    # [END bigquery_query_params_positional]

Example #8
Source File: client_query_w_timestamp_params.py From python-bigquery with Apache License 2.0 | 5 votes |

def client_query_w_timestamp_params():

    # [START bigquery_query_params_timestamps]
    import datetime

    import pytz
    from google.cloud import bigquery

    # Construct a BigQuery client object.
    client = bigquery.Client()

    query = "SELECT TIMESTAMP_ADD(@ts_value, INTERVAL 1 HOUR);"
    job_config = bigquery.QueryJobConfig(
        query_parameters=[
            bigquery.ScalarQueryParameter(
                "ts_value",
                "TIMESTAMP",
                datetime.datetime(2016, 12, 7, 8, 0, tzinfo=pytz.UTC),
            )
        ]
    )
    query_job = client.query(query, job_config=job_config)  # Make an API request.

    for row in query_job:
        print(row)
    # [END bigquery_query_params_timestamps]

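The sample above pulls in pytz, but any timezone-aware datetime is accepted; on Python 3 the standard library's datetime.timezone.utc gives the same TIMESTAMP parameter without the extra dependency. A minimal sketch:

import datetime

from google.cloud import bigquery

# Equivalent TIMESTAMP parameter using only the standard library.
ts_param = bigquery.ScalarQueryParameter(
    "ts_value",
    "TIMESTAMP",
    datetime.datetime(2016, 12, 7, 8, 0, tzinfo=datetime.timezone.utc),
)
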
Example #9
Source File: _helpers.py From python-bigquery with Apache License 2.0 | 5 votes |

def scalar_to_query_parameter(value, name=None):
    """Convert a scalar value into a query parameter.

    Args:
        value (Any): A scalar value to convert into a query parameter.
        name (str): (Optional) Name of the query parameter.

    Returns:
        google.cloud.bigquery.ScalarQueryParameter:
            A query parameter corresponding with the type and value of the
            plain Python object.

    Raises:
        google.cloud.bigquery.dbapi.exceptions.ProgrammingError:
            if the type cannot be determined.
    """
    parameter_type = bigquery_scalar_type(value)

    if parameter_type is None:
        raise exceptions.ProgrammingError(
            "encountered parameter {} with value {} of unexpected type".format(
                name, value
            )
        )
    return bigquery.ScalarQueryParameter(name, parameter_type, value)

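This helper backs the library's DB-API layer: when a query runs through google.cloud.bigquery.dbapi with pyformat placeholders, each plain Python value is turned into a ScalarQueryParameter by scalar_to_query_parameter, with the type inferred from the value. A minimal sketch of that path (the query and value are illustrative):

from google.cloud import bigquery
from google.cloud.bigquery import dbapi

# The cursor infers BigQuery types from the Python values and builds
# ScalarQueryParameter objects behind the scenes.
conn = dbapi.connect(bigquery.Client())
cursor = conn.cursor()
cursor.execute(
    "SELECT name FROM `bigquery-public-data.usa_names.usa_1910_current` "
    "WHERE state = %(state)s LIMIT 10",
    {"state": "TX"},
)
print(cursor.fetchall())
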
Example #10
Source File: client.py From ibis with Apache License 2.0 | 5 votes |

def bq_param_timestamp(param, value):
    assert isinstance(param.type(), dt.Timestamp), str(param.type())

    # TODO(phillipc): Not sure if this is the correct way to do this.
    timestamp_value = pd.Timestamp(value, tz='UTC').to_pydatetime()
    return bq.ScalarQueryParameter(
        param.get_name(), 'TIMESTAMP', timestamp_value
    )

Example #11
Source File: client.py From ibis with Apache License 2.0 | 5 votes |

def bq_param_boolean(param, value):
    return bq.ScalarQueryParameter(param.get_name(), 'BOOL', value)

Example #12
Source File: client.py From ibis with Apache License 2.0 | 5 votes |

def bq_param_double(param, value):
    return bq.ScalarQueryParameter(param.get_name(), 'FLOAT64', value)

Example #13
Source File: client.py From ibis with Apache License 2.0 | 5 votes |

def bq_param_integer(param, value):
    return bq.ScalarQueryParameter(param.get_name(), 'INT64', value)

Example #14
Source File: client.py From ibis with Apache License 2.0 | 5 votes |

def bq_param_string(param, value):
    return bq.ScalarQueryParameter(param.get_name(), 'STRING', value)

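Examples #11 through #14 are per-type converters registered with ibis's dispatch machinery, each mapping one ibis datatype to a BigQuery type name. Outside ibis, the same idea can be sketched with a plain dict from Python types to BigQuery type names; the mapping below is an illustrative subset, not ibis's actual table:

import datetime

from google.cloud import bigquery

# Illustrative Python-type -> BigQuery-type mapping, mirroring the
# converters above. Keying on type() keeps bool from matching int.
_BQ_TYPES = {
    bool: 'BOOL',
    int: 'INT64',
    float: 'FLOAT64',
    str: 'STRING',
    datetime.datetime: 'TIMESTAMP',
}

def to_scalar_param(name, value):
    """Build a ScalarQueryParameter from a plain Python value."""
    return bigquery.ScalarQueryParameter(name, _BQ_TYPES[type(value)], value)

print(to_scalar_param('flag', True).to_api_repr())
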
Example #15
Source File: data_prep.py From professional-services with Apache License 2.0 | 4 votes |

def create_table(
        from_date,
        to_date,
        table_name,
        query_file,
        dataset,
        price_scaling,
        client):
    """Creates training, validation, and test tables.

    Specifies parameters to be passed to the SQL query, specifies name for
    the new table being created, generates a dynamic query and executes the
    query.

    Args:
        from_date: Initial date for table's data.
        to_date: Final date for table's data.
        table_name: Name for table.
        query_file: Path to file containing the SQL query.
        dataset: `BigQuery` `Dataset` in which to save the table.
        price_scaling: Float used to scale (multiply with) the labels (price)
            for scaling purposes. Given the initialization schemes and
            normalized inputs, the expected values for the outputs will be
            close to 0. This means that by scaling the labels you will not
            be too far off from the start, which helps convergence. If a
            target is too big, the mean squared error will be huge, which
            means your gradients will also be huge and could lead to
            numerical instability.
        client: `google.cloud.bigquery.client.Client` instance.
    """
    query_params = [
        bigquery.ScalarQueryParameter('from_date', 'STRING', from_date),
        bigquery.ScalarQueryParameter('to_date', 'STRING', to_date),
        bigquery.ScalarQueryParameter('price_scaling', 'FLOAT64', price_scaling)]
    table_ref = client.dataset(dataset).table(table_name)
    job_config = bigquery.QueryJobConfig()
    job_config.query_parameters = query_params
    job_config.destination = table_ref
    with open(query_file, 'r') as myfile:
        inner_query = myfile.read()
    run_query(client, scalar_extraction_query(inner_query), job_config)

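run_query and scalar_extraction_query are helpers defined elsewhere in that project; the destination-table mechanics themselves are plain google-cloud-bigquery. A standalone sketch of a parameterized query written to a destination table (the dataset and table names are placeholders):

from google.cloud import bigquery

client = bigquery.Client()
table_ref = client.dataset('my_dataset').table('my_table')  # placeholder names

job_config = bigquery.QueryJobConfig()
job_config.query_parameters = [
    bigquery.ScalarQueryParameter('from_date', 'STRING', '2016-01-01'),
]
job_config.destination = table_ref

# result() blocks until the job finishes and the destination table is written.
client.query('SELECT @from_date AS from_date', job_config=job_config).result()
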
Example #16
Source File: data_prep.py From professional-services with Apache License 2.0 | 4 votes |

def generate_mean_std(client, parameters):
    """Computes mean and standard deviation.

    Runs BigQuery query to compute mean and standard deviation of all
    weather features and saves the results to storage.

    Args:
        client: `google.cloud.bigquery.client.Client` instance.
        parameters: Parameters passed to script.
    """
    query_params = [
        bigquery.ScalarQueryParameter(
            'train_from_date', 'STRING', parameters.train_from_date),
        bigquery.ScalarQueryParameter(
            'train_to_date', 'STRING', parameters.train_to_date)]
    job_config = bigquery.QueryJobConfig()
    job_config.query_parameters = query_params
    with open(parameters.weather_mean_std_file, 'r') as myfile:
        query = myfile.read()
    results = run_query(client, query, job_config)
    for row in results:
        mean = np.array(row.mean, dtype=np.float32)
        std = np.array(row.std, dtype=np.float32)
    with file_io.FileIO(parameters.mean_path, mode='wb+') as f:
        pickle.dump(mean, f, protocol=2)
    with file_io.FileIO(parameters.std_path, mode='wb+') as f:
        pickle.dump(std, f, protocol=2)