com.google.api.client.googleapis.auth.oauth2.GoogleCredential Scala Examples
The following examples show how to use com.google.api.client.googleapis.auth.oauth2.GoogleCredential.
Example 1
Source File: DefaultSource.scala From spark-google-analytics with Apache License 2.0
package com.crealytics.google.analytics

import java.io.File
import java.util

import com.google.api.client.googleapis.auth.oauth2.GoogleCredential
import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport
import com.google.api.client.json.gson.GsonFactory
import com.google.api.services.analytics.{Analytics, AnalyticsScopes}
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.sources._
import com.google.api.client.auth.oauth2.Credential

class DefaultSource extends RelationProvider {
  val jsonFactory = GsonFactory.getDefaultInstance
  val httpTransport = GoogleNetHttpTransport.newTrustedTransport()

  override def createRelation(
      sqlContext: SQLContext,
      parameters: Map[String, String]): AnalyticsRelation = {
    // Installed-app OAuth flow: client id/secret plus a previously obtained refresh token.
    val credentialsFromSecretAndToken = for {
      clientId <- parameters.get("clientId")
      clientSecret <- parameters.get("clientSecret")
      refreshToken <- parameters.get("refreshToken")
    } yield {
      val credentials = new GoogleCredential.Builder()
        .setTransport(httpTransport)
        .setJsonFactory(jsonFactory)
        .setClientSecrets(clientId, clientSecret)
        .build()
      credentials.setRefreshToken(refreshToken)
      credentials
    }

    // Service-account flow: P12 key file plus the service account id.
    val credentialsFromKeyFile = for {
      keyFileLocation <- parameters.get("keyFileLocation")
      serviceAccountId <- parameters.get("serviceAccountId")
    } yield {
      new GoogleCredential.Builder()
        .setTransport(httpTransport)
        .setJsonFactory(jsonFactory)
        .setServiceAccountId(serviceAccountId)
        .setServiceAccountPrivateKeyFromP12File(new File(keyFileLocation))
        .setServiceAccountScopes(util.Arrays.asList(AnalyticsScopes.ANALYTICS_READONLY))
        .build()
    }

    val credentials = credentialsFromSecretAndToken
      .orElse(credentialsFromKeyFile)
      .getOrElse(
        throw new Exception("Please provide valid credentials information. See README file for more info."))

    val analytics = new Analytics.Builder(httpTransport, jsonFactory, credentials)
      .setApplicationName("spark-google-analytics")
      .build()

    val calculatedMetrics = parameters.getOrElse("calculatedMetrics", "").split(",").map(_.trim)
    val queryIndividualDays: Boolean = parameters.getOrElse("queryIndividualDays", "false") == "true"

    AnalyticsRelation(
      analytics,
      parameters("ids"),
      parameters("startDate"),
      parameters("endDate"),
      calculatedMetrics,
      queryIndividualDays
    )(sqlContext)
  }
}
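A minimal usage sketch for this data source. The option names (clientId, clientSecret, refreshToken, ids, startDate, endDate) come from the code above; the option values, the application name, and the assumption that Spark resolves the provider under its package name are placeholders rather than documented usage.

import org.apache.spark.sql.SparkSession

object AnalyticsReadExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("analytics-example").getOrCreate()

    // Spark looks up <package>.DefaultSource for the given format name;
    // all option values below are placeholders.
    val df = spark.read
      .format("com.crealytics.google.analytics")
      .option("clientId", "<client-id>")
      .option("clientSecret", "<client-secret>")
      .option("refreshToken", "<refresh-token>")
      .option("ids", "ga:12345678")
      .option("startDate", "2020-01-01")
      .option("endDate", "2020-01-31")
      .load()

    df.show()
  }
}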
Example 2
Source File: GoogleDriveClient.scala From seahorse with Apache License 2.0
package ai.deepsense.deeplang.doperations.readwritedataframe.googlestorage

import java.io.{ByteArrayInputStream, FileOutputStream}
import java.util

import com.google.api.client.googleapis.auth.oauth2.GoogleCredential
import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport
import com.google.api.client.http.FileContent
import com.google.api.client.json.gson.GsonFactory
import com.google.api.services.drive.model.File
import com.google.api.services.drive.{Drive, DriveScopes}

import ai.deepsense.commons.resources.ManagedResource
import ai.deepsense.commons.utils.LoggerForCallerClass
import ai.deepsense.deeplang.doperations.inout.CsvParameters.ColumnSeparatorChoice

private[googlestorage] object GoogleDriveClient {

  val logger = LoggerForCallerClass()

  val googleSheetCsvSeparator = ColumnSeparatorChoice.Comma()

  private val ApplicationName = "Seahorse"

  private val Scopes = util.Arrays.asList(DriveScopes.DRIVE)

  def uploadCsvFileAsGoogleSheet(
      credentials: GoogleCretendialsJson,
      sheetId: GoogleSheetId,
      filePath: String
  ): Unit = {
    val fileMetadata = new File().setMimeType("application/vnd.google-apps.spreadsheet")
    val mediaContent = new FileContent("text/csv", new java.io.File(filePath))

    driveService(credentials).files.update(sheetId, fileMetadata, mediaContent).execute
  }

  def downloadGoogleSheetAsCsvFile(
      credentials: GoogleCretendialsJson,
      sheetId: GoogleSheetId,
      filePath: String
  ): Unit = {
    val file = new java.io.File(filePath)
    file.getParentFile.mkdirs()

    ManagedResource(new FileOutputStream(file)) { fos =>
      driveService(credentials).files().export(sheetId, "text/csv").executeMediaAndDownloadTo(fos)
      logger.info(s"Downloaded google sheet id=$sheetId to the file $filePath")
    }
  }

  private def driveService(serviceAccountCredentials: String): Drive = {
    val credential = {
      val in = new ByteArrayInputStream(serviceAccountCredentials.getBytes)
      GoogleCredential.fromStream(in).createScoped(Scopes)
    }
    new Drive.Builder(
      GoogleNetHttpTransport.newTrustedTransport(),
      jsonFactory,
      credential
    ).setApplicationName(ApplicationName).build
  }

  // Default choice is JacksonFactory. However spark depends on Jackson as well
  // and google/spark jackson versions are binary incompatible with each other.
  private val jsonFactory = GsonFactory.getDefaultInstance
}
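A rough sketch of calling the two public entry points. The object is package-private, so the caller has to live in the same package; the key-file path, sheet id, and CSV paths are placeholders, and the GoogleCretendialsJson / GoogleSheetId aliases are assumed to be String-based.

package ai.deepsense.deeplang.doperations.readwritedataframe.googlestorage

object GoogleSheetRoundTrip {
  def main(args: Array[String]): Unit = {
    // Placeholders: point these at a real service-account key file and sheet id.
    val serviceAccountJson = scala.io.Source.fromFile("/path/to/service-account.json").mkString
    val sheetId = "<google-sheet-id>"

    // Replace the sheet's content with a local CSV file...
    GoogleDriveClient.uploadCsvFileAsGoogleSheet(serviceAccountJson, sheetId, "/tmp/input.csv")

    // ...then export the sheet back to CSV.
    GoogleDriveClient.downloadGoogleSheetAsCsvFile(serviceAccountJson, sheetId, "/tmp/output.csv")
  }
}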
Example 3
Source File: BigQuerySampler.scala From ratatool with Apache License 2.0
package com.spotify.ratatool.samplers

import com.google.api.client.googleapis.auth.oauth2.GoogleCredential
import com.google.api.client.googleapis.util.Utils
import com.google.api.services.bigquery.model.{TableReference, TableRow, TableSchema}
import com.google.api.services.bigquery.{Bigquery, BigqueryScopes}
import org.apache.beam.sdk.io.gcp.bigquery.{BigQueryHelpers, PatchedBigQueryTableRowIterator}
import org.slf4j.{Logger, LoggerFactory}

import scala.jdk.CollectionConverters._
import scala.collection.mutable.ListBuffer

class BigQuerySampler(tableRef: TableReference, protected val seed: Option[Long] = None)
  extends Sampler[TableRow] {

  private val logger: Logger = LoggerFactory.getLogger(classOf[BigQuerySampler])

  private val bigquery: Bigquery = {
    val scopes = List(BigqueryScopes.BIGQUERY).asJava
    val credential = GoogleCredential.getApplicationDefault.createScoped(scopes)
    new Bigquery.Builder(Utils.getDefaultTransport, Utils.getDefaultJsonFactory, credential)
      .setApplicationName("sampler")
      .build()
  }

  private lazy val table = bigquery
    .tables()
    .get(tableRef.getProjectId, tableRef.getDatasetId, tableRef.getTableId)
    .execute()

  override def sample(n: Long, head: Boolean): Seq[TableRow] = {
    require(n > 0, "n must be > 0")
    require(head, "BigQuery can only be used with --head")
    logger.info("Taking a sample of {} from BigQuery table {}",
      n, BigQueryHelpers.toTableSpec(tableRef))

    val numRows = BigInt(table.getNumRows)
    val iterator = PatchedBigQueryTableRowIterator.fromTable(tableRef, bigquery)
    iterator.open()

    val result = ListBuffer.empty[TableRow]
    while (result.length < (numRows min n) && iterator.advance()) {
      result.append(iterator.getCurrent)
    }
    result.toList
  }

  def schema: TableSchema = table.getSchema
}
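Because the sampler authenticates with GoogleCredential.getApplicationDefault, it picks up Application Default Credentials (for example via GOOGLE_APPLICATION_CREDENTIALS). A minimal sketch of sampling a few rows; the project, dataset, and table ids are placeholders.

import com.google.api.services.bigquery.model.TableReference
import com.spotify.ratatool.samplers.BigQuerySampler

object SampleBigQueryTable {
  def main(args: Array[String]): Unit = {
    // Placeholders: point the reference at a table readable with your default credentials.
    val tableRef = new TableReference()
      .setProjectId("my-project")
      .setDatasetId("my_dataset")
      .setTableId("my_table")

    val sampler = new BigQuerySampler(tableRef)

    // The sampler only supports head sampling, so head must be true.
    val rows = sampler.sample(n = 100L, head = true)
    rows.take(5).foreach(println)
  }
}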
Example 4
Source File: BigQueryIO.scala From ratatool with Apache License 2.0
package com.spotify.ratatool.io

import java.util

import com.google.api.client.googleapis.auth.oauth2.GoogleCredential
import com.google.api.client.googleapis.util.Utils
import com.google.api.services.bigquery.model.{Table, TableReference, TableRow, TableSchema}
import com.google.api.services.bigquery.{Bigquery, BigqueryScopes}
import org.apache.beam.sdk.io.gcp.bigquery.{BigQueryOptions, PatchedBigQueryServicesImpl, InsertRetryPolicy, PatchedBigQueryTableRowIterator}
import org.apache.beam.sdk.options.PipelineOptionsFactory
import org.apache.beam.sdk.transforms.windowing.{GlobalWindow, PaneInfo}
import org.apache.beam.sdk.values.ValueInSingleWindow
import org.joda.time.Instant

import scala.jdk.CollectionConverters._

import com.spotify.scio.bigquery.client.BigQuery

object BigQueryIO {

  // Earlier members of this object (including parseTableSpec and the other
  // writeToTable overload that this method delegates to) are omitted in this excerpt.

  def writeToTable(data: Seq[TableRow], schema: TableSchema, tableSpec: String): Unit =
    writeToTable(data, schema, parseTableSpec(tableSpec))
}

private class TableRowIterator(private val iter: PatchedBigQueryTableRowIterator)
  extends Iterator[TableRow] {

  private var _isOpen = false
  private var _hasNext = false

  private def init(): Unit = if (!_isOpen) {
    iter.open()
    _isOpen = true
    _hasNext = iter.advance()
  }

  override def hasNext: Boolean = {
    init()
    _hasNext
  }

  override def next(): TableRow = {
    init()
    if (_hasNext) {
      val r = iter.getCurrent
      _hasNext = iter.advance()
      r
    } else {
      throw new NoSuchElementException
    }
  }
}
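A hedged sketch of what calling writeToTable might look like, building a schema and a few rows with the BigQuery model classes. The table spec, field names, and types are placeholders, and the "project:dataset.table" string is assumed to be parsed by the elided parseTableSpec helper.

import com.google.api.services.bigquery.model.{TableFieldSchema, TableRow, TableSchema}
import com.spotify.ratatool.io.BigQueryIO
import scala.jdk.CollectionConverters._

object WriteRowsExample {
  def main(args: Array[String]): Unit = {
    // A two-column schema; field names and types are placeholders.
    val schema = new TableSchema().setFields(List(
      new TableFieldSchema().setName("name").setType("STRING"),
      new TableFieldSchema().setName("score").setType("INTEGER")
    ).asJava)

    val rows = Seq(
      new TableRow().set("name", "alice").set("score", 10),
      new TableRow().set("name", "bob").set("score", 7)
    )

    // Placeholder table spec in "project:dataset.table" form.
    BigQueryIO.writeToTable(rows, schema, "my-project:my_dataset.my_table")
  }
}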
Example 5
Source File: GoogleDriveClient.scala From seahorse-workflow-executor with Apache License 2.0
package io.deepsense.deeplang.doperations.readwritedataframe.googlestorage

import java.io.{ByteArrayInputStream, FileOutputStream}
import java.util

import com.google.api.client.googleapis.auth.oauth2.GoogleCredential
import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport
import com.google.api.client.http.FileContent
import com.google.api.client.json.gson.GsonFactory
import com.google.api.services.drive.model.File
import com.google.api.services.drive.{Drive, DriveScopes}

import io.deepsense.commons.resources.ManagedResource
import io.deepsense.commons.utils.LoggerForCallerClass
import io.deepsense.deeplang.doperations.inout.CsvParameters.ColumnSeparatorChoice

private[googlestorage] object GoogleDriveClient {

  val logger = LoggerForCallerClass()

  val googleSheetCsvSeparator = ColumnSeparatorChoice.Comma()

  private val ApplicationName = "Seahorse"

  private val Scopes = util.Arrays.asList(DriveScopes.DRIVE)

  def uploadCsvFileAsGoogleSheet(
      credentials: GoogleCretendialsJson,
      sheetId: GoogleSheetId,
      filePath: String
  ): Unit = {
    val fileMetadata = new File().setMimeType("application/vnd.google-apps.spreadsheet")
    val mediaContent = new FileContent("text/csv", new java.io.File(filePath))

    driveService(credentials).files.update(sheetId, fileMetadata, mediaContent).execute
  }

  def downloadGoogleSheetAsCsvFile(
      credentials: GoogleCretendialsJson,
      sheetId: GoogleSheetId,
      filePath: String
  ): Unit = {
    val file = new java.io.File(filePath)
    file.getParentFile.mkdirs()

    ManagedResource(new FileOutputStream(file)) { fos =>
      driveService(credentials).files().export(sheetId, "text/csv").executeMediaAndDownloadTo(fos)
      logger.info(s"Downloaded google sheet id=$sheetId to the file $filePath")
    }
  }

  private def driveService(serviceAccountCredentials: String): Drive = {
    val credential = {
      val in = new ByteArrayInputStream(serviceAccountCredentials.getBytes)
      GoogleCredential.fromStream(in).createScoped(Scopes)
    }
    new Drive.Builder(
      GoogleNetHttpTransport.newTrustedTransport(),
      jsonFactory,
      credential
    ).setApplicationName(ApplicationName).build
  }

  // Default choice is JacksonFactory. However spark depends on Jackson as well
  // and google/spark jackson versions are binary incompatible with each other.
  private val jsonFactory = GsonFactory.getDefaultInstance
}

This is the same Drive client as Example 2, taken from the seahorse-workflow-executor variant of the project; only the io.deepsense package prefix differs, so the usage sketch shown under Example 2 applies here as well.
Example 6
Source File: AdWordsAuthHelper.scala From spark-google-adwords with Apache License 2.0
package com.crealytics.google.adwords

import com.google.api.client.googleapis.auth.oauth2.GoogleAuthorizationCodeFlow
import com.google.api.client.googleapis.auth.oauth2.GoogleCredential
import com.google.api.client.http.javanet.NetHttpTransport
import com.google.api.client.json.jackson2.JacksonFactory
import com.google.common.collect.Lists

class AdWordsAuthHelper(clientId: String, clientSecret: String) {

  val ADWORDS_API_SCOPE = "https://www.googleapis.com/auth/adwords"
  private val SCOPES = Lists.newArrayList(ADWORDS_API_SCOPE)
  private val CALLBACK_URL = "urn:ietf:wg:oauth:2.0:oob"

  val authorizationFlow: GoogleAuthorizationCodeFlow =
    new GoogleAuthorizationCodeFlow.Builder(
      new NetHttpTransport(),
      new JacksonFactory(),
      clientId,
      clientSecret,
      SCOPES
    ).setAccessType("offline")
      .build()

  val authorizationUrl: String =
    authorizationFlow.newAuthorizationUrl().setRedirectUri(CALLBACK_URL).build()

  def getRefreshToken(authorizationCode: String): String = {
    val tokenRequest = authorizationFlow.newTokenRequest(authorizationCode)
    tokenRequest.setRedirectUri(CALLBACK_URL)
    val tokenResponse = tokenRequest.execute()
    val credential = new GoogleCredential.Builder()
      .setTransport(new NetHttpTransport())
      .setJsonFactory(new JacksonFactory())
      .setClientSecrets(clientId, clientSecret)
      .build()
    credential.setFromTokenResponse(tokenResponse)
    credential.getRefreshToken
  }
}
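The helper wraps the installed-application OAuth flow: send the user to authorizationUrl, let them paste the authorization code back, and exchange it for a long-lived refresh token. A minimal command-line sketch; the client id and secret are placeholders.

import com.crealytics.google.adwords.AdWordsAuthHelper
import scala.io.StdIn

object ObtainAdWordsRefreshToken {
  def main(args: Array[String]): Unit = {
    // Placeholders: use the OAuth client id and secret of your own Google API project.
    val helper = new AdWordsAuthHelper("<client-id>", "<client-secret>")

    println(s"Open this URL in a browser and authorize access: ${helper.authorizationUrl}")
    val code = StdIn.readLine("Paste the authorization code here: ")

    // Exchange the one-time authorization code for a reusable refresh token.
    println(s"Refresh token: ${helper.getRefreshToken(code)}")
  }
}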