java.util.concurrent.ExecutionException Scala Examples
The following examples show how to use java.util.concurrent.ExecutionException.
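All of the examples share the same underlying pattern: a blocking call such as java.util.concurrent.Future.get() reports any failure of the submitted task by throwing ExecutionException, and the original error has to be recovered from getCause. The following minimal sketch illustrates that pattern on its own; the object name and the failing task are hypothetical and not taken from any of the projects below.

import java.util.concurrent.{Callable, ExecutionException, Executors}

object ExecutionExceptionDemo extends App {
  val pool = Executors.newSingleThreadExecutor()
  try {
    // Submit a task that fails; get() rethrows the failure wrapped in ExecutionException.
    val future = pool.submit(new Callable[String] {
      override def call(): String = throw new IllegalStateException("boom")
    })
    future.get()
  } catch {
    case e: ExecutionException =>
      // Unwrap the original failure, just as the examples below do with e.getCause.
      println(s"Task failed with: ${e.getCause}")
  } finally {
    pool.shutdown()
  }
}

The examples that follow show how real projects apply this unwrapping idea with different client libraries.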
Example 1
Source File: QueryTS.scala From spark-riak-connector with Apache License 2.0
package com.basho.riak.spark.query

import java.util.concurrent.ExecutionException

import com.basho.riak.client.core.netty.RiakResponseException
import com.basho.riak.client.core.operations.ts.QueryOperation
import com.basho.riak.client.core.query.timeseries.{ColumnDescription, Row}

import scala.collection.convert.decorateAsScala._

import com.basho.riak.client.core.query.timeseries.CoverageEntry
import com.basho.riak.spark.rdd.connector.RiakConnector
import com.basho.riak.client.core.util.HostAndPort
import com.basho.riak.spark.util.{Dumpable, DumpUtils}

case class QueryTS(connector: RiakConnector, queryData: Seq[TSQueryData]) {

  def nextChunk(tsQueryData: TSQueryData): (Seq[ColumnDescription], Seq[Row]) = {
    val op = tsQueryData.coverageEntry match {
      case None => new QueryOperation.Builder(tsQueryData.sql).build()
      case Some(ce) => new QueryOperation.Builder(tsQueryData.sql).withCoverageContext(ce.getCoverageContext()).build()
    }

    try {
      connector.withSessionDo(tsQueryData.primaryHost.map(Seq(_)))({ session =>
        val qr = session.execute(op).get()
        qr.getColumnDescriptionsCopy.asScala -> qr.getRowsCopy.asScala
      })
    } catch {
      // get() wraps Riak failures in ExecutionException; inspect the cause to
      // translate the "unknown message code" error into a clearer message.
      case e: ExecutionException =>
        if (e.getCause.isInstanceOf[RiakResponseException]
          && e.getCause.getMessage.equals("Unknown message code: 90")) {
          throw new IllegalStateException("Range queries are not supported in your version of Riak", e.getCause)
        } else {
          throw e
        }
    }
  }
}
Example 2
Source File: KafkaTopicAdmin.scala From kafka-configurator with BSD 3-Clause "New" or "Revised" License
package com.sky.kafka.configurator

import java.util.concurrent.ExecutionException

import cats.data.Reader
import com.sky.kafka.configurator.error.TopicNotFound
import org.apache.kafka.clients.admin.AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG
import org.apache.kafka.clients.admin._
import org.apache.kafka.common.config.ConfigResource
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException
import org.zalando.grafter.{ Stop, StopResult }

import scala.collection.JavaConverters._
import scala.language.postfixOps
import scala.util.{ Failure, Success, Try }

object KafkaTopicAdmin {
  def apply(adminClient: AdminClient): KafkaTopicAdmin = new KafkaTopicAdmin(adminClient)

  def reader: Reader[AppConfig, KafkaTopicAdmin] = Reader { config =>
    import com.sky.kafka.utils.MapToJavaPropertiesConversion.mapToProperties
    KafkaTopicAdmin(AdminClient.create(Map(BOOTSTRAP_SERVERS_CONFIG -> config.bootstrapServers) ++ config.props))
  }
}

class KafkaTopicAdmin(ac: AdminClient) extends TopicReader with TopicWriter with Stop {

  override def fetch(topicName: String) = {

    def topicDescription = Try {
      val allDescriptions = ac.describeTopics(Seq(topicName).asJava).all.get
      allDescriptions.get(topicName)
    } match {
      case Success(result) => Success(result)
      case Failure(e: ExecutionException) if e.getCause.isInstanceOf[UnknownTopicOrPartitionException] =>
        Failure(TopicNotFound(topicName))
      case other => other
    }

    def topicConfig = Try {
      val allConfigs = ac.describeConfigs(Seq(configResourceForTopic(topicName)).asJava).all.get
      allConfigs.get(configResourceForTopic(topicName))
    }

    for {
      desc <- topicDescription
      partitions = desc.partitions().size()
      replicationFactor = desc.partitions().asScala.head.replicas().size()
      config <- topicConfig
    } yield Topic(desc.name(), partitions, replicationFactor, config)
  }

  override def create(topic: Topic) = Try {
    val newTopic = new NewTopic(topic.name, topic.partitions, topic.replicationFactor.toShort).configs(topic.config.asJava)
    ac.createTopics(Seq(newTopic).asJava).all().get
  }

  override def updateConfig(topicName: String, config: Map[String, Object]) = Try {
    val c = config.map { case (key, value) => new ConfigEntry(key, value.toString) }.toList.asJava
    ac.alterConfigs(Map(configResourceForTopic(topicName) -> new Config(c)).asJava).all().get
  }

  override def updatePartitions(topicName: String, numPartitions: Int) = Try {
    ac.createPartitions(Map(topicName -> NewPartitions.increaseTo(numPartitions)).asJava).all().get()
  }

  override def stop = StopResult.eval("KafkaAdminClient")(ac.close())

  private def configResourceForTopic(topicName: String) = new ConfigResource(ConfigResource.Type.TOPIC, topicName)

  private implicit def kafkaConfigToMap(config: Config): Map[String, String] = config.entries().asScala.map { entry =>
    entry.name() -> entry.value()
  } toMap
}
Example 3
Source File: CodeGeneratorWithInterpretedFallbackSuite.scala From XSQL with Apache License 2.0
package org.apache.spark.sql.catalyst.expressions

import java.util.concurrent.ExecutionException

import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.catalyst.expressions.codegen.{CodeAndComment, CodeGenerator}
import org.apache.spark.sql.catalyst.plans.PlanTestBase
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.IntegerType

class CodeGeneratorWithInterpretedFallbackSuite extends SparkFunSuite with PlanTestBase {

  object FailedCodegenProjection
      extends CodeGeneratorWithInterpretedFallback[Seq[Expression], UnsafeProjection] {

    override protected def createCodeGeneratedObject(in: Seq[Expression]): UnsafeProjection = {
      val invalidCode = new CodeAndComment("invalid code", Map.empty)
      // We assume this compilation throws an exception
      CodeGenerator.compile(invalidCode)
      null
    }

    override protected def createInterpretedObject(in: Seq[Expression]): UnsafeProjection = {
      InterpretedUnsafeProjection.createProjection(in)
    }
  }

  test("UnsafeProjection with codegen factory mode") {
    val input = Seq(BoundReference(0, IntegerType, nullable = true))
    val codegenOnly = CodegenObjectFactoryMode.CODEGEN_ONLY.toString
    withSQLConf(SQLConf.CODEGEN_FACTORY_MODE.key -> codegenOnly) {
      val obj = UnsafeProjection.createObject(input)
      assert(obj.getClass.getName.contains("GeneratedClass$SpecificUnsafeProjection"))
    }

    val noCodegen = CodegenObjectFactoryMode.NO_CODEGEN.toString
    withSQLConf(SQLConf.CODEGEN_FACTORY_MODE.key -> noCodegen) {
      val obj = UnsafeProjection.createObject(input)
      assert(obj.isInstanceOf[InterpretedUnsafeProjection])
    }
  }

  test("fallback to the interpreter mode") {
    val input = Seq(BoundReference(0, IntegerType, nullable = true))
    val fallback = CodegenObjectFactoryMode.FALLBACK.toString
    withSQLConf(SQLConf.CODEGEN_FACTORY_MODE.key -> fallback) {
      val obj = FailedCodegenProjection.createObject(input)
      assert(obj.isInstanceOf[InterpretedUnsafeProjection])
    }
  }

  test("codegen failures in the CODEGEN_ONLY mode") {
    val errMsg = intercept[ExecutionException] {
      val input = Seq(BoundReference(0, IntegerType, nullable = true))
      val codegenOnly = CodegenObjectFactoryMode.CODEGEN_ONLY.toString
      withSQLConf(SQLConf.CODEGEN_FACTORY_MODE.key -> codegenOnly) {
        FailedCodegenProjection.createObject(input)
      }
    }.getMessage
    assert(errMsg.contains("failed to compile: org.codehaus.commons.compiler.CompileException:"))
  }
}
Example 4
Source File: OAuthToken.scala From spark-power-bi with Apache License 2.0
package com.granturing.spark.powerbi

import java.util.concurrent.{ExecutionException, TimeUnit, Executors}

import com.microsoft.aad.adal4j.{AuthenticationResult, AuthenticationCallback, AuthenticationContext}
import dispatch._
import org.apache.spark.Logging

import scala.concurrent.{Await, promise}
import scala.util.{Try, Failure, Success}

private class OAuthReq(token: OAuthTokenHandler) extends (Req => Req) {
  override def apply(req: Req): Req = {
    req <:< Map("Authorization" -> s"Bearer ${token()}")
  }
}

private class OAuthTokenHandler(authConf: ClientConf, initialToken: Option[String] = None) extends Logging {
  private var _token: Option[String] = initialToken

  def apply(refresh: Boolean = false): String = {
    _token match {
      case Some(s) if !refresh => s
      case _ => {
        refreshToken match {
          case Success(s) => {
            _token = Some(s)
            s
          }
          case Failure(e) => throw e
        }
      }
    }
  }

  private def refreshToken: Try[String] = {
    log.info("refreshing OAuth token")

    val service = Executors.newFixedThreadPool(1)
    val context = new AuthenticationContext(authConf.token_uri, true, service)

    val p = promise[AuthenticationResult]
    val future = p.future

    context.acquireToken(authConf.resource, authConf.clientid, authConf.username, authConf.password, new AuthenticationCallback {
      def onSuccess(result: AuthenticationResult): Unit = {
        p.success(result)
      }

      def onFailure(ex: Throwable): Unit = {
        p.failure(ex)
      }
    })

    try {
      val result = Await.result(future, authConf.timeout)
      log.info("OAuth token refresh successful")
      Success(result.getAccessToken)
    } catch {
      case e: ExecutionException => Failure(e.getCause)
      case t: Throwable => Failure(t)
    } finally {
      service.shutdown()
    }
  }
}
Example 5
Source File: KeyVaultADALAuthenticator.scala From azure-kusto-spark with Apache License 2.0
package com.microsoft.kusto.spark.utils

import java.net.MalformedURLException
import java.util.concurrent.{ExecutionException, ExecutorService, Executors, Future}

import com.microsoft.aad.adal4j.{AuthenticationContext, AuthenticationResult, ClientCredential}
import com.microsoft.azure.keyvault.KeyVaultClient
import com.microsoft.azure.keyvault.authentication.KeyVaultCredentials

class KeyVaultADALAuthenticator(clientId: String, clientKey: String) {

  def getAuthenticatedClient: KeyVaultClient = {
    // Creates the KeyVaultClient using the created credentials.
    new KeyVaultClient(createCredentials)
  }

  private def createCredentials: KeyVaultCredentials = {
    new KeyVaultCredentials() {
      // Callback that supplies the token type and access token on request.
      override def doAuthenticate(authorization: String, resource: String, scope: String): String = {
        try {
          val authResult = getAccessToken(authorization, resource)
          authResult.getAccessToken
        } catch {
          case e: Exception =>
            KustoDataSourceUtils.logError("KeyVaultADALAuthenticator", "Exception trying to access Key Vault:" + e.getMessage)
            ""
        }
      }
    }
  }

  @throws[InterruptedException]
  @throws[ExecutionException]
  @throws[MalformedURLException]
  private def getAccessToken(authorization: String, resource: String): AuthenticationResult = {
    var result: AuthenticationResult = null
    var service: ExecutorService = null

    // Starts a service to fetch access token.
    try {
      service = Executors.newFixedThreadPool(1)
      val context = new AuthenticationContext(authorization, false, service)

      // Acquires token based on client ID and client secret.
      var future: Future[AuthenticationResult] = null
      if (clientId != null && clientKey != null) {
        val credentials = new ClientCredential(clientId, clientKey)
        future = context.acquireToken(resource, credentials, null)
      }

      result = future.get
    } finally service.shutdown()

    if (result == null) throw new RuntimeException("Authentication results were null.")
    result
  }
}
Example 6
Source File: FutureCallback.scala From hazelcast-scala with Apache License 2.0
package com.hazelcast.Scala

import com.hazelcast.core.ExecutionCallback

import scala.concurrent.Promise
import java.util.concurrent.ExecutionException
import scala.util.control.NonFatal

private[Scala] final class FutureCallback[X, R](nullReplacement: R = null.asInstanceOf[R])(implicit map: X => R)
    extends ExecutionCallback[X] {

  private[this] val promise = Promise[R]

  def future = promise.future

  def onFailure(th: Throwable) = th match {
    // Unwrap ExecutionException so the promise fails with the underlying cause.
    case e: ExecutionException => onFailure(e.getCause)
    case e => promise.failure(e)
  }

  def onResponse(res: X) = res match {
    case th: Throwable => onFailure(th)
    case null => promise success nullReplacement
    case value =>
      try {
        promise success map(value)
      } catch {
        case NonFatal(e) => onFailure(e)
      }
  }
}
Example 7
Source File: CachedKafkaProducer.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.sql.kafka010

import java.{util => ju}
import java.util.concurrent.{ConcurrentMap, ExecutionException, TimeUnit}

import com.google.common.cache._
import com.google.common.util.concurrent.{ExecutionError, UncheckedExecutionException}
import org.apache.kafka.clients.producer.KafkaProducer

import scala.collection.JavaConverters._
import scala.util.control.NonFatal

import org.apache.spark.SparkEnv
import org.apache.spark.internal.Logging

private[kafka010] object CachedKafkaProducer extends Logging {

  private type Producer = KafkaProducer[Array[Byte], Array[Byte]]

  private lazy val cacheExpireTimeout: Long =
    SparkEnv.get.conf.getTimeAsMs("spark.kafka.producer.cache.timeout", "10m")

  private val cacheLoader = new CacheLoader[Seq[(String, Object)], Producer] {
    override def load(config: Seq[(String, Object)]): Producer = {
      val configMap = config.map(x => x._1 -> x._2).toMap.asJava
      createKafkaProducer(configMap)
    }
  }

  private val removalListener = new RemovalListener[Seq[(String, Object)], Producer]() {
    override def onRemoval(
        notification: RemovalNotification[Seq[(String, Object)], Producer]): Unit = {
      val paramsSeq: Seq[(String, Object)] = notification.getKey
      val producer: Producer = notification.getValue
      logDebug(
        s"Evicting kafka producer $producer params: $paramsSeq, due to ${notification.getCause}")
      close(paramsSeq, producer)
    }
  }

  private lazy val guavaCache: LoadingCache[Seq[(String, Object)], Producer] =
    CacheBuilder.newBuilder().expireAfterAccess(cacheExpireTimeout, TimeUnit.MILLISECONDS)
      .removalListener(removalListener)
      .build[Seq[(String, Object)], Producer](cacheLoader)

  private def createKafkaProducer(producerConfiguration: ju.Map[String, Object]): Producer = {
    val kafkaProducer: Producer = new Producer(producerConfiguration)
    logDebug(s"Created a new instance of KafkaProducer for $producerConfiguration.")
    kafkaProducer
  }

  private def close(paramsSeq: Seq[(String, Object)], producer: Producer): Unit = {
    try {
      logInfo(s"Closing the KafkaProducer with params: ${paramsSeq.mkString("\n")}.")
      producer.close()
    } catch {
      case NonFatal(e) => logWarning("Error while closing kafka producer.", e)
    }
  }

  private def clear(): Unit = {
    logInfo("Cleaning up guava cache.")
    guavaCache.invalidateAll()
  }

  // Intended for testing purpose only.
  private def getAsMap: ConcurrentMap[Seq[(String, Object)], Producer] = guavaCache.asMap()
}
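Note that ExecutionException, UncheckedExecutionException, and ExecutionError are imported above but do not appear in the excerpt: Guava's LoadingCache.get wraps any failure thrown by the CacheLoader in one of these types, and the cache-lookup method that unwraps them is not part of the snippet. Below is a minimal, hypothetical sketch of that unwrapping pattern; the names CacheGetDemo and getOrCreate are illustrative, not the project's own.

import java.util.concurrent.ExecutionException

import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import com.google.common.util.concurrent.{ExecutionError, UncheckedExecutionException}

object CacheGetDemo {
  // A loader that can fail; LoadingCache.get wraps such failures before rethrowing them.
  private val cache: LoadingCache[String, Integer] =
    CacheBuilder.newBuilder().build[String, Integer](new CacheLoader[String, Integer] {
      override def load(key: String): Integer =
        if (key.isEmpty) throw new IllegalArgumentException("empty key")
        else Integer.valueOf(key.length)
    })

  def getOrCreate(key: String): Integer =
    try {
      cache.get(key)
    } catch {
      // Guava wraps checked exceptions in ExecutionException, unchecked ones in
      // UncheckedExecutionException, and errors in ExecutionError; rethrow the cause.
      case e @ (_: ExecutionException | _: UncheckedExecutionException | _: ExecutionError)
          if e.getCause != null =>
        throw e.getCause
    }
}

The same consideration applies to the CachedPulsarClient in Example 8 below.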
Example 8
Source File: CachedPulsarClient.scala From pulsar-spark with Apache License 2.0
package org.apache.spark.sql.pulsar

import java.{util => ju}
import java.util.concurrent.{ConcurrentMap, ExecutionException, TimeUnit}

import scala.collection.JavaConverters._
import scala.util.control.NonFatal

import com.google.common.cache._
import com.google.common.util.concurrent.{ExecutionError, UncheckedExecutionException}

import org.apache.spark.SparkEnv
import org.apache.spark.internal.Logging

private[pulsar] object CachedPulsarClient extends Logging {

  private type Client = org.apache.pulsar.client.api.PulsarClient

  private val defaultCacheExpireTimeout = TimeUnit.MINUTES.toMillis(10)

  private lazy val cacheExpireTimeout: Long =
    Option(SparkEnv.get)
      .map(_.conf
        .getTimeAsMs("spark.pulsar.client.cache.timeout", s"${defaultCacheExpireTimeout}ms"))
      .getOrElse(defaultCacheExpireTimeout)

  private val cacheLoader = new CacheLoader[Seq[(String, Object)], Client] {
    override def load(config: Seq[(String, Object)]): Client = {
      val configMap = config.map(x => x._1 -> x._2).toMap.asJava
      createPulsarClient(configMap)
    }
  }

  private val removalListener = new RemovalListener[Seq[(String, Object)], Client]() {
    override def onRemoval(
        notification: RemovalNotification[Seq[(String, Object)], Client]): Unit = {
      val paramsSeq: Seq[(String, Object)] = notification.getKey
      val client: Client = notification.getValue
      logDebug(
        s"Evicting pulsar producer $client params: $paramsSeq, due to ${notification.getCause}")
      close(paramsSeq, client)
    }
  }

  private lazy val guavaCache: LoadingCache[Seq[(String, Object)], Client] =
    CacheBuilder
      .newBuilder()
      .expireAfterAccess(cacheExpireTimeout, TimeUnit.MILLISECONDS)
      .removalListener(removalListener)
      .build[Seq[(String, Object)], Client](cacheLoader)

  private def createPulsarClient(pulsarConf: ju.Map[String, Object]): Client = {
    val pulsarServiceUrl =
      pulsarConf.get(PulsarOptions.SERVICE_URL_OPTION_KEY).asInstanceOf[String]
    val clientConf = new PulsarConfigUpdater(
      "pulsarClientCache",
      pulsarConf.asScala.toMap,
      PulsarOptions.FILTERED_KEYS
    ).rebuild()
    logInfo(s"Client Conf = ${clientConf}")
    try {
      val pulsarClient: Client = org.apache.pulsar.client.api.PulsarClient
        .builder()
        .serviceUrl(pulsarServiceUrl)
        .loadConf(clientConf)
        .build()
      logDebug(
        s"Created a new instance of PulsarClient for serviceUrl = $pulsarServiceUrl," +
          s" clientConf = $clientConf.")
      pulsarClient
    } catch {
      case e: Throwable =>
        logError(
          s"Failed to create PulsarClient to serviceUrl ${pulsarServiceUrl}" +
            s" using client conf ${clientConf}",
          e)
        throw e
    }
  }

  private def close(paramsSeq: Seq[(String, Object)], client: Client): Unit = {
    try {
      logInfo(s"Closing the Pulsar Client with params: ${paramsSeq.mkString("\n")}.")
      client.close()
    } catch {
      case NonFatal(e) => logWarning("Error while closing pulsar producer.", e)
    }
  }

  private[pulsar] def clear(): Unit = {
    logInfo("Cleaning up guava cache.")
    guavaCache.invalidateAll()
  }

  // Intended for testing purpose only.
  private def getAsMap: ConcurrentMap[Seq[(String, Object)], Client] = guavaCache.asMap()
}