java.util.ServiceLoader Scala Examples
The following examples show how to use java.util.ServiceLoader.
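All of these examples follow the same underlying pattern: define a service interface, package one or more implementations together with a provider-configuration file under META-INF/services, then discover them at runtime with ServiceLoader.load. The minimal sketch below illustrates that pattern; GreetingService, EnglishGreetingService, GreetingLoader, and the example.spi package are illustrative names only, not code from any of the projects listed here.

package example.spi

import java.util.ServiceLoader
import scala.collection.JavaConverters._

// The service interface (SPI). Implementations are discovered at runtime.
trait GreetingService {
  def greet(name: String): String
}

// A provider implementation. It needs a public no-argument constructor and must be
// registered in a classpath resource named after the fully-qualified interface,
//   META-INF/services/example.spi.GreetingService
// containing one line with the implementation's fully-qualified class name:
//   example.spi.EnglishGreetingService
class EnglishGreetingService extends GreetingService {
  override def greet(name: String): String = s"Hello, $name"
}

object GreetingLoader {
  // Load every registered implementation visible to the context class loader.
  def all(): List[GreetingService] =
    ServiceLoader.load(classOf[GreetingService]).iterator().asScala.toList
}

Each example that follows is a variation on the loading half of this pattern: choosing which class loader to search, filtering the discovered providers by name or type, and deciding what to do when zero, one, or several providers are found.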
Example 1
Source File: SpiServiceLoader.scala from feel-scala with Apache License 2.0
package org.camunda.feel.impl

import java.util.ServiceLoader

import org.camunda.feel.context.{CustomFunctionProvider, FunctionProvider}
import org.camunda.feel.valuemapper.{CustomValueMapper, ValueMapper}

import scala.reflect.{ClassTag, classTag}
import scala.collection.JavaConverters._

object SpiServiceLoader {

  def loadValueMapper: ValueMapper = {
    val customValueMappers = loadServiceProvider[CustomValueMapper]()
    ValueMapper.CompositeValueMapper(customValueMappers)
  }

  def loadFunctionProvider: FunctionProvider =
    loadServiceProvider[CustomFunctionProvider]() match {
      case Nil      => FunctionProvider.EmptyFunctionProvider
      case p :: Nil => p
      case ps       => FunctionProvider.CompositeFunctionProvider(ps)
    }

  private def loadServiceProvider[T: ClassTag](): List[T] =
    try {
      val loader = ServiceLoader.load(classTag[T].runtimeClass.asInstanceOf[Class[T]])
      loader.iterator.asScala.toList
    } catch {
      case t: Throwable =>
        System.err.println(
          s"Failed to load service provider: ${classTag[T].runtimeClass.getSimpleName}")
        t.printStackTrace()
        throw t
    }
}
Example 2
Source File: ConnectorFactory.scala from darwin with Apache License 2.0
package it.agilelab.darwin.common

import java.util.ServiceLoader

import com.typesafe.config.Config
import it.agilelab.darwin.manager.exception.ConnectorNotFoundException
import it.agilelab.darwin.manager.util.ConfigurationKeys
import it.agilelab.darwin.common.compat._

// ... (the enclosing object declaration is omitted in this excerpt)

  def creator(conf: Config): Option[ConnectorCreator] = {
    if (conf.hasPath(ConfigurationKeys.CONNECTOR)) {
      creator(conf.getString(ConfigurationKeys.CONNECTOR))
    } else {
      creator()
    }
  }

  def connector(config: Config): Connector = {
    val cnt = creator(config)
      .map(_.create(config))
      .getOrElse(throw new ConnectorNotFoundException(config))
    if (config.hasPath(ConfigurationKeys.CREATE_TABLE) &&
        config.getBoolean(ConfigurationKeys.CREATE_TABLE)) {
      cnt.createTable()
    } else if (!cnt.tableExists()) {
      log.warn(s"Darwin table does not exists and has not been created (${ConfigurationKeys.CREATE_TABLE} was false)")
      log.warn(cnt.tableCreationHint())
    }
    cnt
  }
}
Example 3
Source File: DataSourceManagerFactory.scala from XSQL with Apache License 2.0
package org.apache.spark.sql.xsql

import java.util.ServiceLoader

import scala.collection.JavaConverters._

import org.apache.hadoop.conf.Configuration
import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.util.Utils

object DataSourceManagerFactory {

  def create(
      datasourceType: String,
      conf: SparkConf,
      hadoopConf: Configuration): DataSourceManager = {
    val loader = Utils.getContextOrSparkClassLoader
    val serviceLoader = ServiceLoader.load(classOf[DataSourceManager], loader)
    var cls: Class[_] = null
    // As we use ServiceLoader to support creating any user provided DataSourceManager here,
    // META-INF/services/org.apache.spark.sql.sources.DataSourceRegister must be packaged properly
    // in user's jar, and the implementation of DataSourceManager must have a public parameterless
    // constructor. For scala language, def this() = this(null...) just work.
    try {
      cls = serviceLoader.asScala
        .filter(_.shortName().equals(datasourceType))
        .toList match {
        case head :: Nil => head.getClass
        case _ =>
          throw new SparkException(s"error when instantiate datasource ${datasourceType}")
      }
    } catch {
      case _: Exception =>
        throw new SparkException(
          s"""Can't find corresponding DataSourceManager for ${datasourceType} type,
             |please check
             |1. META-INF/services/org.apache.spark.sql.sources.DataSourceRegister is packaged
             |2. your implementation of DataSourceManager's shortname is ${datasourceType}
             |3. your implementation of DataSourceManager must have a public parameterless
             |   constructor. For scala language, def this() = this(null, null, ...) just work.
           """.stripMargin)
    }
    try {
      val constructor = cls.getConstructor(classOf[SparkConf], classOf[Configuration])
      val newHadoopConf = new Configuration(hadoopConf)
      constructor.newInstance(conf, newHadoopConf).asInstanceOf[DataSourceManager]
    } catch {
      case _: NoSuchMethodException =>
        try {
          cls.getConstructor(classOf[SparkConf]).newInstance(conf).asInstanceOf[DataSourceManager]
        } catch {
          case _: NoSuchMethodException =>
            cls.getConstructor().newInstance().asInstanceOf[DataSourceManager]
        }
    }
  }
}
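The comment and error message above spell out what a user-provided implementation must supply: a shortName() matching the configured datasource type and a constructor the factory can find reflectively, trying (SparkConf, Configuration), then (SparkConf), then a no-argument constructor. The skeleton below is a hypothetical sketch of such a provider; MyDataSourceManager and its package are invented, the import location of DataSourceManager is assumed from the factory's own package, and any further members the real DataSourceManager API requires are not shown in this excerpt.

package org.example.xsql // hypothetical package, not part of XSQL

import org.apache.hadoop.conf.Configuration
import org.apache.spark.SparkConf
import org.apache.spark.sql.xsql.DataSourceManager // assumed location, based on the factory's package

// Hypothetical provider. Assumes DataSourceManager can be subclassed directly and that
// shortName() is the member the factory matches against.
class MyDataSourceManager(conf: SparkConf, hadoopConf: Configuration) extends DataSourceManager {

  // Auxiliary constructors matching the factory's reflective fallbacks.
  def this(conf: SparkConf) = this(conf, new Configuration())
  def this() = this(new SparkConf(), new Configuration())

  override def shortName(): String = "mydatasource"
}

As the factory's comment notes, the implementation class must also be listed in the appropriate META-INF/services provider-configuration file packaged in the user's jar so that ServiceLoader can discover it.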
Example 4
Source File: AlarmFactory.scala from XSQL with Apache License 2.0
package org.apache.spark.alarm

import java.util.ServiceLoader

import scala.collection.JavaConverters._

import org.apache.spark.SparkException
import org.apache.spark.util.Utils

object AlarmFactory {

  def create(alarmName: String, options: Map[String, String]): Alarm = {
    val loader = Utils.getContextOrSparkClassLoader
    val serviceLoader = ServiceLoader.load(classOf[Alarm], loader)
    val AlarmClass =
      serviceLoader.asScala.filter(_.name.equalsIgnoreCase(alarmName)).toList match {
        case head :: Nil => head.getClass
        case _ => throw new SparkException("error when instantiate spark.xsql.alarm.items")
      }
    AlarmClass.newInstance().bind(options)
  }
}
Example 5
Source File: MonitorFactory.scala from XSQL with Apache License 2.0
package org.apache.spark.monitor

import java.util.ServiceLoader

import scala.collection.JavaConverters._

import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.alarm.Alarm
import org.apache.spark.util.Utils
import org.apache.spark.util.kvstore.KVStore

object MonitorFactory {

  def create(
      monitorName: String,
      alarms: Seq[Alarm],
      appStore: KVStore,
      conf: SparkConf): Monitor = {
    val loader = Utils.getContextOrSparkClassLoader
    val serviceLoader = ServiceLoader.load(classOf[Monitor], loader)
    val MonitorClass = serviceLoader.asScala
      .filter(_.item.equals(MonitorItem.withName(monitorName)))
      .toList match {
      case head :: Nil => head.getClass
      case _ => throw new SparkException("error when instantiate spark.xsql.monitor.items")
    }
    MonitorClass.newInstance().bind(alarms).bind(appStore).bind(conf)
  }
}
Example 6
Source File: ServiceLoaderBackedExtensionProvider.scala from rug with GNU General Public License v3.0
package com.atomist.util

import java.util.ServiceLoader

import com.atomist.rug.RugRuntimeException
import com.typesafe.scalalogging.LazyLogging

import scala.collection.JavaConverters._
import scala.reflect.{ClassTag, _}

class ServiceLoaderBackedExtensionProvider[T: ClassTag](val keyProvider: T => String)
  extends LazyLogging {

  // The following cannot be cached as it creates issues in shared class loader hierarchies
  def providerMap: Map[String, T] = {
    logger.debug(s"Loading providers of type ${classTag[T].runtimeClass.getName} and class loader ${Thread.currentThread().getContextClassLoader}")
    ServiceLoader.load(classTag[T].runtimeClass).asScala.map {
      case t: T =>
        val key = keyProvider.apply(t)
        logger.debug(s"Registered provider '$key' with class '${t.getClass}'")
        key -> t
      case wtf =>
        throw new RugRuntimeException("Extension",
          s"Provider class ${wtf.getClass} must implement ${classTag[T].runtimeClass.getName} interface", null)
    }.toMap
  }
}
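For context, here is a hypothetical way the provider class above might be used by calling code; MyExtension, ExtensionRegistry, and the name-based key are illustrative and not part of rug:

import com.atomist.util.ServiceLoaderBackedExtensionProvider

// Hypothetical extension point; any type from which a string key can be derived works.
trait MyExtension {
  def name: String
}

object ExtensionRegistry {
  // Key each discovered provider by its name; providerMap is recomputed on every call,
  // in line with the class's note about shared class loader hierarchies.
  private val provider = new ServiceLoaderBackedExtensionProvider[MyExtension](_.name)

  def extensions: Map[String, MyExtension] = provider.providerMap
}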
Example 7
Source File: TestSparkDruidIndexerModule.scala from druid-spark-batch with Apache License 2.0
package io.druid.indexer.spark

import com.google.inject.Binder
import com.google.inject.Key
import com.google.inject.Module
import com.google.inject.name.Names
import io.druid.guice.GuiceInjectors
import io.druid.guice.JsonConfigProvider
import io.druid.guice.annotations.Self
import io.druid.initialization.DruidModule
import io.druid.initialization.Initialization
import io.druid.server.DruidNode
import java.util.ServiceLoader

import org.scalatest.FlatSpec
import org.scalatest.Matchers

import scala.collection.JavaConverters._

class TestSparkDruidIndexerModule extends FlatSpec with Matchers {

  "SparkDruidIndexerModules" should "load properly" in {
    val loader: ServiceLoader[DruidModule] =
      ServiceLoader.load(classOf[DruidModule], classOf[TestSparkDruidIndexerModule].getClassLoader)
    val module: DruidModule = loader.asScala.head
    module.getClass.getCanonicalName should startWith("io.druid.indexer.spark.SparkDruidIndexerModule")

    Initialization.makeInjectorWithModules(
      GuiceInjectors.makeStartupInjector(),
      Seq(
        new Module() {
          override def configure(binder: Binder) = {
            JsonConfigProvider
              .bindInstance(
                binder,
                Key.get(classOf[DruidNode], classOf[Self]),
                new DruidNode("spark-indexer-test", null, null, null, true, false)
              )
            binder.bindConstant.annotatedWith(Names.named("servicePort")).to(0)
            binder.bindConstant.annotatedWith(Names.named("tlsServicePort")).to(-1)
          }
        },
        module
      ).asJava
    )
  }
}
Example 8
Source File: LanguageFactory.scala from vm with GNU Affero General Public License v3.0
package org.mmadt.language

import java.util.ServiceLoader

import org.mmadt.language.mmlang.mmlangPrinter
import org.mmadt.language.obj._
import org.mmadt.language.obj.`type`.Type
import org.mmadt.language.obj.value.Value
import org.mmadt.language.obj.value.strm.Strm

import scala.collection.JavaConverters

trait LanguageFactory {
  def printValue(value: Value[Obj]): String
  def printType(atype: Type[Obj]): String
  def printInst(inst: Inst[_, _]): String
  def printStrm(strm: Strm[Obj]): String
}

object LanguageFactory {
  ///////PROVIDERS///////
  private lazy val providers: ServiceLoader[LanguageProvider] =
    ServiceLoader.load(classOf[LanguageProvider])

  def getLanguage(name: String): LanguageProvider =
    JavaConverters.asScalaIterator(providers.iterator()).filter(x => x.name().equals(name)).next()
  ///////////////////////

  def printValue(value: Value[Obj])(implicit f: LanguageFactory): String = f.printValue(value)
  def printType(atype: Type[Obj])(implicit f: LanguageFactory): String = f.printType(atype)
  def printInst(inst: Inst[_, _])(implicit f: LanguageFactory): String = f.printInst(inst)
  def printStrm(strm: Strm[Obj])(implicit f: LanguageFactory): String = f.printStrm(strm)

  implicit val mmlangFactory: LanguageFactory = new LanguageFactory {
    override def printValue(value: Value[Obj]): String = mmlangPrinter.valueString(value)
    override def printType(atype: Type[Obj]): String = mmlangPrinter.typeString(atype)
    override def printInst(inst: Inst[_, _]): String = mmlangPrinter.instString(inst)
    override def printStrm(strm: Strm[Obj]): String = mmlangPrinter.strmString(strm)
  }
}
Example 9
Source File: SparkSessionInitializer.scala from seahorse with Apache License 2.0
package ai.deepsense.sparkutils.spi

import java.util.ServiceLoader

import org.apache.spark.sql.SparkSession

import scala.collection.JavaConversions._

trait SparkSessionInitializer {
  def init(sparkSession: SparkSession): Unit
}

object SparkSessionInitializer {

  def apply(sparkSession: SparkSession): SparkSession = {
    val initializers = ServiceLoader.load(classOf[SparkSessionInitializer])
    for (initter <- initializers) {
      initter.init(sparkSession)
    }
    sparkSession
  }
}
Example 10
Source File: YARNHadoopDelegationTokenManager.scala from Spark-2.3.1 with Apache License 2.0
package org.apache.spark.deploy.yarn.security

import java.util.ServiceLoader

import scala.collection.JavaConverters._

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.security.Credentials

import org.apache.spark.SparkConf
import org.apache.spark.deploy.security.HadoopDelegationTokenManager
import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils

// ... (the enclosing class declaration and the fields it uses, such as sparkConf,
// delegationTokenManager, and credentialProviders, are omitted in this excerpt)

  def obtainDelegationTokens(hadoopConf: Configuration, creds: Credentials): Long = {
    val superInterval = delegationTokenManager.obtainDelegationTokens(hadoopConf, creds)

    credentialProviders.values.flatMap { provider =>
      if (provider.credentialsRequired(hadoopConf)) {
        provider.obtainCredentials(hadoopConf, sparkConf, creds)
      } else {
        logDebug(s"Service ${provider.serviceName} does not require a token." +
          s" Check your configuration to see if security is disabled or not.")
        None
      }
    }.foldLeft(superInterval)(math.min)
  }

  private def getCredentialProviders: Map[String, ServiceCredentialProvider] = {
    val providers = loadCredentialProviders

    providers.
      filter { p => delegationTokenManager.isServiceEnabled(p.serviceName) }
      .map { p => (p.serviceName, p) }
      .toMap
  }

  private def loadCredentialProviders: List[ServiceCredentialProvider] = {
    ServiceLoader.load(classOf[ServiceCredentialProvider], Utils.getContextOrSparkClassLoader)
      .asScala
      .toList
  }
}
Example 11
Source File: DownloadableFile.scala from polynote with Apache License 2.0
package polynote.kernel.util

import java.io.{File, FileInputStream, InputStream}
import java.net.{HttpURLConnection, URI}
import java.util.ServiceLoader

import scala.collection.JavaConverters._

import cats.effect.IO
import zio.{RIO, ZIO}
import zio.blocking.{Blocking, effectBlocking}

trait DownloadableFile {
  def openStream: IO[InputStream]
  def size: IO[Long]
}

trait DownloadableFileProvider {
  def getFile(uri: URI): Option[DownloadableFile] = provide.lift(uri)

  def provide: PartialFunction[URI, DownloadableFile]

  def protocols: Seq[String]

  object Supported {
    def unapply(arg: URI): Option[URI] = {
      Option(arg.getScheme).flatMap(scheme => protocols.find(_ == scheme)).map(_ => arg)
    }
  }
}

object DownloadableFileProvider {
  private lazy val unsafeLoad =
    ServiceLoader.load(classOf[DownloadableFileProvider]).iterator.asScala.toList

  def isSupported(uri: URI): RIO[Blocking, Boolean] = effectBlocking(unsafeLoad).map { providers =>
    Option(uri.getScheme).exists(providers.flatMap(_.protocols).contains)
  }

  def getFile(uri: URI): ZIO[Blocking, Throwable, DownloadableFile] = {
    effectBlocking(unsafeLoad).map { providers =>
      for {
        scheme   <- Option(uri.getScheme)
        provider <- providers.find(_.protocols.contains(scheme))
        file     <- provider.getFile(uri)
      } yield file
    }.someOrFail(new Exception(s"Unable to find provider for uri $uri"))
  }
}

class HttpFileProvider extends DownloadableFileProvider {
  override def protocols: Seq[String] = Seq("http", "https")

  override def provide: PartialFunction[URI, DownloadableFile] = {
    case Supported(uri) => HTTPFile(uri)
  }
}

case class HTTPFile(uri: URI) extends DownloadableFile {
  override def openStream: IO[InputStream] = IO(uri.toURL.openStream())

  override def size: IO[Long] =
    IO(uri.toURL.openConnection().asInstanceOf[HttpURLConnection]).bracket { conn =>
      IO {
        conn.setRequestMethod("HEAD")
        conn.getContentLengthLong
      }
    } { conn => IO(conn.disconnect()) }
}

class LocalFileProvider extends DownloadableFileProvider {
  override def protocols: Seq[String] = Seq("file")

  override def provide: PartialFunction[URI, DownloadableFile] = {
    case Supported(uri) => LocalFile(uri)
  }
}

case class LocalFile(uri: URI) extends DownloadableFile {
  lazy val file = new File(uri)

  override def openStream: IO[InputStream] = IO(new FileInputStream(file))

  override def size: IO[Long] = IO.pure(file.length())
}
Example 12
Source File: Glow.scala from glow with Apache License 2.0
package io.projectglow

import java.util.ServiceLoader

import scala.collection.JavaConverters._

import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import org.apache.spark.sql.{DataFrame, SQLUtils, SparkSession}

import io.projectglow.common.Named
import io.projectglow.sql.{GlowSQLExtensions, SqlExtensionProvider}
import io.projectglow.transformers.util.{SnakeCaseMap, StringUtils}

// ... (the enclosing class declaration and its mapper field are omitted in this excerpt)

  def transform(operationName: String, df: DataFrame, options: Map[String, Any]): DataFrame = {
    val stringValuedMap = options.mapValues {
      case s: String => s
      case v => mapper.writeValueAsString(v)
    }.map(identity) // output of mapValues is not serializable: https://github.com/scala/bug/issues/7005
    lookupTransformer(operationName) match {
      case Some(transformer) => transformer.transform(df, new SnakeCaseMap(stringValuedMap))
      case None => throw new IllegalArgumentException(s"No transformer with name $operationName")
    }
  }

  def transform(operationName: String, df: DataFrame, options: (String, Any)*): DataFrame = {
    transform(operationName, df, options.toMap)
  }

  def transform(
      operationName: String,
      df: DataFrame,
      options: java.util.Map[String, String]): DataFrame = {
    transform(operationName, df, options.asScala.toMap)
  }

  private def lookupTransformer(name: String): Option[DataFrameTransformer] = synchronized {
    transformerLoader.reload()
    transformerLoader
      .iterator()
      .asScala
      .find(n => StringUtils.toSnakeCase(n.name) == StringUtils.toSnakeCase(name))
  }

  private val transformerLoader = ServiceLoader
    .load(classOf[DataFrameTransformer])
}

object Glow extends GlowBase

trait DataFrameTransformer extends Named {
  def transform(df: DataFrame, options: Map[String, String]): DataFrame
}
Example 13
Source File: BigFileDatasource.scala from glow with Apache License 2.0
package io.projectglow.sql

import java.net.URI
import java.util.ServiceLoader

import scala.collection.JavaConverters._

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{DataFrame, SQLContext, SaveMode}

import io.projectglow.common.{GlowLogging, WithUtils}

// ... (the enclosing declaration and its uploaders field are omitted in this excerpt)

  def write(rdd: RDD[Array[Byte]], path: String) {
    val uri = new URI(path)
    uploaders.find(_.canUpload(rdd.sparkContext.hadoopConfiguration, path)) match {
      case Some(uploader) => uploader.upload(rdd, path)
      case None =>
        logger.info(s"Could not find a parallel uploader for $path, uploading from the driver")
        writeFileFromDriver(new Path(uri), rdd)
    }
  }

  private def writeFileFromDriver(path: Path, byteRdd: RDD[Array[Byte]]): Unit = {
    val sc = byteRdd.sparkContext
    val fs = path.getFileSystem(sc.hadoopConfiguration)
    WithUtils.withCloseable(fs.create(path)) { stream =>
      WithUtils.withCachedRDD(byteRdd) { cachedRdd =>
        cachedRdd.count()
        cachedRdd.toLocalIterator.foreach { chunk =>
          stream.write(chunk)
        }
      }
    }
  }
}