org.apache.spark.util.ShutdownHookManager Scala Examples
The following examples show how to use org.apache.spark.util.ShutdownHookManager.
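ShutdownHookManager is Spark's internal wrapper around JVM shutdown hooks: hooks are registered with an optional priority, run in priority order (highest first) when the JVM begins to shut down, and can be deregistered via the reference returned at registration. Note that the object is private[spark], so it is only callable from code that lives inside the org.apache.spark package. A minimal sketch of the API, assuming a Spark build where addShutdownHook, removeShutdownHook, and DEFAULT_SHUTDOWN_PRIORITY keep the signatures shown here (the package and object names below are hypothetical):

package org.apache.spark.examples // hypothetical; must sit under org.apache.spark

import org.apache.spark.util.ShutdownHookManager

object ShutdownHookSketch {
  def main(args: Array[String]): Unit = {
    // Register a hook at the default priority; keep the returned reference
    // so the hook can be deregistered later.
    val ref = ShutdownHookManager.addShutdownHook { () =>
      println("JVM is shutting down")
    }

    // Hooks may also carry an explicit priority; higher priorities run first.
    ShutdownHookManager.addShutdownHook(
        ShutdownHookManager.DEFAULT_SHUTDOWN_PRIORITY + 1) { () =>
      println("runs before the default-priority hook")
    }

    // A hook that is no longer needed can be removed by its reference.
    ShutdownHookManager.removeShutdownHook(ref)
  }
}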
Example 1
Source File: MesosClusterDispatcher.scala From drizzle-spark with Apache License 2.0
package org.apache.spark.deploy.mesos

import java.util.concurrent.CountDownLatch

import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.deploy.mesos.ui.MesosClusterUI
import org.apache.spark.deploy.rest.mesos.MesosRestServer
import org.apache.spark.internal.Logging
import org.apache.spark.scheduler.cluster.mesos._
import org.apache.spark.util.{ShutdownHookManager, Utils}

private[mesos] class MesosClusterDispatcher(
    args: MesosClusterDispatcherArguments,
    conf: SparkConf)
  extends Logging {

  private val publicAddress = Option(conf.getenv("SPARK_PUBLIC_DNS")).getOrElse(args.host)
  private val recoveryMode = conf.get("spark.deploy.recoveryMode", "NONE").toUpperCase()
  logInfo("Recovery mode in Mesos dispatcher set to: " + recoveryMode)

  private val engineFactory = recoveryMode match {
    case "NONE" => new BlackHoleMesosClusterPersistenceEngineFactory
    case "ZOOKEEPER" => new ZookeeperMesosClusterPersistenceEngineFactory(conf)
    case _ => throw new IllegalArgumentException("Unsupported recovery mode: " + recoveryMode)
  }

  private val scheduler = new MesosClusterScheduler(engineFactory, conf)

  private val server = new MesosRestServer(args.host, args.port, conf, scheduler)
  private val webUi = new MesosClusterUI(
    new SecurityManager(conf),
    args.webUiPort,
    conf,
    publicAddress,
    scheduler)

  private val shutdownLatch = new CountDownLatch(1)

  def start(): Unit = {
    webUi.bind()
    scheduler.frameworkUrl = conf.get("spark.mesos.dispatcher.webui.url", webUi.activeWebUiUrl)
    scheduler.start()
    server.start()
  }

  def awaitShutdown(): Unit = {
    shutdownLatch.await()
  }

  def stop(): Unit = {
    webUi.stop()
    server.stop()
    scheduler.stop()
    shutdownLatch.countDown()
  }
}

private[mesos] object MesosClusterDispatcher extends Logging {

  def main(args: Array[String]) {
    Utils.initDaemon(log)
    val conf = new SparkConf
    val dispatcherArgs = new MesosClusterDispatcherArguments(args, conf)
    conf.setMaster(dispatcherArgs.masterUrl)
    conf.setAppName(dispatcherArgs.name)
    dispatcherArgs.zookeeperUrl.foreach { z =>
      conf.set("spark.deploy.recoveryMode", "ZOOKEEPER")
      conf.set("spark.deploy.zookeeper.url", z)
    }
    val dispatcher = new MesosClusterDispatcher(dispatcherArgs, conf)
    dispatcher.start()
    logDebug("Adding shutdown hook") // force eager creation of logger
    ShutdownHookManager.addShutdownHook { () =>
      logInfo("Shutdown hook is shutting down dispatcher")
      dispatcher.stop()
      dispatcher.awaitShutdown()
    }
    dispatcher.awaitShutdown()
  }
}
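Both the main thread and the shutdown hook synchronize on the dispatcher's CountDownLatch: main blocks in awaitShutdown(), and the hook calls stop(), which counts the latch down, before waiting on it itself, so the JVM does not exit until the web UI, REST server, and scheduler have all been stopped. The logDebug call just before addShutdownHook forces the logger to be created eagerly, presumably because lazily initializing logging from inside a shutdown hook is unsafe.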
Example 2
Source File: ExternalShuffleService.scala From drizzle-spark with Apache License 2.0
package org.apache.spark.deploy

import java.util.concurrent.CountDownLatch

import scala.collection.JavaConverters._

import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.internal.Logging
import org.apache.spark.metrics.MetricsSystem
import org.apache.spark.network.TransportContext
import org.apache.spark.network.netty.SparkTransportConf
import org.apache.spark.network.sasl.SaslServerBootstrap
import org.apache.spark.network.server.{TransportServer, TransportServerBootstrap}
import org.apache.spark.network.shuffle.ExternalShuffleBlockHandler
import org.apache.spark.network.util.TransportConf
import org.apache.spark.util.{ShutdownHookManager, Utils}

// Companion object; the ExternalShuffleService class itself is defined earlier
// in the same source file. It holds the running service and a latch that keeps
// the JVM alive until the shutdown hook has stopped the server.
object ExternalShuffleService extends Logging {
  @volatile
  private var server: ExternalShuffleService = _

  private val barrier = new CountDownLatch(1)

  def main(args: Array[String]): Unit = {
    main(args, (conf: SparkConf, sm: SecurityManager) => new ExternalShuffleService(conf, sm))
  }

  private[spark] def main(
      args: Array[String],
      newShuffleService: (SparkConf, SecurityManager) => ExternalShuffleService): Unit = {
    Utils.initDaemon(log)
    val sparkConf = new SparkConf
    Utils.loadDefaultSparkProperties(sparkConf)
    val securityManager = new SecurityManager(sparkConf)

    // we override this value since this service is started from the command line
    // and we assume the user really wants it to be running
    sparkConf.set("spark.shuffle.service.enabled", "true")
    server = newShuffleService(sparkConf, securityManager)
    server.start()

    logDebug("Adding shutdown hook") // force eager creation of logger
    ShutdownHookManager.addShutdownHook { () =>
      logInfo("Shutting down shuffle service.")
      server.stop()
      barrier.countDown()
    }

    // keep running until the process is terminated
    barrier.await()
  }
}
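The same latch-based keep-alive pattern works outside Spark's internals with the standard library's sys.addShutdownHook, a thin wrapper over java.lang.Runtime#addShutdownHook. A minimal standalone sketch; startServer and its Closeable stand-in are hypothetical:

import java.util.concurrent.CountDownLatch

object KeepAliveSketch {
  private val barrier = new CountDownLatch(1)

  // hypothetical stand-in for a long-running service
  private def startServer(): java.io.Closeable =
    new java.io.Closeable { def close(): Unit = () }

  def main(args: Array[String]): Unit = {
    val server = startServer()
    sys.addShutdownHook {
      server.close()       // release resources first
      barrier.countDown()  // then let the main thread finish
    }
    barrier.await()        // block until the JVM begins to shut down
  }
}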
Example 3
Source File: MesosClusterDispatcher.scala From sparkoscope with Apache License 2.0
package org.apache.spark.deploy.mesos

import java.util.concurrent.CountDownLatch

import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.deploy.mesos.config._
import org.apache.spark.deploy.mesos.ui.MesosClusterUI
import org.apache.spark.deploy.rest.mesos.MesosRestServer
import org.apache.spark.internal.Logging
import org.apache.spark.scheduler.cluster.mesos._
import org.apache.spark.util.{ShutdownHookManager, Utils}

private[mesos] class MesosClusterDispatcher(
    args: MesosClusterDispatcherArguments,
    conf: SparkConf)
  extends Logging {

  private val publicAddress = Option(conf.getenv("SPARK_PUBLIC_DNS")).getOrElse(args.host)
  private val recoveryMode = conf.get(RECOVERY_MODE).toUpperCase()
  logInfo("Recovery mode in Mesos dispatcher set to: " + recoveryMode)

  private val engineFactory = recoveryMode match {
    case "NONE" => new BlackHoleMesosClusterPersistenceEngineFactory
    case "ZOOKEEPER" => new ZookeeperMesosClusterPersistenceEngineFactory(conf)
    case _ => throw new IllegalArgumentException("Unsupported recovery mode: " + recoveryMode)
  }

  private val scheduler = new MesosClusterScheduler(engineFactory, conf)

  private val server = new MesosRestServer(args.host, args.port, conf, scheduler)
  private val webUi = new MesosClusterUI(
    new SecurityManager(conf),
    args.webUiPort,
    conf,
    publicAddress,
    scheduler)

  private val shutdownLatch = new CountDownLatch(1)

  def start(): Unit = {
    webUi.bind()
    scheduler.frameworkUrl = conf.get(DISPATCHER_WEBUI_URL).getOrElse(webUi.activeWebUiUrl)
    scheduler.start()
    server.start()
  }

  def awaitShutdown(): Unit = {
    shutdownLatch.await()
  }

  def stop(): Unit = {
    webUi.stop()
    server.stop()
    scheduler.stop()
    shutdownLatch.countDown()
  }
}

private[mesos] object MesosClusterDispatcher extends Logging {

  def main(args: Array[String]) {
    Utils.initDaemon(log)
    val conf = new SparkConf
    val dispatcherArgs = new MesosClusterDispatcherArguments(args, conf)
    conf.setMaster(dispatcherArgs.masterUrl)
    conf.setAppName(dispatcherArgs.name)
    dispatcherArgs.zookeeperUrl.foreach { z =>
      conf.set(RECOVERY_MODE, "ZOOKEEPER")
      conf.set(ZOOKEEPER_URL, z)
    }
    val dispatcher = new MesosClusterDispatcher(dispatcherArgs, conf)
    dispatcher.start()
    logDebug("Adding shutdown hook") // force eager creation of logger
    ShutdownHookManager.addShutdownHook { () =>
      logInfo("Shutdown hook is shutting down dispatcher")
      dispatcher.stop()
      dispatcher.awaitShutdown()
    }
    dispatcher.awaitShutdown()
  }
}
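This is the same dispatcher as Example 1; the only difference is that configuration is read through the typed entries in org.apache.spark.deploy.mesos.config (RECOVERY_MODE, DISPATCHER_WEBUI_URL, ZOOKEEPER_URL) rather than raw string keys. The shutdown-hook wiring is unchanged.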
Example 4
Source File: MesosClusterDispatcher.scala From multi-tenancy-spark with Apache License 2.0
package org.apache.spark.deploy.mesos

import java.util.concurrent.CountDownLatch

import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.deploy.mesos.config._
import org.apache.spark.deploy.mesos.ui.MesosClusterUI
import org.apache.spark.deploy.rest.mesos.MesosRestServer
import org.apache.spark.internal.Logging
import org.apache.spark.scheduler.cluster.mesos._
import org.apache.spark.util.{ShutdownHookManager, Utils}

private[mesos] class MesosClusterDispatcher(
    args: MesosClusterDispatcherArguments,
    conf: SparkConf)
  extends Logging {

  private val publicAddress = Option(conf.getenv("SPARK_PUBLIC_DNS")).getOrElse(args.host)
  private val recoveryMode = conf.get(RECOVERY_MODE).toUpperCase()
  logInfo("Recovery mode in Mesos dispatcher set to: " + recoveryMode)

  private val engineFactory = recoveryMode match {
    case "NONE" => new BlackHoleMesosClusterPersistenceEngineFactory
    case "ZOOKEEPER" => new ZookeeperMesosClusterPersistenceEngineFactory(conf)
    case _ => throw new IllegalArgumentException("Unsupported recovery mode: " + recoveryMode)
  }

  private val scheduler = new MesosClusterScheduler(engineFactory, conf)

  private val server = new MesosRestServer(args.host, args.port, conf, scheduler)
  private val webUi = new MesosClusterUI(
    new SecurityManager(conf),
    args.webUiPort,
    conf,
    publicAddress,
    scheduler)

  private val shutdownLatch = new CountDownLatch(1)

  def start(): Unit = {
    webUi.bind()
    scheduler.frameworkUrl = conf.get(DISPATCHER_WEBUI_URL).getOrElse(webUi.activeWebUiUrl)
    scheduler.start()
    server.start()
  }

  def awaitShutdown(): Unit = {
    shutdownLatch.await()
  }

  def stop(): Unit = {
    webUi.stop()
    server.stop()
    scheduler.stop()
    shutdownLatch.countDown()
  }
}

private[mesos] object MesosClusterDispatcher extends Logging {

  def main(args: Array[String]) {
    Utils.initDaemon(log)
    val conf = new SparkConf
    val dispatcherArgs = new MesosClusterDispatcherArguments(args, conf)
    conf.setMaster(dispatcherArgs.masterUrl)
    conf.setAppName(dispatcherArgs.name)
    dispatcherArgs.zookeeperUrl.foreach { z =>
      conf.set(RECOVERY_MODE, "ZOOKEEPER")
      conf.set(ZOOKEEPER_URL, z)
    }
    val dispatcher = new MesosClusterDispatcher(dispatcherArgs, conf)
    dispatcher.start()
    logDebug("Adding shutdown hook") // force eager creation of logger
    ShutdownHookManager.addShutdownHook { () =>
      logInfo("Shutdown hook is shutting down dispatcher")
      dispatcher.stop()
      dispatcher.awaitShutdown()
    }
    dispatcher.awaitShutdown()
  }
}
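Apart from the project it comes from, this listing is identical to Example 3.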
Example 5
Source File: MesosClusterDispatcher.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.deploy.mesos

import java.util.concurrent.CountDownLatch

import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.deploy.mesos.config._
import org.apache.spark.deploy.mesos.ui.MesosClusterUI
import org.apache.spark.deploy.rest.mesos.MesosRestServer
import org.apache.spark.internal.Logging
import org.apache.spark.scheduler.cluster.mesos._
import org.apache.spark.util.{CommandLineUtils, ShutdownHookManager, SparkUncaughtExceptionHandler, Utils}

private[mesos] class MesosClusterDispatcher(
    args: MesosClusterDispatcherArguments,
    conf: SparkConf)
  extends Logging {

  private val publicAddress = Option(conf.getenv("SPARK_PUBLIC_DNS")).getOrElse(args.host)
  private val recoveryMode = conf.get(RECOVERY_MODE).toUpperCase()
  logInfo("Recovery mode in Mesos dispatcher set to: " + recoveryMode)

  private val engineFactory = recoveryMode match {
    case "NONE" => new BlackHoleMesosClusterPersistenceEngineFactory
    case "ZOOKEEPER" => new ZookeeperMesosClusterPersistenceEngineFactory(conf)
    case _ => throw new IllegalArgumentException("Unsupported recovery mode: " + recoveryMode)
  }

  private val scheduler = new MesosClusterScheduler(engineFactory, conf)

  private val server = new MesosRestServer(args.host, args.port, conf, scheduler)
  private val webUi = new MesosClusterUI(
    new SecurityManager(conf),
    args.webUiPort,
    conf,
    publicAddress,
    scheduler)

  private val shutdownLatch = new CountDownLatch(1)

  def start(): Unit = {
    webUi.bind()
    scheduler.frameworkUrl = conf.get(DISPATCHER_WEBUI_URL).getOrElse(webUi.activeWebUiUrl)
    scheduler.start()
    server.start()
  }

  def awaitShutdown(): Unit = {
    shutdownLatch.await()
  }

  def stop(): Unit = {
    webUi.stop()
    server.stop()
    scheduler.stop()
    shutdownLatch.countDown()
  }
}

private[mesos] object MesosClusterDispatcher
  extends Logging
  with CommandLineUtils {

  override def main(args: Array[String]) {
    Thread.setDefaultUncaughtExceptionHandler(new SparkUncaughtExceptionHandler)
    Utils.initDaemon(log)
    val conf = new SparkConf
    val dispatcherArgs = new MesosClusterDispatcherArguments(args, conf)
    conf.setMaster(dispatcherArgs.masterUrl)
    conf.setAppName(dispatcherArgs.name)
    dispatcherArgs.zookeeperUrl.foreach { z =>
      conf.set(RECOVERY_MODE, "ZOOKEEPER")
      conf.set(ZOOKEEPER_URL, z)
    }
    val dispatcher = new MesosClusterDispatcher(dispatcherArgs, conf)
    dispatcher.start()
    logDebug("Adding shutdown hook") // force eager creation of logger
    ShutdownHookManager.addShutdownHook { () =>
      logInfo("Shutdown hook is shutting down dispatcher")
      dispatcher.stop()
      dispatcher.awaitShutdown()
    }
    dispatcher.awaitShutdown()
  }
}
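This Spark 2.3.1 variant mixes CommandLineUtils into the companion object and installs a SparkUncaughtExceptionHandler before doing anything else; because that handler exits the JVM on an uncaught fatal error, the shutdown hook registered below still runs and the dispatcher is stopped cleanly even on unexpected failures.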