org.apache.curator.framework.CuratorFrameworkFactory Scala Examples
The following examples show how to use org.apache.curator.framework.CuratorFrameworkFactory.
Each example is drawn from an open-source project; the source file and its license are noted above the example.
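Before the project-specific examples, here is a minimal sketch of the two factory entry points that recur below: newClient for a quickly configured client, and builder() for fine-grained settings. The connect string localhost:2181 is a placeholder.

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry

object CuratorQuickStart {
  def main(args: Array[String]): Unit = {
    // Simple form: connect string plus a retry policy.
    val simple: CuratorFramework = CuratorFrameworkFactory.newClient(
      "localhost:2181", new ExponentialBackoffRetry(1000, 3))
    simple.start()
    simple.close()

    // Fluent form: the builder also exposes timeouts, namespaces, auth and ACLs.
    val configured: CuratorFramework = CuratorFrameworkFactory.builder()
      .connectString("localhost:2181")
      .sessionTimeoutMs(60000)
      .connectionTimeoutMs(15000)
      .retryPolicy(new ExponentialBackoffRetry(1000, 3))
      .build()
    configured.start()
    configured.close()
  }
}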
Example 1
Source File: SparkCuratorUtil.scala From iolap with Apache License 2.0
package org.apache.spark.deploy

import scala.collection.JavaConversions._

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException

import org.apache.spark.{Logging, SparkConf}

private[spark] object SparkCuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(
      conf: SparkConf,
      zkUrlConf: String = "spark.deploy.zookeeper.url"): CuratorFramework = {
    val ZK_URL = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path)) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
}
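A hypothetical caller of this helper, placed in the same package since the object is private[spark]; the ZooKeeper quorum and paths are placeholders.

package org.apache.spark.deploy

import org.apache.spark.SparkConf

object SparkCuratorUtilDemo {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().set("spark.deploy.zookeeper.url", "zk1:2181,zk2:2181")
    val zk = SparkCuratorUtil.newClient(conf)
    SparkCuratorUtil.mkdir(zk, "/demo/status")    // no-op if the znode already exists
    SparkCuratorUtil.deleteRecursive(zk, "/demo") // deletes /demo and its direct children
    zk.close()
  }
}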
Example 2
Source File: CuratorUtil.scala From aloha with Apache License 2.0
package me.jrwang.aloha.scheduler.master.zookeeper

import scala.collection.JavaConverters._

import me.jrwang.aloha.common.{AlohaConf, Logging}
import me.jrwang.aloha.scheduler._
import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException

object CuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(
      conf: AlohaConf,
      zkUrlConf: String = ZOOKEEPER_URL.key): CuratorFramework = {
    val ZK_URL = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path).asScala) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
}
Example 3
Source File: DruidEventBeam.scala From gimel with Apache License 2.0
package com.paypal.gimel.druid.util

import com.metamx.tranquility.beam.{Beam, ClusteredBeamTuning}
import com.metamx.tranquility.druid.{DruidBeams, DruidLocation, DruidRollup, SpecificDruidDimensions}
import com.metamx.tranquility.spark.BeamFactory
import com.metamx.tranquility.typeclass.Timestamper
import io.druid.data.input.impl.TimestampSpec
import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.retry.BoundedExponentialBackoffRetry
import org.joda.time.{DateTime, DateTimeZone, Period}

import com.paypal.gimel.druid.conf.DruidClientConfiguration

// NOTE: the snippet omitted the enclosing object; the config holder, init()
// and the implicit Timestamper below are a minimal reconstruction of it.
object DruidEventBeam {

  // Client configuration; must be set once via init() before BeamInstance is used.
  var druidConfig: DruidClientConfiguration = _

  def init(config: DruidClientConfiguration): Unit = {
    if (druidConfig == null) {
      druidConfig = config
    }
  }

  // Tranquility needs an implicit Timestamper to pull the event time out of
  // each record; this one is assumed to read the configured timestamp field.
  implicit val timestamper = new Timestamper[Map[String, Any]] {
    override def timestamp(event: Map[String, Any]): DateTime =
      new DateTime(event(druidConfig.timestamp_field), DateTimeZone.UTC)
  }

  lazy val BeamInstance: Beam[Map[String, Any]] = {
    // Tranquility uses ZooKeeper (through Curator framework) for coordination.
    val curator = CuratorFrameworkFactory.newClient(
      druidConfig.zookeeper,
      new BoundedExponentialBackoffRetry(100, 3000, 5)
    )
    curator.start()

    // Transforms List[DruidDimensions] from the DruidClientConfiguration to List[String]
    val dimensions = druidConfig
      .dimensions
      .map(_.name)

    // Transforms List[DruidMetrics] from the DruidClientConfiguration to List[AggregatorFactory]
    val aggregators = druidConfig
      .metrics
      .map(_.getAggregator)

    // Building a Druid Beam
    DruidBeams
      .builder()
      .curator(curator)
      .discoveryPath(druidConfig.discoveryPath)
      .location(DruidLocation.create(druidConfig.indexService, druidConfig.datasource))
      .rollup(DruidRollup(SpecificDruidDimensions(dimensions),
        aggregators, DruidUtility.fetchQueryGranularity(druidConfig.queryGranularity)))
      .tuning(
        ClusteredBeamTuning(
          segmentGranularity = druidConfig.segmentGranularity,
          windowPeriod = new Period(druidConfig.windowPeriod),
          partitions = druidConfig.numPartitions,
          replicants = druidConfig.numReplicants
        )
      )
      .timestampSpec(new TimestampSpec(druidConfig.timestamp_field, "iso", null))
      .buildBeam()
  }
}

class DruidEventBeam(config: DruidClientConfiguration) extends BeamFactory[Map[String, Any]] {
  // Return a singleton, so the same connection is shared across all tasks in the same JVM.
  def makeBeam: Beam[Map[String, Any]] = {
    DruidEventBeam.init(config)
    DruidEventBeam.BeamInstance
  }
}
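For context, a BeamFactory like this is typically consumed through tranquility-spark's BeamRDD syntax. A hedged sketch, assuming a DStream of events and an already-built DruidClientConfiguration (DruidSinkSketch and propagateToDruid are illustrative names):

import com.metamx.tranquility.spark.BeamRDD._
import com.paypal.gimel.druid.conf.DruidClientConfiguration
import org.apache.spark.streaming.dstream.DStream

object DruidSinkSketch {
  // Pushes each micro-batch of events into Druid through the shared Beam.
  def propagateToDruid(events: DStream[Map[String, Any]],
                       druidConf: DruidClientConfiguration): Unit = {
    events.foreachRDD { rdd =>
      rdd.propagate(new DruidEventBeam(druidConf))
    }
  }
}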
Example 4
Source File: Clients.scala From cosmos with Apache License 2.0
package com.mesosphere.cosmos.zookeeper

import com.mesosphere.cosmos.model.ZooKeeperUri
import java.nio.charset.StandardCharsets
import org.apache.curator.framework.CuratorFramework
import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.framework.api.ACLProvider
import org.apache.curator.retry.ExponentialBackoffRetry
import org.slf4j.Logger
import scala.annotation.tailrec
import scala.collection.JavaConverters._

object Clients {
  val logger: Logger = org.slf4j.LoggerFactory.getLogger(getClass)

  val retries = 3
  val baseSleepTimeMs = 1000

  def createAndInitialize(zkUri: ZooKeeperUri): CuratorFramework = {
    createAndInitialize(
      zkUri = zkUri,
      zkCredentials = sys.env.get("ZOOKEEPER_USER").zip(sys.env.get("ZOOKEEPER_SECRET")).headOption
    )
  }

  def createAndInitialize(
    zkUri: ZooKeeperUri,
    zkCredentials: Option[(String, String)]
  ): CuratorFramework = {
    val zkClientBuilder = CuratorFrameworkFactory
      .builder()
      .connectString(zkUri.connectString)
      .retryPolicy(new ExponentialBackoffRetry(baseSleepTimeMs, retries))

    val authInfo = zkCredentials.map {
      case (user, secret) =>
        (
          s"$user:$secret".getBytes(StandardCharsets.UTF_8),
          CosmosAclProvider(user, secret)
        )
    }

    authInfo.foreach {
      case (authBytes, aclProvider) =>
        logger.info("Enabling authorization and ACL provider for ZooKeeper client")
        zkClientBuilder
          .authorization("digest", authBytes)
          .aclProvider(aclProvider)
    }

    val zkClient = zkClientBuilder.build

    // Start the client
    zkClient.start()

    authInfo.foreach {
      case (_, aclProvider) =>
        updateAcls(zkUri, zkClient, aclProvider)
    }

    zkClient
  }

  private[this] def updateAcls(
    zkUri: ZooKeeperUri,
    zkClient: CuratorFramework,
    aclProvider: ACLProvider
  ): Unit = {
    updateAcls(
      zkClient,
      aclProvider,
      zkClient.getChildren.forPath(zkUri.path).asScala.toList.map(zkUri.path + "/" + _)
    )
  }

  @tailrec
  private[this] def updateAcls(
    zkClient: CuratorFramework,
    aclProvider: ACLProvider,
    paths: List[String]
  ): Unit = {
    paths match {
      case path :: restOfPaths =>
        logger.info("Updating ZNode ACL during initialization: {}", path)
        zkClient
          .setACL()
          .withACL(aclProvider.getAclForPath(path))
          .forPath(path)

        val childrenPaths = zkClient.getChildren.forPath(path).asScala.toList.map { child =>
          path + "/" + child
        }

        updateAcls(zkClient, aclProvider, childrenPaths ++ restOfPaths)
      case Nil =>
        // No paths left. Nothing to do.
    }
  }
}
Example 5
Source File: ZkClusterInitTest.scala From squbs with Apache License 2.0
package org.squbs.cluster

import akka.util.ByteString
import com.typesafe.scalalogging.LazyLogging
import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.CreateMode
import org.squbs.cluster.test.{ZkClusterMultiActorSystemTestKit, ZkClusterTestHelper}

import scala.language.implicitConversions

class ZkClusterInitTest extends ZkClusterMultiActorSystemTestKit("ZkClusterInitTest")
  with LazyLogging with ZkClusterTestHelper {

  val par1 = ByteString("myPar1")
  val par2 = ByteString("myPar2")
  val par3 = ByteString("myPar3")

  implicit val log = logger
  implicit def string2ByteArray(s: String): Array[Byte] = s.toCharArray map (c => c.toByte)
  implicit def byteArray2String(array: Array[Byte]): String = array.map(_.toChar).mkString

  override def beforeAll(): Unit = {
    // Don't need to start the cluster for now
    // We preset the data in Zookeeper instead.
    val zkClient = CuratorFrameworkFactory.newClient(
      zkConfig.getString("zkCluster.connectionString"),
      new ExponentialBackoffRetry(ZkCluster.DEFAULT_BASE_SLEEP_TIME_MS, ZkCluster.DEFAULT_MAX_RETRIES)
    )
    zkClient.start()
    zkClient.blockUntilConnected()

    implicit val zkClientWithNS = zkClient.usingNamespace(zkConfig.getString("zkCluster.namespace"))

    guarantee("/leader", Some(Array[Byte]()), CreateMode.PERSISTENT)
    guarantee("/members", Some(Array[Byte]()), CreateMode.PERSISTENT)
    guarantee("/segments", Some(Array[Byte]()), CreateMode.PERSISTENT)
    guarantee("/segments/segment-0", Some(Array[Byte]()), CreateMode.PERSISTENT)

    guarantee(s"/segments/segment-0/${keyToPath(par1)}", Some("myPar1"), CreateMode.PERSISTENT)
    guarantee(s"/segments/segment-0/${keyToPath(par1)}/servants", None, CreateMode.PERSISTENT)
    guarantee(s"/segments/segment-0/${keyToPath(par1)}/$$size", Some(3), CreateMode.PERSISTENT)

    guarantee(s"/segments/segment-0/${keyToPath(par2)}", Some("myPar2"), CreateMode.PERSISTENT)
    guarantee(s"/segments/segment-0/${keyToPath(par2)}/servants", None, CreateMode.PERSISTENT)
    guarantee(s"/segments/segment-0/${keyToPath(par2)}/$$size", Some(3), CreateMode.PERSISTENT)

    guarantee(s"/segments/segment-0/${keyToPath(par3)}", Some("myPar3"), CreateMode.PERSISTENT)
    guarantee(s"/segments/segment-0/${keyToPath(par3)}/servants", None, CreateMode.PERSISTENT)
    guarantee(s"/segments/segment-0/${keyToPath(par3)}/$$size", Some(3), CreateMode.PERSISTENT)

    zkClient.close()
  }

  "ZkCluster" should "list the partitions" in {
    startCluster()
    zkClusterExts foreach {
      case (_, ext) =>
        ext tell (ZkListPartitions(ext.zkAddress), self)
        expectMsgType[ZkPartitions](timeout)
    }
  }

  "ZkCluster" should "load persisted partition information and sync across the cluster" in {
    zkClusterExts foreach {
      case (_, ext) =>
        ext tell (ZkQueryPartition(par1), self)
        expectMsgType[ZkPartition](timeout).members should have size 3
    }
    zkClusterExts foreach {
      case (_, ext) =>
        ext tell (ZkQueryPartition(par2), self)
        expectMsgType[ZkPartition](timeout).members should have size 3
    }
    zkClusterExts foreach {
      case (_, ext) =>
        ext tell (ZkQueryPartition(par3), self)
        expectMsgType[ZkPartition](timeout).members should have size 3
    }
  }

  "ZkCluster" should "list all the members across the cluster" in {
    val members = zkClusterExts.map(_._2.zkAddress).toSet
    zkClusterExts foreach {
      case (_, ext) =>
        ext tell (ZkQueryMembership, self)
        expectMsgType[ZkMembership](timeout).members should be (members)
    }
  }
}
Example 6
Source File: SparkCuratorUtil.scala From BigDatalog with Apache License 2.0
package org.apache.spark.deploy

import scala.collection.JavaConverters._

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException

import org.apache.spark.{Logging, SparkConf}

private[spark] object SparkCuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(
      conf: SparkConf,
      zkUrlConf: String = "spark.deploy.zookeeper.url"): CuratorFramework = {
    val ZK_URL = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path).asScala) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
}
Example 7
Source File: SparkCuratorUtil.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.deploy

import scala.collection.JavaConverters._

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException

import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging

private[spark] object SparkCuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(
      conf: SparkConf,
      zkUrlConf: String = "spark.deploy.zookeeper.url"): CuratorFramework = {
    val ZK_URL = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path).asScala) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
}
Example 8
Source File: SparkCuratorUtil.scala From spark1.52 with Apache License 2.0
package org.apache.spark.deploy

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.spark.{Logging, SparkConf}
import org.apache.zookeeper.KeeperException

import scala.collection.JavaConversions._

private[spark] object SparkCuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(
      conf: SparkConf,
      // ZooKeeper cluster URL
      zkUrlConf: String = "spark.deploy.zookeeper.url"): CuratorFramework = {
    val ZK_URL = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path)) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
}
Example 9
Source File: ZookeeperFunSuite.scala From kyuubi with Apache License 2.0
package yaooqinn.kyuubi.ha

import com.google.common.io.Files
import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.curator.test.TestingServer
import org.apache.spark.{KyuubiConf, KyuubiSparkUtil, SparkConf, SparkFunSuite}
import org.apache.spark.KyuubiConf._

trait ZookeeperFunSuite extends SparkFunSuite {

  var zkServer: TestingServer = _
  var connectString: String = _
  val conf = new SparkConf(loadDefaults = true)
  KyuubiSparkUtil.setupCommonConfig(conf)
  conf.set(KyuubiConf.FRONTEND_BIND_PORT.key, "0")

  var zooKeeperClient: CuratorFramework = _

  override def beforeAll(): Unit = {
    zkServer = new TestingServer(2181, Files.createTempDir(), true)
    connectString = zkServer.getConnectString
    conf.set(HA_ZOOKEEPER_QUORUM.key, connectString)
    conf.set(HA_ZOOKEEPER_CONNECTION_BASESLEEPTIME.key, "100ms")
    conf.set(HA_ZOOKEEPER_SESSION_TIMEOUT.key, "15s")
    conf.set(HA_ZOOKEEPER_CONNECTION_MAX_RETRIES.key, "0")
    zooKeeperClient = CuratorFrameworkFactory.builder().connectString(connectString)
      .retryPolicy(new ExponentialBackoffRetry(1000, 3))
      .build()
    zooKeeperClient.start()
    super.beforeAll()
  }

  override def afterAll(): Unit = {
    Option(zooKeeperClient).foreach(_.close())
    Option(zkServer).foreach(_.stop())
    System.clearProperty(HA_ZOOKEEPER_QUORUM.key)
    System.clearProperty(HA_ENABLED.key)
    super.afterAll()
  }
}
Example 10
Source File: EmbeddedZkServerSuite.scala From kyuubi with Apache License 2.0
package org.apache.kyuubi.ha.server

import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.framework.imps.CuratorFrameworkState
import org.apache.curator.retry.ExponentialBackoffRetry

import org.apache.kyuubi.KyuubiFunSuite
import org.apache.kyuubi.config.KyuubiConf
import org.apache.kyuubi.service.ServiceState._

class EmbeddedZkServerSuite extends KyuubiFunSuite {

  test("embedded zookeeper server") {
    val zkServer = new EmbeddedZkServer()
    assert(zkServer.getConf == null)
    assert(zkServer.getName === zkServer.getClass.getSimpleName)
    assert(zkServer.getServiceState === LATENT)

    val conf = KyuubiConf()
    zkServer.initialize(conf)
    assert(zkServer.getConf === conf)
    assert(zkServer.getServiceState === INITIALIZED)
    assert(zkServer.getConnectString.endsWith("2181"))
    assert(zkServer.getStartTime === 0)

    zkServer.start()
    assert(zkServer.getServiceState === STARTED)
    assert(zkServer.getConnectString.endsWith("2181"))
    assert(zkServer.getStartTime !== 0)

    zkServer.stop()
    assert(zkServer.getServiceState === STOPPED)
  }

  test("connect test with embedded zookeeper") {
    val zkServer = new EmbeddedZkServer()
    zkServer.initialize(KyuubiConf())
    zkServer.start()

    val zkClient = CuratorFrameworkFactory.builder()
      .connectString(zkServer.getConnectString)
      .sessionTimeoutMs(5000)
      .retryPolicy(new ExponentialBackoffRetry(1000, 3))
      .build
    zkClient.start()

    assert(zkClient.getState === CuratorFrameworkState.STARTED)
    assert(zkClient.getZookeeperClient.blockUntilConnectedOrTimedOut())
  }
}
Example 11
Source File: SparkCuratorUtil.scala From drizzle-spark with Apache License 2.0
package org.apache.spark.deploy

import scala.collection.JavaConverters._

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException

import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging

private[spark] object SparkCuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(
      conf: SparkConf,
      zkUrlConf: String = "spark.deploy.zookeeper.url"): CuratorFramework = {
    val ZK_URL = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path).asScala) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
}
Example 12
Source File: SparkCuratorUtil.scala From multi-tenancy-spark with Apache License 2.0
package org.apache.spark.deploy

import scala.collection.JavaConverters._

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException

import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging

private[spark] object SparkCuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(
      conf: SparkConf,
      zkUrlConf: String = "spark.deploy.zookeeper.url"): CuratorFramework = {
    val ZK_URL = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path).asScala) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
}
Example 13
Source File: DLock.scala From etl-light with MIT License
package yamrcraft.etlite.utils

import java.util.concurrent.TimeUnit

import org.apache.curator.framework.recipes.locks.InterProcessSemaphoreMutex
import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.slf4j.LoggerFactory

class DLock(zkConnect: String, lockFile: String, waitForLockSeconds: Int) {
  val logger = LoggerFactory.getLogger(this.getClass)

  private var zkClient: Option[CuratorFramework] = None
  private var lock: Option[InterProcessSemaphoreMutex] = None

  def tryLock(): Boolean = {
    require(lock.isEmpty, "lock can't be reused")
    logger.info("acquiring lock...")
    zkClient = Some(CuratorFrameworkFactory.newClient(zkConnect, new ExponentialBackoffRetry(1000, 3)))
    zkClient.get.start()
    lock = Some(new InterProcessSemaphoreMutex(zkClient.get, lockFile))
    lock.get.acquire(waitForLockSeconds, TimeUnit.SECONDS)
  }

  def release() = {
    require(lock.nonEmpty, "lock wasn't acquired")
    logger.info("releasing lock")
    lock.foreach(_.release())
    zkClient.foreach(_.close())
  }
}

class FakeLock extends DLock("", "", 0) {
  override def tryLock() = true
  override def release() = {}
}
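A hypothetical caller of DLock using a try/finally guard; the connect string and lock path are placeholders.

object DLockDemo {
  def main(args: Array[String]): Unit = {
    val lock = new DLock("zk1:2181", "/locks/etl-job", 30)
    if (lock.tryLock()) {
      try {
        // critical section: at most one process across the cluster runs this at a time
      } finally {
        lock.release() // also closes the underlying Curator client
      }
    } else {
      // another process holds the lock; skip this run or retry later
    }
  }
}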
Example 14
Source File: SparkCuratorUtil.scala From SparkCore with Apache License 2.0
package org.apache.spark.deploy.master

import scala.collection.JavaConversions._

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException

import org.apache.spark.{Logging, SparkConf}

object SparkCuratorUtil extends Logging {

  val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  val ZK_SESSION_TIMEOUT_MILLIS = 60000
  val RETRY_WAIT_MILLIS = 5000
  val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(conf: SparkConf): CuratorFramework = {
    val ZK_URL = conf.get("spark.deploy.zookeeper.url")
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path)) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
}
Example 15
Source File: SparkCuratorUtil.scala From sparkoscope with Apache License 2.0
package org.apache.spark.deploy

import scala.collection.JavaConverters._

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException

import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging

private[spark] object SparkCuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(
      conf: SparkConf,
      zkUrlConf: String = "spark.deploy.zookeeper.url"): CuratorFramework = {
    val ZK_URL = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path).asScala) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
}
Example 16
Source File: ZooKeeperManager.scala From incubator-livy with Apache License 2.0
package org.apache.livy.server.recovery

import scala.collection.JavaConverters._
import scala.reflect.ClassTag

import org.apache.curator.framework.api.UnhandledErrorListener
import org.apache.curator.framework.CuratorFramework
import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.retry.RetryNTimes
import org.apache.zookeeper.KeeperException.NoNodeException

import org.apache.livy.LivyConf
import org.apache.livy.Logging
import org.apache.livy.utils.LivyUncaughtException

class ZooKeeperManager(
    livyConf: LivyConf,
    mockCuratorClient: Option[CuratorFramework] = None)
  extends JsonMapper with Logging {

  def this(livyConf: LivyConf) {
    this(livyConf, None)
  }

  private val zkAddress = Option(livyConf.get(LivyConf.ZOOKEEPER_URL)).
    orElse(Option(livyConf.get(LivyConf.RECOVERY_STATE_STORE_URL))).
    map(_.trim).orNull

  require(zkAddress != null && !zkAddress.isEmpty,
    s"Please config ${LivyConf.ZOOKEEPER_URL.key}.")

  private val retryValue = Option(livyConf.get(LivyConf.ZK_RETRY_POLICY)).
    orElse(Option(livyConf.get(LivyConf.RECOVERY_ZK_STATE_STORE_RETRY_POLICY))).
    map(_.trim).orNull

  require(retryValue != null && !retryValue.isEmpty,
    s"Please config ${LivyConf.ZK_RETRY_POLICY.key}.")

  // a regex to match patterns like "m, n" where m and n both are integer values
  private val retryPattern = """\s*(\d+)\s*,\s*(\d+)\s*""".r

  private[recovery] val retryPolicy = retryValue match {
    case retryPattern(n, sleepMs) => new RetryNTimes(n.toInt, sleepMs.toInt)
    case _ => throw new IllegalArgumentException(
      s"contains bad value: $retryValue. " +
        "Correct format is <max retry count>,<sleep ms between retry>. e.g. 5,100")
  }

  private val curatorClient = mockCuratorClient.getOrElse {
    CuratorFrameworkFactory.newClient(zkAddress, retryPolicy)
  }

  curatorClient.getUnhandledErrorListenable().addListener(new UnhandledErrorListener {
    def unhandledError(message: String, e: Throwable): Unit = {
      error(s"Fatal Zookeeper error: ${message}.", e)
      throw new LivyUncaughtException(e.getMessage)
    }
  })

  def start(): Unit = {
    curatorClient.start()
  }

  def stop(): Unit = {
    curatorClient.close()
  }

  // TODO Make sure ZK path has proper secure permissions so that other users cannot read its
  // contents.
  def set(key: String, value: Object): Unit = {
    val data = serializeToBytes(value)
    if (curatorClient.checkExists().forPath(key) == null) {
      curatorClient.create().creatingParentsIfNeeded().forPath(key, data)
    } else {
      curatorClient.setData().forPath(key, data)
    }
  }

  def get[T: ClassTag](key: String): Option[T] = {
    if (curatorClient.checkExists().forPath(key) == null) {
      None
    } else {
      Option(deserialize[T](curatorClient.getData().forPath(key)))
    }
  }

  def getChildren(key: String): Seq[String] = {
    if (curatorClient.checkExists().forPath(key) == null) {
      Seq.empty[String]
    } else {
      curatorClient.getChildren.forPath(key).asScala
    }
  }

  def remove(key: String): Unit = {
    try {
      curatorClient.delete().guaranteed().forPath(key)
    } catch {
      case _: NoNodeException => warn(s"Fail to remove non-existed zookeeper node: ${key}")
    }
  }
}
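A hedged sketch of driving this manager; the paths and payload are illustrative, and set/get round-trip values through the JSON serialization that JsonMapper provides.

def stateStoreDemo(livyConf: LivyConf): Unit = {
  val zkManager = new ZooKeeperManager(livyConf)
  zkManager.start()

  zkManager.set("/livy/demo/42", Map("appId" -> "app-001"))          // create-or-update
  val restored = zkManager.get[Map[String, String]]("/livy/demo/42") // None if the node is absent
  val children = zkManager.getChildren("/livy/demo")                 // empty Seq if the node is absent
  zkManager.remove("/livy/demo/42")                                  // warns if already gone

  zkManager.stop()
}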
Example 17
Source File: CuratorAwareTest.scala From CMAK with Apache License 2.0
package kafka.manager.utils

import org.apache.curator.framework.{CuratorFrameworkFactory, CuratorFramework}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.curator.test.TestingServer
import org.scalatest.{BeforeAndAfterAll, FunSuite}

import scala.reflect.ClassTag

trait CuratorAwareTest extends FunSuite with BeforeAndAfterAll with ZookeeperServerAwareTest {

  private[this] var curator: Option[CuratorFramework] = None

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    val retryPolicy = new ExponentialBackoffRetry(1000, 3)
    val curatorFramework = CuratorFrameworkFactory.newClient(testServer.getConnectString, retryPolicy)
    curatorFramework.start
    curator = Some(curatorFramework)
  }

  override protected def afterAll(): Unit = {
    curator.foreach(_.close())
    super.afterAll()
  }

  protected def withCurator(fn: CuratorFramework => Unit): Unit = {
    curator.foreach(fn)
  }

  protected def produceWithCurator[T](fn: CuratorFramework => T): T = {
    require(curator.isDefined, "Cannot produce with no curator defined!")
    fn(curator.get)
  }

  protected def checkError[T](fn: => Any)(implicit tag: ClassTag[T]): Unit = {
    try {
      fn
      throw new RuntimeException(s"expected ${tag.runtimeClass} , but no exceptions were thrown!")
    } catch {
      case UtilException(caught) =>
        if (!tag.runtimeClass.isAssignableFrom(caught.getClass)) {
          throw new RuntimeException(s"expected ${tag.runtimeClass} , found ${caught.getClass}, value=$caught")
        }
      case throwable: Throwable =>
        throw new RuntimeException(s"expected ${tag.runtimeClass} , found ${throwable.getClass}", throwable)
    }
  }
}
Example 18
Source File: KafkaServerInTest.scala From CMAK with Apache License 2.0
package kafka.manager.utils

import kafka.manager.model.CuratorConfig
import org.apache.curator.framework.{CuratorFrameworkFactory, CuratorFramework}
import org.apache.curator.retry.BoundedExponentialBackoffRetry
import org.scalatest.{BeforeAndAfterAll, FunSuite}

trait KafkaServerInTest extends FunSuite with BeforeAndAfterAll {
  val kafkaServerZkPath: String

  lazy val sharedCurator: CuratorFramework = {
    val config = CuratorConfig(kafkaServerZkPath)
    val curator: CuratorFramework = CuratorFrameworkFactory.newClient(
      config.zkConnect,
      new BoundedExponentialBackoffRetry(config.baseSleepTimeMs, config.maxSleepTimeMs, config.zkMaxRetry))
    curator
  }

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    sharedCurator.start()
  }

  override protected def afterAll(): Unit = {
    sharedCurator.close()
    super.afterAll()
  }
}
Example 19
Source File: CuratorAwareActor.scala From CMAK with Apache License 2.0
package kafka.manager.base

import akka.actor.ActorLogging
import kafka.manager.model.CuratorConfig
import org.apache.curator.RetrySleeper
import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.BoundedExponentialBackoffRetry

import scala.util.Try

class LoggingRetryPolicy(curatorConfig: CuratorConfig, actorLogging: ActorLogging)
  extends BoundedExponentialBackoffRetry(curatorConfig.baseSleepTimeMs,
    curatorConfig.maxSleepTimeMs, curatorConfig.zkMaxRetry) {

  override def allowRetry(retryCount: Int, elapsedTimeMs: Long, sleeper: RetrySleeper): Boolean = {
    actorLogging.log.info(s"retryCount=$retryCount maxRetries=${curatorConfig.zkMaxRetry} zkConnect=${curatorConfig.zkConnect}")
    super.allowRetry(retryCount, elapsedTimeMs, sleeper)
  }
}

trait CuratorAwareActor extends BaseActor {

  protected def curatorConfig: CuratorConfig

  protected[this] val curator: CuratorFramework = getCurator(curatorConfig)
  log.info("Starting curator...")
  curator.start()

  protected def getCurator(config: CuratorConfig): CuratorFramework = {
    val curator: CuratorFramework = CuratorFrameworkFactory.newClient(
      config.zkConnect,
      new LoggingRetryPolicy(config, this))
    curator
  }

  @scala.throws[Exception](classOf[Exception])
  override def postStop(): Unit = {
    log.info("Shutting down curator...")
    Try(curator.close())
    super.postStop()
  }
}

trait BaseZkPath {
  this: CuratorAwareActor =>

  protected def baseZkPath: String

  protected def zkPath(path: String): String = {
    require(path.nonEmpty, "path must be nonempty")
    "%s/%s" format(baseZkPath, path)
  }

  protected def zkPathFrom(parent: String, child: String): String = {
    require(parent.nonEmpty, "parent path must be nonempty")
    require(child.nonEmpty, "child path must be nonempty")
    "%s/%s" format(parent, child)
  }
}
Example 20
Source File: InstanceIdAssigner.scala From openwhisk with Apache License 2.0
package org.apache.openwhisk.core.invoker

import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.framework.recipes.shared.SharedCount
import org.apache.curator.retry.RetryUntilElapsed
import org.apache.openwhisk.common.Logging

private[invoker] class InstanceIdAssigner(connectionString: String)(implicit logger: Logging) {

  def getId(name: String): Int = {
    logger.info(this, s"invokerReg: creating zkClient to $connectionString")
    val retryPolicy = new RetryUntilElapsed(5000, 500) // retry at 500ms intervals until 5 seconds have elapsed
    val zkClient = CuratorFrameworkFactory.newClient(connectionString, retryPolicy)
    zkClient.start()
    zkClient.blockUntilConnected()
    logger.info(this, "invokerReg: connected to zookeeper")

    val myIdPath = "/invokers/idAssignment/mapping/" + name
    val assignedId = Option(zkClient.checkExists().forPath(myIdPath)) match {
      case None =>
        // path doesn't exist -> no previous mapping for this invoker
        logger.info(this, s"invokerReg: no prior assignment of id for invoker $name")
        val idCounter = new SharedCount(zkClient, "/invokers/idAssignment/counter", 0)
        idCounter.start()

        // Optimistic compare-and-set loop: retry until the counter increments cleanly.
        def assignId(): Int = {
          val current = idCounter.getVersionedValue()
          if (idCounter.trySetCount(current, current.getValue() + 1)) {
            current.getValue()
          } else {
            assignId()
          }
        }

        val newId = assignId()
        idCounter.close()
        zkClient.create().creatingParentContainersIfNeeded().forPath(myIdPath, BigInt(newId).toByteArray)
        logger.info(this, s"invokerReg: invoker $name was assigned invokerId $newId")
        newId

      case Some(_) =>
        // path already exists -> there is a previous mapping for this invoker we should use
        val rawOldId = zkClient.getData().forPath(myIdPath)
        val oldId = BigInt(rawOldId).intValue
        logger.info(this, s"invokerReg: invoker $name was assigned its previous invokerId $oldId")
        oldId
    }

    zkClient.close()
    assignedId
  }
}