org.apache.curator.retry.ExponentialBackoffRetry Scala Examples
The following examples show how to use org.apache.curator.retry.ExponentialBackoffRetry.
They are drawn from open-source projects; the source file, project, and license are noted above each example.
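Before the project-specific examples, a quick orientation: ExponentialBackoffRetry is a Curator RetryPolicy built from a base sleep time in milliseconds and a maximum retry count; the wait before each retry grows roughly exponentially from the base sleep time, with random jitter. Below is a minimal, self-contained sketch, assuming a ZooKeeper server at the placeholder address localhost:2181.

import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.retry.ExponentialBackoffRetry

object ExponentialBackoffRetryExample extends App {
  // Retry failed operations up to 3 times, starting from a 1-second backoff
  // that roughly doubles (with jitter) on each attempt.
  val retryPolicy = new ExponentialBackoffRetry(1000, 3)

  // localhost:2181 is a placeholder connection string, not from the examples below.
  val client = CuratorFrameworkFactory.newClient("localhost:2181", retryPolicy)
  client.start()
  try {
    client.blockUntilConnected()
    println(s"connected, state = ${client.getState}")
  } finally {
    client.close()
  }
}

Most of the examples below follow the same two-step pattern: construct the retry policy, then hand it to CuratorFrameworkFactory, either directly via newClient or through the builder() API.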
Example 1
Source File: SparkCuratorUtil.scala From drizzle-spark with Apache License 2.0
package org.apache.spark.deploy

import scala.collection.JavaConverters._

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException

import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging

private[spark] object SparkCuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(
      conf: SparkConf,
      zkUrlConf: String = "spark.deploy.zookeeper.url"): CuratorFramework = {
    val ZK_URL = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path).asScala) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
}
Example 2
Source File: CuratorAwareTest.scala From CMAK with Apache License 2.0
package kafka.manager.utils

import org.apache.curator.framework.{CuratorFrameworkFactory, CuratorFramework}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.curator.test.TestingServer
import org.scalatest.{BeforeAndAfterAll, FunSuite}

import scala.reflect.ClassTag

trait CuratorAwareTest extends FunSuite with BeforeAndAfterAll with ZookeeperServerAwareTest {

  private[this] var curator: Option[CuratorFramework] = None

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    val retryPolicy = new ExponentialBackoffRetry(1000, 3)
    val curatorFramework = CuratorFrameworkFactory.newClient(testServer.getConnectString, retryPolicy)
    curatorFramework.start
    curator = Some(curatorFramework)
  }

  override protected def afterAll(): Unit = {
    curator.foreach(_.close())
    super.afterAll()
  }

  protected def withCurator(fn: CuratorFramework => Unit): Unit = {
    curator.foreach(fn)
  }

  protected def produceWithCurator[T](fn: CuratorFramework => T): T = {
    require(curator.isDefined, "Cannot produce with no curator defined!")
    fn(curator.get)
  }

  protected def checkError[T](fn: => Any)(implicit tag: ClassTag[T]): Unit = {
    try {
      fn
      throw new RuntimeException(s"expected ${tag.runtimeClass} , but no exceptions were thrown!")
    } catch {
      case UtilException(caught) =>
        if (!tag.runtimeClass.isAssignableFrom(caught.getClass)) {
          throw new RuntimeException(s"expected ${tag.runtimeClass} , found ${caught.getClass}, value=$caught")
        }
      case throwable: Throwable =>
        throw new RuntimeException(s"expected ${tag.runtimeClass} , found ${throwable.getClass}", throwable)
    }
  }
}
Example 3
Source File: SparkCuratorUtil.scala From sparkoscope with Apache License 2.0
package org.apache.spark.deploy

import scala.collection.JavaConverters._

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException

import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging

private[spark] object SparkCuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(
      conf: SparkConf,
      zkUrlConf: String = "spark.deploy.zookeeper.url"): CuratorFramework = {
    val ZK_URL = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path).asScala) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
}
Example 4
Source File: SparkCuratorUtil.scala From SparkCore with Apache License 2.0
package org.apache.spark.deploy.master

import scala.collection.JavaConversions._

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException

import org.apache.spark.{Logging, SparkConf}

object SparkCuratorUtil extends Logging {

  val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  val ZK_SESSION_TIMEOUT_MILLIS = 60000
  val RETRY_WAIT_MILLIS = 5000
  val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(conf: SparkConf): CuratorFramework = {
    val ZK_URL = conf.get("spark.deploy.zookeeper.url")
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path)) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
}
Example 5
Source File: DLock.scala From etl-light with MIT License
package yamrcraft.etlite.utils

import java.util.concurrent.TimeUnit

import org.apache.curator.framework.recipes.locks.InterProcessSemaphoreMutex
import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.slf4j.LoggerFactory

class DLock(zkConnect: String, lockFile: String, waitForLockSeconds: Int) {
  val logger = LoggerFactory.getLogger(this.getClass)

  private var zkClient: Option[CuratorFramework] = None
  private var lock: Option[InterProcessSemaphoreMutex] = None

  def tryLock(): Boolean = {
    require(lock.isEmpty, "lock can't be reused")
    logger.info("acquiring lock...")
    zkClient = Some(CuratorFrameworkFactory.newClient(zkConnect, new ExponentialBackoffRetry(1000, 3)))
    zkClient.get.start()
    lock = Some(new InterProcessSemaphoreMutex(zkClient.get, lockFile))
    lock.get.acquire(waitForLockSeconds, TimeUnit.SECONDS)
  }

  def release() = {
    require(lock.nonEmpty, "lock wasn't acquired")
    logger.info("releasing lock")
    lock.foreach(_.release())
    zkClient.foreach(_.close())
  }
}

class FakeLock extends DLock("", "", 0) {
  override def tryLock() = true
  override def release() = {}
}
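DLock wraps Curator's InterProcessSemaphoreMutex in a try-lock/release pair. A hypothetical usage sketch (the connection string and lock path are placeholders, not taken from the project):

// Hypothetical caller; "localhost:2181" and "/locks/etl-job" are placeholders.
val lock = new DLock("localhost:2181", "/locks/etl-job", waitForLockSeconds = 10)
if (lock.tryLock()) {
  try {
    // critical section: at most one process holds the lock at a time
  } finally {
    lock.release()
  }
}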
Example 6
Source File: SparkCuratorUtil.scala From multi-tenancy-spark with Apache License 2.0
package org.apache.spark.deploy

import scala.collection.JavaConverters._

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException

import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging

private[spark] object SparkCuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(
      conf: SparkConf,
      zkUrlConf: String = "spark.deploy.zookeeper.url"): CuratorFramework = {
    val ZK_URL = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path).asScala) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
}
Example 7
Source File: SparkCuratorUtil.scala From iolap with Apache License 2.0
package org.apache.spark.deploy

import scala.collection.JavaConversions._

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException

import org.apache.spark.{Logging, SparkConf}

private[spark] object SparkCuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(
      conf: SparkConf,
      zkUrlConf: String = "spark.deploy.zookeeper.url"): CuratorFramework = {
    val ZK_URL = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path)) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
}
Example 8
Source File: EmbeddedZkServerSuite.scala From kyuubi with Apache License 2.0
package org.apache.kyuubi.ha.server

import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.framework.imps.CuratorFrameworkState
import org.apache.curator.retry.ExponentialBackoffRetry

import org.apache.kyuubi.KyuubiFunSuite
import org.apache.kyuubi.config.KyuubiConf
import org.apache.kyuubi.service.ServiceState._

class EmbeddedZkServerSuite extends KyuubiFunSuite {

  test("embedded zookeeper server") {
    val zkServer = new EmbeddedZkServer()
    assert(zkServer.getConf == null)
    assert(zkServer.getName === zkServer.getClass.getSimpleName)
    assert(zkServer.getServiceState === LATENT)

    val conf = KyuubiConf()
    zkServer.initialize(conf)
    assert(zkServer.getConf === conf)
    assert(zkServer.getServiceState === INITIALIZED)
    assert(zkServer.getConnectString.endsWith("2181"))
    assert(zkServer.getStartTime === 0)

    zkServer.start()
    assert(zkServer.getServiceState === STARTED)
    assert(zkServer.getConnectString.endsWith("2181"))
    assert(zkServer.getStartTime !== 0)

    zkServer.stop()
    assert(zkServer.getServiceState === STOPPED)
  }

  test("connect test with embedded zookeeper") {
    val zkServer = new EmbeddedZkServer()
    zkServer.initialize(KyuubiConf())
    zkServer.start()

    val zkClient = CuratorFrameworkFactory.builder()
      .connectString(zkServer.getConnectString)
      .sessionTimeoutMs(5000)
      .retryPolicy(new ExponentialBackoffRetry(1000, 3))
      .build
    zkClient.start()

    assert(zkClient.getState === CuratorFrameworkState.STARTED)
    assert(zkClient.getZookeeperClient.blockUntilConnectedOrTimedOut())
  }
}
Example 9
Source File: ZookeeperFunSuite.scala From kyuubi with Apache License 2.0
package yaooqinn.kyuubi.ha

import com.google.common.io.Files
import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.curator.test.TestingServer
import org.apache.spark.{KyuubiConf, KyuubiSparkUtil, SparkConf, SparkFunSuite}
import org.apache.spark.KyuubiConf._

trait ZookeeperFunSuite extends SparkFunSuite {

  var zkServer: TestingServer = _
  var connectString: String = _
  val conf = new SparkConf(loadDefaults = true)
  KyuubiSparkUtil.setupCommonConfig(conf)
  conf.set(KyuubiConf.FRONTEND_BIND_PORT.key, "0")

  var zooKeeperClient: CuratorFramework = _

  override def beforeAll(): Unit = {
    zkServer = new TestingServer(2181, Files.createTempDir(), true)
    connectString = zkServer.getConnectString
    conf.set(HA_ZOOKEEPER_QUORUM.key, connectString)
    conf.set(HA_ZOOKEEPER_CONNECTION_BASESLEEPTIME.key, "100ms")
    conf.set(HA_ZOOKEEPER_SESSION_TIMEOUT.key, "15s")
    conf.set(HA_ZOOKEEPER_CONNECTION_MAX_RETRIES.key, "0")
    zooKeeperClient = CuratorFrameworkFactory.builder().connectString(connectString)
      .retryPolicy(new ExponentialBackoffRetry(1000, 3))
      .build()
    zooKeeperClient.start()
    super.beforeAll()
  }

  override def afterAll(): Unit = {
    Option(zooKeeperClient).foreach(_.close())
    Option(zkServer).foreach(_.stop())
    System.clearProperty(HA_ZOOKEEPER_QUORUM.key)
    System.clearProperty(HA_ENABLED.key)
    super.afterAll()
  }
}
Example 10
Source File: SparkCuratorUtil.scala From spark1.52 with Apache License 2.0
package org.apache.spark.deploy

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.spark.{Logging, SparkConf}
import org.apache.zookeeper.KeeperException

import scala.collection.JavaConversions._

private[spark] object SparkCuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(
      conf: SparkConf,
      // ZooKeeper cluster URL
      zkUrlConf: String = "spark.deploy.zookeeper.url"): CuratorFramework = {
    val ZK_URL = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path)) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
}
Example 11
Source File: SparkCuratorUtil.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.deploy

import scala.collection.JavaConverters._

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException

import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging

private[spark] object SparkCuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(
      conf: SparkConf,
      zkUrlConf: String = "spark.deploy.zookeeper.url"): CuratorFramework = {
    val ZK_URL = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path).asScala) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
}
Example 12
Source File: SparkCuratorUtil.scala From BigDatalog with Apache License 2.0
package org.apache.spark.deploy

import scala.collection.JavaConverters._

import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException

import org.apache.spark.{Logging, SparkConf}

private[spark] object SparkCuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(
      conf: SparkConf,
      zkUrlConf: String = "spark.deploy.zookeeper.url"): CuratorFramework = {
    val ZK_URL = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path).asScala) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
}
Example 13
Source File: ZkClusterInitTest.scala From squbs with Apache License 2.0
package org.squbs.cluster

import akka.util.ByteString
import com.typesafe.scalalogging.LazyLogging
import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.CreateMode
import org.squbs.cluster.test.{ZkClusterMultiActorSystemTestKit, ZkClusterTestHelper}

import scala.language.implicitConversions

class ZkClusterInitTest extends ZkClusterMultiActorSystemTestKit("ZkClusterInitTest")
  with LazyLogging with ZkClusterTestHelper {

  val par1 = ByteString("myPar1")
  val par2 = ByteString("myPar2")
  val par3 = ByteString("myPar3")

  implicit val log = logger
  implicit def string2ByteArray(s: String): Array[Byte] = s.toCharArray map (c => c.toByte)
  implicit def byteArray2String(array: Array[Byte]): String = array.map(_.toChar).mkString

  override def beforeAll(): Unit = {
    // Don't need to start the cluster for now
    // We preset the data in Zookeeper instead.
    val zkClient = CuratorFrameworkFactory.newClient(
      zkConfig.getString("zkCluster.connectionString"),
      new ExponentialBackoffRetry(ZkCluster.DEFAULT_BASE_SLEEP_TIME_MS, ZkCluster.DEFAULT_MAX_RETRIES)
    )
    zkClient.start()
    zkClient.blockUntilConnected()
    implicit val zkClientWithNS = zkClient.usingNamespace(zkConfig.getString("zkCluster.namespace"))
    guarantee("/leader", Some(Array[Byte]()), CreateMode.PERSISTENT)
    guarantee("/members", Some(Array[Byte]()), CreateMode.PERSISTENT)
    guarantee("/segments", Some(Array[Byte]()), CreateMode.PERSISTENT)
    guarantee("/segments/segment-0", Some(Array[Byte]()), CreateMode.PERSISTENT)
    guarantee(s"/segments/segment-0/${keyToPath(par1)}", Some("myPar1"), CreateMode.PERSISTENT)
    guarantee(s"/segments/segment-0/${keyToPath(par1)}/servants", None, CreateMode.PERSISTENT)
    guarantee(s"/segments/segment-0/${keyToPath(par1)}/$$size", Some(3), CreateMode.PERSISTENT)
    guarantee(s"/segments/segment-0/${keyToPath(par2)}", Some("myPar2"), CreateMode.PERSISTENT)
    guarantee(s"/segments/segment-0/${keyToPath(par2)}/servants", None, CreateMode.PERSISTENT)
    guarantee(s"/segments/segment-0/${keyToPath(par2)}/$$size", Some(3), CreateMode.PERSISTENT)
    guarantee(s"/segments/segment-0/${keyToPath(par3)}", Some("myPar3"), CreateMode.PERSISTENT)
    guarantee(s"/segments/segment-0/${keyToPath(par3)}/servants", None, CreateMode.PERSISTENT)
    guarantee(s"/segments/segment-0/${keyToPath(par3)}/$$size", Some(3), CreateMode.PERSISTENT)
    zkClient.close()
  }

  "ZkCluster" should "list the partitions" in {
    startCluster()
    zkClusterExts foreach {
      case (_, ext) =>
        ext tell (ZkListPartitions(ext.zkAddress), self)
        expectMsgType[ZkPartitions](timeout)
    }
  }

  "ZkCluster" should "load persisted partition information and sync across the cluster" in {
    zkClusterExts foreach {
      case (_, ext) =>
        ext tell (ZkQueryPartition(par1), self)
        expectMsgType[ZkPartition](timeout).members should have size 3
    }
    zkClusterExts foreach {
      case (_, ext) =>
        ext tell (ZkQueryPartition(par2), self)
        expectMsgType[ZkPartition](timeout).members should have size 3
    }
    zkClusterExts foreach {
      case (_, ext) =>
        ext tell (ZkQueryPartition(par3), self)
        expectMsgType[ZkPartition](timeout).members should have size 3
    }
  }

  "ZkCluster" should "list all the members across the cluster" in {
    val members = zkClusterExts.map(_._2.zkAddress).toSet
    zkClusterExts foreach {
      case (_, ext) =>
        ext tell (ZkQueryMembership, self)
        expectMsgType[ZkMembership](timeout).members should be (members)
    }
  }
}
Example 14
Source File: Clients.scala From cosmos with Apache License 2.0
package com.mesosphere.cosmos.zookeeper

import com.mesosphere.cosmos.model.ZooKeeperUri
import java.nio.charset.StandardCharsets
import org.apache.curator.framework.CuratorFramework
import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.framework.api.ACLProvider
import org.apache.curator.retry.ExponentialBackoffRetry
import org.slf4j.Logger
import scala.annotation.tailrec
import scala.collection.JavaConverters._

object Clients {
  val logger: Logger = org.slf4j.LoggerFactory.getLogger(getClass)

  val retries = 3
  val baseSleepTimeMs = 1000

  def createAndInitialize(zkUri: ZooKeeperUri): CuratorFramework = {
    createAndInitialize(
      zkUri = zkUri,
      zkCredentials = sys.env.get("ZOOKEEPER_USER").zip(sys.env.get("ZOOKEEPER_SECRET")).headOption
    )
  }

  def createAndInitialize(
    zkUri: ZooKeeperUri,
    zkCredentials: Option[(String, String)]
  ): CuratorFramework = {
    val zkClientBuilder = CuratorFrameworkFactory
      .builder()
      .connectString(zkUri.connectString)
      .retryPolicy(new ExponentialBackoffRetry(baseSleepTimeMs, retries))

    val authInfo = zkCredentials.map {
      case (user, secret) =>
        (
          s"$user:$secret".getBytes(StandardCharsets.UTF_8),
          CosmosAclProvider(user, secret)
        )
    }

    authInfo.foreach {
      case (authBytes, aclProvider) =>
        logger.info("Enabling authorization and ACL provider for ZooKeeper client")
        zkClientBuilder
          .authorization("digest", authBytes)
          .aclProvider(aclProvider)
    }

    val zkClient = zkClientBuilder.build

    // Start the client
    zkClient.start()

    authInfo.foreach {
      case (_, aclProvider) =>
        updateAcls(zkUri, zkClient, aclProvider)
    }

    zkClient
  }

  private[this] def updateAcls(
    zkUri: ZooKeeperUri,
    zkClient: CuratorFramework,
    aclProvider: ACLProvider
  ): Unit = {
    updateAcls(
      zkClient,
      aclProvider,
      zkClient.getChildren.forPath(zkUri.path).asScala.toList.map(zkUri.path + "/" + _)
    )
  }

  @tailrec
  private[this] def updateAcls(
    zkClient: CuratorFramework,
    aclProvider: ACLProvider,
    paths: List[String]
  ): Unit = {
    paths match {
      case path :: restOfPaths =>
        logger.info("Updating ZNode ACL during initialization: {}", path)

        zkClient
          .setACL()
          .withACL(aclProvider.getAclForPath(path))
          .forPath(path)

        val childrenPaths = zkClient.getChildren.forPath(path).asScala.toList.map { child =>
          path + "/" + child
        }

        updateAcls(zkClient, aclProvider, childrenPaths ++ restOfPaths)
      case Nil =>
        // No paths left. Nothing to do.
    }
  }
}
Example 15
Source File: CuratorUtil.scala From aloha with Apache License 2.0
package me.jrwang.aloha.scheduler.master.zookeeper

import scala.collection.JavaConverters._

import me.jrwang.aloha.common.{AlohaConf, Logging}
import me.jrwang.aloha.scheduler._
import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.KeeperException

object CuratorUtil extends Logging {

  private val ZK_CONNECTION_TIMEOUT_MILLIS = 15000
  private val ZK_SESSION_TIMEOUT_MILLIS = 60000
  private val RETRY_WAIT_MILLIS = 5000
  private val MAX_RECONNECT_ATTEMPTS = 3

  def newClient(
      conf: AlohaConf,
      zkUrlConf: String = ZOOKEEPER_URL.key): CuratorFramework = {
    val ZK_URL = conf.get(zkUrlConf)
    val zk = CuratorFrameworkFactory.newClient(ZK_URL,
      ZK_SESSION_TIMEOUT_MILLIS, ZK_CONNECTION_TIMEOUT_MILLIS,
      new ExponentialBackoffRetry(RETRY_WAIT_MILLIS, MAX_RECONNECT_ATTEMPTS))
    zk.start()
    zk
  }

  def mkdir(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) == null) {
      try {
        zk.create().creatingParentsIfNeeded().forPath(path)
      } catch {
        case nodeExist: KeeperException.NodeExistsException =>
          // do nothing, ignore node existing exception.
        case e: Exception => throw e
      }
    }
  }

  def deleteRecursive(zk: CuratorFramework, path: String) {
    if (zk.checkExists().forPath(path) != null) {
      for (child <- zk.getChildren.forPath(path).asScala) {
        zk.delete().forPath(path + "/" + child)
      }
      zk.delete().forPath(path)
    }
  }
}