org.apache.zookeeper.CreateMode Scala Examples
The following examples show how to use org.apache.zookeeper.CreateMode.
The project and source file that each example comes from are noted in the heading above it.
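As a quick orientation before the project examples, here is a minimal sketch (not taken from any of the projects below; the connection string and znode paths are placeholders) showing the CreateMode values you will see throughout, using the Apache Curator client that most of these examples rely on:

import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.CreateMode

object CreateModeDemo extends App {
  // Assumes a ZooKeeper ensemble is reachable at localhost:2181 (placeholder).
  val client = CuratorFrameworkFactory.newClient("localhost:2181", new ExponentialBackoffRetry(1000, 3))
  client.start()

  // PERSISTENT: the znode stays until it is explicitly deleted.
  client.create().creatingParentsIfNeeded()
    .withMode(CreateMode.PERSISTENT).forPath("/demo/config", "v1".getBytes("UTF-8"))

  // EPHEMERAL: the znode is removed automatically when this client session ends.
  client.create().withMode(CreateMode.EPHEMERAL).forPath("/demo/lock")

  // *_SEQUENTIAL: ZooKeeper appends a monotonically increasing suffix to the path it creates.
  val seqPath = client.create().withMode(CreateMode.EPHEMERAL_SEQUENTIAL).forPath("/demo/member-")
  println(s"registered at $seqPath")

  client.close()
}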
Example 1
Source File: MesosClusterPersistenceEngine.scala From iolap with Apache License 2.0
package org.apache.spark.scheduler.cluster.mesos

import scala.collection.JavaConversions._

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode
import org.apache.zookeeper.KeeperException.NoNodeException

import org.apache.spark.{Logging, SparkConf}
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.util.Utils

private[spark] class ZookeeperMesosClusterPersistenceEngine(
    baseDir: String,
    zk: CuratorFramework,
    conf: SparkConf)
  extends MesosClusterPersistenceEngine with Logging {

  private val WORKING_DIR =
    conf.get("spark.deploy.zookeeper.dir", "/spark_mesos_dispatcher") + "/" + baseDir

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)

  def path(name: String): String = {
    WORKING_DIR + "/" + name
  }

  override def expunge(name: String): Unit = {
    zk.delete().forPath(path(name))
  }

  override def persist(name: String, obj: Object): Unit = {
    val serialized = Utils.serialize(obj)
    val zkPath = path(name)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(zkPath, serialized)
  }

  override def fetch[T](name: String): Option[T] = {
    val zkPath = path(name)
    try {
      val fileData = zk.getData().forPath(zkPath)
      Some(Utils.deserialize[T](fileData))
    } catch {
      case e: NoNodeException => None
      case e: Exception => {
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(zkPath)
        None
      }
    }
  }

  override def fetchAll[T](): Iterable[T] = {
    zk.getChildren.forPath(WORKING_DIR).map(fetch[T]).flatten
  }
}
Example 2
Source File: ZooKeeperPersistenceEngine.scala From aloha with Apache License 2.0
package me.jrwang.aloha.scheduler.master.zookeeper

import java.nio.ByteBuffer

import scala.collection.JavaConverters._
import scala.reflect.ClassTag

import me.jrwang.aloha.common.{AlohaConf, Logging}
import me.jrwang.aloha.scheduler._
import me.jrwang.aloha.scheduler.master.PersistenceEngine
import me.jrwang.aloha.rpc.serializer.Serializer
import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode

private[master] class ZooKeeperPersistenceEngine(
    conf: AlohaConf,
    val serializer: Serializer
  ) extends PersistenceEngine with Logging {

  private val workingDir = conf.get(ZOOKEEPER_DIRECTORY).getOrElse("/aloha") + "/master_status"
  private val zk: CuratorFramework = CuratorUtil.newClient(conf)

  CuratorUtil.mkdir(zk, workingDir)

  override def persist(name: String, obj: Object): Unit = {
    serializeIntoFile(workingDir + "/" + name, obj)
  }

  override def unpersist(name: String): Unit = {
    zk.delete().forPath(workingDir + "/" + name)
  }

  override def read[T: ClassTag](prefix: String): Seq[T] = {
    zk.getChildren.forPath(workingDir).asScala
      .filter(_.startsWith(prefix)).flatMap(deserializeFromFile[T](_))
  }

  override def close() {
    zk.close()
  }

  private def serializeIntoFile(path: String, value: AnyRef) {
    val serialized = serializer.newInstance().serialize(value)
    val bytes = new Array[Byte](serialized.remaining())
    serialized.get(bytes)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(path, bytes)
  }

  private def deserializeFromFile[T](filename: String)(implicit m: ClassTag[T]): Option[T] = {
    val fileData = zk.getData.forPath(workingDir + "/" + filename)
    try {
      Some(serializer.newInstance().deserialize[T](ByteBuffer.wrap(fileData)))
    } catch {
      case e: Exception =>
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(workingDir + "/" + filename)
        None
    }
  }
}
Example 3
Source File: ZkClusterInitTest.scala From squbs with Apache License 2.0
package org.squbs.cluster

import akka.util.ByteString
import com.typesafe.scalalogging.LazyLogging
import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.CreateMode
import org.squbs.cluster.test.{ZkClusterMultiActorSystemTestKit, ZkClusterTestHelper}

import scala.language.implicitConversions

class ZkClusterInitTest extends ZkClusterMultiActorSystemTestKit("ZkClusterInitTest")
  with LazyLogging with ZkClusterTestHelper {

  val par1 = ByteString("myPar1")
  val par2 = ByteString("myPar2")
  val par3 = ByteString("myPar3")

  implicit val log = logger
  implicit def string2ByteArray(s: String): Array[Byte] = s.toCharArray map (c => c.toByte)
  implicit def byteArray2String(array: Array[Byte]): String = array.map(_.toChar).mkString

  override def beforeAll(): Unit = {
    // Don't need to start the cluster for now
    // We preset the data in Zookeeper instead.
    val zkClient = CuratorFrameworkFactory.newClient(
      zkConfig.getString("zkCluster.connectionString"),
      new ExponentialBackoffRetry(ZkCluster.DEFAULT_BASE_SLEEP_TIME_MS, ZkCluster.DEFAULT_MAX_RETRIES)
    )
    zkClient.start()
    zkClient.blockUntilConnected()
    implicit val zkClientWithNS = zkClient.usingNamespace(zkConfig.getString("zkCluster.namespace"))
    guarantee("/leader", Some(Array[Byte]()), CreateMode.PERSISTENT)
    guarantee("/members", Some(Array[Byte]()), CreateMode.PERSISTENT)
    guarantee("/segments", Some(Array[Byte]()), CreateMode.PERSISTENT)
    guarantee("/segments/segment-0", Some(Array[Byte]()), CreateMode.PERSISTENT)
    guarantee(s"/segments/segment-0/${keyToPath(par1)}", Some("myPar1"), CreateMode.PERSISTENT)
    guarantee(s"/segments/segment-0/${keyToPath(par1)}/servants", None, CreateMode.PERSISTENT)
    guarantee(s"/segments/segment-0/${keyToPath(par1)}/$$size", Some(3), CreateMode.PERSISTENT)
    guarantee(s"/segments/segment-0/${keyToPath(par2)}", Some("myPar2"), CreateMode.PERSISTENT)
    guarantee(s"/segments/segment-0/${keyToPath(par2)}/servants", None, CreateMode.PERSISTENT)
    guarantee(s"/segments/segment-0/${keyToPath(par2)}/$$size", Some(3), CreateMode.PERSISTENT)
    guarantee(s"/segments/segment-0/${keyToPath(par3)}", Some("myPar3"), CreateMode.PERSISTENT)
    guarantee(s"/segments/segment-0/${keyToPath(par3)}/servants", None, CreateMode.PERSISTENT)
    guarantee(s"/segments/segment-0/${keyToPath(par3)}/$$size", Some(3), CreateMode.PERSISTENT)
    zkClient.close()
  }

  "ZkCluster" should "list the partitions" in {
    startCluster()
    zkClusterExts foreach {
      case (_, ext) =>
        ext tell (ZkListPartitions(ext.zkAddress), self)
        expectMsgType[ZkPartitions](timeout)
    }
  }

  "ZkCluster" should "load persisted partition information and sync across the cluster" in {
    zkClusterExts foreach {
      case (_, ext) =>
        ext tell (ZkQueryPartition(par1), self)
        expectMsgType[ZkPartition](timeout).members should have size 3
    }
    zkClusterExts foreach {
      case (_, ext) =>
        ext tell (ZkQueryPartition(par2), self)
        expectMsgType[ZkPartition](timeout).members should have size 3
    }
    zkClusterExts foreach {
      case (_, ext) =>
        ext tell (ZkQueryPartition(par3), self)
        expectMsgType[ZkPartition](timeout).members should have size 3
    }
  }

  "ZkCluster" should "list all the members across the cluster" in {
    val members = zkClusterExts.map(_._2.zkAddress).toSet
    zkClusterExts foreach {
      case (_, ext) =>
        ext tell (ZkQueryMembership, self)
        expectMsgType[ZkMembership](timeout).members should be (members)
    }
  }
}
Example 4
Source File: package.scala From squbs with Apache License 2.0
package org.squbs

import java.net.{URLDecoder, URLEncoder}
import java.nio.ByteBuffer
import java.nio.charset.Charset

import akka.actor.{Address, AddressFromURIString}
import akka.util.ByteString
import com.typesafe.scalalogging.Logger
import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode
import org.apache.zookeeper.KeeperException.NodeExistsException

import scala.language.implicitConversions
import scala.util.Try
import scala.util.control.NonFatal
import scala.collection.JavaConverters._

package object cluster {

  trait SegmentationLogic {
    val segmentsSize: Int
    def segmentation(partitionKey: ByteString): String =
      s"segment-${Math.abs(partitionKey.hashCode()) % segmentsSize}"
    def partitionZkPath(partitionKey: ByteString): String =
      s"/segments/${segmentation(partitionKey)}/${keyToPath(partitionKey)}"
    def sizeOfParZkPath(partitionKey: ByteString): String = s"${partitionZkPath(partitionKey)}/$$size"
    def servantsOfParZkPath(partitionKey: ByteString): String = s"${partitionZkPath(partitionKey)}/servants"
  }

  case class DefaultSegmentationLogic(segmentsSize: Int) extends SegmentationLogic

  def guarantee(path: String, data: Option[Array[Byte]], mode: CreateMode = CreateMode.EPHEMERAL)
               (implicit zkClient: CuratorFramework, logger: Logger): String = {
    try {
      data match {
        case None => zkClient.create.withMode(mode).forPath(path)
        case Some(bytes) => zkClient.create.withMode(mode).forPath(path, bytes)
      }
    } catch {
      case e: NodeExistsException =>
        if (data.nonEmpty && data.get.length > 0) {
          zkClient.setData().forPath(path, data.get)
        }
        path
      case NonFatal(e) =>
        logger.info("leader znode creation failed due to %s\n", e)
        path
    }
  }

  def safelyDiscard(path: String, recursive: Boolean = true)(implicit zkClient: CuratorFramework): String = Try {
    if (recursive)
      zkClient.getChildren.forPath(path).asScala.foreach(child => safelyDiscard(s"$path/$child", recursive))
    zkClient.delete.forPath(path)
    path
  } getOrElse path

  def keyToPath(name: String): String = URLEncoder.encode(name, "utf-8")

  def pathToKey(name: String): String = URLDecoder.decode(name, "utf-8")

  private[cluster] val BYTES_OF_INT = Integer.SIZE / java.lang.Byte.SIZE

  implicit def intToBytes(integer: Int): Array[Byte] = {
    val buf = ByteBuffer.allocate(BYTES_OF_INT)
    buf.putInt(integer)
    buf.rewind
    buf.array()
  }

  val UTF_8 = Charset.forName("utf-8")

  implicit class ByteConversions(val bytes: Array[Byte]) extends AnyVal {

    def toAddress: Option[Address] =
      Option(bytes) flatMap (b => if (b.length <= 0) None else Some(AddressFromURIString(new String(b, UTF_8))))

    def toInt: Int = ByteBuffer.wrap(bytes).getInt

    def toUtf8: String = new String(bytes, UTF_8)

    def toByteString: ByteString = ByteString(bytes)

    def toAddressSet: Set[Address] = Try {
      new String(bytes, UTF_8).split("[,]").map(seg => AddressFromURIString(seg.trim)).toSet
    } getOrElse Set.empty
  }

  implicit def byteStringToUtf8(bs: ByteString): String = new String(bs.toArray, UTF_8)

  implicit def addressToBytes(address: Address): Array[Byte] = {
    address.toString.getBytes(UTF_8)
  }

  implicit def addressSetToBytes(members: Set[Address]): Array[Byte] = {
    members.mkString(",").getBytes(UTF_8)
  }
}
Example 5
Source File: MesosClusterPersistenceEngine.scala From BigDatalog with Apache License 2.0
package org.apache.spark.scheduler.cluster.mesos

import scala.collection.JavaConverters._

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode
import org.apache.zookeeper.KeeperException.NoNodeException

import org.apache.spark.{Logging, SparkConf}
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.util.Utils

private[spark] class ZookeeperMesosClusterPersistenceEngine(
    baseDir: String,
    zk: CuratorFramework,
    conf: SparkConf)
  extends MesosClusterPersistenceEngine with Logging {

  private val WORKING_DIR =
    conf.get("spark.deploy.zookeeper.dir", "/spark_mesos_dispatcher") + "/" + baseDir

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)

  def path(name: String): String = {
    WORKING_DIR + "/" + name
  }

  override def expunge(name: String): Unit = {
    zk.delete().forPath(path(name))
  }

  override def persist(name: String, obj: Object): Unit = {
    val serialized = Utils.serialize(obj)
    val zkPath = path(name)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(zkPath, serialized)
  }

  override def fetch[T](name: String): Option[T] = {
    val zkPath = path(name)
    try {
      val fileData = zk.getData().forPath(zkPath)
      Some(Utils.deserialize[T](fileData))
    } catch {
      case e: NoNodeException => None
      case e: Exception => {
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(zkPath)
        None
      }
    }
  }

  override def fetchAll[T](): Iterable[T] = {
    zk.getChildren.forPath(WORKING_DIR).asScala.flatMap(fetch[T])
  }
}
Example 6
Source File: ZooKeeperPersistenceEngine.scala From BigDatalog with Apache License 2.0
package org.apache.spark.deploy.master

import java.nio.ByteBuffer

import scala.collection.JavaConverters._
import scala.reflect.ClassTag

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode

import org.apache.spark.{Logging, SparkConf}
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.serializer.Serializer

private[master] class ZooKeeperPersistenceEngine(conf: SparkConf, val serializer: Serializer)
  extends PersistenceEngine
  with Logging {

  private val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/master_status"
  private val zk: CuratorFramework = SparkCuratorUtil.newClient(conf)

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)

  override def persist(name: String, obj: Object): Unit = {
    serializeIntoFile(WORKING_DIR + "/" + name, obj)
  }

  override def unpersist(name: String): Unit = {
    zk.delete().forPath(WORKING_DIR + "/" + name)
  }

  override def read[T: ClassTag](prefix: String): Seq[T] = {
    zk.getChildren.forPath(WORKING_DIR).asScala
      .filter(_.startsWith(prefix)).map(deserializeFromFile[T]).flatten
  }

  override def close() {
    zk.close()
  }

  private def serializeIntoFile(path: String, value: AnyRef) {
    val serialized = serializer.newInstance().serialize(value)
    val bytes = new Array[Byte](serialized.remaining())
    serialized.get(bytes)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(path, bytes)
  }

  private def deserializeFromFile[T](filename: String)(implicit m: ClassTag[T]): Option[T] = {
    val fileData = zk.getData().forPath(WORKING_DIR + "/" + filename)
    try {
      Some(serializer.newInstance().deserialize[T](ByteBuffer.wrap(fileData)))
    } catch {
      case e: Exception => {
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(WORKING_DIR + "/" + filename)
        None
      }
    }
  }
}
Example 7
Source File: ZooKeeperPersistenceEngine.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.deploy.master

import java.nio.ByteBuffer

import scala.collection.JavaConverters._
import scala.reflect.ClassTag

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.internal.Logging
import org.apache.spark.serializer.Serializer

private[master] class ZooKeeperPersistenceEngine(conf: SparkConf, val serializer: Serializer)
  extends PersistenceEngine
  with Logging {

  private val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/master_status"
  private val zk: CuratorFramework = SparkCuratorUtil.newClient(conf)

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)

  override def persist(name: String, obj: Object): Unit = {
    serializeIntoFile(WORKING_DIR + "/" + name, obj)
  }

  override def unpersist(name: String): Unit = {
    zk.delete().forPath(WORKING_DIR + "/" + name)
  }

  override def read[T: ClassTag](prefix: String): Seq[T] = {
    zk.getChildren.forPath(WORKING_DIR).asScala
      .filter(_.startsWith(prefix)).flatMap(deserializeFromFile[T])
  }

  override def close() {
    zk.close()
  }

  private def serializeIntoFile(path: String, value: AnyRef) {
    val serialized = serializer.newInstance().serialize(value)
    val bytes = new Array[Byte](serialized.remaining())
    serialized.get(bytes)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(path, bytes)
  }

  private def deserializeFromFile[T](filename: String)(implicit m: ClassTag[T]): Option[T] = {
    val fileData = zk.getData().forPath(WORKING_DIR + "/" + filename)
    try {
      Some(serializer.newInstance().deserialize[T](ByteBuffer.wrap(fileData)))
    } catch {
      case e: Exception =>
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(WORKING_DIR + "/" + filename)
        None
    }
  }
}
Example 8
Source File: MesosClusterPersistenceEngine.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.scheduler.cluster.mesos

import scala.collection.JavaConverters._

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode
import org.apache.zookeeper.KeeperException.NoNodeException

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils

private[spark] class ZookeeperMesosClusterPersistenceEngine(
    baseDir: String,
    zk: CuratorFramework,
    conf: SparkConf)
  extends MesosClusterPersistenceEngine with Logging {

  private val WORKING_DIR =
    conf.get("spark.deploy.zookeeper.dir", "/spark_mesos_dispatcher") + "/" + baseDir

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)

  def path(name: String): String = {
    WORKING_DIR + "/" + name
  }

  override def expunge(name: String): Unit = {
    zk.delete().forPath(path(name))
  }

  override def persist(name: String, obj: Object): Unit = {
    val serialized = Utils.serialize(obj)
    val zkPath = path(name)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(zkPath, serialized)
  }

  override def fetch[T](name: String): Option[T] = {
    val zkPath = path(name)
    try {
      val fileData = zk.getData().forPath(zkPath)
      Some(Utils.deserialize[T](fileData))
    } catch {
      case e: NoNodeException => None
      case e: Exception =>
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(zkPath)
        None
    }
  }

  override def fetchAll[T](): Iterable[T] = {
    zk.getChildren.forPath(WORKING_DIR).asScala.flatMap(fetch[T])
  }
}
Example 9
Source File: ZooKeeperPersistenceEngine.scala From spark1.52 with Apache License 2.0
package org.apache.spark.deploy.master

import java.nio.ByteBuffer

import scala.collection.JavaConversions._
import scala.reflect.ClassTag

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode

import org.apache.spark.{Logging, SparkConf}
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.serializer.Serializer

private[master] class ZooKeeperPersistenceEngine(conf: SparkConf, val serializer: Serializer)
  extends PersistenceEngine
  with Logging {

  // Directory where ZooKeeper stores the recovery state; defaults to /spark
  private val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/master_status"
  private val zk: CuratorFramework = SparkCuratorUtil.newClient(conf)

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)

  override def persist(name: String, obj: Object): Unit = {
    serializeIntoFile(WORKING_DIR + "/" + name, obj)
  }

  override def unpersist(name: String): Unit = {
    zk.delete().forPath(WORKING_DIR + "/" + name)
  }

  override def read[T: ClassTag](prefix: String): Seq[T] = {
    val file = zk.getChildren.forPath(WORKING_DIR).filter(_.startsWith(prefix))
    file.map(deserializeFromFile[T]).flatten
  }

  override def close() {
    zk.close()
  }

  private def serializeIntoFile(path: String, value: AnyRef) {
    val serialized = serializer.newInstance().serialize(value)
    val bytes = new Array[Byte](serialized.remaining())
    serialized.get(bytes)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(path, bytes)
  }

  private def deserializeFromFile[T](filename: String)(implicit m: ClassTag[T]): Option[T] = {
    val fileData = zk.getData().forPath(WORKING_DIR + "/" + filename)
    try {
      Some(serializer.newInstance().deserialize[T](ByteBuffer.wrap(fileData)))
    } catch {
      case e: Exception => {
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(WORKING_DIR + "/" + filename)
        None
      }
    }
  }
}
Example 10
Source File: CoordinatorZk.scala From affinity with Apache License 2.0
package io.amient.affinity.core.cluster

import java.util

import akka.actor.{ActorPath, ActorSystem}
import com.typesafe.config.Config
import io.amient.affinity.core.cluster.Coordinator.CoordinatorConf
import io.amient.affinity.core.cluster.CoordinatorZk.CoordinatorZkConf
import io.amient.affinity.core.config.CfgStruct
import io.amient.affinity.core.util.{ZkClients, ZkConf}
import org.I0Itec.zkclient.IZkChildListener
import org.apache.zookeeper.CreateMode

import scala.collection.JavaConverters._

object CoordinatorZk {

  object CoordinatorZkConf extends CoordinatorZkConf {
    override def apply(config: Config) = new CoordinatorZkConf()(config)
  }

  class CoordinatorZkConf extends CfgStruct[CoordinatorZkConf](classOf[CoordinatorConf]) {
    val ZooKeeper = struct("zookeeper", new ZkConf, true)
    val ZkRoot = string("zookeeper.root", "/affinity")
      .doc("znode under which coordination data between affinity nodes will be registered")
  }
}

class CoordinatorZk(system: ActorSystem, group: String, _conf: CoordinatorConf) extends Coordinator(system, group) {

  val conf = CoordinatorZkConf(_conf)
  val zkConf = conf.ZooKeeper()
  val zkRoot = conf.ZkRoot()
  val groupRoot = s"$zkRoot/${system.name}/$group/online"
  val peersRoot = s"$zkRoot/${system.name}/$group/peers"

  private val zk = ZkClients.get(zkConf)

  if (!zk.exists(groupRoot)) zk.createPersistent(groupRoot, true)

  updateChildren(zk.subscribeChildChanges(groupRoot, new IZkChildListener() {
    override def handleChildChange(parentPath: String, children: util.List[String]): Unit = {
      updateChildren(children)
    }
  }))

  override def register(actorPath: ActorPath): String = {
    zk.create(s"$groupRoot/", actorPath.toString(), CreateMode.EPHEMERAL_SEQUENTIAL)
  }

  override def unregister(handle: String) = zk.delete(handle)

  override def close(): Unit = if (!closed.get) {
    super.close()
    ZkClients.close(zk)
  }

  private def listAsIndexedSeq(list: util.List[String]) = list.asScala.toIndexedSeq

  private def updateChildren(children: util.List[String]): Unit = {
    if (children != null) {
      val newHandles = listAsIndexedSeq(children).map(id => s"$groupRoot/$id")
      val newState = newHandles.map(handle => (handle, zk.readData[String](handle))).toMap
      updateGroup(newState)
    }
  }

  override def registerPeer(akkaAddress: String, knownZid: Option[String]): String = {
    if (!zk.exists(peersRoot)) zk.createPersistent(peersRoot, true)
    val nodes = zk.getChildren(peersRoot).asScala.map(i => (i, zk.readData[String](s"$peersRoot/$i")))
    val zid: String = knownZid.flatMap { id =>
      nodes.find(_._1 == id) match {
        case Some((_, prevAkkaAddress)) if (prevAkkaAddress == akkaAddress) => Some(id)
        case Some(_) => zk.writeData(s"$peersRoot/$id", akkaAddress); Some(id)
        case None => None
      }
    } getOrElse {
      nodes.find(_._2 == akkaAddress) match {
        case Some((id, _)) => id
        case None =>
          zk.create(s"$peersRoot/", akkaAddress, CreateMode.PERSISTENT_SEQUENTIAL).substring(peersRoot.length + 1)
      }
    }

    def update(zids: util.List[String]) = updatePeers(zids.asScala.toList)

    try zid finally update(zk.subscribeChildChanges(peersRoot, new IZkChildListener() {
      override def handleChildChange(parentPath: String, zids: util.List[String]): Unit = update(zids)
    }))
  }
}
Example 11
Source File: ZookeeperSchemaRegistry.scala From affinity with Apache License 2.0
package io.amient.affinity.avro

import com.typesafe.config.Config
import io.amient.affinity.avro.ZookeeperSchemaRegistry.ZkAvroConf
import io.amient.affinity.avro.record.AvroSerde
import io.amient.affinity.avro.record.AvroSerde.AvroConf
import io.amient.affinity.core.config.CfgStruct
import io.amient.affinity.core.util.{ZkClients, ZkConf}
import org.I0Itec.zkclient.ZkClient
import org.I0Itec.zkclient.exception.ZkNodeExistsException
import org.apache.avro.Schema
import org.apache.zookeeper.CreateMode

import scala.collection.JavaConverters._

object ZookeeperSchemaRegistry {

  object ZkAvroConf extends ZkAvroConf {
    override def apply(config: Config) = new ZkAvroConf().apply(config)
  }

  class ZkAvroConf extends CfgStruct[ZkAvroConf](classOf[AvroConf]) {
    val ZooKeeper = struct("schema.registry.zookeeper", new ZkConf, true)
    val ZkRoot = string("schema.registry.zookeeper.root", "/affinity-schema-registry")
      .doc("znode under which schemas will be stored")
  }
}

class ZookeeperSchemaRegistry(zkRoot: String, zk: ZkClient) extends AvroSerde with AvroSchemaRegistry {

  def this(conf: ZkAvroConf) = this(conf.ZkRoot(), {
    val zk = ZkClients.get(conf.ZooKeeper)
    val zkRoot = conf.ZkRoot()
    if (!zk.exists(zkRoot)) zk.createPersistent(zkRoot)
    val zkSchemas = s"$zkRoot/schemas"
    if (!zk.exists(zkSchemas)) zk.createPersistent(zkSchemas)
    val zkSubjects = s"$zkRoot/subjects"
    if (!zk.exists(zkSubjects)) zk.createPersistent(zkSubjects)
    zk
  })

  def this(_conf: AvroConf) = this {
    new ZkAvroConf().apply(_conf)
  }

  override def close(): Unit = ZkClients.close(zk)

  override protected def registerSchema(subject: String, schema: Schema): Int = hypersynchronized {
    val zkSubject = s"$zkRoot/subjects/$subject"
    val zkSchemas = s"$zkRoot/schemas"
    val versions: Map[Schema, Int] =
      if (!zk.exists(zkSubject)) Map.empty else {
        zk.readData[String](zkSubject) match {
          case some => some.split(",").toList.map(_.toInt).map {
            case id => getSchema(id) -> id
          }.toMap
        }
      }
    versions.get(schema).getOrElse {
      validator.validate(schema, versions.map(_._1).asJava)
      val schemaPath = zk.create(s"$zkSchemas/", schema.toString(true), CreateMode.PERSISTENT_SEQUENTIAL)
      val id = schemaPath.substring(zkSchemas.length + 1).toInt
      val updatedVersions = versions.map(_._2).toList :+ id
      if (zk.exists(zkSubject)) {
        zk.writeData(zkSubject, updatedVersions.mkString(","))
      } else {
        zk.create(zkSubject, updatedVersions.mkString(","), CreateMode.PERSISTENT)
      }
      id
    }
  }

  private def hypersynchronized[X](f: => X): X = synchronized {
    val lockPath = zkRoot + "/lock"
    var acquired = 0
    do {
      try {
        zk.createEphemeral(lockPath)
        acquired = 1
      } catch {
        case _: ZkNodeExistsException =>
          acquired -= 1
          if (acquired < -100) {
            throw new IllegalStateException("Could not acquire zk registry lock")
          } else {
            Thread.sleep(500)
          }
      }
    } while (acquired != 1)
    try f finally zk.delete(lockPath)
  }
}
Example 12
Source File: MesosClusterPersistenceEngine.scala From drizzle-spark with Apache License 2.0
package org.apache.spark.scheduler.cluster.mesos

import scala.collection.JavaConverters._

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode
import org.apache.zookeeper.KeeperException.NoNodeException

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils

private[spark] class ZookeeperMesosClusterPersistenceEngine(
    baseDir: String,
    zk: CuratorFramework,
    conf: SparkConf)
  extends MesosClusterPersistenceEngine with Logging {

  private val WORKING_DIR =
    conf.get("spark.deploy.zookeeper.dir", "/spark_mesos_dispatcher") + "/" + baseDir

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)

  def path(name: String): String = {
    WORKING_DIR + "/" + name
  }

  override def expunge(name: String): Unit = {
    zk.delete().forPath(path(name))
  }

  override def persist(name: String, obj: Object): Unit = {
    val serialized = Utils.serialize(obj)
    val zkPath = path(name)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(zkPath, serialized)
  }

  override def fetch[T](name: String): Option[T] = {
    val zkPath = path(name)
    try {
      val fileData = zk.getData().forPath(zkPath)
      Some(Utils.deserialize[T](fileData))
    } catch {
      case e: NoNodeException => None
      case e: Exception =>
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(zkPath)
        None
    }
  }

  override def fetchAll[T](): Iterable[T] = {
    zk.getChildren.forPath(WORKING_DIR).asScala.flatMap(fetch[T])
  }
}
Example 13
Source File: ZooKeeperPersistenceEngine.scala From iolap with Apache License 2.0
package org.apache.spark.deploy.master

import akka.serialization.Serialization

import scala.collection.JavaConversions._
import scala.reflect.ClassTag

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode

import org.apache.spark.{Logging, SparkConf}
import org.apache.spark.deploy.SparkCuratorUtil

private[master] class ZooKeeperPersistenceEngine(conf: SparkConf, val serialization: Serialization)
  extends PersistenceEngine
  with Logging {

  private val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/master_status"
  private val zk: CuratorFramework = SparkCuratorUtil.newClient(conf)

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)

  override def persist(name: String, obj: Object): Unit = {
    serializeIntoFile(WORKING_DIR + "/" + name, obj)
  }

  override def unpersist(name: String): Unit = {
    zk.delete().forPath(WORKING_DIR + "/" + name)
  }

  override def read[T: ClassTag](prefix: String): Seq[T] = {
    val file = zk.getChildren.forPath(WORKING_DIR).filter(_.startsWith(prefix))
    file.map(deserializeFromFile[T]).flatten
  }

  override def close() {
    zk.close()
  }

  private def serializeIntoFile(path: String, value: AnyRef) {
    val serializer = serialization.findSerializerFor(value)
    val serialized = serializer.toBinary(value)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(path, serialized)
  }

  private def deserializeFromFile[T](filename: String)(implicit m: ClassTag[T]): Option[T] = {
    val fileData = zk.getData().forPath(WORKING_DIR + "/" + filename)
    val clazz = m.runtimeClass.asInstanceOf[Class[T]]
    val serializer = serialization.serializerFor(clazz)
    try {
      Some(serializer.fromBinary(fileData).asInstanceOf[T])
    } catch {
      case e: Exception => {
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(WORKING_DIR + "/" + filename)
        None
      }
    }
  }
}
Example 14
Source File: ZooKeeperPersistenceEngine.scala From multi-tenancy-spark with Apache License 2.0
package org.apache.spark.deploy.master

import java.nio.ByteBuffer

import scala.collection.JavaConverters._
import scala.reflect.ClassTag

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.internal.Logging
import org.apache.spark.serializer.Serializer

private[master] class ZooKeeperPersistenceEngine(conf: SparkConf, val serializer: Serializer)
  extends PersistenceEngine
  with Logging {

  private val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/master_status"
  private val zk: CuratorFramework = SparkCuratorUtil.newClient(conf)

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)

  override def persist(name: String, obj: Object): Unit = {
    serializeIntoFile(WORKING_DIR + "/" + name, obj)
  }

  override def unpersist(name: String): Unit = {
    zk.delete().forPath(WORKING_DIR + "/" + name)
  }

  override def read[T: ClassTag](prefix: String): Seq[T] = {
    zk.getChildren.forPath(WORKING_DIR).asScala
      .filter(_.startsWith(prefix)).flatMap(deserializeFromFile[T])
  }

  override def close() {
    zk.close()
  }

  private def serializeIntoFile(path: String, value: AnyRef) {
    val serialized = serializer.newInstance().serialize(value)
    val bytes = new Array[Byte](serialized.remaining())
    serialized.get(bytes)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(path, bytes)
  }

  private def deserializeFromFile[T](filename: String)(implicit m: ClassTag[T]): Option[T] = {
    val fileData = zk.getData().forPath(WORKING_DIR + "/" + filename)
    try {
      Some(serializer.newInstance().deserialize[T](ByteBuffer.wrap(fileData)))
    } catch {
      case e: Exception =>
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(WORKING_DIR + "/" + filename)
        None
    }
  }
}
Example 15
Source File: MesosClusterPersistenceEngine.scala From multi-tenancy-spark with Apache License 2.0
package org.apache.spark.scheduler.cluster.mesos

import scala.collection.JavaConverters._

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode
import org.apache.zookeeper.KeeperException.NoNodeException

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils

private[spark] class ZookeeperMesosClusterPersistenceEngine(
    baseDir: String,
    zk: CuratorFramework,
    conf: SparkConf)
  extends MesosClusterPersistenceEngine with Logging {

  private val WORKING_DIR =
    conf.get("spark.deploy.zookeeper.dir", "/spark_mesos_dispatcher") + "/" + baseDir

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)

  def path(name: String): String = {
    WORKING_DIR + "/" + name
  }

  override def expunge(name: String): Unit = {
    zk.delete().forPath(path(name))
  }

  override def persist(name: String, obj: Object): Unit = {
    val serialized = Utils.serialize(obj)
    val zkPath = path(name)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(zkPath, serialized)
  }

  override def fetch[T](name: String): Option[T] = {
    val zkPath = path(name)
    try {
      val fileData = zk.getData().forPath(zkPath)
      Some(Utils.deserialize[T](fileData))
    } catch {
      case e: NoNodeException => None
      case e: Exception =>
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(zkPath)
        None
    }
  }

  override def fetchAll[T](): Iterable[T] = {
    zk.getChildren.forPath(WORKING_DIR).asScala.flatMap(fetch[T])
  }
}
Example 16
Source File: ZooKeeperPersistenceEngine.scala From SparkCore with Apache License 2.0
package org.apache.spark.deploy.master

import akka.serialization.Serialization

import scala.collection.JavaConversions._
import scala.reflect.ClassTag

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode

import org.apache.spark.{Logging, SparkConf}

private[spark] class ZooKeeperPersistenceEngine(conf: SparkConf, val serialization: Serialization)
  extends PersistenceEngine
  with Logging {

  val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/master_status"
  val zk: CuratorFramework = SparkCuratorUtil.newClient(conf)

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)

  override def persist(name: String, obj: Object): Unit = {
    serializeIntoFile(WORKING_DIR + "/" + name, obj)
  }

  override def unpersist(name: String): Unit = {
    zk.delete().forPath(WORKING_DIR + "/" + name)
  }

  override def read[T: ClassTag](prefix: String) = {
    val file = zk.getChildren.forPath(WORKING_DIR).filter(_.startsWith(prefix))
    file.map(deserializeFromFile[T]).flatten
  }

  override def close() {
    zk.close()
  }

  private def serializeIntoFile(path: String, value: AnyRef) {
    val serializer = serialization.findSerializerFor(value)
    val serialized = serializer.toBinary(value)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(path, serialized)
  }

  def deserializeFromFile[T](filename: String)(implicit m: ClassTag[T]): Option[T] = {
    val fileData = zk.getData().forPath(WORKING_DIR + "/" + filename)
    val clazz = m.runtimeClass.asInstanceOf[Class[T]]
    val serializer = serialization.serializerFor(clazz)
    try {
      Some(serializer.fromBinary(fileData).asInstanceOf[T])
    } catch {
      case e: Exception => {
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(WORKING_DIR + "/" + filename)
        None
      }
    }
  }
}
Example 17
Source File: ZooKeeperPersistenceEngine.scala From sparkoscope with Apache License 2.0
package org.apache.spark.deploy.master

import java.nio.ByteBuffer

import scala.collection.JavaConverters._
import scala.reflect.ClassTag

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.internal.Logging
import org.apache.spark.serializer.Serializer

private[master] class ZooKeeperPersistenceEngine(conf: SparkConf, val serializer: Serializer)
  extends PersistenceEngine
  with Logging {

  private val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/master_status"
  private val zk: CuratorFramework = SparkCuratorUtil.newClient(conf)

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)

  override def persist(name: String, obj: Object): Unit = {
    serializeIntoFile(WORKING_DIR + "/" + name, obj)
  }

  override def unpersist(name: String): Unit = {
    zk.delete().forPath(WORKING_DIR + "/" + name)
  }

  override def read[T: ClassTag](prefix: String): Seq[T] = {
    zk.getChildren.forPath(WORKING_DIR).asScala
      .filter(_.startsWith(prefix)).flatMap(deserializeFromFile[T])
  }

  override def close() {
    zk.close()
  }

  private def serializeIntoFile(path: String, value: AnyRef) {
    val serialized = serializer.newInstance().serialize(value)
    val bytes = new Array[Byte](serialized.remaining())
    serialized.get(bytes)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(path, bytes)
  }

  private def deserializeFromFile[T](filename: String)(implicit m: ClassTag[T]): Option[T] = {
    val fileData = zk.getData().forPath(WORKING_DIR + "/" + filename)
    try {
      Some(serializer.newInstance().deserialize[T](ByteBuffer.wrap(fileData)))
    } catch {
      case e: Exception =>
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(WORKING_DIR + "/" + filename)
        None
    }
  }
}
Example 18
Source File: MesosClusterPersistenceEngine.scala From sparkoscope with Apache License 2.0
package org.apache.spark.scheduler.cluster.mesos

import scala.collection.JavaConverters._

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode
import org.apache.zookeeper.KeeperException.NoNodeException

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils

private[spark] class ZookeeperMesosClusterPersistenceEngine(
    baseDir: String,
    zk: CuratorFramework,
    conf: SparkConf)
  extends MesosClusterPersistenceEngine with Logging {

  private val WORKING_DIR =
    conf.get("spark.deploy.zookeeper.dir", "/spark_mesos_dispatcher") + "/" + baseDir

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)

  def path(name: String): String = {
    WORKING_DIR + "/" + name
  }

  override def expunge(name: String): Unit = {
    zk.delete().forPath(path(name))
  }

  override def persist(name: String, obj: Object): Unit = {
    val serialized = Utils.serialize(obj)
    val zkPath = path(name)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(zkPath, serialized)
  }

  override def fetch[T](name: String): Option[T] = {
    val zkPath = path(name)
    try {
      val fileData = zk.getData().forPath(zkPath)
      Some(Utils.deserialize[T](fileData))
    } catch {
      case e: NoNodeException => None
      case e: Exception =>
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(zkPath)
        None
    }
  }

  override def fetchAll[T](): Iterable[T] = {
    zk.getChildren.forPath(WORKING_DIR).asScala.flatMap(fetch[T])
  }
}
Example 19
Source File: ZkUtils.scala From CMAK with Apache License 2.0
package kafka.manager.utils

import java.nio.charset.StandardCharsets

import org.apache.curator.framework.CuratorFramework
import org.apache.kafka.common.TopicPartition
import org.apache.zookeeper.CreateMode
import org.apache.zookeeper.KeeperException.{NoNodeException, NodeExistsException}
import org.apache.zookeeper.data.Stat

def replicaAssignmentZkData(map: Map[String, Seq[Int]]): String = {
  toJson(Map("version" -> 1, "partitions" -> map))
}

def readData(curator: CuratorFramework, path: String): (String, Stat) = {
  val stat: Stat = new Stat()
  val dataStr: String = curator.getData.storingStatIn(stat).forPath(path)
  (dataStr, stat)
}

def readDataMaybeNull(curator: CuratorFramework, path: String): (Option[String], Stat) = {
  val stat: Stat = new Stat()
  try {
    val dataStr: String = curator.getData.storingStatIn(stat).forPath(path)
    (Option(dataStr), stat)
  } catch {
    case e: NoNodeException => {
      (None, stat)
    }
    case e2: Throwable => throw e2
  }
}

def getPartitionReassignmentZkData(partitionsToBeReassigned: Map[TopicPartition, Seq[Int]]): String = {
  toJson(Map("version" -> 1, "partitions" -> partitionsToBeReassigned.map(e =>
    Map("topic" -> e._1.topic, "partition" -> e._1.partition, "replicas" -> e._2))))
}
Example 20
Source File: ZookeeperDistributedQueue.scala From Linkis with Apache License 2.0
package com.webank.wedatasphere.linkis.resourcemanager.notify

import java.util.Collections

import com.webank.wedatasphere.linkis.common.utils.Logging
import org.apache.zookeeper.ZooDefs.Ids
import org.apache.zookeeper.{CreateMode, KeeperException, ZKUtil, ZooKeeper}

import scala.collection.JavaConversions._
import scala.collection.mutable

class ZookeeperDistributedQueue(zk: ZooKeeper, var queueName: String) extends DistributedQueue[Array[Byte]] with Logging {

  if (!queueName.startsWith("/")) queueName = "/" + queueName

  try
    if (zk.exists(queueName, false) == null)
      zk.create(queueName, new Array[Byte](0), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT)
  catch {
    case e: KeeperException => error(s"Failed to create queue[$queueName]: ", e)
  }

  override def offer(value: Array[Byte]): Unit = {
    zk.create(queueName + "/element", value, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT_SEQUENTIAL)
  }

  override def poll(): Array[Byte] = {
    val path = head()
    if (path == null) return null
    val value = zk.getData(path, false, null)
    zk.delete(path, -1)
    value
  }

  override def peek(): Array[Byte] = {
    val path = head()
    if (path == null) return null
    zk.getData(path, false, null)
  }

  override def destroy(): Unit = {
    try
      if (zk.exists(queueName, false) == null) info(s"Queue[$queueName] already destroyed.")
      else ZKUtil.deleteRecursive(zk, queueName)
    catch {
      case e: KeeperException => error(s"Failed to destroy queue[$queueName]: ", e)
    }
  }

  private def head(): String = {
    val elements = zk.getChildren(queueName, false)
    if (elements.size == 0) return null
    Collections.sort(elements)
    queueName + "/" + elements.get(0)
  }

  override def copyToArray(): Array[Array[Byte]] = {
    val elements = zk.getChildren(queueName, false)
    if (elements.size == 0) return new Array[Array[Byte]](0)
    elements.map({ e => zk.getData(queueName + "/" + e, false, null) }).toArray
  }

  def indexOf(bytes: Array[Byte]): String = {
    val elements = zk.getChildren(queueName, false)
    elements.find(e => bytes.equals(zk.getData(queueName + "/" + e, false, null))).getOrElse("")
  }

  def copyToMap(): mutable.Map[String, Array[Byte]] = {
    val resultMap = mutable.Map.empty[String, Array[Byte]]
    val elements = zk.getChildren(queueName, false)
    if (elements.size == 0) return resultMap
    elements.map(e => resultMap.put(e, zk.getData(queueName + "/" + e, false, null)))
    resultMap
  }

  def remove(index: String) = if (index.length != 0) zk.delete(queueName + "/" + index, -1)
}

object ZookeeperDistributedQueue {

  def apply(queueName: String): ZookeeperDistributedQueue =
    new ZookeeperDistributedQueue(ZookeeperUtils.getOrCreateZookeeper(), queueName)

  def apply(zk: ZooKeeper, queueName: String): ZookeeperDistributedQueue =
    new ZookeeperDistributedQueue(zk, queueName)
}
Example 21
Source File: ZooKeeperPersistenceEngine.scala From drizzle-spark with Apache License 2.0
package org.apache.spark.deploy.master

import java.nio.ByteBuffer

import scala.collection.JavaConverters._
import scala.reflect.ClassTag

import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode

import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkCuratorUtil
import org.apache.spark.internal.Logging
import org.apache.spark.serializer.Serializer

private[master] class ZooKeeperPersistenceEngine(conf: SparkConf, val serializer: Serializer)
  extends PersistenceEngine
  with Logging {

  private val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/master_status"
  private val zk: CuratorFramework = SparkCuratorUtil.newClient(conf)

  SparkCuratorUtil.mkdir(zk, WORKING_DIR)

  override def persist(name: String, obj: Object): Unit = {
    serializeIntoFile(WORKING_DIR + "/" + name, obj)
  }

  override def unpersist(name: String): Unit = {
    zk.delete().forPath(WORKING_DIR + "/" + name)
  }

  override def read[T: ClassTag](prefix: String): Seq[T] = {
    zk.getChildren.forPath(WORKING_DIR).asScala
      .filter(_.startsWith(prefix)).flatMap(deserializeFromFile[T])
  }

  override def close() {
    zk.close()
  }

  private def serializeIntoFile(path: String, value: AnyRef) {
    val serialized = serializer.newInstance().serialize(value)
    val bytes = new Array[Byte](serialized.remaining())
    serialized.get(bytes)
    zk.create().withMode(CreateMode.PERSISTENT).forPath(path, bytes)
  }

  private def deserializeFromFile[T](filename: String)(implicit m: ClassTag[T]): Option[T] = {
    val fileData = zk.getData().forPath(WORKING_DIR + "/" + filename)
    try {
      Some(serializer.newInstance().deserialize[T](ByteBuffer.wrap(fileData)))
    } catch {
      case e: Exception =>
        logWarning("Exception while reading persisted file, deleting", e)
        zk.delete().forPath(WORKING_DIR + "/" + filename)
        None
    }
  }
}