org.apache.zookeeper.server.NIOServerCnxnFactory Scala Examples

The following examples show how to use org.apache.zookeeper.server.NIOServerCnxnFactory from Scala. Each example is taken from an open-source project; the source file and project are noted above each listing.
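All three examples follow the same basic pattern: build a ZooKeeperServer over a snapshot and log directory, bind a NIOServerCnxnFactory to an address with a maximum connection count, and start the server through the factory. The minimal sketch below shows that pattern on its own; the temporary directories, tick time, and fixed port 2181 are illustrative choices, not taken from any single example.

import java.net.InetSocketAddress
import java.nio.file.Files

import org.apache.zookeeper.server.{NIOServerCnxnFactory, ZooKeeperServer}

object MinimalEmbeddedZk {
  def main(args: Array[String]): Unit = {
    // temporary directories for snapshots and the transaction log
    val snapDir = Files.createTempDirectory("zk-snap").toFile
    val logDir  = Files.createTempDirectory("zk-log").toFile

    // tick time (ms) drives session timeouts and heartbeats
    val server = new ZooKeeperServer(snapDir, logDir, 500)

    // bind the NIO connection factory and start the server through it
    val factory = new NIOServerCnxnFactory
    factory.configure(new InetSocketAddress("localhost", 2181), 16)
    factory.startup(server)

    // ... run tests against localhost:2181 ...

    // shutting down the factory also stops the ZooKeeperServer it started
    factory.shutdown()
  }
}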
Example 1
Source File: KafkaTestUtils.scala    From spark-kafka-writer    with Apache License 2.0
package com.github.benfradet.spark.kafka.writer

import java.io.File
import java.net.InetSocketAddress
import java.util.Arrays.asList
import java.util.Properties

import kafka.server.{KafkaConfig, KafkaServerStartable}
import org.apache.kafka.clients.admin.{AdminClient, NewTopic}
import org.apache.zookeeper.server.{NIOServerCnxnFactory, ZooKeeperServer}

import scala.util.Random

class KafkaTestUtils {
  // zk
  private val zkHost = "localhost"
  private val zkPort = 2181
  private var zk: EmbeddedZookeeper = _
  private var zkReady = false

  // connect string used by brokerProps below (definition inferred from zkHost/zkPort; not shown in the original excerpt)
  private def zkAddress: String = s"$zkHost:$zkPort"

  // kafka
  private val brokerHost = "localhost"
  private val brokerPort = 9092
  private var kafkaServer: KafkaServerStartable = _
  private var topicCountMap = Map.empty[String, Int]
  private var brokerReady = false
  private var kafkaAdminClient: AdminClient = _

  
  @scala.annotation.varargs
  def createTopics(topics: String*): Unit =
    for (topic <- topics) {
      kafkaAdminClient.createTopics(asList(new NewTopic(topic, 1, 1: Short)))
      Thread.sleep(1000)
      topicCountMap = topicCountMap + (topic -> 1)
    }

  private def brokerProps: Properties = {
    val props = new Properties
    props.put("broker.id", "0")
    props.put("host.name", brokerHost)
    props.put("log.dir",
      {
        val dir = System.getProperty("java.io.tmpdir") +
          "/logDir-" + new Random().nextInt(Int.MaxValue)
        val f = new File(dir)
        f.mkdirs()
        dir
      }
    )
    props.put("port", brokerPort.toString)
    props.put("zookeeper.connect", zkAddress)
    props.put("zookeeper.connection.timeout.ms", "10000")
    props.put("offsets.topic.replication.factor", "1")
    props
  }

  private class EmbeddedZookeeper(hostname: String, port: Int) {
    private val snapshotDir = {
      val f = new File(System.getProperty("java.io.tmpdir"),
        "snapshotDir-" + Random.nextInt(Int.MaxValue))
      f.mkdirs()
      f
    }
    private val logDir = {
      val f = new File(System.getProperty("java.io.tmpdir"),
        "logDir-" + Random.nextInt(Int.MaxValue))
      f.mkdirs()
      f
    }

    private val factory = {
      val zkTickTime = 500
      val zk = new ZooKeeperServer(snapshotDir, logDir, zkTickTime)
      val f = new NIOServerCnxnFactory
      val maxCnxn = 16
      f.configure(new InetSocketAddress(hostname, port), maxCnxn)
      f.startup(zk)
      f
    }

    def shutdown(): Unit = {
      factory.shutdown()
      snapshotDir.delete()
      logDir.delete()
      ()
    }
  }
} 
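KafkaTestUtils above is excerpted: the methods that actually start and stop the embedded ZooKeeper and the Kafka broker are not shown. Using the same dependencies already imported in the example, the pieces typically fit together roughly as in the self-contained sketch below; the object name, ports, and directories are illustrative assumptions, not the project's actual code.

import java.net.InetSocketAddress
import java.nio.file.Files
import java.util.Properties

import kafka.server.{KafkaConfig, KafkaServerStartable}
import org.apache.kafka.clients.admin.AdminClient
import org.apache.zookeeper.server.{NIOServerCnxnFactory, ZooKeeperServer}

object EmbeddedKafkaSketch {
  def main(args: Array[String]): Unit = {
    // embedded ZooKeeper on localhost:2181, mirroring the EmbeddedZookeeper helper above
    val zkDir = Files.createTempDirectory("zk").toFile
    val zkServer = new ZooKeeperServer(zkDir, zkDir, 500)
    val zkFactory = new NIOServerCnxnFactory
    zkFactory.configure(new InetSocketAddress("localhost", 2181), 16)
    zkFactory.startup(zkServer)

    // single embedded broker pointing at the embedded ZooKeeper (cf. brokerProps above)
    val props = new Properties
    props.put("broker.id", "0")
    props.put("host.name", "localhost")
    props.put("port", "9092")
    props.put("log.dir", Files.createTempDirectory("kafka-logs").toString)
    props.put("zookeeper.connect", "localhost:2181")
    props.put("offsets.topic.replication.factor", "1")
    val broker = new KafkaServerStartable(new KafkaConfig(props))
    broker.startup()

    // an AdminClient like the one createTopics uses would connect this way
    val adminProps = new Properties
    adminProps.put("bootstrap.servers", "localhost:9092")
    val admin = AdminClient.create(adminProps)

    // ... create topics, run producers and consumers ...

    admin.close()
    broker.shutdown()
    zkFactory.shutdown()
  }
}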
Example 2
Source File: EmbeddedZookeeper.scala    From embedded-kafka    with Apache License 2.0
package com.tuplejump.embedded.kafka

import java.io.{File => JFile}
import java.net.InetSocketAddress
import java.util.concurrent.atomic.AtomicReference

import scala.util.Try
import org.I0Itec.zkclient.exception.ZkMarshallingError
import org.I0Itec.zkclient.serialize.ZkSerializer
import org.apache.zookeeper.server.{ NIOServerCnxnFactory, ZooKeeperServer }


// The class declaration is elided in this excerpt; the header and fields below
// are inferred from how start() and shutdown() use them.
class EmbeddedZookeeper(val connectTo: String, tickTime: Int,
                        snapDir: JFile, dataDir: JFile) {

  private val logger = org.slf4j.LoggerFactory.getLogger(getClass)

  private val _zookeeper = new AtomicReference[Option[ZooKeeperServer]](None)
  private val _factory = new AtomicReference[Option[NIOServerCnxnFactory]](None)

  def isRunning: Boolean = _zookeeper.get.exists(_.isRunning)
  def start(): Unit = {
    val server = new ZooKeeperServer(snapDir, dataDir, tickTime)
    _zookeeper.set(Some(server))

    val (ip, port) = {
      val splits = connectTo.split(":")
      (splits(0), splits(1).toInt)
    }

    val f = new NIOServerCnxnFactory()
    f.configure(new InetSocketAddress(ip, port), 16)
    f.startup(server)

    _factory.set(Some(f))

    logger.info(s"ZooKeeperServer isRunning: $isRunning")
  }

  def shutdown(): Unit = {
    logger.info(s"Shutting down ZK NIOServerCnxnFactory.")

    for (v <- _factory.get) v.shutdown()
    _factory.set(None)

    for (v <- _zookeeper.get) {
      Try(v.shutdown())
      //awaitCond(!v.isRunning, 2000.millis)
      logger.info(s"ZooKeeper server shut down.")
    }
    _zookeeper.set(None)
  }
}

object DefaultStringSerializer extends ZkSerializer {

  @throws(classOf[ZkMarshallingError])
  def serialize(data: Object): Array[Byte] = data match {
    case a: String => a.getBytes("UTF-8")
    case _         => throw new ZkMarshallingError(s"Unsupported type '${data.getClass}'")
  }

  @throws(classOf[ZkMarshallingError])
  def deserialize(bytes: Array[Byte]): Object = bytes match {
    case b if Option(b).isEmpty => "" //ick
    case b                      => new String(bytes, "UTF-8")
  }
} 
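DefaultStringSerializer is meant to be handed to the I0Itec ZkClient so that znode data is read and written as UTF-8 strings. A minimal usage sketch, assuming an embedded server like the one above is reachable (the connect string, timeouts, and znode path are arbitrary):

import org.I0Itec.zkclient.ZkClient

object SerializerUsage {
  def main(args: Array[String]): Unit = {
    // session and connection timeouts in milliseconds
    val zkClient = new ZkClient("localhost:2181", 10000, 10000, DefaultStringSerializer)
    zkClient.createPersistent("/embedded-kafka-test", "hello")
    val value: String = zkClient.readData("/embedded-kafka-test")
    println(value) // prints "hello"
    zkClient.close()
  }
}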
Example 3
Source File: EmbeddedZookeperServer.scala    From affinity    with Apache License 2.0
package io.amient.affinity.kafka

import java.io.File
import java.net.InetSocketAddress

import org.apache.zookeeper.server.{NIOServerCnxnFactory, ZooKeeperServer}
import org.slf4j.LoggerFactory

trait EmbeddedZookeperServer extends EmbeddedService {

  private val logger = LoggerFactory.getLogger(classOf[EmbeddedZookeperServer])

  private val embeddedZkPath = new File(testDir, "local-zookeeper")
  // smaller testDir footprint: zookeeper.preAllocSize defaults to 65536 KB (64 MB) per transaction log file
  System.getProperties().setProperty("zookeeper.preAllocSize", "64")
  private val zookeeper = new ZooKeeperServer(new File(embeddedZkPath, "snapshots"), new File(embeddedZkPath, "logs"), 3000)
  private val zkFactory = new NIOServerCnxnFactory
  zkFactory.configure(new InetSocketAddress(0), 10)
  val zkConnect = "localhost:" + zkFactory.getLocalPort
  logger.info(s"Embedded ZooKeeper $zkConnect, data directory: $testDir")
  zkFactory.startup(zookeeper)

  abstract override def close(): Unit = try zkFactory.shutdown() finally super.close()

}
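The notable choice in this example is binding the factory to port 0 so the operating system picks a free port; configure() binds the socket immediately, which is why zkConnect can be built from getLocalPort before startup() is called. A standalone sketch of that ephemeral-port pattern (directories and tick time are illustrative):

import java.net.InetSocketAddress
import java.nio.file.Files

import org.apache.zookeeper.server.{NIOServerCnxnFactory, ZooKeeperServer}

object EphemeralPortZk {
  def main(args: Array[String]): Unit = {
    val dir = Files.createTempDirectory("zk").toFile
    val server = new ZooKeeperServer(dir, dir, 3000)

    val factory = new NIOServerCnxnFactory
    factory.configure(new InetSocketAddress(0), 10) // port 0: let the OS choose
    val connect = "localhost:" + factory.getLocalPort // known once configure() has bound the socket
    factory.startup(server)

    println(s"embedded ZooKeeper at $connect")
    factory.shutdown()
  }
}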