kafka.admin.AdminUtils Scala Examples

The following examples show how to use kafka.admin.AdminUtils. Each one is drawn from an open-source project; the header above the code names the source file, the project it comes from, and its license.
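AdminUtils is the ZooKeeper-backed admin API that predates Kafka's AdminClient, and its method signatures changed across versions: in Kafka 0.8.x they take an org.I0Itec.zkclient.ZkClient (Examples 1, 3 and 4), while from 0.9 onward they take a kafka.utils.ZkUtils (Example 2). As a minimal self-contained sketch against the 0.8.x API, with the connection string, timeouts, and topic name as placeholder assumptions:

import java.util.Properties

import kafka.admin.AdminUtils
import kafka.utils.ZKStringSerializer
import org.I0Itec.zkclient.ZkClient

object CreateTopicSketch extends App {
  // Placeholder ZooKeeper address and timeouts; adjust for your environment
  val zkClient = new ZkClient("localhost:2181", 10000, 10000, ZKStringSerializer)
  try {
    if (!AdminUtils.topicExists(zkClient, "example-topic")) {
      // 4 partitions, replication factor 1, default per-topic config
      AdminUtils.createTopic(zkClient, "example-topic", 4, 1, new Properties)
    }
  } finally {
    zkClient.close()
  }
}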
Example 1
Source File: KafkaClient.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.streaming.kafka.lib.util

import kafka.admin.AdminUtils
import kafka.cluster.Broker
import kafka.common.TopicAndPartition
import kafka.consumer.SimpleConsumer
import kafka.utils.{ZKStringSerializer, ZkUtils}
import org.I0Itec.zkclient.ZkClient
import org.apache.gearpump.streaming.kafka.lib.source.consumer.KafkaConsumer
import org.apache.gearpump.streaming.kafka.util.KafkaConfig
import org.apache.gearpump.util.LogUtil
import org.apache.kafka.clients.producer.KafkaProducer
import org.apache.kafka.common.serialization.Serializer

object KafkaClient {
  private val LOG = LogUtil.getLogger(classOf[KafkaClient])

  val factory = new KafkaClientFactory

  class KafkaClientFactory extends java.io.Serializable {
    def getKafkaClient(config: KafkaConfig): KafkaClient = {
      val consumerConfig = config.getConsumerConfig
      val zkClient = new ZkClient(consumerConfig.zkConnect, consumerConfig.zkSessionTimeoutMs,
        consumerConfig.zkConnectionTimeoutMs, ZKStringSerializer)
      new KafkaClient(config, zkClient)
    }
  }
}

class KafkaClient(config: KafkaConfig, zkClient: ZkClient) {
  import org.apache.gearpump.streaming.kafka.lib.util.KafkaClient._

  private val consumerConfig = config.getConsumerConfig

  def getTopicAndPartitions(consumerTopics: List[String]): Array[TopicAndPartition] = {
    try {
      ZkUtils.getPartitionsForTopics(zkClient, consumerTopics).flatMap {
        case (topic, partitions) => partitions.map(TopicAndPartition(topic, _))
      }.toArray
    } catch {
      case e: Exception =>
        LOG.error(e.getMessage)
        throw e
    }
  }

  def getBroker(topic: String, partition: Int): Broker = {
    try {
      val leader = ZkUtils.getLeaderForPartition(zkClient, topic, partition)
        .getOrElse(throw new RuntimeException(
          s"leader not available for TopicAndPartition($topic, $partition)"))
      ZkUtils.getBrokerInfo(zkClient, leader)
        .getOrElse(throw new RuntimeException(s"broker info not found for leader $leader"))
    } catch {
      case e: Exception =>
        LOG.error(e.getMessage)
        throw e
    }
  }

  def createConsumer(topic: String, partition: Int, startOffsetTime: Long): KafkaConsumer = {
    val broker = getBroker(topic, partition)
    val soTimeout = consumerConfig.socketTimeoutMs
    val soBufferSize = consumerConfig.socketReceiveBufferBytes
    val clientId = consumerConfig.clientId
    val fetchSize = consumerConfig.fetchMessageMaxBytes
    val consumer = new SimpleConsumer(broker.host, broker.port, soTimeout, soBufferSize, clientId)
    KafkaConsumer(topic, partition, startOffsetTime, fetchSize, consumer)
  }

  def createProducer[K, V](keySerializer: Serializer[K],
      valueSerializer: Serializer[V]): KafkaProducer[K, V] = {
    new KafkaProducer[K, V](config.getProducerConfig, keySerializer, valueSerializer)
  }

  
  /** Creates the topic if it does not already exist.
   *  Returns true if the topic already existed, false if it was newly created. */
  def createTopic(topic: String, partitions: Int, replicas: Int): Boolean = {
    try {
      if (AdminUtils.topicExists(zkClient, topic)) {
        LOG.info(s"topic $topic exists")
        true
      } else {
        AdminUtils.createTopic(zkClient, topic, partitions, replicas)
        LOG.info(s"created topic $topic")
        false
      }
    } catch {
      case e: Exception =>
        LOG.error(e.getMessage)
        throw e
    }
  }

  def close(): Unit = {
    zkClient.close()
  }
} 
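A typical call sequence against this helper might look as follows; the KafkaConfig value is gearpump-specific and assumed to be built elsewhere:

// Sketch: `config` is an org.apache.gearpump.streaming.kafka.util.KafkaConfig built elsewhere
val client = KafkaClient.factory.getKafkaClient(config)
try {
  client.createTopic("events", partitions = 4, replicas = 1)
  val topicAndPartitions = client.getTopicAndPartitions(List("events"))
  topicAndPartitions.foreach(tp => println(client.getBroker(tp.topic, tp.partition)))
} finally {
  client.close()
}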
Example 2
Source File: KafkaConfiguratorIntSpec.scala    From kafka-configurator   with BSD 3-Clause "New" or "Revised" License
package com.sky.kafka.configurator

import common.KafkaIntSpec
import kafka.admin.AdminUtils
import org.scalatest.concurrent.Eventually

import scala.util.Success

class KafkaConfiguratorIntSpec extends KafkaIntSpec with Eventually {

  "KafkaConfigurator" should "create new topics in Kafka from multiple input files" in {
    val topics = List("topic1", "topic2", "topic3")

    topics.foreach(AdminUtils.topicExists(zkUtils, _) shouldBe false)

    Main.run(testArgs(Seq("/topic-configuration.yml", "/topic-configuration-2.yml")), Map.empty) shouldBe a[Success[_]]

    eventually {
      withClue("Topic exists: ") {
        topics.foreach(AdminUtils.topicExists(zkUtils, _) shouldBe true)
      }
    }
  }

  it should "still configure all topics when one fails" in {
    val correctTopics = List("correctConfig1", "correctConfig2")
    val errorTopic = "errorConfig"

    (correctTopics :+ errorTopic).foreach(AdminUtils.topicExists(zkUtils, _) shouldBe false)

    Main.run(testArgs(Seq("/topic-configuration-with-error.yml")), Map.empty) shouldBe a[Success[_]]

    eventually {
      withClue("Topic exists: ") {
        correctTopics.foreach(AdminUtils.topicExists(zkUtils, _) shouldBe true)
      }
      withClue("Topic doesn't exist: ") {
        AdminUtils.topicExists(zkUtils, errorTopic) shouldBe false
      }
    }
  }

  it should "configure topics from correct files if another input file is empty" in {
    val topic = "topic4"

    AdminUtils.topicExists(zkUtils, topic) shouldBe false

    Main.run(testArgs(Seq("/topic-configuration-3.yml", "/no-topics.yml")), Map.empty) shouldBe a[Success[_]]

    eventually {
      withClue("Topic exists: ") {
        AdminUtils.topicExists(zkUtils, topic) shouldBe true
      }
    }
  }

  private def testArgs(filePaths: Seq[String]): Array[String] =
    Array(
      "-f", filePaths.map(path => getClass.getResource(path).getPath).mkString(","),
      "--bootstrap-servers", s"localhost:${kafkaServer.kafkaPort}"
    )
} 
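Note that this spec passes a ZkUtils handle rather than a raw ZkClient: from Kafka 0.9 onward the AdminUtils methods take ZkUtils. Here zkUtils is provided by the KafkaIntSpec base trait; built by hand it would look roughly like this (the address and timeouts are placeholder assumptions):

import kafka.utils.ZkUtils

// Placeholder connection settings; the final argument disables ZooKeeper security
val zkUtils = ZkUtils("localhost:2181", 10000, 10000, false)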
Example 3
Source File: KafkaServerHarness.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.streaming.kafka.util

import java.util.Properties

import kafka.admin.AdminUtils
import kafka.common.KafkaException
import kafka.server.{KafkaConfig => KafkaServerConfig, KafkaServer}
import kafka.utils.{TestUtils, Utils}

trait KafkaServerHarness extends ZookeeperHarness {
  val configs: List[KafkaServerConfig]
  private var servers: List[KafkaServer] = null
  private var brokerList: String = null

  def getServers: List[KafkaServer] = servers
  def getBrokerList: String = brokerList

  override def setUp(): Unit = {
    super.setUp()
    if (configs.isEmpty) {
      throw new KafkaException("Must supply at least one server config.")
    }
    brokerList = TestUtils.getBrokerListStrFromConfigs(configs)
    servers = configs.map(TestUtils.createServer(_))
  }

  override def tearDown(): Unit = {
    servers.foreach(_.shutdown())
    servers.foreach(_.config.logDirs.foreach(Utils.rm))
    super.tearDown()
  }

  def createTopicUntilLeaderIsElected(topic: String, partitions: Int,
      replicas: Int, timeout: Long = 10000): Map[Int, Option[Int]] = {
    val zkClient = getZkClient
    // Create the topic with the default per-topic configuration
    AdminUtils.createTopic(zkClient, topic, partitions, replicas, new Properties)
    // Wait until the update-metadata request for the new topic reaches all
    // servers, then wait for a leader to be elected for each partition
    (0 until partitions).map { i =>
      TestUtils.waitUntilMetadataIsPropagated(servers, topic, i, timeout)
      i -> TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic, i, timeout)
    }.toMap
  }
} 
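The returned map associates each partition with the broker id of its elected leader, so a test mixing in this harness can assert that election succeeded; a sketch:

// Sketch: inside a test that mixes in KafkaServerHarness
val leaders = createTopicUntilLeaderIsElected("input-topic", partitions = 2, replicas = 1)
assert(leaders.values.forall(_.isDefined), "every partition should have an elected leader")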
Example 4
Source File: MetricsUtil.scala    From Swallow   with Apache License 2.0
package com.intel.hibench.common.streaming.metrics

import com.intel.hibench.common.streaming.Platform
import kafka.admin.AdminUtils
import kafka.utils.ZKStringSerializer
import org.I0Itec.zkclient.ZkClient

object MetricsUtil {

  val TOPIC_CONF_FILE_NAME = "metrics_topic.conf"

  def getTopic(platform: Platform, sourceTopic: String, producerNum: Int,
               recordPerInterval: Long, intervalSpan: Int): String = {
    val topic = s"${platform}_${sourceTopic}_${producerNum}_${recordPerInterval}" +
      s"_${intervalSpan}_${System.currentTimeMillis()}"
    println(s"metrics is being written to kafka topic $topic")
    topic
  }

  def createTopic(zkConnect: String, topic: String, partitions: Int): Unit = {
    val zkClient = new ZkClient(zkConnect, 6000, 6000, ZKStringSerializer)
    try {
      AdminUtils.createTopic(zkClient, topic, partitions, 1)
      // Poll until the topic registration becomes visible in ZooKeeper
      while (!AdminUtils.topicExists(zkClient, topic)) {
        Thread.sleep(100)
      }
    } finally {
      zkClient.close()
    }
  }
}
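The two helpers compose naturally; a sketch, where `platform` is a value of HiBench's Platform enum and the ZooKeeper address and counts are placeholder assumptions:

// Hypothetical values for illustration
val topic = MetricsUtil.getTopic(platform, "source-topic", producerNum = 2,
  recordPerInterval = 1000L, intervalSpan = 50)
MetricsUtil.createTopic("localhost:2181", topic, partitions = 4)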