org.apache.kafka.common.PartitionInfo Scala Examples
The following examples show how to use org.apache.kafka.common.PartitionInfo.
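PartitionInfo is a plain metadata value: it describes one partition of a topic via its topic name, partition number, leader node, replica set, and in-sync replica set. As a quick orientation before the project examples, here is a minimal, self-contained sketch (the topic and broker values are made up purely for illustration):

import org.apache.kafka.common.{Node, PartitionInfo}

object PartitionInfoSketch extends App {
  // A broker node: id, host, port (illustrative values only)
  val leader = new Node(1, "broker-1", 9092)

  // PartitionInfo(topic, partition, leader, replicas, inSyncReplicas)
  val info = new PartitionInfo("example-topic", 0, leader, Array(leader), Array(leader))

  println(info.topic())                 // example-topic
  println(info.partition())             // 0
  println(info.leader().host())         // broker-1
  println(info.inSyncReplicas().length) // 1
}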
Example 1
Source File: WordCountProducer.scala From akka_streams_tutorial with MIT License
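This example implements a custom Kafka Partitioner. It asks the Cluster for the available PartitionInfo list of the topic, reserves partition 0 for messages containing the fake-news keyword, and sends every other message to a randomly chosen remaining partition.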
package alpakka.kafka

import java.util
import java.util.concurrent.ThreadLocalRandom

import akka.actor.ActorSystem
import akka.kafka.ProducerMessage.Message
import akka.kafka.ProducerSettings
import akka.kafka.scaladsl.Producer
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.{Done, NotUsed}
import org.apache.kafka.clients.producer.{Partitioner, ProducerRecord}
import org.apache.kafka.common.errors.{NetworkException, UnknownTopicOrPartitionException}
import org.apache.kafka.common.serialization.StringSerializer
import org.apache.kafka.common.{Cluster, PartitionInfo}

import scala.concurrent.Future
import scala.concurrent.duration._

class CustomPartitioner extends Partitioner {

  override def partition(topic: String, key: Any, keyBytes: Array[Byte], value: Any, valueBytes: Array[Byte], cluster: Cluster): Int = {
    val partitionInfoList: util.List[PartitionInfo] = cluster.availablePartitionsForTopic(topic)
    val partitionCount = partitionInfoList.size
    val fakeNewsPartition = 0

    //println("CustomPartitioner received key: " + key + " and value: " + value)

    if (value.toString.contains(WordCountProducer.fakeNewsKeyword)) {
      //println("CustomPartitioner send message: " + value + " to fakeNewsPartition")
      fakeNewsPartition
    }
    else ThreadLocalRandom.current.nextInt(1, partitionCount) // pick a random partition other than the fakeNews partition
  }

  override def close(): Unit = {
    println("CustomPartitioner: " + Thread.currentThread + " received close")
  }

  override def configure(configs: util.Map[String, _]): Unit = {
    println("CustomPartitioner received configure with configuration: " + configs)
  }
}

object CustomPartitioner {
  private def deserialize[V](objectData: Array[Byte]): V =
    org.apache.commons.lang3.SerializationUtils.deserialize(objectData).asInstanceOf[V]
}
Example 2
Source File: KafkaConsumerProxy.scala From hydra with Apache License 2.0
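This Akka actor wraps a single Kafka Consumer and answers three kinds of requests: the latest offsets of a topic, the PartitionInfo for a topic, and the full topic listing. Each result is computed in a Future and piped back to the requesting actor.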
package hydra.kafka.consumer

import akka.actor.Actor
import akka.pattern.pipe
import hydra.kafka.consumer.KafkaConsumerProxy._
import hydra.kafka.util.KafkaUtils
import org.apache.kafka.clients.consumer.Consumer
import org.apache.kafka.common.{PartitionInfo, TopicPartition}

import scala.collection.JavaConverters._
import scala.collection.immutable.Map
import scala.concurrent.Future

class KafkaConsumerProxy extends Actor {

  private var _defaultConsumer: Consumer[String, String] = _

  private implicit val ec = context.dispatcher

  override def preStart(): Unit = {
    _defaultConsumer = KafkaUtils.stringConsumerSettings.createKafkaConsumer()
  }

  override def receive: Receive = {
    case GetLatestOffsets(topic) =>
      val requestor = sender
      pipe(latestOffsets(topic).map(LatestOffsetsResponse(topic, _))) to requestor

    case GetPartitionInfo(topic) =>
      val requestor = sender
      pipe(partitionInfo(topic).map(PartitionInfoResponse(topic, _))) to requestor

    case ListTopics =>
      val requestor = sender
      pipe(listTopics().map(ListTopicsResponse(_))) to requestor
  }

  override def postStop(): Unit = {
    _defaultConsumer.close()
  }

  private def latestOffsets(topic: String): Future[Map[TopicPartition, Long]] = {
    Future {
      val ts = _defaultConsumer
        .partitionsFor(topic)
        .asScala
        .map(pi => new TopicPartition(topic, pi.partition()))
      _defaultConsumer
        .endOffsets(ts.asJava)
        .asScala
        .map(tp => tp._1 -> tp._2.toLong)
        .toMap
    }
  }

  private def partitionInfo(topic: String): Future[Seq[PartitionInfo]] =
    Future(_defaultConsumer.partitionsFor(topic).asScala)

  private def listTopics(): Future[Map[String, Seq[PartitionInfo]]] = {
    Future(_defaultConsumer.listTopics().asScala.toMap)
      .map(res => res.mapValues(_.asScala.toSeq))
  }
}

object KafkaConsumerProxy {

  case class GetLatestOffsets(topic: String)

  case class LatestOffsetsResponse(topic: String, offsets: Map[TopicPartition, Long])

  case class GetPartitionInfo(topic: String)

  case class PartitionInfoResponse(topic: String, partitionInfo: Seq[PartitionInfo])

  case object ListTopics

  case class ListTopicsResponse(topics: Map[String, Seq[PartitionInfo]])
}
Example 3
Source File: HydraKafkaJsonSupport.scala From hydra with Apache License 2.0
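This trait provides spray-json formats so that Node and PartitionInfo metadata can be rendered as JSON by Hydra's HTTP layer. Reading a PartitionInfo back from JSON is intentionally left unimplemented.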
package hydra.kafka.marshallers

import akka.http.scaladsl.marshalling.{Marshaller, Marshalling}
import akka.http.scaladsl.model.ContentTypes
import akka.util.ByteString
import hydra.core.marshallers.HydraJsonSupport
import org.apache.kafka.common.{Node, PartitionInfo}
import spray.json.{JsNumber, JsObject, JsString, JsValue, JsonFormat}

import scala.concurrent.Future

trait HydraKafkaJsonSupport extends HydraJsonSupport {

  implicit object NodeJsonFormat extends JsonFormat[Node] {

    override def write(node: Node): JsValue = {
      JsObject(
        "id" -> JsNumber(node.idString),
        "host" -> JsString(node.host),
        "port" -> JsNumber(node.port)
      )
    }

    override def read(json: JsValue): Node = {
      json.asJsObject.getFields("id", "host", "port") match {
        case Seq(id, host, port) =>
          new Node(id.convertTo[Int], host.convertTo[String], port.convertTo[Int])
        case other =>
          spray.json.deserializationError("Cannot deserialize Node. Invalid input: " + other)
      }
    }
  }

  implicit object PartitionInfoJsonFormat extends JsonFormat[PartitionInfo] {

    import spray.json._

    override def write(p: PartitionInfo): JsValue = {
      JsObject(
        "partition" -> JsNumber(p.partition()),
        "leader" -> p.leader().toJson,
        "isr" -> JsArray(p.inSyncReplicas().toJson)
      )
    }

    override def read(json: JsValue): PartitionInfo = ???
  }

  implicit val stringFormat = Marshaller[String, ByteString] { ec => s =>
    Future.successful {
      List(
        Marshalling.WithFixedContentType(ContentTypes.`application/json`, () => ByteString(s))
      )
    }
  }
}
Example 4
Source File: HydraKafkaJsonSupportSpec.scala From hydra with Apache License 2.0
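This spec exercises the JSON support from the previous example: it round-trips a Node and asserts the exact JSON shape produced for a PartitionInfo, including the fact that deserializing a PartitionInfo raises NotImplementedError.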
package hydra.kafka.marshallers

import org.apache.kafka.common.{Node, PartitionInfo}
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike

class HydraKafkaJsonSupportSpec extends Matchers with AnyFunSpecLike with HydraKafkaJsonSupport {

  import spray.json._

  describe("When marshalling kafka objects") {

    it("converts Nodes") {
      val node = new Node(1, "host", 9092)
      node.toJson shouldBe """{"id":1,"host":"host","port":9092}""".parseJson
      val nodeJ = """{"id":1,"host":"host","port":9092}""".parseJson.convertTo[Node]
      nodeJ.id shouldBe 1
      nodeJ.host shouldBe "host"
      nodeJ.port shouldBe 9092
    }

    it("converts Partitions") {
      val node = new Node(1, "host", 9092)
      val p = new PartitionInfo("topic", 0, node, Array(node), Array(node))
      p.toJson shouldBe """{"partition":0,"leader":{"id":1,"host":"host","port":9092},"isr":[[{"id":1,"host":"host","port":9092}]]}""".parseJson
      intercept[NotImplementedError] {
        """{"partition":0,"leader":{"id":1,"host":"host","port":9092},"isr":[[{"id":1,"host":"host","port":9092}]]}""".parseJson
          .convertTo[PartitionInfo]
      }
    }
  }
}
Example 5
Source File: KafkaPartitionerSpec.scala From affinity with Apache License 2.0
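This spec builds a mock Cluster out of synthetic PartitionInfo instances and verifies that Kafka's DefaultPartitioner, Affinity's Murmur2Partitioner, and Kafka Streams' DefaultStreamPartitioner all place the same serialized key on the same partition.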
package io.amient.affinity.kafka

import java.util

import akka.serialization.SerializationExtension
import com.typesafe.config.ConfigFactory
import io.amient.affinity.AffinityActorSystem
import io.amient.affinity.avro.MemorySchemaRegistry
import io.amient.affinity.core.Murmur2Partitioner
import org.apache.kafka.common.{Cluster, Node, PartitionInfo}
import org.apache.kafka.streams.processor.internals.DefaultStreamPartitioner
import org.scalatest.{FlatSpec, Matchers}

import scala.collection.JavaConverters._

class KafkaPartitionerSpec extends FlatSpec with Matchers {

  def mockCluster(numParts: Int) = new Cluster(
    "mock-cluster",
    util.Arrays.asList[Node](),
    (0 to numParts - 1).map(p => new PartitionInfo("test", p, null, Array(), Array())).asJava,
    new util.HashSet[String],
    new util.HashSet[String])

  "kafka.DefaultPartitioner" should "have identical method to Murmur2Partitioner" in {
    val kafkaPartitioner = new org.apache.kafka.clients.producer.internals.DefaultPartitioner()
    val affinityPartitioner = new Murmur2Partitioner
    val key = "test-value-for-partitioner"
    val serializedKey: Array[Byte] = key.getBytes
    val kafkaP = kafkaPartitioner.partition("test", key, serializedKey, key, serializedKey, mockCluster(4))
    val affinityP = affinityPartitioner.partition(serializedKey, 4)
    kafkaP should equal(affinityP)
  }

  "KafkaAvroSerde" should "have identical serialization footprint as Akka AvroSerdeProxy" in {
    val cfg = Map(
      "schema.registry.class" -> classOf[MemorySchemaRegistry].getName,
      "schema.registry.id" -> "1"
    )

    val key = "6290853012217500191217"

    val system = AffinityActorSystem.create(ConfigFactory.parseMap(
      ((cfg.map { case (k, v) => ("affinity.avro." + k, v) }) + ("affinity.system.name" -> "KafkaPartitionerSpec")).asJava))

    val akkaSerializedKey = try {
      val serialization = SerializationExtension(system)
      serialization.serialize(key).get
    } finally {
      system.terminate()
    }

    val kafkaSerde = new KafkaAvroSerde()
    kafkaSerde.configure(cfg.asJava, true)
    val kafkaSerialized = kafkaSerde.serializer().serialize("test", key)

    akkaSerializedKey.mkString(".") should equal(kafkaSerialized.mkString("."))

    new Murmur2Partitioner().partition(akkaSerializedKey, 9) should be(4)
    new Murmur2Partitioner().partition(kafkaSerialized, 9) should be(4)

    val streamsPartitioner = new DefaultStreamPartitioner[Any, Any](kafkaSerde.serializer(), mockCluster(9))
    streamsPartitioner.partition("test", key, null, 9) should be(4)
    streamsPartitioner.partition("test", key, "irrelevant", 9) should be(4)
  }
}
Example 6
Source File: KafkaConsumer.scala From aecor with MIT License
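This class wraps the blocking Kafka consumer behind a purely functional interface in which every operation, including partitionsFor, runs on a dedicated single-threaded executor and returns its result inside the effect type F.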
package aecor.kafkadistributedprocessing.internal

import java.time.Duration
import java.util.Properties
import java.util.concurrent.Executors

import cats.effect.{ Async, ContextShift, Resource }
import cats.~>
import org.apache.kafka.clients.consumer.{ Consumer, ConsumerRebalanceListener, ConsumerRecords }
import org.apache.kafka.common.PartitionInfo
import org.apache.kafka.common.serialization.Deserializer

import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext
import scala.concurrent.duration.FiniteDuration

private[kafkadistributedprocessing] final class KafkaConsumer[F[_], K, V](
  withConsumer: (Consumer[K, V] => *) ~> F
) {

  def subscribe(topics: Set[String], listener: ConsumerRebalanceListener): F[Unit] =
    withConsumer(_.subscribe(topics.asJava, listener))

  def subscribe(topics: Set[String]): F[Unit] =
    withConsumer(_.subscribe(topics.asJava))

  val unsubscribe: F[Unit] =
    withConsumer(_.unsubscribe())

  def partitionsFor(topic: String): F[Set[PartitionInfo]] =
    withConsumer(_.partitionsFor(topic).asScala.toSet)

  def close: F[Unit] =
    withConsumer(_.close())

  def poll(timeout: FiniteDuration): F[ConsumerRecords[K, V]] =
    withConsumer(_.poll(Duration.ofNanos(timeout.toNanos)))
}

private[kafkadistributedprocessing] object KafkaConsumer {

  final class Create[F[_]] {
    def apply[K, V](
      config: Properties,
      keyDeserializer: Deserializer[K],
      valueDeserializer: Deserializer[V]
    )(implicit F: Async[F], contextShift: ContextShift[F]): Resource[F, KafkaConsumer[F, K, V]] = {

      val create = F.suspend {
        val executor = Executors.newSingleThreadExecutor()

        def eval[A](a: => A): F[A] =
          contextShift.evalOn(ExecutionContext.fromExecutor(executor)) {
            F.async[A] { cb =>
              executor.execute(new Runnable {
                override def run(): Unit =
                  cb {
                    try Right(a)
                    catch {
                      case e: Throwable => Left(e)
                    }
                  }
              })
            }
          }

        eval {
          val original = Thread.currentThread.getContextClassLoader
          Thread.currentThread.setContextClassLoader(null)
          val consumer = new org.apache.kafka.clients.consumer.KafkaConsumer[K, V](
            config,
            keyDeserializer,
            valueDeserializer
          )
          Thread.currentThread.setContextClassLoader(original)
          val withConsumer = new ((Consumer[K, V] => *) ~> F) {
            def apply[A](f: Consumer[K, V] => A): F[A] =
              eval(f(consumer))
          }
          new KafkaConsumer[F, K, V](withConsumer)
        }
      }
      Resource.make(create)(_.close)
    }
  }

  def create[F[_]]: Create[F] = new Create[F]
}