org.apache.kafka.common.serialization.ByteArraySerializer Scala Examples
The following examples show how to use org.apache.kafka.common.serialization.ByteArraySerializer.
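Before the project examples, here is a minimal, self-contained sketch of the serializer's most direct use (the broker address and topic name are placeholders, not taken from any of the projects below): ByteArraySerializer performs no transformation, so passing two instances to the KafkaProducer constructor yields a producer that writes raw bytes for both keys and values.

import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.kafka.common.serialization.ByteArraySerializer

object ByteArrayProducerSketch extends App {
  val props = new Properties()
  props.put("bootstrap.servers", "localhost:9092") // placeholder broker address

  // ByteArraySerializer passes the byte array through unchanged,
  // so both key and value are sent to Kafka as raw bytes.
  val producer = new KafkaProducer[Array[Byte], Array[Byte]](
    props, new ByteArraySerializer, new ByteArraySerializer)

  producer.send(new ProducerRecord[Array[Byte], Array[Byte]](
    "example-topic", "hello".getBytes("UTF-8")))
  producer.flush()
  producer.close()
}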
Example 1
Source File: ProducerStream.scala From reactive-kafka-microservice-template with Apache License 2.0
package com.omearac.producers

import akka.actor.{ActorRef, ActorSystem}
import akka.kafka.ProducerSettings
import akka.kafka.scaladsl.Producer
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Flow, Source}
import com.omearac.shared.JsonMessageConversion.Conversion
import com.omearac.shared.{AkkaStreams, EventSourcing}
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.serialization.{ByteArraySerializer, StringSerializer}

trait ProducerStream extends AkkaStreams with EventSourcing {
  implicit val system: ActorSystem
  def self: ActorRef

  def createStreamSource[msgType] = {
    Source.queue[msgType](Int.MaxValue, OverflowStrategy.backpressure)
  }

  def createStreamSink(producerProperties: Map[String, String]) = {
    val kafkaMBAddress = producerProperties("bootstrap-servers")
    val producerSettings = ProducerSettings(system, new ByteArraySerializer, new StringSerializer)
      .withBootstrapServers(kafkaMBAddress)
    Producer.plainSink(producerSettings)
  }

  def createStreamFlow[msgType: Conversion](producerProperties: Map[String, String]) = {
    val numberOfPartitions = producerProperties("num.partitions").toInt - 1
    val topicToPublish = producerProperties("publish-topic")
    val rand = new scala.util.Random
    val range = 0 to numberOfPartitions

    Flow[msgType].map { msg =>
      val partition = range(rand.nextInt(range.length))
      val stringJSONMessage = Conversion[msgType].convertToJson(msg)
      new ProducerRecord[Array[Byte], String](topicToPublish, partition, null, stringJSONMessage)
    }
  }
}
Example 2
Source File: NumericalDataProducer.scala From incubator-retired-gearpump with Apache License 2.0
package org.apache.gearpump.integrationtest.kafka

import java.util.Properties

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.kafka.common.serialization.ByteArraySerializer
import org.apache.log4j.Logger

import org.apache.gearpump.streaming.serializer.ChillSerializer

class NumericalDataProducer(topic: String, bootstrapServers: String) {

  private val LOG = Logger.getLogger(getClass)
  private val producer = createProducer
  private val WRITE_SLEEP_NANOS = 10
  private val serializer = new ChillSerializer[Int]
  var lastWriteNum = 0

  def start(): Unit = {
    produceThread.start()
  }

  def stop(): Unit = {
    if (produceThread.isAlive) {
      produceThread.interrupt()
      produceThread.join()
    }
    producer.close()
  }

  def producedNumbers: Range = {
    Range(1, lastWriteNum + 1)
  }

  private def createProducer: KafkaProducer[Array[Byte], Array[Byte]] = {
    val properties = new Properties()
    properties.setProperty("bootstrap.servers", bootstrapServers)
    new KafkaProducer[Array[Byte], Array[Byte]](properties,
      new ByteArraySerializer, new ByteArraySerializer)
  }

  private val produceThread = new Thread(new Runnable {
    override def run(): Unit = {
      try {
        while (!Thread.currentThread.isInterrupted) {
          lastWriteNum += 1
          val msg = serializer.serialize(lastWriteNum)
          val record = new ProducerRecord[Array[Byte], Array[Byte]](topic, msg)
          producer.send(record)
          Thread.sleep(0, WRITE_SLEEP_NANOS)
        }
      } catch {
        case ex: InterruptedException =>
          LOG.error("message producing is stopped by an interrupt")
      }
    }
  })
}
Example 3
Source File: AbstractKafkaSink.scala From incubator-retired-gearpump with Apache License 2.0
package org.apache.gearpump.streaming.kafka.lib.sink

import java.util.Properties

import org.apache.gearpump.Message
import org.apache.gearpump.streaming.kafka.lib.sink.AbstractKafkaSink.KafkaProducerFactory
import org.apache.gearpump.streaming.kafka.util.KafkaConfig
import org.apache.gearpump.streaming.kafka.util.KafkaConfig.KafkaConfigFactory
import org.apache.gearpump.streaming.sink.DataSink
import org.apache.gearpump.streaming.task.TaskContext
import org.apache.gearpump.util.LogUtil
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.kafka.common.serialization.ByteArraySerializer

object AbstractKafkaSink {
  private val LOG = LogUtil.getLogger(classOf[AbstractKafkaSink])

  val producerFactory = new KafkaProducerFactory {
    override def getKafkaProducer(config: KafkaConfig): KafkaProducer[Array[Byte], Array[Byte]] = {
      new KafkaProducer[Array[Byte], Array[Byte]](config.getProducerConfig,
        new ByteArraySerializer, new ByteArraySerializer)
    }
  }

  trait KafkaProducerFactory extends java.io.Serializable {
    def getKafkaProducer(config: KafkaConfig): KafkaProducer[Array[Byte], Array[Byte]]
  }
}

abstract class AbstractKafkaSink private[kafka](
    topic: String,
    props: Properties,
    kafkaConfigFactory: KafkaConfigFactory,
    factory: KafkaProducerFactory) extends DataSink {
  import org.apache.gearpump.streaming.kafka.lib.sink.AbstractKafkaSink._

  def this(topic: String, props: Properties) = {
    this(topic, props, new KafkaConfigFactory, AbstractKafkaSink.producerFactory)
  }

  private lazy val config = kafkaConfigFactory.getKafkaConfig(props)
  // Lazily construct producer since KafkaProducer is not serializable
  private lazy val producer = factory.getKafkaProducer(config)

  override def open(context: TaskContext): Unit = {
    LOG.info("KafkaSink opened")
  }

  override def write(message: Message): Unit = {
    message.value match {
      case (k: Array[Byte], v: Array[Byte]) =>
        val record = new ProducerRecord[Array[Byte], Array[Byte]](topic, k, v)
        producer.send(record)
        LOG.debug("KafkaSink sent record {} to Kafka", record)
      case v: Array[Byte] =>
        val record = new ProducerRecord[Array[Byte], Array[Byte]](topic, v)
        producer.send(record)
        LOG.debug("KafkaSink sent record {} to Kafka", record)
      case m =>
        val errorMsg = s"unexpected message type ${m.getClass}; " +
          s"Array[Byte] or (Array[Byte], Array[Byte]) required"
        LOG.error(errorMsg)
    }
  }

  override def close(): Unit = {
    producer.close()
    LOG.info("KafkaSink closed")
  }
}
Example 4
Source File: KafkaStore.scala From incubator-retired-gearpump with Apache License 2.0
package org.apache.gearpump.streaming.kafka.lib.store

import java.util.Properties

import com.twitter.bijection.Injection
import kafka.api.OffsetRequest
import org.apache.gearpump.Time.MilliSeconds
import org.apache.gearpump.streaming.kafka.lib.source.consumer.KafkaConsumer
import org.apache.gearpump.streaming.kafka.util.KafkaConfig
import org.apache.gearpump.streaming.kafka.util.KafkaConfig.KafkaConfigFactory
import org.apache.gearpump.streaming.transaction.api.{CheckpointStore, CheckpointStoreFactory}
import org.apache.gearpump.util.LogUtil
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.kafka.common.serialization.ByteArraySerializer

class KafkaStore private[kafka](
    val topic: String,
    val producer: KafkaProducer[Array[Byte], Array[Byte]],
    val optConsumer: Option[KafkaConsumer]) extends CheckpointStore {
  import org.apache.gearpump.streaming.kafka.lib.store.KafkaStore._

  private var maxTime: MilliSeconds = 0L

  override def persist(time: MilliSeconds, checkpoint: Array[Byte]): Unit = {
    // make sure checkpointed timestamp is monotonically increasing
    // hence (1, 1), (3, 2), (2, 3) is checkpointed as (1, 1), (3, 2), (3, 3)
    if (time > maxTime) {
      maxTime = time
    }
    val key = maxTime
    val value = checkpoint
    val message = new ProducerRecord[Array[Byte], Array[Byte]](
      topic, 0, Injection[Long, Array[Byte]](key), value)
    producer.send(message)
    LOG.debug("KafkaStore persisted state ({}, {})", key, value)
  }

  override def recover(time: MilliSeconds): Option[Array[Byte]] = {
    var checkpoint: Option[Array[Byte]] = None
    optConsumer.foreach { consumer =>
      while (consumer.hasNext && checkpoint.isEmpty) {
        val kafkaMsg = consumer.next()
        checkpoint = for {
          k <- kafkaMsg.key
          t <- Injection.invert[MilliSeconds, Array[Byte]](k).toOption
          c = kafkaMsg.msg if t >= time
        } yield c
      }
      consumer.close()
    }
    checkpoint match {
      case Some(c) => LOG.info(s"KafkaStore recovered checkpoint ($time, $c)")
      case None => LOG.info(s"no checkpoint existing for $time")
    }
    checkpoint
  }

  override def close(): Unit = {
    producer.close()
    LOG.info("KafkaStore closed")
  }
}
Example 5
Source File: ProcessingKafkaApplication.scala From Akka-Cookbook with MIT License
package com.packt.chapter8

import akka.actor.ActorSystem
import akka.kafka.scaladsl.{Consumer, Producer}
import akka.kafka.{ConsumerSettings, ProducerSettings, Subscriptions}
import akka.stream.{ActorMaterializer, ClosedShape}
import akka.stream.scaladsl.{Flow, GraphDSL, RunnableGraph, Sink, Source}
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, ByteArraySerializer, StringDeserializer, StringSerializer}

import scala.concurrent.duration._

object ProcessingKafkaApplication extends App {
  implicit val actorSystem = ActorSystem("SimpleStream")
  implicit val actorMaterializer = ActorMaterializer()

  val bootstrapServers = "localhost:9092"
  val kafkaTopic = "akka_streams_topic"
  val partition = 0
  val subscription = Subscriptions.assignment(new TopicPartition(kafkaTopic, partition))

  val consumerSettings = ConsumerSettings(actorSystem, new ByteArrayDeserializer, new StringDeserializer)
    .withBootstrapServers(bootstrapServers)
    .withGroupId("akka_streams_group")
    .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

  val producerSettings = ProducerSettings(actorSystem, new ByteArraySerializer, new StringSerializer)
    .withBootstrapServers(bootstrapServers)

  val runnableGraph = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder =>
    import GraphDSL.Implicits._

    val tickSource = Source.tick(0 seconds, 5 seconds, "Hello from Akka Streams using Kafka!")
    val kafkaSource = Consumer.plainSource(consumerSettings, subscription)
    val kafkaSink = Producer.plainSink(producerSettings)
    val printlnSink = Sink.foreach(println)

    val mapToProducerRecord = Flow[String].map(elem => new ProducerRecord[Array[Byte], String](kafkaTopic, elem))
    val mapFromConsumerRecord = Flow[ConsumerRecord[Array[Byte], String]].map(record => record.value())

    tickSource  ~> mapToProducerRecord   ~> kafkaSink
    kafkaSource ~> mapFromConsumerRecord ~> printlnSink

    ClosedShape
  })

  runnableGraph.run()
}
Example 6
Source File: PredictionLogger.scala From ForestFlow with Apache License 2.0
package ai.forestflow.event.subscribers

import java.nio.ByteOrder

import ai.forestflow.domain.{PredictionEvent, PredictionEventGP}
import ai.forestflow.serving.config.ApplicationEnvironment
import akka.actor.{Actor, ActorLogging, Props}
import akka.kafka.ProducerSettings
import graphpipe.InferRequest
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.serialization.{ByteArraySerializer, StringSerializer}
//import scalapb.json4s.JsonFormat

import scala.util.{Success, Try}

object PredictionLogger {

  private lazy val binaryProducerSettings =
    ProducerSettings(producerConfig, new StringSerializer, new ByteArraySerializer)
  private lazy val binaryProducer = binaryProducerSettings.createKafkaProducer()

  override def preStart(): Unit = {
    if (basic_topic.isDefined)
      context.system.eventStream.subscribe(self, classOf[PredictionEvent])
    if (gp_topic.isDefined)
      context.system.eventStream.subscribe(self, classOf[PredictionEventGP])
    super.preStart()
  }

  override def receive: Receive = {
    case event@PredictionEvent(prediction, servedRequest, inferenceRequest, loggingSettings) =>
      val key = loggingSettings
        .keyFeatures
        .flatMap(inferenceRequest.configs.get)
        .mkString(loggingSettings.getKeyFeaturesSeparator)

      if (key.length > 0)
        binaryProducer.send(new ProducerRecord(basic_topic.get, key, event.toByteArray))
      else
        binaryProducer.send(new ProducerRecord(basic_topic.get, event.toByteArray))

    case event@PredictionEventGP(prediction, servedRequest, inferBytes, loggingSettings) =>
      Try {
        val req = graphpipe.Request.getRootAsRequest(
          inferBytes.asReadOnlyByteBuffer().order(ByteOrder.LITTLE_ENDIAN))
        val inferRequest = req.req(new InferRequest()).asInstanceOf[InferRequest]
        val inferConfigs = inferRequest.config()
          .split(",")
          .map(_.split(":"))
          .flatMap {
            case Array(k, v) => Some((k, v))
            case _ => None
          }.toMap

        loggingSettings
          .keyFeatures
          .flatMap(inferConfigs.get)
          .mkString(loggingSettings.getKeyFeaturesSeparator)
      } match {
        case Success(key) =>
          binaryProducer.send(new ProducerRecord(gp_topic.get, key, event.toByteArray))
        case _ =>
          binaryProducer.send(new ProducerRecord(gp_topic.get, event.toByteArray))
      }

    case _ => // ignore
  }
}
Example 7
Source File: CreateProducerJ.scala From skafka with MIT License
package com.evolutiongaming.skafka.producer

import com.evolutiongaming.skafka.{Blocking, Bytes}
import org.apache.kafka.clients.producer.{KafkaProducer, Producer => ProducerJ}
import org.apache.kafka.common.serialization.ByteArraySerializer

object CreateProducerJ {

  def apply[F[_]](
    config: ProducerConfig,
    blocking: Blocking[F]
  ): F[ProducerJ[Bytes, Bytes]] = {
    val properties = config.properties
    val serializer = new ByteArraySerializer()
    blocking { new KafkaProducer[Bytes, Bytes](properties, serializer, serializer) }
  }
}
Example 8
Source File: TransactionalProducer.scala From affinity with Apache License 2.0
package io.amient.affinity.kafka

import java.util.Properties

import akka.actor.Actor
import akka.actor.Status.{Failure, Success}
import akka.event.Logging
import com.typesafe.config.Config
import io.amient.affinity.Conf
import io.amient.affinity.core.actor.{TransactionAbort, TransactionBegin, TransactionCommit, TransactionalRecord}
import io.amient.affinity.core.config.CfgStruct
import io.amient.affinity.core.storage.StorageConf
import io.amient.affinity.kafka.KafkaStorage.{KafkaConsumerConf, KafkaProducerConf}
import org.apache.kafka.clients.producer.{Callback, KafkaProducer, ProducerRecord, RecordMetadata}
import org.apache.kafka.common.serialization.ByteArraySerializer

import scala.collection.JavaConverters._

object KafkaConf extends KafkaConf {
  override def apply(config: Config): KafkaConf = new KafkaConf().apply(config)
}

class KafkaConf extends CfgStruct[KafkaConf](classOf[StorageConf]) {
  val BootstrapServers = string("kafka.bootstrap.servers", true)
    .doc("kafka connection string used for consumer and/or producer")
  val Producer = struct("kafka.producer", new KafkaProducerConf, false)
    .doc("any settings that the underlying version of kafka producer client supports")
  val Consumer = struct("kafka.consumer", new KafkaConsumerConf, false)
    .doc("any settings that the underlying version of kafka consumer client supports")
}

class TransactionalProducer extends Actor {

  val logger = Logging.getLogger(context.system, this)

  private[this] var producer: KafkaProducer[Array[Byte], Array[Byte]] = null

  val kafkaConf = KafkaConf(Conf(context.system.settings.config).Affi.Storage)
  val producerConfig = new Properties() {
    if (kafkaConf.Producer.isDefined) {
      val producerConfig = kafkaConf.Producer.toMap()
      if (producerConfig.containsKey("bootstrap.servers"))
        throw new IllegalArgumentException("bootstrap.servers cannot be overriden for KafkaStroage producer")
      if (producerConfig.containsKey("key.serializer"))
        throw new IllegalArgumentException("Binary kafka stream cannot use custom key.serializer")
      if (producerConfig.containsKey("value.serializer"))
        throw new IllegalArgumentException("Binary kafka stream cannot use custom value.serializer")
      producerConfig.entrySet.asScala.filter(_.getValue.isDefined).foreach { case (entry) =>
        put(entry.getKey, entry.getValue.apply.toString)
      }
    }
    put("bootstrap.servers", kafkaConf.BootstrapServers())
    put("value.serializer", classOf[ByteArraySerializer].getName)
    put("key.serializer", classOf[ByteArraySerializer].getName)
  }

  override def receive: Receive = {
    case req@TransactionBegin(transactionalId) => req(sender) ! {
      if (producer == null) {
        producerConfig.put("transactional.id", transactionalId)
        producer = new KafkaProducer[Array[Byte], Array[Byte]](producerConfig)
        logger.debug(s"Transactions.Init(transactional.id = $transactionalId)")
        producer.initTransactions()
      }
      logger.debug("Transactions.Begin()")
      producer.beginTransaction()
    }

    case TransactionalRecord(topic, key, value, timestamp, partition) =>
      val replyto = sender
      val producerRecord = new ProducerRecord(
        topic,
        partition.map(new Integer(_)).getOrElse(null),
        timestamp.map(new java.lang.Long(_)).getOrElse(null),
        key,
        value)
      logger.debug(s"Transactions.Append(topic=$topic)")
      producer.send(producerRecord, new Callback {
        override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit = {
          if (exception != null) {
            replyto ! Failure(exception)
          } else {
            replyto ! Success(metadata.offset())
          }
        }
      })

    case req@TransactionCommit() => req(sender) ! {
      logger.debug("Transactions.Commit()")
      producer.commitTransaction()
    }

    case req@TransactionAbort() => req(sender) ! {
      logger.debug("Transactions.Abort()")
      producer.abortTransaction()
    }
  }
}
Example 9
Source File: KafkaTestClient.scala From haystack-traces with Apache License 2.0
package com.expedia.www.haystack.trace.indexer.integration.clients

import java.util.Properties

import com.expedia.www.haystack.trace.indexer.config.entities.KafkaConfiguration
import com.expedia.www.haystack.trace.indexer.integration.serdes.{SnappyCompressedSpanBufferProtoDeserializer, SpanProtoSerializer}
import com.expedia.www.haystack.trace.indexer.serde.SpanDeserializer
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.clients.producer.ProducerConfig
import org.apache.kafka.common.serialization.{ByteArraySerializer, StringDeserializer, StringSerializer}
import org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster

object KafkaTestClient {
  val KAFKA_CLUSTER = new EmbeddedKafkaCluster(1)
  KAFKA_CLUSTER.start()
}

class KafkaTestClient {
  import KafkaTestClient._

  val INPUT_TOPIC = "spans"
  val OUTPUT_TOPIC = "span-buffer"

  val APP_PRODUCER_CONFIG: Properties = {
    val props = new Properties()
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_CLUSTER.bootstrapServers)
    props.put(ProducerConfig.ACKS_CONFIG, "1")
    props.put(ProducerConfig.BATCH_SIZE_CONFIG, "20")
    props.put(ProducerConfig.RETRIES_CONFIG, "0")
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer])
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[ByteArraySerializer])
    props
  }

  val APP_CONSUMER_CONFIG: Properties = new Properties()

  val TEST_PRODUCER_CONFIG: Properties = {
    val props = new Properties()
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_CLUSTER.bootstrapServers)
    props.put(ProducerConfig.ACKS_CONFIG, "1")
    props.put(ProducerConfig.BATCH_SIZE_CONFIG, "20")
    props.put(ProducerConfig.RETRIES_CONFIG, "0")
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer])
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[SpanProtoSerializer])
    props
  }

  val RESULT_CONSUMER_CONFIG = new Properties()

  def buildConfig = KafkaConfiguration(numStreamThreads = 1,
    pollTimeoutMs = 100,
    APP_CONSUMER_CONFIG,
    APP_PRODUCER_CONFIG,
    OUTPUT_TOPIC,
    INPUT_TOPIC,
    consumerCloseTimeoutInMillis = 3000,
    commitOffsetRetries = 3,
    commitBackoffInMillis = 250,
    maxWakeups = 5,
    wakeupTimeoutInMillis = 3000)

  def prepare(appId: String): Unit = {
    APP_CONSUMER_CONFIG.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_CLUSTER.bootstrapServers)
    APP_CONSUMER_CONFIG.put(ConsumerConfig.GROUP_ID_CONFIG, appId + "-app-consumer")
    APP_CONSUMER_CONFIG.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
    APP_CONSUMER_CONFIG.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer])
    APP_CONSUMER_CONFIG.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[SpanDeserializer])
    APP_CONSUMER_CONFIG.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")

    RESULT_CONSUMER_CONFIG.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_CLUSTER.bootstrapServers)
    RESULT_CONSUMER_CONFIG.put(ConsumerConfig.GROUP_ID_CONFIG, appId + "-result-consumer")
    RESULT_CONSUMER_CONFIG.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
    RESULT_CONSUMER_CONFIG.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer])
    RESULT_CONSUMER_CONFIG.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[SnappyCompressedSpanBufferProtoDeserializer])

    deleteTopics(INPUT_TOPIC, OUTPUT_TOPIC)
    KAFKA_CLUSTER.createTopic(INPUT_TOPIC, 2, 1)
    KAFKA_CLUSTER.createTopic(OUTPUT_TOPIC)
  }

  private def deleteTopics(topics: String*): Unit = KAFKA_CLUSTER.deleteTopicsAndWait(topics: _*)
}
Example 10
Source File: CachedKafkaProducerSuite.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.sql.kafka010

import java.{util => ju}
import java.util.concurrent.ConcurrentMap

import org.apache.kafka.clients.producer.KafkaProducer
import org.apache.kafka.common.serialization.ByteArraySerializer
import org.scalatest.PrivateMethodTester

import org.apache.spark.sql.test.SharedSQLContext

class CachedKafkaProducerSuite extends SharedSQLContext with PrivateMethodTester {

  type KP = KafkaProducer[Array[Byte], Array[Byte]]

  protected override def beforeEach(): Unit = {
    super.beforeEach()
    val clear = PrivateMethod[Unit]('clear)
    CachedKafkaProducer.invokePrivate(clear())
  }

  test("Should return the cached instance on calling getOrCreate with same params.") {
    val kafkaParams = new ju.HashMap[String, Object]()
    kafkaParams.put("acks", "0")
    // Here only host should be resolvable, it does not need a running instance of kafka server.
    kafkaParams.put("bootstrap.servers", "127.0.0.1:9022")
    kafkaParams.put("key.serializer", classOf[ByteArraySerializer].getName)
    kafkaParams.put("value.serializer", classOf[ByteArraySerializer].getName)
    val producer = CachedKafkaProducer.getOrCreate(kafkaParams)
    val producer2 = CachedKafkaProducer.getOrCreate(kafkaParams)
    assert(producer == producer2)

    val cacheMap = PrivateMethod[ConcurrentMap[Seq[(String, Object)], KP]]('getAsMap)
    val map = CachedKafkaProducer.invokePrivate(cacheMap())
    assert(map.size == 1)
  }

  test("Should close the correct kafka producer for the given kafkaPrams.") {
    val kafkaParams = new ju.HashMap[String, Object]()
    kafkaParams.put("acks", "0")
    kafkaParams.put("bootstrap.servers", "127.0.0.1:9022")
    kafkaParams.put("key.serializer", classOf[ByteArraySerializer].getName)
    kafkaParams.put("value.serializer", classOf[ByteArraySerializer].getName)
    val producer: KP = CachedKafkaProducer.getOrCreate(kafkaParams)
    kafkaParams.put("acks", "1")
    val producer2: KP = CachedKafkaProducer.getOrCreate(kafkaParams)
    // With updated conf, a new producer instance should be created.
    assert(producer != producer2)

    val cacheMap = PrivateMethod[ConcurrentMap[Seq[(String, Object)], KP]]('getAsMap)
    val map = CachedKafkaProducer.invokePrivate(cacheMap())
    assert(map.size == 2)

    CachedKafkaProducer.close(kafkaParams)
    val map2 = CachedKafkaProducer.invokePrivate(cacheMap())
    assert(map2.size == 1)

    import scala.collection.JavaConverters._
    val (seq: Seq[(String, Object)], _producer: KP) = map2.asScala.toArray.apply(0)
    assert(_producer == producer)
  }
}
Example 11
Source File: package.scala From kafka-scala-api with Apache License 2.0
package com

import akka.actor.ActorSystem
import akka.kafka.{ConsumerSettings, ProducerSettings}
import akka.stream.ActorMaterializer
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, ByteArraySerializer, StringDeserializer, StringSerializer}

package object example {
  implicit val system = ActorSystem("FlowProducerMain")
  implicit val materializer = ActorMaterializer()

  val producerSettings = ProducerSettings(system, new ByteArraySerializer, new StringSerializer)
    .withBootstrapServers("localhost:9092")

  val topic = "sample_topic"
  val topic1 = "topic1"
  val topic2 = "topic2"

  val consumerSettings = ConsumerSettings(system, new ByteArrayDeserializer, new StringDeserializer)
    .withBootstrapServers("localhost:9092")
    .withGroupId("group1")
    .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest")
}
Example 12
Source File: KafkaMessageSender.scala From kafka-with-akka-streams-kafka-streams-tutorial with Apache License 2.0
package com.lightbend.scala.kafka

  def batchWriteValue(topic: String, batch: Seq[Array[Byte]]): Seq[RecordMetadata] = {
    val result = batch.map(value =>
      producer.send(new ProducerRecord[Array[Byte], Array[Byte]](topic, value)).get)
    producer.flush()
    result
  }

  def close(): Unit = {
    producer.close()
  }
}
Example 13
Source File: KafkaMessageSender.scala From model-serving-tutorial with Apache License 2.0
package com.lightbend.modelserving.client

import java.util.Properties

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord, RecordMetadata}
import org.apache.kafka.common.serialization.ByteArraySerializer

class MessageSender(val brokers: String) {
  import MessageSender._

  val producer = new KafkaProducer[Array[Byte], Array[Byte]](
    providerProperties(brokers, classOf[ByteArraySerializer].getName, classOf[ByteArraySerializer].getName))

  def writeKeyValue(topic: String, key: Array[Byte], value: Array[Byte]): Unit = {
    val result = producer.send(new ProducerRecord[Array[Byte], Array[Byte]](topic, key, value)).get
    producer.flush()
  }

  def writeValue(topic: String, value: Array[Byte]): Unit = {
    val result = producer.send(
      new ProducerRecord[Array[Byte], Array[Byte]](topic, null.asInstanceOf[Array[Byte]], value)).get
    producer.flush()
  }

  def batchWriteValue(topic: String, batch: Seq[Array[Byte]]): Seq[RecordMetadata] = {
    val result = batch.map(value =>
      producer.send(new ProducerRecord[Array[Byte], Array[Byte]](topic, null.asInstanceOf[Array[Byte]], value)).get)
    producer.flush()
    result
  }

  def close(): Unit = {
    producer.close()
  }
}
Example 14
Source File: ProducerSettings.scala From Fast-Data-Processing-Systems-with-SMACK-Stack with MIT License
import akka.kafka._
import akka.kafka.scaladsl._
import akka.stream.scaladsl.Source
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.serialization.StringSerializer
import org.apache.kafka.common.serialization.ByteArraySerializer

val producerSettings = ProducerSettings(system, new ByteArraySerializer, new StringSerializer)
  .withBootstrapServers("localhost:9092")

Source(1 to 10000)
  .map(_.toString)
  .map(elem => new ProducerRecord[Array[Byte], String]("topic1", elem))
  .to(Producer.plainSink(producerSettings))

Source(1 to 10000)
  .map(elem => ProducerMessage.Message(
    new ProducerRecord[Array[Byte], String]("topic1", elem.toString), elem))
  .via(Producer.flow(producerSettings))
  .map { result =>
    val record = result.message.record
    println(s"${record.topic}/${record.partition} ${result.offset}: ${record.value} (${result.message.passThrough})")
    result
  }