org.apache.kafka.clients.producer.Callback Scala Examples
The following examples show how to use org.apache.kafka.clients.producer.Callback.
The original project and source file are noted above each example.
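Before the project examples, here is a minimal, self-contained sketch of the interface itself: Callback has a single method, onCompletion(RecordMetadata, Exception), which the producer invokes from its I/O thread once an asynchronous send is acknowledged or fails. The broker address (localhost:9092), topic name (my-topic), and object name below are illustrative placeholders, not taken from any of the projects that follow.

import java.util.Properties

import org.apache.kafka.clients.producer.{Callback, KafkaProducer, ProducerRecord, RecordMetadata}
import org.apache.kafka.common.serialization.StringSerializer

object CallbackSketch extends App {
  // Assumed broker address and serializers; adjust for your environment.
  val props = new Properties()
  props.put("bootstrap.servers", "localhost:9092")
  props.put("key.serializer", classOf[StringSerializer].getName)
  props.put("value.serializer", classOf[StringSerializer].getName)

  val producer = new KafkaProducer[String, String](props)

  // The callback runs on the producer's I/O thread once the send completes;
  // exception is non-null if the send failed, otherwise metadata says where the record landed.
  producer.send(new ProducerRecord[String, String]("my-topic", "key", "value"), new Callback {
    override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit =
      if (exception != null) exception.printStackTrace()
      else println(s"Sent to partition ${metadata.partition()} at offset ${metadata.offset()}")
  })

  producer.flush()
  producer.close()
}

Note that producer.send also returns a java.util.concurrent.Future[RecordMetadata], so passing a Callback is simply the non-blocking alternative to waiting on that future, as the examples below illustrate in different settings.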
Example 1
Source File: RDDKafkaWriter.scala From spark-kafka-writer with Apache License 2.0
package com.github.benfradet.spark.kafka.writer

import org.apache.kafka.clients.producer.{Callback, ProducerRecord}
import org.apache.spark.rdd.RDD

import scala.reflect.ClassTag

// The enclosing RDDKafkaWriter class declaration is not included in this snippet.
  override def writeToKafka[K, V](
    producerConfig: Map[String, Object],
    transformFunc: T => ProducerRecord[K, V],
    callback: Option[Callback] = None
  ): Unit =
    rdd.foreachPartition { partition =>
      val producer = KafkaProducerCache.getProducer[K, V](producerConfig)
      partition
        .map(transformFunc)
        .foreach(record => producer.send(record, callback.orNull))
    }
}
Example 2
Source File: DStreamKafkaWriter.scala From spark-kafka-writer with Apache License 2.0
package com.github.benfradet.spark.kafka.writer

import org.apache.kafka.clients.producer.{Callback, ProducerRecord}
import org.apache.spark.streaming.dstream.DStream

import scala.reflect.ClassTag

// The enclosing DStreamKafkaWriter class declaration is not included in this snippet.
  override def writeToKafka[K, V](
    producerConfig: Map[String, Object],
    transformFunc: T => ProducerRecord[K, V],
    callback: Option[Callback] = None
  ): Unit =
    dStream.foreachRDD { rdd =>
      val rddWriter = new RDDKafkaWriter[T](rdd)
      rddWriter.writeToKafka(producerConfig, transformFunc, callback)
    }
}
Example 3
Source File: KafkaMessagingSystem.scala From amadou with Apache License 2.0
package com.mediative.amadou
package monitoring

import java.util.Properties

import com.typesafe.config.Config
import org.apache.kafka.clients.producer.{Callback, KafkaProducer, ProducerRecord, RecordMetadata}

class KafkaMessagingSystem(config: Config) extends MessagingSystem with Logging {
  private val properties  = KafkaMessagingSystem.readProperties(config)
  private val producer    = new KafkaProducer[String, String](properties)
  private val topicPrefix = properties.getProperty("topic.prefix")

  override def publish(topic: String, message: String): Unit = {
    val topicName = s"$topicPrefix-$topic"
    logger.info(s"Publishing to $topicName :\n$message\n")
    producer.send(new ProducerRecord[String, String](topicName, message), new Callback {
      override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit =
        if (exception != null) {
          logger
            .error(s"Cannot publish to $topicName. Caused by: ${exception.getMessage}", exception)
        }
    })
    ()
  }

  override def stop(): Unit =
    producer.close()
}

object KafkaMessagingSystem {
  def readProperties(config: Config): Properties = {
    val propertiesKeys = Seq(
      "bootstrap.servers",
      "acks",
      "retries",
      "batch.size",
      "linger.ms",
      "buffer.memory",
      "key.serializer",
      "value.serializer",
      "topic.prefix")
    val properties = new Properties()
    propertiesKeys.foreach(key => properties.setProperty(key, config.getString(key)))
    properties
  }
}
Example 4
Source File: HydraKafkaCallback.scala From hydra with Apache License 2.0
package hydra.kafka.producer

import akka.actor.ActorSelection
import hydra.core.monitor.HydraMetrics
import hydra.core.transport.TransportCallback
import hydra.kafka.transport.KafkaTransport.RecordProduceError
import org.apache.kafka.clients.producer.{Callback, RecordMetadata}

case class HydraKafkaCallback(
    deliveryId: Long,
    record: KafkaRecord[_, _],
    producer: ActorSelection,
    callback: TransportCallback
) extends Callback {

  override def onCompletion(metadata: RecordMetadata, e: Exception): Unit = {
    Option(e) match {
      case Some(err) => ackError(err)
      case None      => doAck(metadata)
    }
  }

  private def doAck(md: RecordMetadata) = {
    val kmd = KafkaRecordMetadata(md, deliveryId, record.ackStrategy)
    producer ! kmd
    callback.onCompletion(deliveryId, Some(kmd), None)
  }

  private def ackError(e: Exception) = {
    producer ! RecordProduceError(deliveryId, record, e)
    callback.onCompletion(deliveryId, None, Some(e))
  }
}
Example 5
Source File: KafkaAsReceiver.scala From spark-http-stream with BSD 2-Clause "Simplified" License
package org.apache.spark.sql.execution.streaming.http

import java.util.Properties

import org.apache.kafka.clients.producer.Callback
import org.apache.kafka.clients.producer.KafkaProducer
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.clients.producer.RecordMetadata
import org.apache.spark.internal.Logging

class KafkaAsReceiver(bootstrapServers: String) extends AbstractActionsHandler with SendStreamActionSupport with Logging {
  val props = new Properties();
  props.put("bootstrap.servers", bootstrapServers);
  props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
  props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
  val producer = new KafkaProducer[String, String](props);

  override def listActionHandlerEntries(requestBody: Map[String, Any]): PartialFunction[String, Map[String, Any]] = {
    case "actionSendStream" ⇒ handleSendStream(requestBody);
  }

  override def destroy() {
    producer.close();
  }

  override def onReceiveStream(topic: String, rows: Array[RowEx]) = {
    var index = -1;
    for (row ← rows) {
      index += 1;
      val key = "" + row.batchId + "-" + row.offsetInBatch;
      //TODO: send an array instead of a string value?
      val value = row.originalRow(0).toString();
      val record = new ProducerRecord[String, String](topic, key, value);
      producer.send(record, new Callback() {
        def onCompletion(metadata: RecordMetadata, e: Exception) = {
          if (e != null) {
            e.printStackTrace();
            logError(e.getMessage);
          } else {
            val offset = metadata.offset();
            val partition = metadata.partition();
            logDebug(s"record is sent to kafka:key=$key, value=$value, partition=$partition, offset=$offset");
          }
        }
      });
    }
  }
}

class KafkaAsReceiverFactory extends ActionsHandlerFactory {
  def createInstance(params: Params) = new KafkaAsReceiver(params.getRequiredString("bootstrapServers"));
}
Example 6
Source File: ExternalKafkaProcessorSupplier.scala From haystack-trends with Apache License 2.0
package com.expedia.www.haystack.trends.kstream.processor

import com.expedia.metrics.MetricData
import com.expedia.www.haystack.trends.config.entities.KafkaProduceConfiguration
import com.expedia.www.haystack.trends.kstream.serde.TrendMetricSerde.metricRegistry
import org.apache.kafka.clients.producer.{Callback, KafkaProducer, ProducerRecord, RecordMetadata}
import org.apache.kafka.streams.processor.{AbstractProcessor, Processor, ProcessorContext, ProcessorSupplier}
import org.slf4j.LoggerFactory

class ExternalKafkaProcessorSupplier(kafkaProduceConfig: KafkaProduceConfiguration) extends ProcessorSupplier[String, MetricData] {

  private val LOGGER = LoggerFactory.getLogger(this.getClass)

  private val metricPointExternalKafkaSuccessMeter = metricRegistry.meter("metricpoint.kafka-external.success")
  private val metricPointExternalKafkaFailureMeter = metricRegistry.meter("metricpoint.kafka-external.failure")

  def get: Processor[String, MetricData] = {
    new ExternalKafkaProcessor(kafkaProduceConfig: KafkaProduceConfiguration)
  }

  // Note: in the original source, process() belongs to the inner ExternalKafkaProcessor
  // (an AbstractProcessor); that class declaration and the kafkaProducer and
  // kafkaProduceTopic members it uses are not included in this snippet.
  def process(key: String, value: MetricData): Unit = {
    val kafkaMessage = new ProducerRecord(kafkaProduceTopic, key, value)
    kafkaProducer.send(kafkaMessage, new Callback {
      override def onCompletion(recordMetadata: RecordMetadata, e: Exception): Unit = {
        if (e != null) {
          LOGGER.error(s"Failed to produce the message to kafka for topic=$kafkaProduceTopic, with reason=", e)
          metricPointExternalKafkaFailureMeter.mark()
        } else {
          metricPointExternalKafkaSuccessMeter.mark()
        }
      }
    })
  }
}
Example 7
Source File: TransactionalProducer.scala From affinity with Apache License 2.0
package io.amient.affinity.kafka

import java.util.Properties

import akka.actor.Actor
import akka.actor.Status.{Failure, Success}
import akka.event.Logging
import com.typesafe.config.Config
import io.amient.affinity.Conf
import io.amient.affinity.core.actor.{TransactionAbort, TransactionBegin, TransactionCommit, TransactionalRecord}
import io.amient.affinity.core.config.CfgStruct
import io.amient.affinity.core.storage.StorageConf
import io.amient.affinity.kafka.KafkaStorage.{KafkaConsumerConf, KafkaProducerConf}
import org.apache.kafka.clients.producer.{Callback, KafkaProducer, ProducerRecord, RecordMetadata}
import org.apache.kafka.common.serialization.ByteArraySerializer

import scala.collection.JavaConverters._

object KafkaConf extends KafkaConf {
  override def apply(config: Config): KafkaConf = new KafkaConf().apply(config)
}

class KafkaConf extends CfgStruct[KafkaConf](classOf[StorageConf]) {
  val BootstrapServers = string("kafka.bootstrap.servers", true).doc("kafka connection string used for consumer and/or producer")
  val Producer = struct("kafka.producer", new KafkaProducerConf, false).doc("any settings that the underlying version of kafka producer client supports")
  val Consumer = struct("kafka.consumer", new KafkaConsumerConf, false).doc("any settings that the underlying version of kafka consumer client supports")
}

class TransactionalProducer extends Actor {

  val logger = Logging.getLogger(context.system, this)

  private[this] var producer: KafkaProducer[Array[Byte], Array[Byte]] = null

  val kafkaConf = KafkaConf(Conf(context.system.settings.config).Affi.Storage)
  val producerConfig = new Properties() {
    if (kafkaConf.Producer.isDefined) {
      val producerConfig = kafkaConf.Producer.toMap()
      if (producerConfig.containsKey("bootstrap.servers")) throw new IllegalArgumentException("bootstrap.servers cannot be overridden for KafkaStorage producer")
      if (producerConfig.containsKey("key.serializer")) throw new IllegalArgumentException("Binary kafka stream cannot use custom key.serializer")
      if (producerConfig.containsKey("value.serializer")) throw new IllegalArgumentException("Binary kafka stream cannot use custom value.serializer")
      producerConfig.entrySet.asScala.filter(_.getValue.isDefined).foreach {
        case (entry) => put(entry.getKey, entry.getValue.apply.toString)
      }
    }
    put("bootstrap.servers", kafkaConf.BootstrapServers())
    put("value.serializer", classOf[ByteArraySerializer].getName)
    put("key.serializer", classOf[ByteArraySerializer].getName)
  }

  override def receive: Receive = {
    case req@TransactionBegin(transactionalId) => req(sender) ! {
      if (producer == null) {
        producerConfig.put("transactional.id", transactionalId)
        producer = new KafkaProducer[Array[Byte], Array[Byte]](producerConfig)
        logger.debug(s"Transactions.Init(transactional.id = $transactionalId)")
        producer.initTransactions()
      }
      logger.debug("Transactions.Begin()")
      producer.beginTransaction()
    }

    case TransactionalRecord(topic, key, value, timestamp, partition) =>
      val replyto = sender
      val producerRecord = new ProducerRecord(
        topic,
        partition.map(new Integer(_)).getOrElse(null),
        timestamp.map(new java.lang.Long(_)).getOrElse(null),
        key,
        value)
      logger.debug(s"Transactions.Append(topic=$topic)")
      producer.send(producerRecord, new Callback {
        override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit = {
          if (exception != null) {
            replyto ! Failure(exception)
          } else {
            replyto ! Success(metadata.offset())
          }
        }
      })

    case req@TransactionCommit() => req(sender) ! {
      logger.debug("Transactions.Commit()")
      producer.commitTransaction()
    }

    case req@TransactionAbort() => req(sender) ! {
      logger.debug("Transactions.Abort()")
      producer.abortTransaction()
    }
  }
}
Example 8
Source File: KafkaSinkRef.scala From cloudflow with Apache License 2.0

package cloudflow.akkastream

import scala.concurrent._
import scala.util._

import akka._
import akka.actor.ActorSystem
import akka.kafka._
import akka.kafka.ConsumerMessage._
import akka.kafka.scaladsl._
import akka.stream._
import akka.stream.scaladsl._

import org.apache.kafka.clients.producer.{ Callback, ProducerRecord, RecordMetadata }
import org.apache.kafka.common.serialization._

import cloudflow.streamlets._

final class KafkaSinkRef[T](
    system: ActorSystem,
    outlet: CodecOutlet[T],
    internalKafkaBootstrapServers: String,
    topic: Topic,
    killSwitch: SharedKillSwitch,
    completionPromise: Promise[Dun]
) extends WritableSinkRef[T] {

  private val producerSettings = ProducerSettings(system, new ByteArraySerializer, new ByteArraySerializer)
    .withBootstrapServers(topic.bootstrapServers.getOrElse(internalKafkaBootstrapServers))
    .withProperties(topic.kafkaProducerProperties)

  private val producer = producerSettings.createKafkaProducer()

  def sink: Sink[(T, Committable), NotUsed] = {
    system.log.info(s"Creating sink for topic: $topic")

    Flow[(T, Committable)]
      .map {
        case (value, offset) ⇒
          val key        = outlet.partitioner(value)
          val bytesValue = outlet.codec.encode(value)
          ProducerMessage.Message[Array[Byte], Array[Byte], Committable](new ProducerRecord(topic.name, key.getBytes("UTF8"), bytesValue), offset)
      }
      .via(Producer.flexiFlow(producerSettings.withProducer(producer)))
      .via(handleTermination)
      .to(Sink.ignore)
      .mapMaterializedValue(_ ⇒ NotUsed)
  }

  private def handleTermination[I]: Flow[I, I, NotUsed] =
    Flow[I]
      .via(killSwitch.flow)
      .alsoTo(
        Sink.onComplete {
          case Success(_) ⇒
            system.log.error(s"Stream has completed. Shutting down streamlet...")
            completionPromise.success(Dun)
          case Failure(e) ⇒
            system.log.error(e, "Stream has failed. Shutting down streamlet...")
            completionPromise.failure(e)
        }
      )

  def write(value: T): Future[T] = {
    val key        = outlet.partitioner(value)
    val bytesKey   = keyBytes(key)
    val bytesValue = outlet.codec.encode(value)
    val record     = new ProducerRecord(topic.name, bytesKey, bytesValue)
    val promise    = Promise[T]()

    producer.send(
      record,
      new Callback() {
        def onCompletion(metadata: RecordMetadata, exception: Exception) {
          if (exception == null) promise.success(value)
          else promise.failure(exception)
        }
      }
    )

    promise.future
  }

  private def keyBytes(key: String) = if (key != null) key.getBytes("UTF8") else null
}
package cloudflow.akkastream import scala.concurrent._ import scala.util._ import akka._ import akka.actor.ActorSystem import akka.kafka._ import akka.kafka.ConsumerMessage._ import akka.kafka.scaladsl._ import akka.stream._ import akka.stream.scaladsl._ import org.apache.kafka.clients.producer.{ Callback, ProducerRecord, RecordMetadata } import org.apache.kafka.common.serialization._ import cloudflow.streamlets._ final class KafkaSinkRef[T]( system: ActorSystem, outlet: CodecOutlet[T], internalKafkaBootstrapServers: String, topic: Topic, killSwitch: SharedKillSwitch, completionPromise: Promise[Dun] ) extends WritableSinkRef[T] { private val producerSettings = ProducerSettings(system, new ByteArraySerializer, new ByteArraySerializer) .withBootstrapServers(topic.bootstrapServers.getOrElse(internalKafkaBootstrapServers)) .withProperties(topic.kafkaProducerProperties) private val producer = producerSettings.createKafkaProducer() def sink: Sink[(T, Committable), NotUsed] = { system.log.info(s"Creating sink for topic: $topic") Flow[(T, Committable)] .map { case (value, offset) ⇒ val key = outlet.partitioner(value) val bytesValue = outlet.codec.encode(value) ProducerMessage.Message[Array[Byte], Array[Byte], Committable](new ProducerRecord(topic.name, key.getBytes("UTF8"), bytesValue), offset) } .via(Producer.flexiFlow(producerSettings.withProducer(producer))) .via(handleTermination) .to(Sink.ignore) .mapMaterializedValue(_ ⇒ NotUsed) } private def handleTermination[I]: Flow[I, I, NotUsed] = Flow[I] .via(killSwitch.flow) .alsoTo( Sink.onComplete { case Success(_) ⇒ system.log.error(s"Stream has completed. Shutting down streamlet...") completionPromise.success(Dun) case Failure(e) ⇒ system.log.error(e, "Stream has failed. Shutting down streamlet...") completionPromise.failure(e) } ) def write(value: T): Future[T] = { val key = outlet.partitioner(value) val bytesKey = keyBytes(key) val bytesValue = outlet.codec.encode(value) val record = new ProducerRecord(topic.name, bytesKey, bytesValue) val promise = Promise[T]() producer.send( record, new Callback() { def onCompletion(metadata: RecordMetadata, exception: Exception) { if (exception == null) promise.success(value) else promise.failure(exception) } } ) promise.future } private def keyBytes(key: String) = if (key != null) key.getBytes("UTF8") else null }