org.apache.kafka.clients.producer.RecordMetadata Scala Examples
The following examples show how to use org.apache.kafka.clients.producer.RecordMetadata.
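Before the project examples, here is a minimal, self-contained sketch of where RecordMetadata comes from: KafkaProducer.send is asynchronous and hands the broker-assigned metadata (topic, partition, offset) to the completion Callback (or via the returned Future). The broker address and topic name below are placeholders, not taken from any of the projects listed here.

import java.util.Properties

import org.apache.kafka.clients.producer.{Callback, KafkaProducer, ProducerRecord, RecordMetadata}

object RecordMetadataQuickstart {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.put("bootstrap.servers", "localhost:9092") // placeholder broker address
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")

    val producer = new KafkaProducer[String, String](props)
    val record   = new ProducerRecord[String, String]("example-topic", "key", "value")

    // send() is asynchronous; RecordMetadata arrives in the callback once the broker acknowledges the write
    producer.send(record, new Callback {
      override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit =
        if (exception != null) exception.printStackTrace()
        else println(s"topic=${metadata.topic} partition=${metadata.partition} offset=${metadata.offset}")
    })

    producer.flush()
    producer.close()
  }
}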
Example 1
Source File: KafkaMessagingSystem.scala From amadou with Apache License 2.0
package com.mediative.amadou
package monitoring

import java.util.Properties

import com.typesafe.config.Config
import org.apache.kafka.clients.producer.{Callback, KafkaProducer, ProducerRecord, RecordMetadata}

class KafkaMessagingSystem(config: Config) extends MessagingSystem with Logging {
  private val properties  = KafkaMessagingSystem.readProperties(config)
  private val producer    = new KafkaProducer[String, String](properties)
  private val topicPrefix = properties.getProperty("topic.prefix")

  override def publish(topic: String, message: String): Unit = {
    val topicName = s"$topicPrefix-$topic"
    logger.info(s"Publishing to $topicName :\n$message\n")

    producer.send(
      new ProducerRecord[String, String](topicName, message),
      new Callback {
        override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit =
          if (exception != null) {
            logger.error(s"Cannot publish to $topicName. Caused by: ${exception.getMessage}", exception)
          }
      }
    )
    ()
  }

  override def stop(): Unit =
    producer.close()
}

object KafkaMessagingSystem {
  def readProperties(config: Config): Properties = {
    val propertiesKeys = Seq(
      "bootstrap.servers",
      "acks",
      "retries",
      "batch.size",
      "linger.ms",
      "buffer.memory",
      "key.serializer",
      "value.serializer",
      "topic.prefix")

    val properties = new Properties()
    propertiesKeys.foreach(key => properties.setProperty(key, config.getString(key)))
    properties
  }
}
Example 2
Source File: HydraKafkaCallback.scala From hydra with Apache License 2.0
package hydra.kafka.producer

import akka.actor.ActorSelection
import hydra.core.monitor.HydraMetrics
import hydra.core.transport.TransportCallback
import hydra.kafka.transport.KafkaTransport.RecordProduceError
import org.apache.kafka.clients.producer.{Callback, RecordMetadata}

case class HydraKafkaCallback(
    deliveryId: Long,
    record: KafkaRecord[_, _],
    producer: ActorSelection,
    callback: TransportCallback
) extends Callback {

  override def onCompletion(metadata: RecordMetadata, e: Exception): Unit = {
    Option(e) match {
      case Some(err) => ackError(err)
      case None      => doAck(metadata)
    }
  }

  private def doAck(md: RecordMetadata) = {
    val kmd = KafkaRecordMetadata(md, deliveryId, record.ackStrategy)
    producer ! kmd
    callback.onCompletion(deliveryId, Some(kmd), None)
  }

  private def ackError(e: Exception) = {
    producer ! RecordProduceError(deliveryId, record, e)
    callback.onCompletion(deliveryId, None, Some(e))
  }
}
Example 3
Source File: EventProducer.scala From rokku with Apache License 2.0
package com.ing.wbaa.rokku.proxy.provider.kafka

import akka.Done
import akka.http.scaladsl.model.HttpMethod
import com.ing.wbaa.rokku.proxy.config.KafkaSettings
import com.ing.wbaa.rokku.proxy.data.RequestId
import com.ing.wbaa.rokku.proxy.handler.LoggerHandlerWithId
import com.ing.wbaa.rokku.proxy.metrics.MetricsFactory
import org.apache.kafka.clients.CommonClientConfigs
import org.apache.kafka.clients.producer.{ KafkaProducer, ProducerConfig, ProducerRecord, RecordMetadata }
import org.apache.kafka.common.serialization.StringSerializer

import scala.concurrent.{ ExecutionContext, Future }

trait EventProducer {

  private val logger = new LoggerHandlerWithId

  import scala.collection.JavaConverters._

  protected[this] implicit val kafkaSettings: KafkaSettings
  protected[this] implicit val executionContext: ExecutionContext

  private lazy val config: Map[String, Object] = Map[String, Object](
    "bootstrap.servers" -> kafkaSettings.bootstrapServers,
    ProducerConfig.RETRIES_CONFIG -> kafkaSettings.retries,
    ProducerConfig.RECONNECT_BACKOFF_MS_CONFIG -> kafkaSettings.retriesBackOff,
    ProducerConfig.RECONNECT_BACKOFF_MAX_MS_CONFIG -> kafkaSettings.retriesBackOffMax,
    CommonClientConfigs.SECURITY_PROTOCOL_CONFIG -> kafkaSettings.protocol,
    ProducerConfig.MAX_BLOCK_MS_CONFIG -> kafkaSettings.maxblock,
    ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG -> kafkaSettings.requestTimeoutMs,
    "ssl.truststore.location" -> kafkaSettings.sslTruststoreLocation,
    "ssl.truststore.password" -> kafkaSettings.sslTruststorePassword,
    "ssl.keystore.location" -> kafkaSettings.sslKeystoreLocation,
    "ssl.keystore.password" -> kafkaSettings.sslKeystorePassword,
    "ssl.key.password" -> kafkaSettings.sslKeyPassword
  )

  private lazy val kafkaProducer: KafkaProducer[String, String] =
    new KafkaProducer(config.asJava, new StringSerializer, new StringSerializer)

  def sendSingleMessage(event: String, topic: String, httpMethod: Option[HttpMethod] = None)(implicit id: RequestId): Future[Done] = {
    kafkaProducer
      .send(new ProducerRecord[String, String](topic, event), (metadata: RecordMetadata, exception: Exception) => {
        exception match {
          case e: Exception =>
            MetricsFactory.incrementKafkaSendErrors
            logger.error("error in sending event {} to topic {}, error={}", event, topic, e)
            throw new Exception(e)
          case _ =>
            httpMethod.map { m => MetricsFactory.incrementKafkaNotificationsSent(m) }
            logger.debug("Message sent {} to kafka, offset {}", event, metadata.offset())
        }
      }) match {
      case _ => Future(Done)
    }
  }
}
Example 4
Source File: KafkaAsReceiver.scala From spark-http-stream with BSD 2-Clause "Simplified" License
package org.apache.spark.sql.execution.streaming.http

import java.util.Properties

import org.apache.kafka.clients.producer.Callback
import org.apache.kafka.clients.producer.KafkaProducer
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.clients.producer.RecordMetadata
import org.apache.spark.internal.Logging

class KafkaAsReceiver(bootstrapServers: String) extends AbstractActionsHandler with SendStreamActionSupport with Logging {

  val props = new Properties();
  props.put("bootstrap.servers", bootstrapServers);
  props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
  props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
  val producer = new KafkaProducer[String, String](props);

  override def listActionHandlerEntries(requestBody: Map[String, Any]): PartialFunction[String, Map[String, Any]] = {
    case "actionSendStream" ⇒ handleSendStream(requestBody);
  }

  override def destroy() {
    producer.close();
  }

  override def onReceiveStream(topic: String, rows: Array[RowEx]) = {
    var index = -1;
    for (row ← rows) {
      index += 1;
      val key = "" + row.batchId + "-" + row.offsetInBatch;
      //TODO: send an array instead of a string value?
      val value = row.originalRow(0).toString();
      val record = new ProducerRecord[String, String](topic, key, value);
      producer.send(record, new Callback() {
        def onCompletion(metadata: RecordMetadata, e: Exception) = {
          if (e != null) {
            e.printStackTrace();
            logError(e.getMessage);
          } else {
            val offset = metadata.offset();
            val partition = metadata.partition();
            logDebug(s"record is sent to kafka:key=$key, value=$value, partition=$partition, offset=$offset");
          }
        }
      });
    }
  }
}

class KafkaAsReceiverFactory extends ActionsHandlerFactory {
  def createInstance(params: Params) = new KafkaAsReceiver(params.getRequiredString("bootstrapServers"));
}
Example 5
Source File: TestConnector.scala From openwhisk with Apache License 2.0
package org.apache.openwhisk.core.connector.test

import java.util.ArrayList
import java.util.concurrent.LinkedBlockingQueue

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.collection.JavaConverters._

import org.apache.kafka.clients.producer.RecordMetadata
import org.apache.kafka.common.TopicPartition

import common.StreamLogging
import org.apache.openwhisk.common.Counter
import org.apache.openwhisk.core.connector.Message
import org.apache.openwhisk.core.connector.MessageConsumer
import org.apache.openwhisk.core.connector.MessageProducer

class TestConnector(topic: String, override val maxPeek: Int, allowMoreThanMax: Boolean)
    extends MessageConsumer
    with StreamLogging {

  override def peek(duration: FiniteDuration, retry: Int = 0) = {
    val msgs = new ArrayList[Message]
    queue.synchronized {
      queue.drainTo(msgs, if (allowMoreThanMax) Int.MaxValue else maxPeek)
      msgs.asScala map { m =>
        offset += 1
        (topic, -1, offset, m.serialize.getBytes)
      }
    }
  }

  override def commit(retry: Int = 0) = {
    if (throwCommitException) {
      throw new Exception("commit failed")
    } else {
      // nothing to do
    }
  }

  def occupancy = queue.size

  def send(msg: Message): Future[RecordMetadata] = {
    producer.send(topic, msg)
  }

  def send(msgs: Seq[Message]): Future[RecordMetadata] = {
    import scala.language.reflectiveCalls
    producer.sendBulk(topic, msgs)
  }

  def close() = {
    closed = true
    producer.close()
  }

  private val producer = new MessageProducer {
    def send(topic: String, msg: Message, retry: Int = 0): Future[RecordMetadata] = {
      queue.synchronized {
        if (queue.offer(msg)) {
          logging.info(this, s"put: $msg")
          Future.successful(
            new RecordMetadata(new TopicPartition(topic, 0), 0, queue.size, -1, Long.box(-1L), -1, -1))
        } else {
          logging.error(this, s"put failed: $msg")
          Future.failed(new IllegalStateException("failed to write msg"))
        }
      }
    }

    def sendBulk(topic: String, msgs: Seq[Message]): Future[RecordMetadata] = {
      queue.synchronized {
        if (queue.addAll(msgs.asJava)) {
          logging.info(this, s"put: ${msgs.length} messages")
          Future.successful(
            new RecordMetadata(new TopicPartition(topic, 0), 0, queue.size, -1, Long.box(-1L), -1, -1))
        } else {
          logging.error(this, s"put failed: ${msgs.length} messages")
          Future.failed(new IllegalStateException("failed to write msg"))
        }
      }
    }

    def close() = {}

    def sentCount() = counter.next()

    val counter = new Counter()
  }

  var throwCommitException = false
  private val queue = new LinkedBlockingQueue[Message]()
  @volatile private var closed = false
  private var offset = -1L
}
Example 6
Source File: ExternalKafkaProcessorSupplier.scala From haystack-trends with Apache License 2.0
package com.expedia.www.haystack.trends.kstream.processor

import com.expedia.metrics.MetricData
import com.expedia.www.haystack.trends.config.entities.KafkaProduceConfiguration
import com.expedia.www.haystack.trends.kstream.serde.TrendMetricSerde.metricRegistry
import org.apache.kafka.clients.producer.{Callback, KafkaProducer, ProducerRecord, RecordMetadata}
import org.apache.kafka.streams.processor.{AbstractProcessor, Processor, ProcessorContext, ProcessorSupplier}
import org.slf4j.LoggerFactory

class ExternalKafkaProcessorSupplier(kafkaProduceConfig: KafkaProduceConfiguration) extends ProcessorSupplier[String, MetricData] {

  private val LOGGER = LoggerFactory.getLogger(this.getClass)
  private val metricPointExternalKafkaSuccessMeter = metricRegistry.meter("metricpoint.kafka-external.success")
  private val metricPointExternalKafkaFailureMeter = metricRegistry.meter("metricpoint.kafka-external.failure")

  def get: Processor[String, MetricData] = {
    new ExternalKafkaProcessor(kafkaProduceConfig: KafkaProduceConfiguration)
  }

  // NOTE: this excerpt is truncated — the declaration of the inner ExternalKafkaProcessor
  // (an AbstractProcessor that defines kafkaProduceTopic and kafkaProducer) is not shown;
  // only its process() method survives below.
  def process(key: String, value: MetricData): Unit = {
    val kafkaMessage = new ProducerRecord(kafkaProduceTopic, key, value)
    kafkaProducer.send(kafkaMessage, new Callback {
      override def onCompletion(recordMetadata: RecordMetadata, e: Exception): Unit = {
        if (e != null) {
          LOGGER.error(s"Failed to produce the message to kafka for topic=$kafkaProduceTopic, with reason=", e)
          metricPointExternalKafkaFailureMeter.mark()
        } else {
          metricPointExternalKafkaSuccessMeter.mark()
        }
      }
    })
  }
}
Example 7
Source File: TransactionalProducer.scala From affinity with Apache License 2.0
package io.amient.affinity.kafka

import java.util.Properties

import akka.actor.Actor
import akka.actor.Status.{Failure, Success}
import akka.event.Logging
import com.typesafe.config.Config
import io.amient.affinity.Conf
import io.amient.affinity.core.actor.{TransactionAbort, TransactionBegin, TransactionCommit, TransactionalRecord}
import io.amient.affinity.core.config.CfgStruct
import io.amient.affinity.core.storage.StorageConf
import io.amient.affinity.kafka.KafkaStorage.{KafkaConsumerConf, KafkaProducerConf}
import org.apache.kafka.clients.producer.{Callback, KafkaProducer, ProducerRecord, RecordMetadata}
import org.apache.kafka.common.serialization.ByteArraySerializer

import scala.collection.JavaConverters._

object KafkaConf extends KafkaConf {
  override def apply(config: Config): KafkaConf = new KafkaConf().apply(config)
}

class KafkaConf extends CfgStruct[KafkaConf](classOf[StorageConf]) {
  val BootstrapServers = string("kafka.bootstrap.servers", true)
    .doc("kafka connection string used for consumer and/or producer")
  val Producer = struct("kafka.producer", new KafkaProducerConf, false)
    .doc("any settings that the underlying version of kafka producer client supports")
  val Consumer = struct("kafka.consumer", new KafkaConsumerConf, false)
    .doc("any settings that the underlying version of kafka consumer client supports")
}

class TransactionalProducer extends Actor {

  val logger = Logging.getLogger(context.system, this)

  private[this] var producer: KafkaProducer[Array[Byte], Array[Byte]] = null

  val kafkaConf = KafkaConf(Conf(context.system.settings.config).Affi.Storage)
  val producerConfig = new Properties() {
    if (kafkaConf.Producer.isDefined) {
      val producerConfig = kafkaConf.Producer.toMap()
      if (producerConfig.containsKey("bootstrap.servers"))
        throw new IllegalArgumentException("bootstrap.servers cannot be overriden for KafkaStroage producer")
      if (producerConfig.containsKey("key.serializer"))
        throw new IllegalArgumentException("Binary kafka stream cannot use custom key.serializer")
      if (producerConfig.containsKey("value.serializer"))
        throw new IllegalArgumentException("Binary kafka stream cannot use custom value.serializer")
      producerConfig.entrySet.asScala.filter(_.getValue.isDefined).foreach {
        case (entry) => put(entry.getKey, entry.getValue.apply.toString)
      }
    }
    put("bootstrap.servers", kafkaConf.BootstrapServers())
    put("value.serializer", classOf[ByteArraySerializer].getName)
    put("key.serializer", classOf[ByteArraySerializer].getName)
  }

  override def receive: Receive = {
    case req@TransactionBegin(transactionalId) => req(sender) ! {
      if (producer == null) {
        producerConfig.put("transactional.id", transactionalId)
        producer = new KafkaProducer[Array[Byte], Array[Byte]](producerConfig)
        logger.debug(s"Transactions.Init(transactional.id = $transactionalId)")
        producer.initTransactions()
      }
      logger.debug("Transactions.Begin()")
      producer.beginTransaction()
    }

    case TransactionalRecord(topic, key, value, timestamp, partition) =>
      val replyto = sender
      val producerRecord = new ProducerRecord(
        topic,
        partition.map(new Integer(_)).getOrElse(null),
        timestamp.map(new java.lang.Long(_)).getOrElse(null),
        key,
        value)
      logger.debug(s"Transactions.Append(topic=$topic)")
      producer.send(producerRecord, new Callback {
        override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit = {
          if (exception != null) {
            replyto ! Failure(exception)
          } else {
            replyto ! Success(metadata.offset())
          }
        }
      })

    case req@TransactionCommit() => req(sender) ! {
      logger.debug("Transactions.Commit()")
      producer.commitTransaction()
    }

    case req@TransactionAbort() => req(sender) ! {
      logger.debug("Transactions.Abort()")
      producer.abortTransaction()
    }
  }
}
Example 8
Source File: FailingKafkaStorage.scala From affinity with Apache License 2.0
package io.amient.affinity.kafka

import java.util.concurrent.Future

import io.amient.affinity.core.storage.{LogStorageConf, Record}
import io.amient.affinity.core.util.MappedJavaFuture
import org.apache.kafka.clients.producer.{ProducerRecord, RecordMetadata}

class FailingKafkaStorage(conf: LogStorageConf) extends KafkaLogStorage(conf) {

  override def append(record: Record[Array[Byte], Array[Byte]]): Future[java.lang.Long] = {
    val producerRecord = new ProducerRecord(topic, null, record.timestamp, record.key, record.value)
    new MappedJavaFuture[RecordMetadata, java.lang.Long](producer.send(producerRecord)) {
      override def map(result: RecordMetadata): java.lang.Long = {
        if (System.currentTimeMillis() % 3 == 0) throw new RuntimeException("simulated kafka producer error")
        result.offset()
      }
    }
  }
}
Example 9
Source File: KafkaSinkRef.scala From cloudflow with Apache License 2.0
package cloudflow.akkastream

import scala.concurrent._
import scala.util._

import akka._
import akka.actor.ActorSystem
import akka.kafka._
import akka.kafka.ConsumerMessage._
import akka.kafka.scaladsl._
import akka.stream._
import akka.stream.scaladsl._

import org.apache.kafka.clients.producer.{ Callback, ProducerRecord, RecordMetadata }
import org.apache.kafka.common.serialization._

import cloudflow.streamlets._

final class KafkaSinkRef[T](
    system: ActorSystem,
    outlet: CodecOutlet[T],
    internalKafkaBootstrapServers: String,
    topic: Topic,
    killSwitch: SharedKillSwitch,
    completionPromise: Promise[Dun]
) extends WritableSinkRef[T] {

  private val producerSettings = ProducerSettings(system, new ByteArraySerializer, new ByteArraySerializer)
    .withBootstrapServers(topic.bootstrapServers.getOrElse(internalKafkaBootstrapServers))
    .withProperties(topic.kafkaProducerProperties)

  private val producer = producerSettings.createKafkaProducer()

  def sink: Sink[(T, Committable), NotUsed] = {
    system.log.info(s"Creating sink for topic: $topic")

    Flow[(T, Committable)]
      .map {
        case (value, offset) ⇒
          val key        = outlet.partitioner(value)
          val bytesValue = outlet.codec.encode(value)
          ProducerMessage.Message[Array[Byte], Array[Byte], Committable](
            new ProducerRecord(topic.name, key.getBytes("UTF8"), bytesValue),
            offset)
      }
      .via(Producer.flexiFlow(producerSettings.withProducer(producer)))
      .via(handleTermination)
      .to(Sink.ignore)
      .mapMaterializedValue(_ ⇒ NotUsed)
  }

  private def handleTermination[I]: Flow[I, I, NotUsed] =
    Flow[I]
      .via(killSwitch.flow)
      .alsoTo(
        Sink.onComplete {
          case Success(_) ⇒
            system.log.error(s"Stream has completed. Shutting down streamlet...")
            completionPromise.success(Dun)
          case Failure(e) ⇒
            system.log.error(e, "Stream has failed. Shutting down streamlet...")
            completionPromise.failure(e)
        }
      )

  def write(value: T): Future[T] = {
    val key        = outlet.partitioner(value)
    val bytesKey   = keyBytes(key)
    val bytesValue = outlet.codec.encode(value)
    val record     = new ProducerRecord(topic.name, bytesKey, bytesValue)
    val promise    = Promise[T]()

    producer.send(
      record,
      new Callback() {
        def onCompletion(metadata: RecordMetadata, exception: Exception) {
          if (exception == null) promise.success(value)
          else promise.failure(exception)
        }
      }
    )

    promise.future
  }

  private def keyBytes(key: String) = if (key != null) key.getBytes("UTF8") else null
}
Example 10
Source File: Producer.scala From fusion-data with Apache License 2.0
package kafkasample.demo

import java.util.Properties
import java.util.concurrent.TimeUnit

import org.apache.kafka.clients.producer.{ KafkaProducer, ProducerRecord, RecordMetadata }

object Producer {

  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.put("bootstrap.servers", "localhost:9092")
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")

    val producer = new KafkaProducer[String, String](props)
    try {
      run(producer)
    } finally {
      TimeUnit.SECONDS.sleep(5)
      producer.close()
    }
  }

  private def run[K, V](producer: KafkaProducer[String, String]) {
    val record = new ProducerRecord[String, String]("customerCountries", "羊八井222")
    producer.send(record, (metadata: RecordMetadata, e: Exception) => {
      if (e ne null) {
        e.printStackTrace()
      }
      println(s"metadata: $metadata")
    })
  }
}
Example 11
Source File: KafkaMessageSender.scala From kafka-with-akka-streams-kafka-streams-tutorial with Apache License 2.0
package com.lightbend.scala.kafka

// NOTE: this excerpt is truncated — the original file wraps these methods in a sender
// class that defines the `producer` instance (Example 12 shows a complete variant of
// the same pattern).

  def batchWriteValue(topic: String, batch: Seq[Array[Byte]]): Seq[RecordMetadata] = {
    val result = batch.map(value =>
      producer.send(new ProducerRecord[Array[Byte], Array[Byte]](topic, value)).get)
    producer.flush()
    result
  }

  def close(): Unit = {
    producer.close()
  }
}
Example 12
Source File: KafkaMessageSender.scala From model-serving-tutorial with Apache License 2.0
package com.lightbend.modelserving.client

import java.util.Properties

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord, RecordMetadata}
import org.apache.kafka.common.serialization.ByteArraySerializer

class MessageSender(val brokers: String) {

  import MessageSender._

  val producer = new KafkaProducer[Array[Byte], Array[Byte]](
    providerProperties(brokers, classOf[ByteArraySerializer].getName, classOf[ByteArraySerializer].getName))

  def writeKeyValue(topic: String, key: Array[Byte], value: Array[Byte]): Unit = {
    val result = producer.send(new ProducerRecord[Array[Byte], Array[Byte]](topic, key, value)).get
    producer.flush()
  }

  def writeValue(topic: String, value: Array[Byte]): Unit = {
    val result = producer.send(
      new ProducerRecord[Array[Byte], Array[Byte]](topic, null.asInstanceOf[Array[Byte]], value)).get
    producer.flush()
  }

  def batchWriteValue(topic: String, batch: Seq[Array[Byte]]): Seq[RecordMetadata] = {
    val result = batch.map(value =>
      producer.send(new ProducerRecord[Array[Byte], Array[Byte]](topic, null.asInstanceOf[Array[Byte]], value)).get)
    producer.flush()
    result
  }

  def close(): Unit = {
    producer.close()
  }
}
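A brief usage sketch of the MessageSender class above, assuming a reachable broker; the broker address and topic name are placeholders, not taken from the project. The blocking `.get` inside batchWriteValue means the returned Seq[RecordMetadata] already contains the broker-assigned partitions and offsets.

import com.lightbend.modelserving.client.MessageSender
import org.apache.kafka.clients.producer.RecordMetadata

// Hypothetical usage; "localhost:9092" and "example-topic" are placeholders.
val sender = new MessageSender("localhost:9092")
val metadata: Seq[RecordMetadata] =
  sender.batchWriteValue("example-topic", Seq("a".getBytes, "b".getBytes))
metadata.foreach(md => println(s"partition=${md.partition}, offset=${md.offset}"))
sender.close()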
Example 13
Source File: Kafka.scala From event-sourcing-kafka-streams with MIT License
package org.amitayh.invoices.web

import java.time.Duration
import java.util.Collections.singletonList
import java.util.Properties

import cats.effect._
import cats.syntax.apply._
import cats.syntax.functor._
import fs2._
import org.amitayh.invoices.common.Config
import org.amitayh.invoices.common.Config.Topics.Topic
import org.apache.kafka.clients.consumer._
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord, RecordMetadata}
import org.log4s.{Logger, getLogger}

import scala.collection.JavaConverters._

object Kafka {

  trait Producer[F[_], K, V] {
    def send(key: K, value: V): F[RecordMetadata]
  }

  object Producer {
    def apply[F[_]: Async, K, V](producer: KafkaProducer[K, V], topic: Topic[K, V]): Producer[F, K, V] =
      (key: K, value: V) => Async[F].async { cb =>
        val record = new ProducerRecord(topic.name, key, value)
        producer.send(record, (metadata: RecordMetadata, exception: Exception) => {
          if (exception != null) cb(Left(exception))
          else cb(Right(metadata))
        })
      }
  }

  def producer[F[_]: Async, K, V](topic: Topic[K, V]): Resource[F, Producer[F, K, V]] = Resource {
    val create = Sync[F].delay {
      val props = new Properties
      props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, Config.BootstrapServers)
      new KafkaProducer[K, V](props, topic.keySerializer, topic.valueSerializer)
    }
    create.map(producer => (Producer(producer, topic), close(producer)))
  }

  def subscribe[F[_]: Sync, K, V](topic: Topic[K, V], groupId: String): Stream[F, (K, V)] = {
    val create = Sync[F].delay {
      val props = new Properties
      props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, Config.BootstrapServers)
      props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId)
      val consumer = new KafkaConsumer(props, topic.keyDeserializer, topic.valueDeserializer)
      consumer.subscribe(singletonList(topic.name))
      consumer
    }
    Stream.bracket(create)(close[F]).flatMap(consume[F, K, V])
  }

  private val logger: Logger = getLogger

  def log[F[_]: Sync](msg: String): F[Unit] = Sync[F].delay(logger.info(msg))

  private def consume[F[_]: Sync, K, V](consumer: KafkaConsumer[K, V]): Stream[F, (K, V)] = for {
    records <- Stream.repeatEval(Sync[F].delay(consumer.poll(Duration.ofSeconds(1))))
    record <- Stream.emits(records.iterator.asScala.toSeq)
  } yield record.key -> record.value

  private def close[F[_]: Sync](producer: KafkaProducer[_, _]): F[Unit] =
    Sync[F].delay(producer.close()) *> log(s"Producer closed")

  private def close[F[_]: Sync](consumer: KafkaConsumer[_, _]): F[Unit] =
    Sync[F].delay(consumer.close()) *> log("Consumer closed")
}