com.google.common.util.concurrent.FutureCallback Scala Examples
The following examples show how to use com.google.common.util.concurrent.FutureCallback.
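Nearly all of the examples share one pattern: register a FutureCallback on a Guava ListenableFuture and complete a Scala Promise from its two methods. Below is a minimal sketch of that bridge, assuming a recent Guava where addCallback takes an explicit executor; the object and method names are illustrative, not taken from any example on this page.

import com.google.common.util.concurrent.{FutureCallback, Futures, ListenableFuture, MoreExecutors}

import scala.concurrent.{Future, Promise}

object ListenableFutureBridge {
  // Complete a Scala Promise from the two FutureCallback methods and hand
  // back its Future. directExecutor() runs the callback on whichever thread
  // completes the ListenableFuture.
  def toScalaFuture[T](lf: ListenableFuture[T]): Future[T] = {
    val p = Promise[T]()
    Futures.addCallback(lf, new FutureCallback[T] {
      override def onSuccess(result: T): Unit = p.success(result)
      override def onFailure(t: Throwable): Unit = p.failure(t)
    }, MoreExecutors.directExecutor())
    p.future
  }
}

The examples below use the older two-argument addCallback overload, which Guava later deprecated in favour of the explicit-executor form shown above.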
Example 1
Source File: KPLBasedKinesisTestUtils.scala From drizzle-spark with Apache License 2.0
package org.apache.spark.streaming.kinesis

import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

import com.amazonaws.services.kinesis.producer.{KinesisProducer => KPLProducer, KinesisProducerConfiguration, UserRecordResult}
import com.google.common.util.concurrent.{FutureCallback, Futures}

private[kinesis] class KPLBasedKinesisTestUtils extends KinesisTestUtils {
  override protected def getProducer(aggregate: Boolean): KinesisDataGenerator = {
    if (!aggregate) {
      new SimpleDataGenerator(kinesisClient)
    } else {
      new KPLDataGenerator(regionName)
    }
  }
}

private[kinesis] class KPLDataGenerator(regionName: String) extends KinesisDataGenerator {

  private lazy val producer: KPLProducer = {
    val conf = new KinesisProducerConfiguration()
      .setRecordMaxBufferedTime(1000)
      .setMaxConnections(1)
      .setRegion(regionName)
      .setMetricsLevel("none")

    new KPLProducer(conf)
  }

  override def sendData(streamName: String, data: Seq[Int]): Map[String, Seq[(Int, String)]] = {
    val shardIdToSeqNumbers = new mutable.HashMap[String, ArrayBuffer[(Int, String)]]()
    data.foreach { num =>
      val str = num.toString
      val data = ByteBuffer.wrap(str.getBytes(StandardCharsets.UTF_8))
      val future = producer.addUserRecord(streamName, str, data)
      val kinesisCallBack = new FutureCallback[UserRecordResult]() {
        override def onFailure(t: Throwable): Unit = {} // do nothing
        override def onSuccess(result: UserRecordResult): Unit = {
          val shardId = result.getShardId
          val seqNumber = result.getSequenceNumber()
          val sentSeqNumbers = shardIdToSeqNumbers.getOrElseUpdate(shardId,
            new ArrayBuffer[(Int, String)]())
          sentSeqNumbers += ((num, seqNumber))
        }
      }
      Futures.addCallback(future, kinesisCallBack)
    }
    producer.flushSync()
    shardIdToSeqNumbers.toMap
  }
}
Example 2
Source File: package.scala From BusFloatingData with Apache License 2.0
package de.nierbeck.floating.data

import com.google.common.util.concurrent.{FutureCallback, Futures, ListenableFuture}
import de.nierbeck.floating.data.domain.{BoundingBox, LatLon}

import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.{Failure, Success, Try}

package object server {

  val Traversable = scala.collection.immutable.Traversable
  type Traversable[+A] = scala.collection.immutable.Traversable[A]

  val Iterable = scala.collection.immutable.Iterable
  type Iterable[+A] = scala.collection.immutable.Iterable[A]

  val Seq = scala.collection.immutable.Seq
  type Seq[+A] = scala.collection.immutable.Seq[A]

  val IndexedSeq = scala.collection.immutable.IndexedSeq
  type IndexedSeq[+A] = scala.collection.immutable.IndexedSeq[A]

  def futureToFutureTry[T](f: Future[T])(implicit ec: ExecutionContext): Future[Try[T]] =
    f.map(Success(_)).recover { case exception: Exception => Failure(exception) }

  implicit class RichListenableFuture[T](lf: ListenableFuture[T]) {
    def toFuture: Future[T] = {
      val p = Promise[T]()
      Futures.addCallback(lf, new FutureCallback[T] {
        def onFailure(t: Throwable): Unit = p failure t
        def onSuccess(result: T): Unit = p success result
      })
      p.future
    }
  }

  def toBoundingBox(bbox: String): BoundingBox = {
    val bboxCoords: Array[String] = bbox.split(",")
    val boundingBox: BoundingBox = new BoundingBox(
      LatLon(bboxCoords(0).toFloat, bboxCoords(1).toFloat),
      LatLon(bboxCoords(2).toFloat, bboxCoords(3).toFloat))
    boundingBox
  }
}
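The futureToFutureTry helper in this package object lifts a failure into the returned value, so a collection of futures can be sequenced without failing fast. A short usage sketch follows; the values are illustrative, and note the helper only recovers Exception, so fatal Throwables would still fail the future.

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.Try

import de.nierbeck.floating.data.server._

object FutureTryUsage {
  // One succeeding and one failing future, for illustration only.
  val fs: List[Future[Int]] =
    List(Future(42), Future[Int](throw new IllegalStateException("boom")))

  // Mapping through futureToFutureTry means Future.sequence collects a
  // Try per element instead of failing on the first error.
  val all: Future[List[Try[Int]]] = Future.sequence(fs.map(futureToFutureTry(_)))
}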
Example 3
Source File: KPLBasedKinesisTestUtils.scala From sparkoscope with Apache License 2.0
package org.apache.spark.streaming.kinesis

import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

import com.amazonaws.services.kinesis.producer.{KinesisProducer => KPLProducer, KinesisProducerConfiguration, UserRecordResult}
import com.google.common.util.concurrent.{FutureCallback, Futures}

private[kinesis] class KPLBasedKinesisTestUtils extends KinesisTestUtils {
  override protected def getProducer(aggregate: Boolean): KinesisDataGenerator = {
    if (!aggregate) {
      new SimpleDataGenerator(kinesisClient)
    } else {
      new KPLDataGenerator(regionName)
    }
  }
}

private[kinesis] class KPLDataGenerator(regionName: String) extends KinesisDataGenerator {

  private lazy val producer: KPLProducer = {
    val conf = new KinesisProducerConfiguration()
      .setRecordMaxBufferedTime(1000)
      .setMaxConnections(1)
      .setRegion(regionName)
      .setMetricsLevel("none")

    new KPLProducer(conf)
  }

  override def sendData(streamName: String, data: Seq[Int]): Map[String, Seq[(Int, String)]] = {
    val shardIdToSeqNumbers = new mutable.HashMap[String, ArrayBuffer[(Int, String)]]()
    data.foreach { num =>
      val str = num.toString
      val data = ByteBuffer.wrap(str.getBytes(StandardCharsets.UTF_8))
      val future = producer.addUserRecord(streamName, str, data)
      val kinesisCallBack = new FutureCallback[UserRecordResult]() {
        override def onFailure(t: Throwable): Unit = {} // do nothing
        override def onSuccess(result: UserRecordResult): Unit = {
          val shardId = result.getShardId
          val seqNumber = result.getSequenceNumber()
          val sentSeqNumbers = shardIdToSeqNumbers.getOrElseUpdate(shardId,
            new ArrayBuffer[(Int, String)]())
          sentSeqNumbers += ((num, seqNumber))
        }
      }
      Futures.addCallback(future, kinesisCallBack)
    }
    producer.flushSync()
    shardIdToSeqNumbers.toMap
  }
}
Example 4
Source File: KPLBasedKinesisTestUtils.scala From multi-tenancy-spark with Apache License 2.0
package org.apache.spark.streaming.kinesis

import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

import com.amazonaws.services.kinesis.producer.{KinesisProducer => KPLProducer, KinesisProducerConfiguration, UserRecordResult}
import com.google.common.util.concurrent.{FutureCallback, Futures}

private[kinesis] class KPLBasedKinesisTestUtils extends KinesisTestUtils {
  override protected def getProducer(aggregate: Boolean): KinesisDataGenerator = {
    if (!aggregate) {
      new SimpleDataGenerator(kinesisClient)
    } else {
      new KPLDataGenerator(regionName)
    }
  }
}

private[kinesis] class KPLDataGenerator(regionName: String) extends KinesisDataGenerator {

  private lazy val producer: KPLProducer = {
    val conf = new KinesisProducerConfiguration()
      .setRecordMaxBufferedTime(1000)
      .setMaxConnections(1)
      .setRegion(regionName)
      .setMetricsLevel("none")

    new KPLProducer(conf)
  }

  override def sendData(streamName: String, data: Seq[Int]): Map[String, Seq[(Int, String)]] = {
    val shardIdToSeqNumbers = new mutable.HashMap[String, ArrayBuffer[(Int, String)]]()
    data.foreach { num =>
      val str = num.toString
      val data = ByteBuffer.wrap(str.getBytes(StandardCharsets.UTF_8))
      val future = producer.addUserRecord(streamName, str, data)
      val kinesisCallBack = new FutureCallback[UserRecordResult]() {
        override def onFailure(t: Throwable): Unit = {} // do nothing
        override def onSuccess(result: UserRecordResult): Unit = {
          val shardId = result.getShardId
          val seqNumber = result.getSequenceNumber()
          val sentSeqNumbers = shardIdToSeqNumbers.getOrElseUpdate(shardId,
            new ArrayBuffer[(Int, String)]())
          sentSeqNumbers += ((num, seqNumber))
        }
      }
      Futures.addCallback(future, kinesisCallBack)
    }
    producer.flushSync()
    shardIdToSeqNumbers.toMap
  }
}
Example 5
Source File: KPLBasedKinesisTestUtils.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.streaming.kinesis

import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

import com.amazonaws.services.kinesis.producer.{KinesisProducer => KPLProducer, KinesisProducerConfiguration, UserRecordResult}
import com.google.common.util.concurrent.{FutureCallback, Futures}

private[kinesis] class KPLBasedKinesisTestUtils(streamShardCount: Int = 2)
    extends KinesisTestUtils(streamShardCount) {
  override protected def getProducer(aggregate: Boolean): KinesisDataGenerator = {
    if (!aggregate) {
      new SimpleDataGenerator(kinesisClient)
    } else {
      new KPLDataGenerator(regionName)
    }
  }
}

private[kinesis] class KPLDataGenerator(regionName: String) extends KinesisDataGenerator {

  private lazy val producer: KPLProducer = {
    val conf = new KinesisProducerConfiguration()
      .setRecordMaxBufferedTime(1000)
      .setMaxConnections(1)
      .setRegion(regionName)
      .setMetricsLevel("none")

    new KPLProducer(conf)
  }

  override def sendData(streamName: String, data: Seq[Int]): Map[String, Seq[(Int, String)]] = {
    val shardIdToSeqNumbers = new mutable.HashMap[String, ArrayBuffer[(Int, String)]]()
    data.foreach { num =>
      val str = num.toString
      val data = ByteBuffer.wrap(str.getBytes(StandardCharsets.UTF_8))
      val future = producer.addUserRecord(streamName, str, data)
      val kinesisCallBack = new FutureCallback[UserRecordResult]() {
        override def onFailure(t: Throwable): Unit = {} // do nothing
        override def onSuccess(result: UserRecordResult): Unit = {
          val shardId = result.getShardId
          val seqNumber = result.getSequenceNumber()
          val sentSeqNumbers = shardIdToSeqNumbers.getOrElseUpdate(shardId,
            new ArrayBuffer[(Int, String)]())
          sentSeqNumbers += ((num, seqNumber))
        }
      }
      Futures.addCallback(future, kinesisCallBack)
    }
    producer.flushSync()
    shardIdToSeqNumbers.toMap
  }
}
Example 6
Source File: KinesisWriteTask.scala From kinesis-sql with Apache License 2.0
package org.apache.spark.sql.kinesis

import java.nio.ByteBuffer

import com.amazonaws.services.kinesis.producer.{KinesisProducer, UserRecordResult}
import com.google.common.util.concurrent.{FutureCallback, Futures}

import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Attribute, Cast, UnsafeProjection}
import org.apache.spark.sql.types.{BinaryType, StringType}

private[kinesis] class KinesisWriteTask(producerConfiguration: Map[String, String],
                                        inputSchema: Seq[Attribute]) extends Logging {

  private var producer: KinesisProducer = _
  private val projection = createProjection
  private val streamName = producerConfiguration.getOrElse(
    KinesisSourceProvider.SINK_STREAM_NAME_KEY, "")

  def execute(iterator: Iterator[InternalRow]): Unit = {
    producer = CachedKinesisProducer.getOrCreate(producerConfiguration)
    while (iterator.hasNext) {
      val currentRow = iterator.next()
      val projectedRow = projection(currentRow)
      val partitionKey = projectedRow.getString(0)
      val data = projectedRow.getBinary(1)

      sendData(partitionKey, data)
    }
  }

  def sendData(partitionKey: String, data: Array[Byte]): String = {
    var sentSeqNumbers = new String

    val future = producer.addUserRecord(streamName, partitionKey, ByteBuffer.wrap(data))

    val kinesisCallBack = new FutureCallback[UserRecordResult]() {
      override def onFailure(t: Throwable): Unit = {
        logError(s"Writing to $streamName failed due to ${t.getCause}")
      }

      override def onSuccess(result: UserRecordResult): Unit = {
        val shardId = result.getShardId
        sentSeqNumbers = result.getSequenceNumber
      }
    }
    Futures.addCallback(future, kinesisCallBack)

    producer.flushSync()
    sentSeqNumbers
  }

  def close(): Unit = {
    if (producer != null) {
      producer.flush()
      producer = null
    }
  }

  private def createProjection: UnsafeProjection = {
    val partitionKeyExpression = inputSchema
      .find(_.name == KinesisWriter.PARTITION_KEY_ATTRIBUTE_NAME).getOrElse(
        throw new IllegalStateException("Required attribute " +
          s"'${KinesisWriter.PARTITION_KEY_ATTRIBUTE_NAME}' not found"))

    partitionKeyExpression.dataType match {
      case StringType | BinaryType => // ok
      case t =>
        throw new IllegalStateException(s"${KinesisWriter.PARTITION_KEY_ATTRIBUTE_NAME} " +
          "attribute type must be a String or BinaryType")
    }

    val dataExpression = inputSchema.find(_.name == KinesisWriter.DATA_ATTRIBUTE_NAME).getOrElse(
      throw new IllegalStateException("Required attribute " +
        s"'${KinesisWriter.DATA_ATTRIBUTE_NAME}' not found")
    )

    dataExpression.dataType match {
      case StringType | BinaryType => // ok
      case t =>
        throw new IllegalStateException(s"${KinesisWriter.DATA_ATTRIBUTE_NAME} " +
          "attribute type must be a String or BinaryType")
    }

    UnsafeProjection.create(
      Seq(Cast(partitionKeyExpression, StringType), Cast(dataExpression, StringType)),
      inputSchema)
  }
}
Example 7
Source File: KPLBasedKinesisTestUtils.scala From BigDatalog with Apache License 2.0
package org.apache.spark.streaming.kinesis

import java.nio.ByteBuffer

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

import com.amazonaws.services.kinesis.producer.{KinesisProducer => KPLProducer, KinesisProducerConfiguration, UserRecordResult}
import com.google.common.util.concurrent.{FutureCallback, Futures}

private[kinesis] class KPLBasedKinesisTestUtils extends KinesisTestUtils {
  override protected def getProducer(aggregate: Boolean): KinesisDataGenerator = {
    if (!aggregate) {
      new SimpleDataGenerator(kinesisClient)
    } else {
      new KPLDataGenerator(regionName)
    }
  }
}

private[kinesis] class KPLDataGenerator(regionName: String) extends KinesisDataGenerator {

  private lazy val producer: KPLProducer = {
    val conf = new KinesisProducerConfiguration()
      .setRecordMaxBufferedTime(1000)
      .setMaxConnections(1)
      .setRegion(regionName)
      .setMetricsLevel("none")

    new KPLProducer(conf)
  }

  override def sendData(streamName: String, data: Seq[Int]): Map[String, Seq[(Int, String)]] = {
    val shardIdToSeqNumbers = new mutable.HashMap[String, ArrayBuffer[(Int, String)]]()
    data.foreach { num =>
      val str = num.toString
      val data = ByteBuffer.wrap(str.getBytes())
      val future = producer.addUserRecord(streamName, str, data)
      val kinesisCallBack = new FutureCallback[UserRecordResult]() {
        override def onFailure(t: Throwable): Unit = {} // do nothing
        override def onSuccess(result: UserRecordResult): Unit = {
          val shardId = result.getShardId
          val seqNumber = result.getSequenceNumber()
          val sentSeqNumbers = shardIdToSeqNumbers.getOrElseUpdate(shardId,
            new ArrayBuffer[(Int, String)]())
          sentSeqNumbers += ((num, seqNumber))
        }
      }
      Futures.addCallback(future, kinesisCallBack)
    }
    producer.flushSync()
    shardIdToSeqNumbers.toMap
  }
}
Example 8
Source File: JavaConverters.scala From troy with Apache License 2.0
package troy
package driver

import com.google.common.util.concurrent.{ FutureCallback, Futures, ListenableFuture }

import scala.concurrent.{ Future, Promise }

object JavaConverters {

  // http://stackoverflow.com/a/19528638/234998
  implicit class RichListenableFuture[T](lf: ListenableFuture[T]) {
    def asScala: Future[T] = {
      val p = Promise[T]()
      Futures.addCallback(lf, new FutureCallback[T] {
        def onFailure(t: Throwable): Unit = p failure t
        def onSuccess(result: T): Unit = p success result
      })
      p.future
    }
  }
}
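A hedged usage sketch for asScala: with the DataStax Java driver, Session.executeAsync returns a ResultSetFuture, which extends ListenableFuture[ResultSet], so the implicit class applies directly. The contact point and query below are illustrative.

import com.datastax.driver.core.{Cluster, ResultSet}
import troy.driver.JavaConverters._

import scala.concurrent.Future

object AsScalaUsage extends App {
  // Hypothetical cluster and query, for illustration only.
  val session = Cluster.builder().addContactPoint("127.0.0.1").build().connect()

  // ResultSetFuture extends ListenableFuture[ResultSet], so asScala
  // converts it to a Scala Future without further ceremony.
  val rows: Future[ResultSet] = session.executeAsync("SELECT * FROM ks.tbl").asScala
}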
Example 9
Source File: CassandraResultSetWrapper.scala From stream-reactor with Apache License 2.0
package com.datamountaineer.streamreactor.connect.cassandra.utils

// The original snippet omitted its imports and enclosing object; they are
// reconstructed here (mirroring the CassandraWrapper example below) so the
// code compiles. The object name follows the source file name.
import com.datastax.driver.core.{ResultSet, ResultSetFuture}
import com.google.common.util.concurrent.{FutureCallback, Futures}

import scala.concurrent.{Future, Promise}
import scala.language.implicitConversions

object CassandraResultSetWrapper {

  implicit def resultSetFutureToScala(f: ResultSetFuture): Future[ResultSet] = {
    val p = Promise[ResultSet]()
    Futures.addCallback(f, new FutureCallback[ResultSet] {
      def onSuccess(r: ResultSet): Unit = p success r
      def onFailure(t: Throwable): Unit = p failure t
    })
    p.future
  }
}
Example 10
Source File: CassandraWrapper.scala From stream-reactor with Apache License 2.0
package com.datamountaineer.streamreactor.connect.cassandra

import com.datastax.driver.core.{ResultSet, ResultSetFuture}
import com.google.common.util.concurrent.{FutureCallback, Futures}

import scala.concurrent.{Future, Promise}
import scala.language.{implicitConversions, postfixOps}

object CassandraWrapper {

  implicit def resultSetFutureToScala(f: ResultSetFuture): Future[ResultSet] = {
    val p = Promise[ResultSet]()
    Futures.addCallback(f, new FutureCallback[ResultSet] {
      def onSuccess(r: ResultSet): Unit = p success r
      def onFailure(t: Throwable): Unit = p failure t
    })
    p.future
  }
}
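Because resultSetFutureToScala is an implicit def rather than an extension method, a ResultSetFuture can be used directly wherever a Future[ResultSet] is expected. A hedged sketch of that call-site behaviour; the method name and query are illustrative.

import com.datamountaineer.streamreactor.connect.cassandra.CassandraWrapper.resultSetFutureToScala
import com.datastax.driver.core.{ResultSet, Session}

import scala.concurrent.Future

object WrapperUsage {
  // The implicit conversion kicks in at the expected type Future[ResultSet],
  // turning the driver's ResultSetFuture into a Scala Future.
  def lastRows(session: Session): Future[ResultSet] =
    session.executeAsync("SELECT now() FROM system.local")
}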