com.google.common.util.concurrent.Futures Scala Examples
The following examples show how to use com.google.common.util.concurrent.Futures in Scala. The originating project, source file, and license are noted above each example.
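Most of the examples below follow the same bridging pattern: register a FutureCallback on a Guava ListenableFuture with Futures.addCallback, and complete a Scala Promise from it. Below is a minimal, self-contained sketch of that pattern; the object and method names are illustrative, not taken from any of the projects that follow. (Note that recent Guava versions require an explicit Executor argument to addCallback; these examples all use the older two-argument overload.)

    import com.google.common.util.concurrent.{FutureCallback, Futures, ListenableFuture}

    import scala.concurrent.{Await, Future, Promise}
    import scala.concurrent.duration._

    object ListenableFutureBridge {
      // Complete a Scala Promise from the Guava callback; the resulting
      // scala.concurrent.Future composes with map/flatMap as usual.
      def toScala[T](lf: ListenableFuture[T]): Future[T] = {
        val p = Promise[T]()
        Futures.addCallback(lf, new FutureCallback[T] {
          def onSuccess(result: T): Unit = p.success(result)
          def onFailure(t: Throwable): Unit = p.failure(t)
        })
        p.future
      }

      def main(args: Array[String]): Unit = {
        val answer = toScala(Futures.immediateFuture(42))
        println(Await.result(answer, 1.second)) // prints 42
      }
    }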
Example 1
Source File: KPLBasedKinesisTestUtils.scala From drizzle-spark with Apache License 2.0
package org.apache.spark.streaming.kinesis

import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

import com.amazonaws.services.kinesis.producer.{KinesisProducer => KPLProducer, KinesisProducerConfiguration, UserRecordResult}
import com.google.common.util.concurrent.{FutureCallback, Futures}

private[kinesis] class KPLBasedKinesisTestUtils extends KinesisTestUtils {
  override protected def getProducer(aggregate: Boolean): KinesisDataGenerator = {
    if (!aggregate) {
      new SimpleDataGenerator(kinesisClient)
    } else {
      new KPLDataGenerator(regionName)
    }
  }
}

private[kinesis] class KPLDataGenerator(regionName: String) extends KinesisDataGenerator {

  private lazy val producer: KPLProducer = {
    val conf = new KinesisProducerConfiguration()
      .setRecordMaxBufferedTime(1000)
      .setMaxConnections(1)
      .setRegion(regionName)
      .setMetricsLevel("none")

    new KPLProducer(conf)
  }

  override def sendData(streamName: String, data: Seq[Int]): Map[String, Seq[(Int, String)]] = {
    val shardIdToSeqNumbers = new mutable.HashMap[String, ArrayBuffer[(Int, String)]]()
    data.foreach { num =>
      val str = num.toString
      val data = ByteBuffer.wrap(str.getBytes(StandardCharsets.UTF_8))
      val future = producer.addUserRecord(streamName, str, data)
      val kinesisCallBack = new FutureCallback[UserRecordResult]() {
        override def onFailure(t: Throwable): Unit = {} // do nothing
        override def onSuccess(result: UserRecordResult): Unit = {
          val shardId = result.getShardId
          val seqNumber = result.getSequenceNumber()
          val sentSeqNumbers = shardIdToSeqNumbers.getOrElseUpdate(shardId,
            new ArrayBuffer[(Int, String)]())
          sentSeqNumbers += ((num, seqNumber))
        }
      }
      Futures.addCallback(future, kinesisCallBack)
    }
    producer.flushSync()
    shardIdToSeqNumbers.toMap
  }
}
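A note on the design: the callbacks registered with Futures.addCallback complete asynchronously as records are accepted, and the producer.flushSync() call that follows blocks until every buffered record has finished, so shardIdToSeqNumbers is fully populated before sendData returns its snapshot. The same structure recurs in the other KPL-based examples below.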
Example 2
Source File: package.scala From BusFloatingData with Apache License 2.0
package de.nierbeck.floating.data

import com.google.common.util.concurrent.{FutureCallback, Futures, ListenableFuture}
import de.nierbeck.floating.data.domain.{BoundingBox, LatLon}

import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.{Failure, Success, Try}

package object server {

  val Traversable = scala.collection.immutable.Traversable
  type Traversable[+A] = scala.collection.immutable.Traversable[A]

  val Iterable = scala.collection.immutable.Iterable
  type Iterable[+A] = scala.collection.immutable.Iterable[A]

  val Seq = scala.collection.immutable.Seq
  type Seq[+A] = scala.collection.immutable.Seq[A]

  val IndexedSeq = scala.collection.immutable.IndexedSeq
  type IndexedSeq[+A] = scala.collection.immutable.IndexedSeq[A]

  def futureToFutureTry[T](f: Future[T])(implicit ec: ExecutionContext): Future[Try[T]] =
    f.map(Success(_)).recover { case exception: Exception => Failure(exception) }

  implicit class RichListenableFuture[T](lf: ListenableFuture[T]) {
    def toFuture: Future[T] = {
      val p = Promise[T]()
      Futures.addCallback(lf, new FutureCallback[T] {
        def onFailure(t: Throwable): Unit = p failure t
        def onSuccess(result: T): Unit = p success result
      })
      p.future
    }
  }

  def toBoundingBox(bbox: String): BoundingBox = {
    val bboxCoords: Array[String] = bbox.split(",")
    val boundingBox: BoundingBox =
      new BoundingBox(
        LatLon(bboxCoords(0).toFloat, bboxCoords(1).toFloat),
        LatLon(bboxCoords(2).toFloat, bboxCoords(3).toFloat))
    boundingBox
  }
}
Example 3
Source File: KPLBasedKinesisTestUtils.scala From sparkoscope with Apache License 2.0
package org.apache.spark.streaming.kinesis

import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

import com.amazonaws.services.kinesis.producer.{KinesisProducer => KPLProducer, KinesisProducerConfiguration, UserRecordResult}
import com.google.common.util.concurrent.{FutureCallback, Futures}

private[kinesis] class KPLBasedKinesisTestUtils extends KinesisTestUtils {
  override protected def getProducer(aggregate: Boolean): KinesisDataGenerator = {
    if (!aggregate) {
      new SimpleDataGenerator(kinesisClient)
    } else {
      new KPLDataGenerator(regionName)
    }
  }
}

private[kinesis] class KPLDataGenerator(regionName: String) extends KinesisDataGenerator {

  private lazy val producer: KPLProducer = {
    val conf = new KinesisProducerConfiguration()
      .setRecordMaxBufferedTime(1000)
      .setMaxConnections(1)
      .setRegion(regionName)
      .setMetricsLevel("none")

    new KPLProducer(conf)
  }

  override def sendData(streamName: String, data: Seq[Int]): Map[String, Seq[(Int, String)]] = {
    val shardIdToSeqNumbers = new mutable.HashMap[String, ArrayBuffer[(Int, String)]]()
    data.foreach { num =>
      val str = num.toString
      val data = ByteBuffer.wrap(str.getBytes(StandardCharsets.UTF_8))
      val future = producer.addUserRecord(streamName, str, data)
      val kinesisCallBack = new FutureCallback[UserRecordResult]() {
        override def onFailure(t: Throwable): Unit = {} // do nothing
        override def onSuccess(result: UserRecordResult): Unit = {
          val shardId = result.getShardId
          val seqNumber = result.getSequenceNumber()
          val sentSeqNumbers = shardIdToSeqNumbers.getOrElseUpdate(shardId,
            new ArrayBuffer[(Int, String)]())
          sentSeqNumbers += ((num, seqNumber))
        }
      }
      Futures.addCallback(future, kinesisCallBack)
    }
    producer.flushSync()
    shardIdToSeqNumbers.toMap
  }
}
Example 4
Source File: KPLBasedKinesisTestUtils.scala From multi-tenancy-spark with Apache License 2.0
package org.apache.spark.streaming.kinesis

import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

import com.amazonaws.services.kinesis.producer.{KinesisProducer => KPLProducer, KinesisProducerConfiguration, UserRecordResult}
import com.google.common.util.concurrent.{FutureCallback, Futures}

private[kinesis] class KPLBasedKinesisTestUtils extends KinesisTestUtils {
  override protected def getProducer(aggregate: Boolean): KinesisDataGenerator = {
    if (!aggregate) {
      new SimpleDataGenerator(kinesisClient)
    } else {
      new KPLDataGenerator(regionName)
    }
  }
}

private[kinesis] class KPLDataGenerator(regionName: String) extends KinesisDataGenerator {

  private lazy val producer: KPLProducer = {
    val conf = new KinesisProducerConfiguration()
      .setRecordMaxBufferedTime(1000)
      .setMaxConnections(1)
      .setRegion(regionName)
      .setMetricsLevel("none")

    new KPLProducer(conf)
  }

  override def sendData(streamName: String, data: Seq[Int]): Map[String, Seq[(Int, String)]] = {
    val shardIdToSeqNumbers = new mutable.HashMap[String, ArrayBuffer[(Int, String)]]()
    data.foreach { num =>
      val str = num.toString
      val data = ByteBuffer.wrap(str.getBytes(StandardCharsets.UTF_8))
      val future = producer.addUserRecord(streamName, str, data)
      val kinesisCallBack = new FutureCallback[UserRecordResult]() {
        override def onFailure(t: Throwable): Unit = {} // do nothing
        override def onSuccess(result: UserRecordResult): Unit = {
          val shardId = result.getShardId
          val seqNumber = result.getSequenceNumber()
          val sentSeqNumbers = shardIdToSeqNumbers.getOrElseUpdate(shardId,
            new ArrayBuffer[(Int, String)]())
          sentSeqNumbers += ((num, seqNumber))
        }
      }
      Futures.addCallback(future, kinesisCallBack)
    }
    producer.flushSync()
    shardIdToSeqNumbers.toMap
  }
}
Example 5
Source File: KPLBasedKinesisTestUtils.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.streaming.kinesis

import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

import com.amazonaws.services.kinesis.producer.{KinesisProducer => KPLProducer, KinesisProducerConfiguration, UserRecordResult}
import com.google.common.util.concurrent.{FutureCallback, Futures}

private[kinesis] class KPLBasedKinesisTestUtils(streamShardCount: Int = 2)
  extends KinesisTestUtils(streamShardCount) {

  override protected def getProducer(aggregate: Boolean): KinesisDataGenerator = {
    if (!aggregate) {
      new SimpleDataGenerator(kinesisClient)
    } else {
      new KPLDataGenerator(regionName)
    }
  }
}

private[kinesis] class KPLDataGenerator(regionName: String) extends KinesisDataGenerator {

  private lazy val producer: KPLProducer = {
    val conf = new KinesisProducerConfiguration()
      .setRecordMaxBufferedTime(1000)
      .setMaxConnections(1)
      .setRegion(regionName)
      .setMetricsLevel("none")

    new KPLProducer(conf)
  }

  override def sendData(streamName: String, data: Seq[Int]): Map[String, Seq[(Int, String)]] = {
    val shardIdToSeqNumbers = new mutable.HashMap[String, ArrayBuffer[(Int, String)]]()
    data.foreach { num =>
      val str = num.toString
      val data = ByteBuffer.wrap(str.getBytes(StandardCharsets.UTF_8))
      val future = producer.addUserRecord(streamName, str, data)
      val kinesisCallBack = new FutureCallback[UserRecordResult]() {
        override def onFailure(t: Throwable): Unit = {} // do nothing
        override def onSuccess(result: UserRecordResult): Unit = {
          val shardId = result.getShardId
          val seqNumber = result.getSequenceNumber()
          val sentSeqNumbers = shardIdToSeqNumbers.getOrElseUpdate(shardId,
            new ArrayBuffer[(Int, String)]())
          sentSeqNumbers += ((num, seqNumber))
        }
      }
      Futures.addCallback(future, kinesisCallBack)
    }
    producer.flushSync()
    shardIdToSeqNumbers.toMap
  }
}
Example 6
Source File: KinesisWriteTask.scala From kinesis-sql with Apache License 2.0
package org.apache.spark.sql.kinesis

import java.nio.ByteBuffer

import com.amazonaws.services.kinesis.producer.{KinesisProducer, UserRecordResult}
import com.google.common.util.concurrent.{FutureCallback, Futures}

import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Attribute, Cast, UnsafeProjection}
import org.apache.spark.sql.types.{BinaryType, StringType}

private[kinesis] class KinesisWriteTask(producerConfiguration: Map[String, String],
                                        inputSchema: Seq[Attribute]) extends Logging {

  private var producer: KinesisProducer = _
  private val projection = createProjection
  private val streamName = producerConfiguration.getOrElse(
    KinesisSourceProvider.SINK_STREAM_NAME_KEY, "")

  def execute(iterator: Iterator[InternalRow]): Unit = {
    producer = CachedKinesisProducer.getOrCreate(producerConfiguration)
    while (iterator.hasNext) {
      val currentRow = iterator.next()
      val projectedRow = projection(currentRow)
      val partitionKey = projectedRow.getString(0)
      val data = projectedRow.getBinary(1)

      sendData(partitionKey, data)
    }
  }

  def sendData(partitionKey: String, data: Array[Byte]): String = {
    var sentSeqNumbers = new String

    val future = producer.addUserRecord(streamName, partitionKey, ByteBuffer.wrap(data))

    val kinesisCallBack = new FutureCallback[UserRecordResult]() {

      override def onFailure(t: Throwable): Unit = {
        logError(s"Writing to $streamName failed due to ${t.getCause}")
      }

      override def onSuccess(result: UserRecordResult): Unit = {
        val shardId = result.getShardId
        sentSeqNumbers = result.getSequenceNumber
      }
    }
    Futures.addCallback(future, kinesisCallBack)

    producer.flushSync()
    sentSeqNumbers
  }

  def close(): Unit = {
    if (producer != null) {
      producer.flush()
      producer = null
    }
  }

  private def createProjection: UnsafeProjection = {
    val partitionKeyExpression = inputSchema
      .find(_.name == KinesisWriter.PARTITION_KEY_ATTRIBUTE_NAME).getOrElse(
        throw new IllegalStateException("Required attribute " +
          s"'${KinesisWriter.PARTITION_KEY_ATTRIBUTE_NAME}' not found"))

    partitionKeyExpression.dataType match {
      case StringType | BinaryType => // ok
      case t =>
        throw new IllegalStateException(s"${KinesisWriter.PARTITION_KEY_ATTRIBUTE_NAME} " +
          "attribute type must be a String or BinaryType")
    }

    val dataExpression = inputSchema.find(_.name == KinesisWriter.DATA_ATTRIBUTE_NAME).getOrElse(
      throw new IllegalStateException("Required attribute " +
        s"'${KinesisWriter.DATA_ATTRIBUTE_NAME}' not found")
    )

    dataExpression.dataType match {
      case StringType | BinaryType => // ok
      case t =>
        throw new IllegalStateException(s"${KinesisWriter.DATA_ATTRIBUTE_NAME} " +
          "attribute type must be a String or BinaryType")
    }

    UnsafeProjection.create(
      Seq(Cast(partitionKeyExpression, StringType), Cast(dataExpression, StringType)),
      inputSchema)
  }
}
Example 7
Source File: KPLBasedKinesisTestUtils.scala From BigDatalog with Apache License 2.0
package org.apache.spark.streaming.kinesis

import java.nio.ByteBuffer

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

import com.amazonaws.services.kinesis.producer.{KinesisProducer => KPLProducer, KinesisProducerConfiguration, UserRecordResult}
import com.google.common.util.concurrent.{FutureCallback, Futures}

private[kinesis] class KPLBasedKinesisTestUtils extends KinesisTestUtils {
  override protected def getProducer(aggregate: Boolean): KinesisDataGenerator = {
    if (!aggregate) {
      new SimpleDataGenerator(kinesisClient)
    } else {
      new KPLDataGenerator(regionName)
    }
  }
}

private[kinesis] class KPLDataGenerator(regionName: String) extends KinesisDataGenerator {

  private lazy val producer: KPLProducer = {
    val conf = new KinesisProducerConfiguration()
      .setRecordMaxBufferedTime(1000)
      .setMaxConnections(1)
      .setRegion(regionName)
      .setMetricsLevel("none")

    new KPLProducer(conf)
  }

  override def sendData(streamName: String, data: Seq[Int]): Map[String, Seq[(Int, String)]] = {
    val shardIdToSeqNumbers = new mutable.HashMap[String, ArrayBuffer[(Int, String)]]()
    data.foreach { num =>
      val str = num.toString
      val data = ByteBuffer.wrap(str.getBytes())
      val future = producer.addUserRecord(streamName, str, data)
      val kinesisCallBack = new FutureCallback[UserRecordResult]() {
        override def onFailure(t: Throwable): Unit = {} // do nothing
        override def onSuccess(result: UserRecordResult): Unit = {
          val shardId = result.getShardId
          val seqNumber = result.getSequenceNumber()
          val sentSeqNumbers = shardIdToSeqNumbers.getOrElseUpdate(shardId,
            new ArrayBuffer[(Int, String)]())
          sentSeqNumbers += ((num, seqNumber))
        }
      }
      Futures.addCallback(future, kinesisCallBack)
    }
    producer.flushSync()
    shardIdToSeqNumbers.toMap
  }
}
Example 8
Source File: JavaConverters.scala From troy with Apache License 2.0
package troy
package driver

import com.google.common.util.concurrent.{ FutureCallback, Futures, ListenableFuture }

import scala.concurrent.{ Future, Promise }

object JavaConverters {

  // http://stackoverflow.com/a/19528638/234998
  implicit class RichListenableFuture[T](lf: ListenableFuture[T]) {
    def asScala: Future[T] = {
      val p = Promise[T]()
      Futures.addCallback(lf, new FutureCallback[T] {
        def onFailure(t: Throwable): Unit = p failure t
        def onSuccess(result: T): Unit = p success result
      })
      p.future
    }
  }
}
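A quick usage sketch for the asScala enrichment above. It uses a Guava immediate future so the snippet stays self-contained; in troy itself the ListenableFuture would come from the Cassandra driver.

    import com.google.common.util.concurrent.Futures
    import troy.driver.JavaConverters._

    import scala.concurrent.Await
    import scala.concurrent.duration._

    object AsScalaDemo extends App {
      // An already-completed ListenableFuture, converted to a scala.concurrent.Future.
      val greeting = Futures.immediateFuture("hello").asScala
      println(Await.result(greeting, 1.second)) // prints "hello"
    }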
Example 9
Source File: CassandraResultSetWrapper.scala From stream-reactor with Apache License 2.0
package com.datamountaineer.streamreactor.connect.cassandra.utils

// The original excerpt was truncated; the imports and enclosing object below are
// reconstructed to match the near-identical CassandraWrapper example that follows.
import com.datastax.driver.core.{ResultSet, ResultSetFuture}
import com.google.common.util.concurrent.{FutureCallback, Futures}

import scala.concurrent.{Future, Promise}
import scala.language.implicitConversions

object CassandraResultSetWrapper {

  implicit def resultSetFutureToScala(f: ResultSetFuture): Future[ResultSet] = {
    val p = Promise[ResultSet]()
    Futures.addCallback(f, new FutureCallback[ResultSet] {
      def onSuccess(r: ResultSet): Unit = p success r
      def onFailure(t: Throwable): Unit = p failure t
    })
    p.future
  }
}
Example 10
Source File: CassandraWrapper.scala From stream-reactor with Apache License 2.0
package com.datamountaineer.streamreactor.connect.cassandra

import com.datastax.driver.core.{ResultSet, ResultSetFuture}
import com.google.common.util.concurrent.{FutureCallback, Futures}

import scala.concurrent.{Future, Promise}
import scala.language.{implicitConversions, postfixOps}

object CassandraWrapper {

  implicit def resultSetFutureToScala(f: ResultSetFuture): Future[ResultSet] = {
    val p = Promise[ResultSet]()
    Futures.addCallback(f, new FutureCallback[ResultSet] {
      def onSuccess(r: ResultSet): Unit = p success r
      def onFailure(t: Throwable): Unit = p failure t
    })
    p.future
  }
}
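With the implicit conversion in scope, a driver ResultSetFuture can be passed wherever a Scala Future[ResultSet] is expected. A hypothetical usage sketch follows; the Session parameter and the query string are placeholders, not from the project.

    import com.datamountaineer.streamreactor.connect.cassandra.CassandraWrapper.resultSetFutureToScala
    import com.datastax.driver.core.{ResultSet, Session}

    import scala.concurrent.ExecutionContext.Implicits.global
    import scala.concurrent.Future

    object CassandraWrapperDemo {
      // executeAsync returns a ResultSetFuture; the implicit conversion above
      // turns it into a scala.concurrent.Future[ResultSet] we can map over.
      def rowCount(session: Session): Future[Int] = {
        val rs: Future[ResultSet] = session.executeAsync("SELECT * FROM demo_keyspace.demo_table")
        rs.map(_.all().size())
      }
    }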
Example 11
Source File: StartAwsCliAction.scala From berilia with Apache License 2.0
package com.criteo.dev.cluster.aws

import com.criteo.dev.cluster._
import com.criteo.dev.cluster.aws.AwsUtilities.NodeRole
import com.criteo.dev.cluster.config.GlobalConfig
import com.google.common.util.concurrent.Futures
import org.jclouds.compute.ComputeService
import org.jclouds.compute.domain.NodeMetadata.Status
import org.slf4j.LoggerFactory

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global

@Public
object StartAwsCliAction extends CliAction[Unit] {

  override def command: String = "start-aws"

  override def usageArgs = List(Option("cluster.id"))

  override def help() = "Starting a stopped cluster with given cluster.id. " +
    "If no cluster.id is given, start all stopped clusters owned by this user. " +
    "Note that AWS assigns new public ips for restarted nodes in the cluster. " +
    "Expiration countdown is extended for restarted clusters."

  private val logger = LoggerFactory.getLogger(StartAwsCliAction.getClass)

  def applyInternal(args: List[String], config: GlobalConfig): Unit = {
    logger.info("Connecting to AWS to fetch nodes to start.")
    val conf = config.backCompat
    var clusters = getClusters(args, conf)
    clusters = clusters.filter(u => u.master.getStatus().equals(Status.SUSPENDED))
    if (clusters.size == 0) {
      logger.info("No clusters found matching criteria.")
    }

    // start nodes in parallel
    val nodesToStart = clusters.flatMap(_.slaves) ++ clusters.map(_.master)
    logger.info(s"Starting ${nodesToStart.size} nodes in parallel.")
    val computeService = AwsUtilities.getComputeService(conf)
    val startFutures = nodesToStart.filter(u => u.getStatus().equals(Status.SUSPENDED))
      .map(u => GeneralUtilities.getFuture {
        val shortId = AwsUtilities.stripRegion(conf, u.getId)
        logger.info(s"Starting instance $shortId")
        AwsUtilities.retryAwsAction(new RetryableStart(computeService, u.getId()))
      })

    val aggStartFuture = Future.sequence(startFutures)
    Await.result(aggStartFuture, Duration.Inf)

    // lookup nodes and reconfigure.
    // Sometimes /etc/hosts gets regenerated on new instances, sometimes they do not.
    val startedClusters = clusters.map(_.master.getId).toSet
    val newClusters = getClusters(args, conf).filter(c => startedClusters.contains(c.master.getId))
    ConfigureHostsAction(config.target.aws, newClusters)

    newClusters.foreach(c => ExtendAwsCliAction.extend(conf, c, reset = true))

    logger.info("Restarting services in parallel.")
    StartClusterAction(config.target.aws, newClusters)

    // print out all the infos.
    newClusters.foreach(c => AwsUtilities.printClusterInfo(conf, c))
  }

  def getClusters(args: List[String], conf: Map[String, String]): Iterable[JcloudCluster] = {
    if (args.length == 1) {
      // instance id is optional
      val instanceId = args(0)
      Set(AwsUtilities.getUserCluster(conf, instanceId))
    } else {
      AwsUtilities.getUserClusters(conf)
    }
  }
}

class RetryableStart(computeService: ComputeService, nodeid: String) extends Retryable[Any] {
  def action: Unit = {
    computeService.resumeNode(nodeid)
  }
}
Example 12
Source File: BigtableDoFnTest.scala From scio with Apache License 2.0
package com.spotify.scio.bigtable

import java.util.concurrent.ConcurrentLinkedQueue

import com.google.cloud.bigtable.grpc.BigtableSession
import com.google.common.cache.{Cache, CacheBuilder}
import com.google.common.util.concurrent.{Futures, ListenableFuture}
import com.spotify.scio.testing._
import com.spotify.scio.transforms.BaseAsyncLookupDoFn.CacheSupplier

import scala.jdk.CollectionConverters._
import scala.util.{Failure, Success}

class BigtableDoFnTest extends PipelineSpec {
  "BigtableDoFn" should "work" in {
    val fn = new TestBigtableDoFn
    val output = runWithData(1 to 10)(_.parDo(fn))
      .map(kv => (kv.getKey, kv.getValue.get()))
    output should contain theSameElementsAs (1 to 10).map(x => (x, x.toString))
  }

  it should "work with cache" in {
    val fn = new TestCachingBigtableDoFn
    val output = runWithData((1 to 10) ++ (6 to 15))(_.parDo(fn))
      .map(kv => (kv.getKey, kv.getValue.get()))
    output should contain theSameElementsAs ((1 to 10) ++ (6 to 15)).map(x => (x, x.toString))
    BigtableDoFnTest.queue.asScala.toSet should contain theSameElementsAs (1 to 15)
    BigtableDoFnTest.queue.size() should be <= 20
  }

  it should "work with failures" in {
    val fn = new TestFailingBigtableDoFn
    val output = runWithData(1 to 10)(_.parDo(fn)).map { kv =>
      val r = kv.getValue.asScala match {
        case Success(v) => v
        case Failure(e) => e.getMessage
      }
      (kv.getKey, r)
    }
    output should contain theSameElementsAs (1 to 10).map { x =>
      val prefix = if (x % 2 == 0) "success" else "failure"
      (x, prefix + x.toString)
    }
  }
}

object BigtableDoFnTest {
  val queue: ConcurrentLinkedQueue[Int] = new ConcurrentLinkedQueue[Int]()
}

class TestBigtableDoFn extends BigtableDoFn[Int, String](null) {
  override def newClient(): BigtableSession = null
  override def asyncLookup(session: BigtableSession, input: Int): ListenableFuture[String] =
    Futures.immediateFuture(input.toString)
}

class TestCachingBigtableDoFn extends BigtableDoFn[Int, String](null, 100, new TestCacheSupplier) {
  override def newClient(): BigtableSession = null
  override def asyncLookup(session: BigtableSession, input: Int): ListenableFuture[String] = {
    BigtableDoFnTest.queue.add(input)
    Futures.immediateFuture(input.toString)
  }
}

class TestFailingBigtableDoFn extends BigtableDoFn[Int, String](null) {
  override def newClient(): BigtableSession = null
  override def asyncLookup(session: BigtableSession, input: Int): ListenableFuture[String] =
    if (input % 2 == 0) {
      Futures.immediateFuture("success" + input)
    } else {
      Futures.immediateFailedFuture(new RuntimeException("failure" + input))
    }
}

class TestCacheSupplier extends CacheSupplier[Int, String, java.lang.Long] {
  override def createCache(): Cache[java.lang.Long, String] =
    CacheBuilder.newBuilder().build[java.lang.Long, String]()
  override def getKey(input: Int): java.lang.Long = input.toLong
}
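The test DoFns above rely on Futures.immediateFuture and Futures.immediateFailedFuture, which return already-completed ListenableFutures. A minimal sketch of their behavior, assuming only Guava on the classpath; the string values are illustrative:

    import com.google.common.util.concurrent.Futures

    import scala.util.Try

    object ImmediateFuturesDemo extends App {
      // An already-completed future: get() returns without blocking.
      val ok = Futures.immediateFuture("done")
      println(ok.get()) // prints "done"

      // get() on a failed future throws an ExecutionException wrapping the cause.
      val failed = Futures.immediateFailedFuture[String](new RuntimeException("boom"))
      println(Try(failed.get()).isFailure) // prints "true"
    }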