monix.reactive.Observable Scala Examples
The following examples show how to use monix.reactive.Observable.
All of them are taken from open-source projects; the source file, project, and license are noted above each example.
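Before diving into the project sources, here is a minimal, self-contained sketch (not taken from any project below) of the basic Observable workflow: build a stream, transform it, and run the resulting Task on a Scheduler.

import monix.execution.Scheduler.Implicits.global
import monix.reactive.Observable

object ObservableQuickStart extends App {
  // Build a stream from a collection, transform it, fold it into a Task, run it.
  val sum = Observable
    .fromIterable(1 to 10)
    .map(_ * 2)
    .sumL            // Task[Int]
    .runSyncUnsafe() // blocking the calling thread is fine for a demo
  println(sum)       // 110
}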
Example 1
Source File: DiscardingHandler.scala From Waves with MIT License
package com.wavesplatform.network

import com.wavesplatform.utils.{Schedulers, ScorexLogging}
import io.netty.channel.ChannelHandler.Sharable
import io.netty.channel.{ChannelDuplexHandler, ChannelHandlerContext}
import monix.execution.schedulers.SchedulerService
import monix.reactive.Observable

@Sharable
class DiscardingHandler(blockchainReadiness: Observable[Boolean]) extends ChannelDuplexHandler with ScorexLogging {
  private implicit val scheduler: SchedulerService = Schedulers.fixedPool(2, "discarding-handler")
  private val lastReadiness = lastObserved(blockchainReadiness)

  override def channelRead(ctx: ChannelHandlerContext, msg: AnyRef): Unit = msg match {
    case RawBytes(code @ (TransactionSpec.messageCode | PBTransactionSpec.messageCode), _) if !lastReadiness().contains(true) =>
      log.trace(s"${id(ctx)} Discarding incoming message $code")
    case _ => super.channelRead(ctx, msg)
  }
}
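The handler above calls a `lastObserved` helper that lives in the Waves `network` package object and is not shown here (like `RawBytes` and `id`, it comes from that package). A hypothetical reconstruction, for illustration only, of what such a helper can look like given how `lastReadiness()` is used: subscribe once and cache the latest element behind a thunk.

import monix.execution.Scheduler
import monix.reactive.Observable

// Hypothetical sketch, not the project's actual code.
def lastObserved[A](o: Observable[A])(implicit s: Scheduler): () => Option[A] = {
  @volatile var last = Option.empty[A]
  o.foreach(a => last = Some(a)) // eager subscription; remembers every element
  () => last
}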
Example 2
Source File: UtxPoolSynchronizerSpec.scala From Waves with MIT License
package com.wavesplatform.network

import java.util.concurrent.CountDownLatch

import com.wavesplatform.account.PublicKey
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.lang.ValidationError
import com.wavesplatform.settings.SynchronizationSettings.UtxSynchronizerSettings
import com.wavesplatform.transaction.smart.script.trace.TracedResult
import com.wavesplatform.transaction.{GenesisTransaction, Transaction}
import com.wavesplatform.utils.Schedulers
import io.netty.util.HashedWheelTimer
import monix.execution.atomic.AtomicInt
import monix.reactive.Observable
import org.scalatest.{BeforeAndAfterAll, FreeSpec, Matchers}

import scala.concurrent.duration._

class UtxPoolSynchronizerSpec extends FreeSpec with Matchers with BeforeAndAfterAll {
  private[this] val timer     = new HashedWheelTimer
  private[this] val scheduler = Schedulers.timeBoundedFixedPool(timer, 1.second, 2, "test-utx-sync")

  "UtxPoolSynchronizer" - {
    val latch   = new CountDownLatch(5)
    val counter = AtomicInt(10)

    def countTransactions(tx: Transaction): TracedResult[ValidationError, Boolean] = {
      if (counter.getAndDecrement() > 5) while (!Thread.currentThread().isInterrupted) {}
      else latch.countDown()
      TracedResult(Right(true))
    }

    "accepts only those transactions from network which can be validated quickly" in withUPS(countTransactions) { ups =>
      1 to 10 foreach { i =>
        ups.publish(GenesisTransaction.create(PublicKey(new Array[Byte](32)).toAddress, i * 10L, 0L).explicitGet())
      }
      latch.await()
      counter.get() shouldEqual 0
    }
  }

  private def withUPS(putIfNew: Transaction => TracedResult[ValidationError, Boolean])(f: UtxPoolSynchronizer => Unit): Unit = {
    val ups = new UtxPoolSynchronizerImpl(UtxSynchronizerSettings(1000, 2, 1000, true), putIfNew, (_, _) => (), Observable.empty, scheduler)
    f(ups)
    ups.close()
  }

  override protected def afterAll(): Unit = {
    super.afterAll()
    scheduler.shutdown()
    timer.stop()
  }
}
Example 3
Source File: Context.scala From Waves with MIT License
package com.wavesplatform.extensions

import akka.actor.ActorSystem
import com.wavesplatform.account.Address
import com.wavesplatform.api.common._
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.events.{BlockchainUpdated, UtxEvent}
import com.wavesplatform.lang.ValidationError
import com.wavesplatform.settings.WavesSettings
import com.wavesplatform.state.Blockchain
import com.wavesplatform.transaction.smart.script.trace.TracedResult
import com.wavesplatform.transaction.{Asset, DiscardedBlocks, Transaction}
import com.wavesplatform.utils.Time
import com.wavesplatform.utx.UtxPool
import com.wavesplatform.wallet.Wallet
import monix.eval.Task
import monix.reactive.Observable

trait Context {
  def settings: WavesSettings
  def blockchain: Blockchain
  def rollbackTo(blockId: ByteStr): Task[Either[ValidationError, DiscardedBlocks]]
  def time: Time
  def wallet: Wallet
  def utx: UtxPool

  def transactionsApi: CommonTransactionsApi
  def blocksApi: CommonBlocksApi
  def accountsApi: CommonAccountsApi
  def assetsApi: CommonAssetsApi

  def broadcastTransaction(tx: Transaction): TracedResult[ValidationError, Boolean]
  def spendableBalanceChanged: Observable[(Address, Asset)]
  def blockchainUpdated: Observable[BlockchainUpdated]
  def utxEvents: Observable[UtxEvent]
  def actorSystem: ActorSystem
}
Example 4
Source File: ScorexLogging.scala From Waves with MIT License
package com.wavesplatform.utils

import monix.eval.Task
import monix.execution.{CancelableFuture, Scheduler}
import monix.reactive.Observable
import org.slf4j.{Logger, LoggerFactory}

case class LoggerFacade(logger: Logger) {
  def trace(message: => String, throwable: Throwable): Unit = {
    if (logger.isTraceEnabled)
      logger.trace(message, throwable)
  }

  def trace(message: => String): Unit = {
    if (logger.isTraceEnabled)
      logger.trace(message)
  }

  def debug(message: => String, arg: Any): Unit = {
    if (logger.isDebugEnabled)
      logger.debug(message, arg)
  }

  def debug(message: => String): Unit = {
    if (logger.isDebugEnabled)
      logger.debug(message)
  }

  def info(message: => String): Unit = {
    if (logger.isInfoEnabled)
      logger.info(message)
  }

  def info(message: => String, arg: Any): Unit = {
    if (logger.isInfoEnabled)
      logger.info(message, arg)
  }

  def info(message: => String, throwable: Throwable): Unit = {
    if (logger.isInfoEnabled)
      logger.info(message, throwable)
  }

  def warn(message: => String): Unit = {
    if (logger.isWarnEnabled)
      logger.warn(message)
  }

  def warn(message: => String, throwable: Throwable): Unit = {
    if (logger.isWarnEnabled)
      logger.warn(message, throwable)
  }

  def error(message: => String): Unit = {
    if (logger.isErrorEnabled)
      logger.error(message)
  }

  def error(message: => String, throwable: Throwable): Unit = {
    if (logger.isErrorEnabled)
      logger.error(message, throwable)
  }
}

trait ScorexLogging {
  protected lazy val log = LoggerFacade(LoggerFactory.getLogger(this.getClass))

  implicit class TaskExt[A](t: Task[A]) {
    def runAsyncLogErr(implicit s: Scheduler): CancelableFuture[A] =
      logErr.runToFuture(s)

    def logErr: Task[A] = {
      t.onErrorHandleWith(ex => {
        log.error(s"Error executing task", ex)
        Task.raiseError[A](ex)
      })
    }
  }

  implicit class ObservableExt[A](o: Observable[A]) {
    def logErr: Observable[A] = {
      o.onErrorHandleWith(ex => {
        log.error(s"Error observing item", ex)
        Observable.raiseError[A](ex)
      })
    }
  }
}
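A hypothetical use of the `ObservableExt.logErr` extension defined above (the demo object and values are illustrative): the failure is logged through the facade and then re-raised, so downstream operators still observe it.

object LogErrDemo extends ScorexLogging {
  import monix.execution.Scheduler.Implicits.global
  import monix.reactive.Observable

  def main(args: Array[String]): Unit = {
    Observable(1, 2, 3)
      .map(n => if (n == 2) throw new IllegalStateException("boom") else n)
      .logErr                 // logs "Error observing item" with the stack trace
      .onErrorHandle(_ => -1) // recover after the error has been logged
      .foreach(println)       // prints 1, then -1
    ()
  }
}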
Example 5
Source File: package.scala From Waves with MIT License
package com.wavesplatform.api

import com.wavesplatform.account.Address
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.database.{DBExt, Keys}
import com.wavesplatform.state.{Diff, Height}
import com.wavesplatform.transaction.CreateAliasTransaction
import com.wavesplatform.transaction.lease.LeaseTransaction
import monix.reactive.Observable
import org.iq80.leveldb.DB

package object common extends BalanceDistribution with AddressTransactions {
  def aliasesOfAddress(db: DB, maybeDiff: => Option[(Height, Diff)], address: Address): Observable[(Height, CreateAliasTransaction)] = {
    val disabledAliases = db.get(Keys.disabledAliases)
    addressTransactions(db, maybeDiff, address, Some(address), Set(CreateAliasTransaction.typeId), None)
      .collect {
        case (height, cat: CreateAliasTransaction, true) if disabledAliases.isEmpty || !disabledAliases(cat.alias) => height -> cat
      }
  }

  def activeLeases(
      db: DB,
      maybeDiff: Option[(Height, Diff)],
      address: Address,
      leaseIsActive: ByteStr => Boolean
  ): Observable[(Height, LeaseTransaction)] =
    addressTransactions(db, maybeDiff, address, None, Set(LeaseTransaction.typeId), None)
      .collect {
        case (h, lt: LeaseTransaction, true) if leaseIsActive(lt.id()) => h -> lt
      }
}
Example 6
Source File: CommonBlocksApi.scala From Waves with MIT License
package com.wavesplatform.api.common

import com.wavesplatform.account.Address
import com.wavesplatform.api.BlockMeta
import com.wavesplatform.block.Block.BlockId
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.state.Blockchain
import com.wavesplatform.transaction.Transaction
import monix.reactive.Observable

trait CommonBlocksApi {
  def blockDelay(blockId: BlockId, blockNum: Int): Option[Long]
  def currentHeight: Int
  def block(blockId: BlockId): Option[(BlockMeta, Seq[(Transaction, Boolean)])]
  def blockAtHeight(height: Int): Option[(BlockMeta, Seq[(Transaction, Boolean)])]
  def blocksRange(fromHeight: Int, toHeight: Int): Observable[(BlockMeta, Seq[(Transaction, Boolean)])]
  def blocksRange(fromHeight: Int, toHeight: Int, generatorAddress: Address): Observable[(BlockMeta, Seq[(Transaction, Boolean)])]
  def meta(id: ByteStr): Option[BlockMeta]
  def metaAtHeight(height: Int): Option[BlockMeta]
  def metaRange(fromHeight: Int, toHeight: Int): Observable[BlockMeta]
}

object CommonBlocksApi {
  def apply(
      blockchain: Blockchain,
      metaAt: Int => Option[BlockMeta],
      blockInfoAt: Int => Option[(BlockMeta, Seq[(Transaction, Boolean)])]
  ): CommonBlocksApi = new CommonBlocksApi {
    private def fixHeight(h: Int) = if (h <= 0) blockchain.height + h else h

    private def heightOf(id: ByteStr): Option[Int] = blockchain.heightOf(id)

    def blocksRange(fromHeight: Int, toHeight: Int): Observable[(BlockMeta, Seq[(Transaction, Boolean)])] =
      Observable.fromIterable((fixHeight(fromHeight) to fixHeight(toHeight)).flatMap(h => blockInfoAt(h)))

    def blocksRange(fromHeight: Int, toHeight: Int, generatorAddress: Address): Observable[(BlockMeta, Seq[(Transaction, Boolean)])] =
      Observable.fromIterable(
        (fixHeight(fromHeight) to fixHeight(toHeight))
          .flatMap(h => metaAt(h))
          .collect { case m if m.header.generator.toAddress == generatorAddress => m.height }
          .flatMap(h => blockInfoAt(h))
      )

    def blockDelay(blockId: BlockId, blockNum: Int): Option[Long] =
      heightOf(blockId)
        .map { maxHeight =>
          val minHeight  = maxHeight - blockNum.max(1)
          val allHeaders = (minHeight to maxHeight).flatMap(h => metaAt(h))
          val totalPeriod = allHeaders
            .sliding(2)
            .map { pair => pair(1).header.timestamp - pair(0).header.timestamp }
            .sum
          totalPeriod / allHeaders.size
        }

    def currentHeight: Int = blockchain.height

    def blockAtHeight(height: Int): Option[(BlockMeta, Seq[(Transaction, Boolean)])] = blockInfoAt(height)

    def metaAtHeight(height: Int): Option[BlockMeta] = metaAt(height)

    def meta(id: ByteStr): Option[BlockMeta] = heightOf(id).flatMap(metaAt)

    def metaRange(fromHeight: Int, toHeight: Int): Observable[BlockMeta] =
      Observable.fromIterable((fixHeight(fromHeight) to fixHeight(toHeight)).flatMap(h => metaAt(h)))

    def block(blockId: BlockId): Option[(BlockMeta, Seq[(Transaction, Boolean)])] = heightOf(blockId).flatMap(h => blockInfoAt(h))
  }
}
Example 7
Source File: CommonAssetsApi.scala From Waves with MIT License
package com.wavesplatform.api.common

import com.wavesplatform.account.Address
import com.wavesplatform.api.common.CommonAssetsApi.AssetInfo
import com.wavesplatform.crypto
import com.wavesplatform.database.{AddressId, KeyTags}
import com.wavesplatform.state.{AssetDescription, Blockchain, Diff, Portfolio}
import com.wavesplatform.transaction.Asset.IssuedAsset
import com.wavesplatform.transaction.assets.IssueTransaction
import monix.reactive.Observable
import org.iq80.leveldb.DB

trait CommonAssetsApi {
  def description(assetId: IssuedAsset): Option[AssetDescription]
  def fullInfo(assetId: IssuedAsset): Option[AssetInfo]
  def wavesDistribution(height: Int, after: Option[Address]): Observable[(Address, Long)]
  def assetDistribution(asset: IssuedAsset, height: Int, after: Option[Address]): Observable[(Address, Long)]
}

object CommonAssetsApi {
  final case class AssetInfo(description: AssetDescription, issueTransaction: Option[IssueTransaction], sponsorBalance: Option[Long])

  def apply(diff: => Diff, db: DB, blockchain: Blockchain): CommonAssetsApi = new CommonAssetsApi {
    def description(assetId: IssuedAsset): Option[AssetDescription] = {
      blockchain.assetDescription(assetId)
    }

    def fullInfo(assetId: IssuedAsset): Option[AssetInfo] =
      for {
        assetInfo <- blockchain.assetDescription(assetId)
        sponsorBalance = if (assetInfo.sponsorship != 0) Some(blockchain.wavesPortfolio(assetInfo.issuer.toAddress).spendableBalance) else None
      } yield AssetInfo(
        assetInfo,
        blockchain.transactionInfo(assetId.id).collect { case (_, it: IssueTransaction, true) => it },
        sponsorBalance
      )

    override def wavesDistribution(height: Int, after: Option[Address]): Observable[(Address, Long)] =
      balanceDistribution(
        db,
        height,
        after,
        if (height == blockchain.height) diff.portfolios else Map.empty[Address, Portfolio],
        KeyTags.WavesBalance.prefixBytes,
        bs => AddressId.fromByteArray(bs.slice(2, bs.length - 4)),
        _.balance
      )

    override def assetDistribution(asset: IssuedAsset, height: Int, after: Option[Address]): Observable[(Address, Long)] =
      balanceDistribution(
        db,
        height,
        after,
        if (height == blockchain.height) diff.portfolios else Map.empty[Address, Portfolio],
        KeyTags.AssetBalance.prefixBytes ++ asset.id.arr,
        bs => AddressId.fromByteArray(bs.slice(2 + crypto.DigestLength, bs.length - 4)),
        _.assets.getOrElse(asset, 0L)
      )
  }
}
Example 8
Source File: BalanceDistribution.scala From Waves with MIT License
package com.wavesplatform.api.common

import com.google.common.collect.AbstractIterator
import com.google.common.primitives.{Ints, Longs}
import com.wavesplatform.account.Address
import com.wavesplatform.database.{AddressId, DBExt, DBResource, Keys}
import com.wavesplatform.state.Portfolio
import com.wavesplatform.state.Portfolio.longSemigroup
import monix.eval.Task
import monix.reactive.Observable
import org.iq80.leveldb.DB

import scala.annotation.tailrec
import scala.jdk.CollectionConverters._

trait BalanceDistribution {
  import BalanceDistribution._

  def balanceDistribution(
      db: DB,
      height: Int,
      after: Option[Address],
      overrides: Map[Address, Portfolio],
      globalPrefix: Array[Byte],
      addressId: Array[Byte] => AddressId,
      balanceOf: Portfolio => Long
  ): Observable[(Address, Long)] =
    db.resourceObservable
      .flatMap { resource =>
        resource.iterator.seek(
          globalPrefix ++ after.flatMap(address => resource.get(Keys.addressId(address))).fold(Array.emptyByteArray)(id => Longs.toByteArray(id.toLong + 1))
        )
        Observable.fromIterator(Task(new BalanceIterator(resource, globalPrefix, addressId, balanceOf, height, overrides).asScala.filter(_._2 > 0)))
      }
}

object BalanceDistribution {
  class BalanceIterator(
      resource: DBResource,
      globalPrefix: Array[Byte],
      addressId: Array[Byte] => AddressId,
      balanceOf: Portfolio => Long,
      height: Int,
      private var pendingPortfolios: Map[Address, Portfolio]
  ) extends AbstractIterator[(Address, Long)] {
    @inline
    private def stillSameAddress(expected: AddressId): Boolean = resource.iterator.hasNext && {
      val maybeNext = resource.iterator.peekNext().getKey
      maybeNext.startsWith(globalPrefix) && addressId(maybeNext) == expected
    }

    @tailrec
    private def findNextBalance(): Option[(Address, Long)] = {
      if (!resource.iterator.hasNext) None
      else {
        val current = resource.iterator.next()
        if (!current.getKey.startsWith(globalPrefix)) None
        else {
          val aid           = addressId(current.getKey)
          val address       = resource.get(Keys.idToAddress(aid))
          var balance       = Longs.fromByteArray(current.getValue)
          var currentHeight = Ints.fromByteArray(current.getKey.takeRight(4))

          while (stillSameAddress(aid)) {
            val next       = resource.iterator.next()
            val nextHeight = Ints.fromByteArray(next.getKey.takeRight(4))
            if (nextHeight <= height) {
              currentHeight = nextHeight
              balance = Longs.fromByteArray(next.getValue)
            }
          }

          // Fold any pending in-memory portfolio into the stored balance
          // before dropping it, so the override is not lost.
          val adjustedBalance = longSemigroup.combine(balance, pendingPortfolios.get(address).fold(0L)(balanceOf))
          pendingPortfolios -= address

          if (currentHeight <= height && adjustedBalance > 0) Some(address -> adjustedBalance)
          else findNextBalance()
        }
      }
    }

    override def computeNext(): (Address, Long) = findNextBalance() match {
      case Some(balance) => balance
      case None =>
        if (pendingPortfolios.nonEmpty) {
          val (address, portfolio) = pendingPortfolios.head
          pendingPortfolios -= address
          address -> balanceOf(portfolio)
        } else {
          endOfData()
        }
    }
  }
}
Example 9
Source File: ChannelClosedHandler.scala From Waves with MIT License
package com.wavesplatform.network

import io.netty.channel.ChannelHandler.Sharable
import io.netty.channel._
import monix.execution.Scheduler
import monix.reactive.Observable
import monix.reactive.subjects.ConcurrentSubject

@Sharable
class ChannelClosedHandler private extends ChannelHandlerAdapter {
  private val closedChannelsSubject = ConcurrentSubject.publish[Channel](Scheduler.global)

  override def handlerAdded(ctx: ChannelHandlerContext): Unit = {
    ctx.channel().closeFuture().addListener((cf: ChannelFuture) => closedChannelsSubject.onNext(cf.channel()))
    super.handlerAdded(ctx)
  }

  def shutdown(): Unit = {
    closedChannelsSubject.onComplete()
  }
}

object ChannelClosedHandler {
  def apply(): (ChannelClosedHandler, Observable[Channel]) = {
    val h = new ChannelClosedHandler()
    (h, h.closedChannelsSubject)
  }
}
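A hypothetical wiring sketch (names are illustrative): `ChannelClosedHandler()` hands back both the Netty handler and the Observable side, so the handler goes into the pipeline while other components subscribe to the stream of closed channels.

import io.netty.channel.Channel
import monix.execution.Scheduler.Implicits.global

val (closedHandler, closedChannels) = ChannelClosedHandler()
closedChannels.foreach((ch: Channel) => println(s"channel closed: ${ch.id()}"))
// ... install `closedHandler` in the Netty pipeline via bootstrap.handler(...) ...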
Example 10
Source File: ObservableMain.scala From advanced-scala-code with Apache License 2.0
object ObservableMain {

  def main(args: Array[String]): Unit = {
    import monix.reactive.Observable

    val linesO = Observable.defer {
      import java.io.{BufferedReader, FileReader}
      val br = new BufferedReader(new FileReader("license.txt"))
      Observable.fromLinesReaderUnsafe(br)
    }

    printStatistics(linesO)
    printStatistics(linesO)

    def printStatistics(linesO: Observable[String]): Unit = {
      val wordsO = linesO.flatMap { line =>
        val arr = line.split("\\W").map(_.toLowerCase).map(_.trim).filter(!_.isEmpty)
        Observable.fromIterable(arr.toIterable)
      }

      val rawResultO = wordsO.foldLeft(Map.empty[String, Int]) { (acc, next) =>
        acc.get(next) match {
          case None      => acc + (next -> 1)
          case Some(num) => acc + (next -> (1 + num))
        }
      }

      import monix.reactive.Consumer
      val finalResultT = rawResultO.map { map =>
        map.toList.sortWith(_._2 > _._2).take(5).map(_._1)
      }.consumeWith(Consumer.head)

      import monix.execution.Scheduler.Implicits.global
      val resultCF = finalResultT.runToFuture

      import scala.concurrent.Await
      import scala.concurrent.duration._
      val result = Await.result(resultCF, 30.seconds)
      println(result) // List(the, or, of, and, to)
    }
  }

  import cats.kernel.Monoid
  import monix.reactive.Observable

  def alternativeMonoid(wordsO: Observable[String]): Unit = {
    import cats.instances.int.catsKernelStdGroupForInt
    import cats.instances.map.catsKernelStdMonoidForMap

    val listT = wordsO.map(word => Map(word -> 1)).toListL
    val totals = listT.map { data =>
      Monoid[Map[String, Int]].combineAll(data)
    } // totals: Task[Map[String, Int]]

    val finalResultT = totals.map { data =>
      data.toList.sortWith(_._2 > _._2).take(5).map(_._1)
    }

    import monix.execution.Scheduler.Implicits.global
    import scala.concurrent.Await
    import scala.concurrent.duration._
    val result = Await.result(finalResultT.runToFuture, 30.seconds)
    println(result)
  }
}
Example 11
Source File: AssetsApiGrpcImpl.scala From Waves with MIT License
package com.wavesplatform.api.grpc

import com.wavesplatform.account.Address
import com.wavesplatform.api.common.{CommonAccountsApi, CommonAssetsApi}
import com.wavesplatform.api.http.ApiError.TransactionDoesNotExist
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.protobuf.transaction.PBTransactions
import com.wavesplatform.state.{AssetDescription, AssetScriptInfo}
import com.wavesplatform.transaction.Asset.IssuedAsset
import io.grpc.stub.StreamObserver
import monix.execution.Scheduler
import monix.reactive.Observable

import scala.concurrent.Future

class AssetsApiGrpcImpl(assetsApi: CommonAssetsApi, accountsApi: CommonAccountsApi)(implicit sc: Scheduler) extends AssetsApiGrpc.AssetsApi {

  override def getInfo(request: AssetRequest): Future[AssetInfoResponse] = Future {
    val result = for (info <- assetsApi.fullInfo(IssuedAsset(request.assetId))) yield {
      val result = assetInfoResponse(info.description).withSponsorBalance(info.sponsorBalance.getOrElse(0))
      info.issueTransaction.map(_.toPB).fold(result)(result.withIssueTransaction)
    }
    result.explicitGetErr(TransactionDoesNotExist)
  }

  override def getNFTList(request: NFTRequest, responseObserver: StreamObserver[NFTResponse]): Unit =
    responseObserver.interceptErrors {
      val addressOption: Option[Address]    = if (request.address.isEmpty) None else Some(request.address.toAddress)
      val afterAssetId: Option[IssuedAsset] = if (request.afterAssetId.isEmpty) None else Some(IssuedAsset(ByteStr(request.afterAssetId.toByteArray)))

      val responseStream = addressOption match {
        case Some(address) =>
          accountsApi
            .nftList(address, afterAssetId)
            .map { case (a, d) => NFTResponse(a.id.toPBByteString, Some(assetInfoResponse(d))) }
            .take(request.limit)
        case _ => Observable.empty
      }

      responseObserver.completeWith(responseStream)
    }

  private def assetInfoResponse(d: AssetDescription): AssetInfoResponse =
    AssetInfoResponse(
      d.issuer,
      d.name.toStringUtf8,
      d.description.toStringUtf8,
      d.decimals,
      d.reissuable,
      d.totalVolume.longValue,
      d.script.map {
        case AssetScriptInfo(script, complexity) =>
          ScriptData(
            PBTransactions.toPBScript(Some(script)),
            script.expr.toString,
            complexity
          )
      },
      d.sponsorship
    )
}
Example 12
Source File: AccountsApiGrpcImpl.scala From Waves with MIT License
package com.wavesplatform.api.grpc

import com.google.protobuf.ByteString
import com.google.protobuf.wrappers.{BytesValue, StringValue}
import com.wavesplatform.account.{Address, Alias}
import com.wavesplatform.api.common.CommonAccountsApi
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.protobuf.Amount
import com.wavesplatform.protobuf.transaction.PBTransactions
import com.wavesplatform.transaction.Asset
import io.grpc.stub.StreamObserver
import monix.execution.Scheduler
import monix.reactive.Observable

import scala.concurrent.Future

class AccountsApiGrpcImpl(commonApi: CommonAccountsApi)(implicit sc: Scheduler) extends AccountsApiGrpc.AccountsApi {

  private def loadWavesBalance(address: Address): BalanceResponse = {
    val details = commonApi.balanceDetails(address)
    BalanceResponse().withWaves(
      BalanceResponse.WavesBalances(
        details.regular,
        details.generating,
        details.available,
        details.effective,
        details.leaseIn,
        details.leaseOut
      )
    )
  }

  private def assetBalanceResponse(v: (Asset.IssuedAsset, Long)): BalanceResponse =
    BalanceResponse().withAsset(Amount(v._1.id.toPBByteString, v._2))

  override def getBalances(request: BalancesRequest, responseObserver: StreamObserver[BalanceResponse]): Unit =
    responseObserver.interceptErrors {
      val addressOption: Option[Address] = if (request.address.isEmpty) None else Some(request.address.toAddress)
      val assetIds: Seq[Asset]           = request.assets.map(id => if (id.isEmpty) Asset.Waves else Asset.IssuedAsset(ByteStr(id.toByteArray)))

      val responseStream = (addressOption, assetIds) match {
        case (Some(address), Seq()) =>
          Observable(loadWavesBalance(address)) ++ commonApi.portfolio(address).map(assetBalanceResponse)
        case (Some(address), nonEmptyList) =>
          Observable
            .fromIterable(nonEmptyList)
            .map {
              case Asset.Waves           => loadWavesBalance(address)
              case ia: Asset.IssuedAsset => assetBalanceResponse(ia -> commonApi.assetBalance(address, ia))
            }
        case (None, Seq(_)) => // todo: asset distribution
          Observable.empty
        case (None, _) => // multiple distributions are not supported
          Observable.empty
      }

      responseObserver.completeWith(responseStream)
    }

  override def getScript(request: AccountRequest): Future[ScriptData] = Future {
    commonApi.script(request.address.toAddress) match {
      case Some(desc) => ScriptData(PBTransactions.toPBScript(Some(desc.script)), desc.script.expr.toString, desc.verifierComplexity)
      case None       => ScriptData()
    }
  }

  override def getActiveLeases(request: AccountRequest, responseObserver: StreamObserver[TransactionResponse]): Unit =
    responseObserver.interceptErrors {
      val transactions = commonApi.activeLeases(request.address.toAddress)
      val result = transactions.map {
        case (height, transaction) => TransactionResponse(transaction.id(), height, Some(transaction.toPB))
      }
      responseObserver.completeWith(result)
    }

  override def getDataEntries(request: DataRequest, responseObserver: StreamObserver[DataEntryResponse]): Unit =
    responseObserver.interceptErrors {
      val stream = if (request.key.nonEmpty) {
        Observable.fromIterable(commonApi.data(request.address.toAddress, request.key))
      } else {
        commonApi.dataStream(request.address.toAddress, Option(request.key).filter(_.nonEmpty))
      }

      responseObserver.completeWith(stream.map(de => DataEntryResponse(request.address, Some(PBTransactions.toPBDataEntry(de)))))
    }

  override def resolveAlias(request: StringValue): Future[BytesValue] = Future {
    val result = for {
      alias   <- Alias.create(request.value)
      address <- commonApi.resolveAlias(alias)
    } yield BytesValue(ByteString.copyFrom(address.bytes))

    result.explicitGetErr()
  }
}
Example 13
Source File: GrpcMonix.scala From grpcmonix with MIT License
package grpcmonix

import com.google.common.util.concurrent.ListenableFuture
import io.grpc.stub.StreamObserver
import monix.eval.{Callback, Task}
import monix.execution.Ack.{Continue, Stop}
import monix.execution.{Ack, Scheduler}
import monix.reactive.Observable
import monix.reactive.observables.ObservableLike.{Operator, Transformer}
import monix.reactive.observers.Subscriber
import monix.reactive.subjects.PublishSubject
import org.reactivestreams.{Subscriber => SubscriberR}
import scalapb.grpc.Grpc

import scala.concurrent.Future

object GrpcMonix {

  type GrpcOperator[I, O] = StreamObserver[O] => StreamObserver[I]

  def guavaFutureToMonixTask[T](future: ListenableFuture[T]): Task[T] =
    Task.deferFuture {
      Grpc.guavaFuture2ScalaFuture(future)
    }

  def grpcOperatorToMonixOperator[I, O](grpcOperator: GrpcOperator[I, O]): Operator[I, O] = {
    outputSubscriber: Subscriber[O] =>
      val outputObserver: StreamObserver[O] = monixSubscriberToGrpcObserver(outputSubscriber)
      val inputObserver: StreamObserver[I]  = grpcOperator(outputObserver)
      grpcObserverToMonixSubscriber(inputObserver, outputSubscriber.scheduler)
  }

  def monixSubscriberToGrpcObserver[T](subscriber: Subscriber[T]): StreamObserver[T] =
    new StreamObserver[T] {
      override def onError(t: Throwable): Unit = subscriber.onError(t)
      override def onCompleted(): Unit         = subscriber.onComplete()
      override def onNext(value: T): Unit      = subscriber.onNext(value)
    }

  def reactiveSubscriberToGrpcObserver[T](subscriber: SubscriberR[_ >: T]): StreamObserver[T] =
    new StreamObserver[T] {
      override def onError(t: Throwable): Unit = subscriber.onError(t)
      override def onCompleted(): Unit         = subscriber.onComplete()
      override def onNext(value: T): Unit      = subscriber.onNext(value)
    }

  def grpcObserverToMonixSubscriber[T](observer: StreamObserver[T], s: Scheduler): Subscriber[T] =
    new Subscriber[T] {
      override implicit def scheduler: Scheduler = s
      override def onError(t: Throwable): Unit   = observer.onError(t)
      override def onComplete(): Unit            = observer.onCompleted()
      override def onNext(value: T): Future[Ack] =
        try {
          observer.onNext(value)
          Continue
        } catch {
          case t: Throwable =>
            observer.onError(t)
            Stop
        }
    }

  def grpcObserverToMonixCallback[T](observer: StreamObserver[T]): Callback[T] =
    new Callback[T] {
      override def onError(t: Throwable): Unit = observer.onError(t)
      override def onSuccess(value: T): Unit = {
        observer.onNext(value)
        observer.onCompleted()
      }
    }

  def liftByGrpcOperator[I, O](observable: Observable[I], operator: GrpcOperator[I, O]): Observable[O] =
    observable.liftByOperator(
      grpcOperatorToMonixOperator(operator)
    )

  def unliftByTransformer[I, O](transformer: Transformer[I, O], subscriber: Subscriber[O]): Subscriber[I] =
    new Subscriber[I] {
      private[this] val subject = PublishSubject[I]()
      subject.transform(transformer).subscribe(subscriber)

      override implicit def scheduler: Scheduler = subscriber.scheduler
      override def onError(t: Throwable): Unit   = subject.onError(t)
      override def onComplete(): Unit            = subject.onComplete()
      override def onNext(value: I): Future[Ack] = subject.onNext(value)
    }
}
Example 14
Source File: MonixStreams.scala From neotypes with MIT License
package neotypes.monix.stream

import monix.eval.Task
import monix.reactive.Observable

trait MonixStreams {
  implicit final val monixStream: neotypes.Stream.Aux[Observable, Task] =
    new neotypes.Stream[Observable] {
      override final type F[T] = Task[T]

      override final def init[T](value: () => Task[Option[T]]): Observable[T] =
        Observable
          .repeatEvalF(Task.suspend(value()))
          .takeWhile(option => option.isDefined)
          .collect { case Some(t) => t }

      override final def onComplete[T](s: Observable[T])(f: => Task[Unit]): Observable[T] =
        s.guarantee(f)

      override final def fToS[T](f: Task[Observable[T]]): Observable[T] =
        Observable.fromTask(f).flatten
    }
}
Example 15
Source File: RouteGuideMonixService.scala From grpcexample with MIT License
package io.grpc.routeguide

import java.util.concurrent.TimeUnit.NANOSECONDS
import java.util.logging.Logger

import concurrency.AtomicRef
import monix.eval.Task
import monix.reactive.Observable

class RouteGuideMonixService(features: Seq[Feature]) extends RouteGuideGrpcMonix.RouteGuide {

  val logger: Logger = Logger.getLogger(classOf[RouteGuideMonixService].getName)

  private val routeNotes: AtomicRef[Map[Point, Seq[RouteNote]]] = new AtomicRef(Map.empty)

  override def routeChat(notes: Observable[RouteNote]): Observable[RouteNote] =
    notes.flatMap { note =>
      addNote(note)
      Observable.fromIterable(getNotes(note.getLocation))
    }

  private def findFeature(point: Point): Feature = {
    features.find { feature =>
      feature.getLocation.latitude == point.latitude && feature.getLocation.longitude == point.longitude
    } getOrElse new Feature(location = Some(point))
  }

  private def getNotes(point: Point): Seq[RouteNote] = {
    routeNotes.get.getOrElse(point, Seq.empty)
  }

  private def addNote(note: RouteNote): Unit = {
    routeNotes.updateAndGet { notes =>
      val existingNotes = notes.getOrElse(note.getLocation, Seq.empty)
      val updatedNotes  = existingNotes :+ note
      notes + (note.getLocation -> updatedNotes)
    }
  }
}
Example 16
Source File: MonixStreamingTest.scala From sttp with Apache License 2.0
package sttp.client.impl.monix

import java.nio.ByteBuffer

import monix.eval.Task
import monix.reactive.Observable
import sttp.client.testing.ConvertToFuture
import sttp.client.testing.streaming.StreamingTest

abstract class MonixStreamingTest extends StreamingTest[Task, Observable[ByteBuffer]] {
  override implicit val convertToFuture: ConvertToFuture[Task] = convertMonixTaskToFuture

  override def bodyProducer(chunks: Iterable[Array[Byte]]): Observable[ByteBuffer] =
    Observable
      .fromIterable(chunks)
      .map(ByteBuffer.wrap)

  override def bodyConsumer(stream: Observable[ByteBuffer]): Task[String] =
    stream
      .flatMap(v => Observable.fromIterable(v.array()))
      .toListL
      .map(bs => new String(bs.toArray, "utf8"))
}
Example 17
Source File: HttpClientHighLevelMonixWebsocketTest.scala From sttp with Apache License 2.0
package sttp.client.httpclient.monix

import java.nio.ByteBuffer

import monix.eval.Task
import monix.execution.Scheduler.Implicits.global
import monix.reactive.Observable
import sttp.client._
import sttp.client.httpclient.WebSocketHandler
import sttp.client.impl.monix.{TaskMonadAsyncError, convertMonixTaskToFuture}
import sttp.client.monad.MonadError
import sttp.client.testing.ConvertToFuture
import sttp.client.testing.websocket.HighLevelWebsocketTest
import sttp.client.ws.WebSocket
import sttp.client.testing.HttpTest.wsEndpoint

import scala.concurrent.duration._

class HttpClientHighLevelMonixWebsocketTest extends HighLevelWebsocketTest[Task, WebSocketHandler] {
  implicit val backend: SttpBackend[Task, Observable[ByteBuffer], WebSocketHandler] =
    HttpClientMonixBackend().runSyncUnsafe()
  implicit val convertToFuture: ConvertToFuture[Task] = convertMonixTaskToFuture
  implicit val monad: MonadError[Task]                = TaskMonadAsyncError

  override def createHandler: Option[Int] => Task[WebSocketHandler[WebSocket[Task]]] = _ => MonixWebSocketHandler()

  it should "handle backpressure correctly" in {
    basicRequest
      .get(uri"$wsEndpoint/ws/echo")
      .openWebsocketF(createHandler(None))
      .flatMap { response =>
        val ws = response.result
        send(ws, 1000) >> eventually(10.millis, 500) { ws.isOpen.map(_ shouldBe true) }
      }
      .toFuture()
  }

  override def eventually[T](interval: FiniteDuration, attempts: Int)(f: => Task[T]): Task[T] = {
    (Task.sleep(interval) >> f).onErrorRestart(attempts.toLong)
  }
}
Example 18
Source File: HttpClientMonixBackend.scala From sttp with Apache License 2.0
package sttp.client.httpclient.monix

import java.io.InputStream
import java.net.http.HttpRequest.BodyPublishers
import java.net.http.{HttpClient, HttpRequest}
import java.nio.ByteBuffer

import cats.effect.Resource
import monix.eval.Task
import monix.execution.Scheduler
import monix.reactive.Observable
import org.reactivestreams.FlowAdapters
import sttp.client.httpclient.HttpClientBackend.EncodingHandler
import sttp.client.httpclient.{HttpClientAsyncBackend, HttpClientBackend, WebSocketHandler}
import sttp.client.impl.monix.TaskMonadAsyncError
import sttp.client.testing.SttpBackendStub
import sttp.client.{FollowRedirectsBackend, SttpBackend, SttpBackendOptions}

import scala.util.{Success, Try}

class HttpClientMonixBackend private (
    client: HttpClient,
    closeClient: Boolean,
    customizeRequest: HttpRequest => HttpRequest,
    customEncodingHandler: EncodingHandler
)(implicit s: Scheduler)
    extends HttpClientAsyncBackend[Task, Observable[ByteBuffer]](
      client,
      TaskMonadAsyncError,
      closeClient,
      customizeRequest,
      customEncodingHandler
    ) {

  override def streamToRequestBody(stream: Observable[ByteBuffer]): Task[HttpRequest.BodyPublisher] = {
    monad.eval(BodyPublishers.fromPublisher(FlowAdapters.toFlowPublisher(stream.toReactivePublisher)))
  }

  override def responseBodyToStream(responseBody: InputStream): Try[Observable[ByteBuffer]] = {
    Success(
      Observable
        .fromInputStream(Task.now(responseBody))
        .map(ByteBuffer.wrap)
        .guaranteeCase(_ => Task(responseBody.close()))
    )
  }
}

object HttpClientMonixBackend {
  private def apply(
      client: HttpClient,
      closeClient: Boolean,
      customizeRequest: HttpRequest => HttpRequest,
      customEncodingHandler: EncodingHandler
  )(implicit
      s: Scheduler
  ): SttpBackend[Task, Observable[ByteBuffer], WebSocketHandler] =
    new FollowRedirectsBackend(
      new HttpClientMonixBackend(client, closeClient, customizeRequest, customEncodingHandler)(s)
    )

  def apply(
      options: SttpBackendOptions = SttpBackendOptions.Default,
      customizeRequest: HttpRequest => HttpRequest = identity,
      customEncodingHandler: EncodingHandler = PartialFunction.empty
  )(implicit
      s: Scheduler = Scheduler.global
  ): Task[SttpBackend[Task, Observable[ByteBuffer], WebSocketHandler]] =
    Task.eval(
      HttpClientMonixBackend(
        HttpClientBackend.defaultClient(options),
        closeClient = true,
        customizeRequest,
        customEncodingHandler
      )(s)
    )

  def resource(
      options: SttpBackendOptions = SttpBackendOptions.Default,
      customizeRequest: HttpRequest => HttpRequest = identity,
      customEncodingHandler: EncodingHandler = PartialFunction.empty
  )(implicit
      s: Scheduler = Scheduler.global
  ): Resource[Task, SttpBackend[Task, Observable[ByteBuffer], WebSocketHandler]] =
    Resource.make(apply(options, customizeRequest, customEncodingHandler))(_.close())

  def usingClient(
      client: HttpClient,
      customizeRequest: HttpRequest => HttpRequest = identity,
      customEncodingHandler: EncodingHandler = PartialFunction.empty
  )(implicit s: Scheduler = Scheduler.global): SttpBackend[Task, Observable[ByteBuffer], WebSocketHandler] =
    HttpClientMonixBackend(client, closeClient = false, customizeRequest, customEncodingHandler)(s)

  def stub: SttpBackendStub[Task, Observable[ByteBuffer], WebSocketHandler] =
    SttpBackendStub(TaskMonadAsyncError)
}
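A minimal, hypothetical round trip with the backend above, using the sttp v2 request API that the other examples in this section assume (the URL is a placeholder):

import monix.eval.Task
import monix.execution.Scheduler.Implicits.global
import sttp.client._

val program: Task[Unit] =
  HttpClientMonixBackend().flatMap { implicit backend =>
    basicRequest
      .get(uri"https://example.org")
      .send()                     // Task[Response[Either[String, String]]]
      .flatMap(r => Task(println(r.code)))
      .guarantee(backend.close()) // always release the backend
  }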
Example 19
Source File: ExampleMonixInterop.scala From caliban with Apache License 2.0
package caliban.interop.monix

import caliban.GraphQL.graphQL
import caliban.ResponseValue.{ ObjectValue, StreamValue }
import caliban.RootResolver
import cats.effect.ExitCode
import monix.eval.{ Task, TaskApp }
import monix.reactive.Observable
import zio.{ Runtime, ZEnv }
import zio.interop.reactivestreams._

object ExampleMonixInterop extends TaskApp {

  import caliban.interop.monix.implicits._

  implicit val runtime: Runtime[ZEnv] = Runtime.default

  case class Number(value: Int)
  case class Queries(numbers: List[Number], randomNumber: Task[Number])
  case class Subscriptions(numbers: Observable[Int])

  val numbers       = List(1, 2, 3, 4).map(Number)
  val randomNumber  = Task.eval(scala.util.Random.nextInt()).map(Number)
  val queries       = Queries(numbers, randomNumber)
  val subscriptions = Subscriptions(Observable.fromIterable(List(1, 2, 3)))
  val api           = graphQL(RootResolver(Some(queries), Option.empty[Unit], Some(subscriptions)))

  val query = """
  {
    numbers {
      value
    }
    randomNumber {
      value
    }
  }"""

  val subscription = """
  subscription {
    numbers
  }"""

  override def run(args: List[String]): Task[ExitCode] =
    for {
      interpreter <- api.interpreterAsync
      _           <- interpreter.checkAsync(query)
      result      <- interpreter.executeAsync(query)
      _           <- Task.eval(println(result.data))
      _           <- interpreter.checkAsync(subscription)
      result      <- interpreter.executeAsync(subscription)
      _ <- result.data match {
            case ObjectValue(("numbers", StreamValue(stream)) :: Nil) =>
              // get back an observable
              val obs = Observable.fromReactivePublisher(runtime.unsafeRun(stream.toPublisher))
              obs.foreachL(println)
            case _ => Task.eval(println(s"Wrong result: ${result.data}"))
          }
    } yield ExitCode.Success
}
Example 20
Source File: package.scala From caliban with Apache License 2.0
package caliban.interop.monix

import caliban.schema.{ Schema, SubscriptionSchema }
import caliban.{ CalibanError, GraphQL, GraphQLInterpreter, GraphQLResponse, InputValue }
import cats.effect.ConcurrentEffect
import monix.eval.Task
import monix.reactive.Observable
import zio.Runtime

package object implicits {

  implicit class MonixGraphQLInterpreter[R, E](underlying: GraphQLInterpreter[R, E]) {
    def executeAsync(
      query: String,
      operationName: Option[String] = None,
      variables: Map[String, InputValue] = Map(),
      extensions: Map[String, InputValue] = Map(),
      skipValidation: Boolean = false,
      enableIntrospection: Boolean = true
    )(implicit runtime: Runtime[R]): Task[GraphQLResponse[E]] =
      MonixInterop.executeAsync(underlying)(
        query,
        operationName,
        variables,
        extensions,
        skipValidation = skipValidation,
        enableIntrospection = enableIntrospection
      )

    def checkAsync(query: String)(implicit runtime: Runtime[R]): Task[Unit] =
      MonixInterop.checkAsync(underlying)(query)
  }

  implicit class MonixGraphQL[R, E](underlying: GraphQL[R]) {
    def interpreterAsync(implicit runtime: Runtime[R]): Task[GraphQLInterpreter[R, CalibanError]] =
      MonixInterop.interpreterAsync(underlying)
  }

  implicit def effectSchema[R, A](implicit ev: Schema[R, A], ev2: ConcurrentEffect[Task]): Schema[R, Task[A]] =
    MonixInterop.taskSchema

  implicit def observableSchema[R, A](
    implicit ev: Schema[R, A],
    ev2: ConcurrentEffect[Task]
  ): Schema[R, Observable[A]] =
    MonixInterop.observableSchema(16) // Size of the internal buffer. Use a power of 2 for best performance.

  implicit def observableSubscriptionSchema[A]: SubscriptionSchema[Observable[A]] =
    new SubscriptionSchema[Observable[A]] {}
}
Example 21
Source File: MonixInterop.scala From caliban with Apache License 2.0
package caliban.interop.monix

import caliban.introspection.adt.__Type
import caliban.schema.Step.{ QueryStep, StreamStep }
import caliban.schema.{ Schema, Step, Types }
import caliban.{ CalibanError, GraphQL, GraphQLInterpreter, GraphQLResponse, InputValue }
import cats.effect.ConcurrentEffect
import monix.eval.{ Task => MonixTask }
import monix.reactive.Observable
import zio._
import zio.interop.catz._
import zio.interop.reactivestreams._
import zio.stream.ZStream
import zio.query.ZQuery

object MonixInterop {

  def executeAsync[R, E](graphQL: GraphQLInterpreter[R, E])(
    query: String,
    operationName: Option[String] = None,
    variables: Map[String, InputValue] = Map(),
    extensions: Map[String, InputValue] = Map(),
    skipValidation: Boolean = false,
    enableIntrospection: Boolean = true
  )(implicit runtime: Runtime[R]): MonixTask[GraphQLResponse[E]] =
    MonixTask.async { cb =>
      val execution = graphQL.execute(
        query,
        operationName,
        variables,
        extensions,
        skipValidation = skipValidation,
        enableIntrospection = enableIntrospection
      )
      runtime.unsafeRunAsync(execution)(exit => cb(exit.toEither))
    }

  def checkAsync[R](graphQL: GraphQLInterpreter[R, Any])(query: String)(implicit runtime: Runtime[R]): MonixTask[Unit] =
    MonixTask.async(cb => runtime.unsafeRunAsync(graphQL.check(query))(exit => cb(exit.toEither)))

  def interpreterAsync[R](
    graphQL: GraphQL[R]
  )(implicit runtime: Runtime[R]): MonixTask[GraphQLInterpreter[R, CalibanError]] =
    MonixTask.async(cb => runtime.unsafeRunAsync(graphQL.interpreter)(exit => cb(exit.toEither)))

  def taskSchema[R, A](implicit ev: Schema[R, A], ev2: ConcurrentEffect[MonixTask]): Schema[R, MonixTask[A]] =
    new Schema[R, MonixTask[A]] {
      override def toType(isInput: Boolean, isSubscription: Boolean): __Type = ev.toType(isInput, isSubscription)
      override def optional: Boolean                                         = ev.optional
      override def resolve(value: MonixTask[A]): Step[R] =
        QueryStep(ZQuery.fromEffect(value.to[Task].map(ev.resolve)))
    }

  def observableSchema[R, A](
    queueSize: Int
  )(implicit ev: Schema[R, A], ev2: ConcurrentEffect[MonixTask]): Schema[R, Observable[A]] =
    new Schema[R, Observable[A]] {
      override def optional: Boolean = true

      override def toType(isInput: Boolean, isSubscription: Boolean): __Type = {
        val t = ev.toType(isInput, isSubscription)
        if (isSubscription) t else Types.makeList(if (ev.optional) t else Types.makeNonNull(t))
      }

      override def resolve(value: Observable[A]): Step[R] =
        StreamStep(
          ZStream
            .fromEffect(
              MonixTask
                .deferAction(implicit sc =>
                  MonixTask.eval(value.toReactivePublisher.toStream(queueSize).map(ev.resolve))
                )
                .to[Task]
            )
            .flatten
        )
    }
}
Example 22
Source File: MonixOps.scala From phobos with Apache License 2.0
package ru.tinkoff.phobos.ops

import javax.xml.stream.XMLStreamConstants

import monix.eval.Task
import monix.reactive.Observable
import ru.tinkoff.phobos.decoding.{Cursor, ElementDecoder, XmlDecoder, XmlStreamReader}

private[phobos] trait MonixOps {
  implicit def DecoderOps[A](xmlDecoder: XmlDecoder[A]): DecoderOps[A] = new DecoderOps[A](xmlDecoder)
}

class DecoderOps[A](private val xmlDecoder: XmlDecoder[A]) extends AnyVal {
  def decodeFromObservable(observable: Observable[Array[Byte]], charset: String = "UTF-8"): Task[A] = {
    val sr: XmlStreamReader = XmlDecoder.createStreamReader(charset)
    val cursor              = new Cursor(sr)

    observable
      .foldLeftL[ElementDecoder[A]](xmlDecoder.elementdecoder) { (decoder, bytes) =>
        sr.getInputFeeder.feedInput(bytes, 0, bytes.length)
        do {
          cursor.next()
        } while (cursor.getEventType == XMLStreamConstants.DTD || cursor.getEventType == XMLStreamConstants.START_DOCUMENT)

        if (decoder.result(cursor.history).isRight) {
          decoder
        } else {
          decoder.decodeAsElement(cursor, xmlDecoder.localname, xmlDecoder.namespaceuri)
        }
      }
      .flatMap { a =>
        sr.getInputFeeder.endOfInput()
        Task.fromEither(a.result(cursor.history))
      }
  }
}
Example 23
Source File: MonixTest.scala From phobos with Apache License 2.0
package ru.tinkoff.phobos.test

import java.util.concurrent.Executors

import monix.execution.Scheduler
import monix.reactive.Observable
import org.scalatest.AsyncWordSpec
import ru.tinkoff.phobos.annotations.{ElementCodec, XmlCodec}
import ru.tinkoff.phobos.decoding.XmlDecoder
import ru.tinkoff.phobos.syntax.text
import ru.tinkoff.phobos.monix._

class MonixTest extends AsyncWordSpec {
  implicit val scheduler: Scheduler = Scheduler(Executors.newScheduledThreadPool(4))

  "Monix decoder" should {
    "decode case classes correctly" in {
      @ElementCodec
      case class Bar(@text txt: Int)
      @XmlCodec("foo")
      case class Foo(qux: Int, maybeBar: Option[Bar], bars: List[Bar])

      val xml = """
        |<foo>
        |  <qux>1234</qux>
        |  <bars>2</bars>
        |  <maybeBar>1</maybeBar>
        |  <bars>3</bars>
        |</foo>
        |""".stripMargin

      val foo        = Foo(1234, Some(Bar(1)), List(Bar(2), Bar(3)))
      val observable = Observable.fromIterable(xml.toIterable.map(x => Array(x.toByte)))

      XmlDecoder[Foo]
        .decodeFromObservable(observable)
        .map(result => assert(result == foo))
        .runToFuture
    }
  }
}
Example 24
Source File: MergeByCommitCallbackTest.scala From monix-kafka with Apache License 2.0
package monix.kafka

import monix.eval.Task
import monix.kafka.config.AutoOffsetReset
import monix.reactive.Observable
import org.apache.kafka.clients.producer.ProducerRecord
import org.scalatest.{FunSuite, Matchers}

import scala.concurrent.duration._
import scala.concurrent.Await
import monix.execution.Scheduler.Implicits.global
import org.apache.kafka.clients.consumer.OffsetCommitCallback
import org.apache.kafka.common.TopicPartition
import org.scalacheck.Gen
import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks

class MergeByCommitCallbackTest extends FunSuite with KafkaTestKit with ScalaCheckDrivenPropertyChecks with Matchers {

  val commitCallbacks: List[Commit] = List.fill(4)(new Commit {
    override def commitBatchSync(batch: Map[TopicPartition, Long]): Task[Unit] = Task.unit

    override def commitBatchAsync(batch: Map[TopicPartition, Long], callback: OffsetCommitCallback): Task[Unit] =
      Task.unit
  })

  val committableOffsetsGen: Gen[CommittableOffset] = for {
    partition <- Gen.posNum[Int]
    offset    <- Gen.posNum[Long]
    commit    <- Gen.oneOf(commitCallbacks)
  } yield CommittableOffset(new TopicPartition("topic", partition), offset, commit)

  test("merge by commit callback works") {
    forAll(Gen.nonEmptyListOf(committableOffsetsGen)) { offsets =>
      val partitions = offsets.map(_.topicPartition)
      val received: List[CommittableOffsetBatch] = CommittableOffsetBatch.mergeByCommitCallback(offsets)

      received.foreach { batch => partitions should contain allElementsOf batch.offsets.keys }

      received.size should be <= 4
    }
  }

  test("merge by commit callback for multiple consumers") {
    withRunningKafka {
      val count     = 10000
      val topicName = "monix-kafka-merge-by-commit"

      val producerCfg = KafkaProducerConfig.default.copy(
        bootstrapServers = List("127.0.0.1:6001"),
        clientId = "monix-kafka-1-0-producer-test"
      )

      val producer = KafkaProducerSink[String, String](producerCfg, io)

      val pushT = Observable
        .range(0, count)
        .map(msg => new ProducerRecord(topicName, "obs", msg.toString))
        .bufferIntrospective(1024)
        .consumeWith(producer)

      val listT = Observable
        .range(0, 4)
        .mergeMap(i => createConsumer(i.toInt, topicName).take(500))
        .bufferTumbling(2000)
        .map(CommittableOffsetBatch.mergeByCommitCallback)
        .map { offsetBatches => assert(offsetBatches.length == 4) }
        .completedL

      Await.result(Task.parZip2(listT, pushT).runToFuture, 60.seconds)
    }
  }

  private def createConsumer(i: Int, topicName: String): Observable[CommittableOffset] = {
    val cfg = KafkaConsumerConfig.default.copy(
      bootstrapServers = List("127.0.0.1:6001"),
      groupId = s"kafka-tests-$i",
      autoOffsetReset = AutoOffsetReset.Earliest
    )

    KafkaConsumerObservable
      .manualCommit[String, String](cfg, List(topicName))
      .executeOn(io)
      .map(_.committableOffset)
  }
}
Example 25
Source File: MonixKafkaTopicRegexTest.scala From monix-kafka with Apache License 2.0
package monix.kafka

import monix.eval.Task
import monix.execution.Scheduler.Implicits.global
import monix.kafka.config.AutoOffsetReset
import monix.reactive.Observable
import org.apache.kafka.clients.producer.ProducerRecord
import org.scalatest.FunSuite

import scala.collection.JavaConverters._
import scala.concurrent.Await
import scala.concurrent.duration._

class MonixKafkaTopicRegexTest extends FunSuite with KafkaTestKit {
  val topicsRegex        = "monix-kafka-tests-.*".r
  val topicMatchingRegex = "monix-kafka-tests-anything"

  val producerCfg = KafkaProducerConfig.default.copy(
    bootstrapServers = List("127.0.0.1:6001"),
    clientId = "monix-kafka-1-0-producer-test"
  )

  val consumerCfg = KafkaConsumerConfig.default.copy(
    bootstrapServers = List("127.0.0.1:6001"),
    groupId = "kafka-tests",
    clientId = "monix-kafka-1-0-consumer-test",
    autoOffsetReset = AutoOffsetReset.Earliest
  )

  test("publish one message when subscribed to topics regex") {
    withRunningKafka {
      val producer     = KafkaProducer[String, String](producerCfg, io)
      val consumerTask = KafkaConsumerObservable.createConsumer[String, String](consumerCfg, topicsRegex).executeOn(io)
      val consumer     = Await.result(consumerTask.runToFuture, 60.seconds)

      try {
        // Publishing one message
        val send = producer.send(topicMatchingRegex, "my-message")
        Await.result(send.runToFuture, 30.seconds)

        val records = consumer.poll(10.seconds.toMillis).asScala.map(_.value()).toList
        assert(records === List("my-message"))
      } finally {
        Await.result(producer.close().runToFuture, Duration.Inf)
        consumer.close()
      }
    }
  }

  test("listen for one message when subscribed to topics regex") {
    withRunningKafka {
      val producer = KafkaProducer[String, String](producerCfg, io)
      val consumer = KafkaConsumerObservable[String, String](consumerCfg, topicsRegex).executeOn(io)
      try {
        // Publishing one message
        val send = producer.send(topicMatchingRegex, "test-message")
        Await.result(send.runToFuture, 30.seconds)

        val first  = consumer.take(1).map(_.value()).firstL
        val result = Await.result(first.runToFuture, 30.seconds)
        assert(result === "test-message")
      } finally {
        Await.result(producer.close().runToFuture, Duration.Inf)
      }
    }
  }

  test("full producer/consumer test when subscribed to topics regex") {
    withRunningKafka {
      val count = 10000

      val producer = KafkaProducerSink[String, String](producerCfg, io)
      val consumer = KafkaConsumerObservable[String, String](consumerCfg, topicsRegex).executeOn(io).take(count)

      val pushT = Observable
        .range(0, count)
        .map(msg => new ProducerRecord(topicMatchingRegex, "obs", msg.toString))
        .bufferIntrospective(1024)
        .consumeWith(producer)

      val listT = consumer
        .map(_.value())
        .toListL

      val (result, _) = Await.result(Task.parZip2(listT, pushT).runToFuture, 60.seconds)
      assert(result.map(_.toInt).sum === (0 until count).sum)
    }
  }
}
Example 26
Source File: MergeByCommitCallbackTest.scala From monix-kafka with Apache License 2.0 | 5 votes |
package monix.kafka import monix.eval.Task import monix.kafka.config.AutoOffsetReset import monix.reactive.Observable import org.apache.kafka.clients.producer.ProducerRecord import org.scalatest.{FunSuite, Matchers} import scala.concurrent.duration._ import scala.concurrent.Await import monix.execution.Scheduler.Implicits.global import org.apache.kafka.clients.consumer.OffsetCommitCallback import org.apache.kafka.common.TopicPartition import org.scalacheck.Gen import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks class MergeByCommitCallbackTest extends FunSuite with KafkaTestKit with ScalaCheckDrivenPropertyChecks with Matchers { val commitCallbacks: List[Commit] = List.fill(4)(new Commit { override def commitBatchSync(batch: Map[TopicPartition, Long]): Task[Unit] = Task.unit override def commitBatchAsync(batch: Map[TopicPartition, Long], callback: OffsetCommitCallback): Task[Unit] = Task.unit }) val committableOffsetsGen: Gen[CommittableOffset] = for { partition <- Gen.posNum[Int] offset <- Gen.posNum[Long] commit <- Gen.oneOf(commitCallbacks) } yield CommittableOffset(new TopicPartition("topic", partition), offset, commit) test("merge by commit callback works") { forAll(Gen.nonEmptyListOf(committableOffsetsGen)) { offsets => val partitions = offsets.map(_.topicPartition) val received: List[CommittableOffsetBatch] = CommittableOffsetBatch.mergeByCommitCallback(offsets) received.foreach { batch => partitions should contain allElementsOf batch.offsets.keys } received.size should be <= 4 } } test("merge by commit callback for multiple consumers") { withRunningKafka { val count = 10000 val topicName = "monix-kafka-merge-by-commit" val producerCfg = KafkaProducerConfig.default.copy( bootstrapServers = List("127.0.0.1:6001"), clientId = "monix-kafka-1-0-producer-test" ) val producer = KafkaProducerSink[String, String](producerCfg, io) val pushT = Observable .range(0, count) .map(msg => new ProducerRecord(topicName, "obs", msg.toString)) .bufferIntrospective(1024) .consumeWith(producer) val listT = Observable .range(0, 4) .mergeMap(i => createConsumer(i.toInt, topicName).take(500)) .bufferTumbling(2000) .map(CommittableOffsetBatch.mergeByCommitCallback) .map { offsetBatches => assert(offsetBatches.length == 4) } .completedL Await.result(Task.parZip2(listT, pushT).runToFuture, 60.seconds) } } private def createConsumer(i: Int, topicName: String): Observable[CommittableOffset] = { val cfg = KafkaConsumerConfig.default.copy( bootstrapServers = List("127.0.0.1:6001"), groupId = s"kafka-tests-$i", autoOffsetReset = AutoOffsetReset.Earliest ) KafkaConsumerObservable .manualCommit[String, String](cfg, List(topicName)) .executeOn(io) .map(_.committableOffset) } }
Example 27
Source File: MonixKafkaTopicRegexTest.scala From monix-kafka with Apache License 2.0 | 5 votes |
package monix.kafka import monix.eval.Task import monix.execution.Scheduler.Implicits.global import monix.kafka.config.AutoOffsetReset import monix.reactive.Observable import org.apache.kafka.clients.producer.ProducerRecord import org.scalatest.FunSuite import scala.collection.JavaConverters._ import scala.concurrent.Await import scala.concurrent.duration._ class MonixKafkaTopicRegexTest extends FunSuite with KafkaTestKit { val topicsRegex = "monix-kafka-tests-.*".r val topicMatchingRegex = "monix-kafka-tests-anything" val producerCfg = KafkaProducerConfig.default.copy( bootstrapServers = List("127.0.0.1:6001"), clientId = "monix-kafka-1-0-producer-test" ) val consumerCfg = KafkaConsumerConfig.default.copy( bootstrapServers = List("127.0.0.1:6001"), groupId = "kafka-tests", clientId = "monix-kafka-1-0-consumer-test", autoOffsetReset = AutoOffsetReset.Earliest ) test("publish one message when subscribed to topics regex") { withRunningKafka { val producer = KafkaProducer[String, String](producerCfg, io) val consumerTask = KafkaConsumerObservable.createConsumer[String, String](consumerCfg, topicsRegex).executeOn(io) val consumer = Await.result(consumerTask.runToFuture, 60.seconds) try { // Publishing one message val send = producer.send(topicMatchingRegex, "my-message") Await.result(send.runToFuture, 30.seconds) val records = consumer.poll(10.seconds.toMillis).asScala.map(_.value()).toList assert(records === List("my-message")) } finally { Await.result(producer.close().runToFuture, Duration.Inf) consumer.close() } } } test("listen for one message when subscribed to topics regex") { withRunningKafka { val producer = KafkaProducer[String, String](producerCfg, io) val consumer = KafkaConsumerObservable[String, String](consumerCfg, topicsRegex).executeOn(io) try { // Publishing one message val send = producer.send(topicMatchingRegex, "test-message") Await.result(send.runToFuture, 30.seconds) val first = consumer.take(1).map(_.value()).firstL val result = Await.result(first.runToFuture, 30.seconds) assert(result === "test-message") } finally { Await.result(producer.close().runToFuture, Duration.Inf) } } } test("full producer/consumer test when subscribed to topics regex") { withRunningKafka { val count = 10000 val producer = KafkaProducerSink[String, String](producerCfg, io) val consumer = KafkaConsumerObservable[String, String](consumerCfg, topicsRegex).executeOn(io).take(count) val pushT = Observable .range(0, count) .map(msg => new ProducerRecord(topicMatchingRegex, "obs", msg.toString)) .bufferIntrospective(1024) .consumeWith(producer) val listT = consumer .map(_.value()) .toListL val (result, _) = Await.result(Task.parZip2(listT, pushT).runToFuture, 60.seconds) assert(result.map(_.toInt).sum === (0 until count).sum) } } }
Example 28
Source File: MergeByCommitCallbackTest.scala From monix-kafka with Apache License 2.0 | 5 votes |
package monix.kafka import monix.eval.Task import monix.kafka.config.AutoOffsetReset import monix.reactive.Observable import org.apache.kafka.clients.producer.ProducerRecord import org.scalatest.{FunSuite, Matchers} import scala.concurrent.duration._ import scala.concurrent.Await import monix.execution.Scheduler.Implicits.global import org.apache.kafka.clients.consumer.OffsetCommitCallback import org.apache.kafka.common.TopicPartition import org.scalacheck.Gen import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks class MergeByCommitCallbackTest extends FunSuite with KafkaTestKit with ScalaCheckDrivenPropertyChecks with Matchers { val commitCallbacks: List[Commit] = List.fill(4)(new Commit { override def commitBatchSync(batch: Map[TopicPartition, Long]): Task[Unit] = Task.unit override def commitBatchAsync(batch: Map[TopicPartition, Long], callback: OffsetCommitCallback): Task[Unit] = Task.unit }) val committableOffsetsGen: Gen[CommittableOffset] = for { partition <- Gen.posNum[Int] offset <- Gen.posNum[Long] commit <- Gen.oneOf(commitCallbacks) } yield CommittableOffset(new TopicPartition("topic", partition), offset, commit) test("merge by commit callback works") { forAll(Gen.nonEmptyListOf(committableOffsetsGen)) { offsets => val partitions = offsets.map(_.topicPartition) val received: List[CommittableOffsetBatch] = CommittableOffsetBatch.mergeByCommitCallback(offsets) received.foreach { batch => partitions should contain allElementsOf batch.offsets.keys } received.size should be <= 4 } } test("merge by commit callback for multiple consumers") { withRunningKafka { val count = 10000 val topicName = "monix-kafka-merge-by-commit" val producerCfg = KafkaProducerConfig.default.copy( bootstrapServers = List("127.0.0.1:6001"), clientId = "monix-kafka-1-0-producer-test" ) val producer = KafkaProducerSink[String, String](producerCfg, io) val pushT = Observable .range(0, count) .map(msg => new ProducerRecord(topicName, "obs", msg.toString)) .bufferIntrospective(1024) .consumeWith(producer) val listT = Observable .range(0, 4) .mergeMap(i => createConsumer(i.toInt, topicName).take(500)) .bufferTumbling(2000) .map(CommittableOffsetBatch.mergeByCommitCallback) .map { offsetBatches => assert(offsetBatches.length == 4) } .completedL Await.result(Task.parZip2(listT, pushT).runToFuture, 60.seconds) } } private def createConsumer(i: Int, topicName: String): Observable[CommittableOffset] = { val cfg = KafkaConsumerConfig.default.copy( bootstrapServers = List("127.0.0.1:6001"), groupId = s"kafka-tests-$i", autoOffsetReset = AutoOffsetReset.Earliest ) KafkaConsumerObservable .manualCommit[String, String](cfg, List(topicName)) .executeOn(io) .map(_.committableOffset) } }
Example 29
Source File: MonixKafkaTopicRegexTest.scala From monix-kafka with Apache License 2.0
package monix.kafka

import monix.eval.Task
import monix.execution.Scheduler.Implicits.global
import monix.kafka.config.AutoOffsetReset
import monix.reactive.Observable
import org.apache.kafka.clients.producer.ProducerRecord
import org.scalatest.FunSuite

import scala.collection.JavaConverters._
import scala.concurrent.Await
import scala.concurrent.duration._

class MonixKafkaTopicRegexTest extends FunSuite with KafkaTestKit {
  val topicsRegex = "monix-kafka-tests-.*".r
  val topicMatchingRegex = "monix-kafka-tests-anything"

  val producerCfg = KafkaProducerConfig.default.copy(
    bootstrapServers = List("127.0.0.1:6001"),
    clientId = "monix-kafka-1-0-producer-test"
  )

  val consumerCfg = KafkaConsumerConfig.default.copy(
    bootstrapServers = List("127.0.0.1:6001"),
    groupId = "kafka-tests",
    clientId = "monix-kafka-1-0-consumer-test",
    autoOffsetReset = AutoOffsetReset.Earliest
  )

  test("publish one message when subscribed to topics regex") {
    withRunningKafka {
      val producer = KafkaProducer[String, String](producerCfg, io)
      val consumerTask = KafkaConsumerObservable.createConsumer[String, String](consumerCfg, topicsRegex).executeOn(io)
      val consumer = Await.result(consumerTask.runToFuture, 60.seconds)

      try {
        // Publishing one message
        val send = producer.send(topicMatchingRegex, "my-message")
        Await.result(send.runToFuture, 30.seconds)

        val records = consumer.poll(10.seconds.toMillis).asScala.map(_.value()).toList
        assert(records === List("my-message"))
      } finally {
        Await.result(producer.close().runToFuture, Duration.Inf)
        consumer.close()
      }
    }
  }

  test("listen for one message when subscribed to topics regex") {
    withRunningKafka {
      val producer = KafkaProducer[String, String](producerCfg, io)
      val consumer = KafkaConsumerObservable[String, String](consumerCfg, topicsRegex).executeOn(io)

      try {
        // Publishing one message
        val send = producer.send(topicMatchingRegex, "test-message")
        Await.result(send.runToFuture, 30.seconds)

        val first = consumer.take(1).map(_.value()).firstL
        val result = Await.result(first.runToFuture, 30.seconds)
        assert(result === "test-message")
      } finally {
        Await.result(producer.close().runToFuture, Duration.Inf)
      }
    }
  }

  test("full producer/consumer test when subscribed to topics regex") {
    withRunningKafka {
      val count = 10000

      val producer = KafkaProducerSink[String, String](producerCfg, io)
      val consumer = KafkaConsumerObservable[String, String](consumerCfg, topicsRegex).executeOn(io).take(count)

      val pushT = Observable
        .range(0, count)
        .map(msg => new ProducerRecord(topicMatchingRegex, "obs", msg.toString))
        .bufferIntrospective(1024)
        .consumeWith(producer)

      val listT = consumer
        .map(_.value())
        .toListL

      val (result, _) = Await.result(Task.parZip2(listT, pushT).runToFuture, 60.seconds)
      assert(result.map(_.toInt).sum === (0 until count).sum)
    }
  }
}
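The last test above is the canonical Monix pattern for exercising a producer and a consumer at the same time: build both sides as lazy values, then run them together with Task.parZip2. A minimal, Kafka-free sketch of the same shape (the fold-based sink below is a stand-in for KafkaProducerSink, not part of monix-kafka):

import monix.eval.Task
import monix.execution.Scheduler.Implicits.global
import monix.reactive.{Consumer, Observable}
import scala.concurrent.Await
import scala.concurrent.duration._

object ParZipSketch extends App {
  val count = 100L
  // Stand-in for KafkaProducerSink: counts how many elements were pushed.
  val sink = Consumer.foldLeft[Long, List[Long]](0L)((acc, batch) => acc + batch.size)
  val pushT = Observable.range(0, count).bufferIntrospective(16).consumeWith(sink)
  val listT = Observable.range(0, count).sumL
  // Run both sides concurrently, exactly as the test does with Task.parZip2.
  val (sum, pushed) = Await.result(Task.parZip2(listT, pushT).runToFuture, 10.seconds)
  println(s"sum=$sum pushed=$pushed")
}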
Example 30
Source File: Implicits.scala From RosHTTP with MIT License
package fr.hmil.roshttp.body

import java.io.InputStream
import java.nio.ByteBuffer

import fr.hmil.roshttp.body.JSONBody._
import monix.reactive.Observable
import monix.eval.Task

object Implicits {
  implicit def stringToJSONString(value: String): JSONString = new JSONString(value)
  implicit def intToJSONNumber(value: Int): JSONNumber = new JSONNumber(value)
  implicit def floatToJSONNumber(value: Float): JSONNumber = new JSONNumber(value)
  implicit def doubleToJSONNumber(value: Double): JSONNumber = new JSONNumber(value)
  implicit def booleanToJSONBoolean(value: Boolean): JSONBoolean = new JSONBoolean(value)
  implicit def JSONObjectToJSONBody(obj: JSONObject): JSONBody = JSONBody(obj)
  implicit def JSONArrayToJSONBody(arr: JSONArray): JSONBody = JSONBody(arr)
  implicit def byteBufferToByteBufferBody(buffer: ByteBuffer): BodyPart = ByteBufferBody(buffer)
  implicit def observableToStreamBody(is: InputStream): BodyPart =
    StreamBody(Observable.fromInputStream(Task(is)).map(ByteBuffer.wrap))
}
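The observableToStreamBody conversion is the one doing real work: Observable.fromInputStream reads the stream in chunks on demand, and each chunk is wrapped into a ByteBuffer. A standalone sketch of that conversion (the input bytes are made up):

import java.io.ByteArrayInputStream
import java.nio.ByteBuffer
import monix.eval.Task
import monix.execution.Scheduler.Implicits.global
import monix.reactive.Observable
import scala.concurrent.Await
import scala.concurrent.duration._

object InputStreamSketch extends App {
  val in = new ByteArrayInputStream("hello, observable".getBytes("utf-8"))
  // Each emitted element is one chunk read from the underlying stream.
  val chunks: Observable[ByteBuffer] =
    Observable.fromInputStream(Task(in)).map(ByteBuffer.wrap)
  val total = Await.result(chunks.map(_.remaining()).sumL.runToFuture, 5.seconds)
  println(s"streamed $total bytes")
}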
Example 31
Source File: MultiPartBody.scala From RosHTTP with MIT License
package fr.hmil.roshttp.body

import java.nio.ByteBuffer

import monix.execution.Scheduler
import monix.reactive.Observable

import scala.util.Random

class MultiPartBody(parts: Map[String, BodyPart], subtype: String = "form-data")(implicit scheduler: Scheduler)
  extends BodyPart {

  val boundary = "----" + Random.alphanumeric.take(24).mkString.toLowerCase

  override def contentType: String = s"multipart/$subtype; boundary=$boundary"

  override def content: Observable[ByteBuffer] = {
    parts.
      // Prepend multipart encapsulation boundary and body part headers to
      // each body part.
      map({ case (name, part) =>
        ByteBuffer.wrap(
          ("\r\n--" + boundary + "\r\n" +
            "Content-Disposition: form-data; name=\"" + name + "\"\r\n" +
            s"Content-Type: ${part.contentType}\r\n" +
            "\r\n").getBytes("utf-8")
        ) +: part.content
      }).
      // Join body parts
      reduceLeft((acc, elem) => acc ++ elem).
      // Append the closing boundary
      :+(ByteBuffer.wrap(s"\r\n--$boundary--\r\n".getBytes("utf-8")))
  }
}

object MultiPartBody {
  def apply(parts: (String, BodyPart)*)(implicit scheduler: Scheduler): MultiPartBody =
    new MultiPartBody(Map(parts: _*))
}
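Note how the whole multipart payload is assembled from pure Observable concatenation: +: prepends the encapsulation header to each part, reduceLeft(_ ++ _) joins the parts in order, and :+ appends the closing boundary. The same framing trick in isolation, with strings instead of ByteBuffers:

import monix.execution.Scheduler.Implicits.global
import monix.reactive.Observable
import scala.concurrent.Await
import scala.concurrent.duration._

object FramingSketch extends App {
  val parts: List[Observable[String]] =
    List(Observable("a1", "a2"), Observable("b1"))
  val framed = parts
    .map(part => "--boundary" +: part)      // prepend a delimiter to each part
    .reduceLeft((acc, elem) => acc ++ elem) // join the parts in order
    .:+("--boundary--")                     // append the closing delimiter
  println(Await.result(framed.toListL.runToFuture, 5.seconds))
}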
Example 32
Source File: SimpleHttpResponse.scala From RosHTTP with MIT License
package fr.hmil.roshttp.response

import java.nio.ByteBuffer

import fr.hmil.roshttp.BackendConfig
import fr.hmil.roshttp.exceptions.ResponseException
import fr.hmil.roshttp.util.{HeaderMap, Utils}
import monix.execution.Scheduler
import monix.reactive.Observable

import scala.collection.mutable
import scala.concurrent.{Future, Promise}
import scala.util.{Failure, Success}

class SimpleHttpResponse(
    val statusCode: Int,
    val headers: HeaderMap[String],
    val body: String)
  extends HttpResponse

object SimpleHttpResponse extends HttpResponseFactory[SimpleHttpResponse] {
  override def apply(
      header: HttpResponseHeader,
      bodyStream: Observable[ByteBuffer],
      config: BackendConfig)
      (implicit scheduler: Scheduler): Future[SimpleHttpResponse] = {

    val charset = Utils.charsetFromContentType(header.headers.getOrElse("content-type", null))
    val buffers = mutable.Queue[ByteBuffer]()
    val promise = Promise[SimpleHttpResponse]()

    val streamCollector = bodyStream.
      foreach(elem => buffers.enqueue(elem)).
      map({ _ =>
        val body = recomposeBody(buffers, config.maxChunkSize, charset)
        new SimpleHttpResponse(header.statusCode, header.headers, body)
      })

    streamCollector.onComplete({
      case res: Success[SimpleHttpResponse] =>
        promise.trySuccess(res.value)
      case e: Failure[_] =>
        promise.tryFailure(new ResponseException(e.exception, header))
    })

    promise.future
  }

  private def recomposeBody(seq: mutable.Queue[ByteBuffer], maxChunkSize: Int, charset: String): String = {
    // Allocate maximum expected body length
    val buffer = ByteBuffer.allocate(seq.length * maxChunkSize)
    val totalBytes = seq.foldLeft(0)({ (count, chunk) =>
      buffer.put(chunk)
      count + chunk.limit()
    })
    buffer.limit(totalBytes)
    Utils.getStringFromBuffer(buffer, charset)
  }
}
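SimpleHttpResponse drains the body stream into a mutable queue and decodes it once at the end. The same collect step can be written without mutation via foldLeftL; this is an illustrative reduction, not RosHTTP's actual code:

import java.nio.ByteBuffer
import monix.execution.Scheduler.Implicits.global
import monix.reactive.Observable
import scala.concurrent.Await
import scala.concurrent.duration._

object CollectBodySketch extends App {
  val bodyStream: Observable[ByteBuffer] =
    Observable("chunk-1|", "chunk-2").map(s => ByteBuffer.wrap(s.getBytes("utf-8")))
  // Accumulate every chunk into one buffer, then decode a single time.
  val bodyT = bodyStream
    .foldLeftL(Vector.empty[Byte]) { (acc, buf) =>
      val arr = new Array[Byte](buf.remaining())
      buf.get(arr)
      acc ++ arr
    }
    .map(bytes => new String(bytes.toArray, "utf-8"))
  println(Await.result(bodyT.runToFuture, 5.seconds))
}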
Example 33
Source File: StreamHttpResponse.scala From RosHTTP with MIT License
package fr.hmil.roshttp.response

import java.nio.ByteBuffer

import fr.hmil.roshttp.BackendConfig
import fr.hmil.roshttp.util.HeaderMap
import monix.execution.Scheduler
import monix.reactive.Observable

import scala.concurrent.Future

class StreamHttpResponse(
    val statusCode: Int,
    val headers: HeaderMap[String],
    val body: Observable[ByteBuffer])
  extends HttpResponse

object StreamHttpResponse extends HttpResponseFactory[StreamHttpResponse] {
  override def apply(
      header: HttpResponseHeader,
      bodyStream: Observable[ByteBuffer],
      config: BackendConfig)
      (implicit scheduler: Scheduler): Future[StreamHttpResponse] =
    Future.successful(new StreamHttpResponse(header.statusCode, header.headers, bodyStream))
}
Example 34
Source File: UdpIntegrationSpec.scala From monix-nio with Apache License 2.0
package monix.nio.udp

import java.net.InetSocketAddress

import minitest.SimpleTestSuite
import monix.eval.Task
import monix.execution.Ack
import monix.execution.Ack.{ Continue, Stop }
import monix.reactive.Observable

import scala.concurrent.duration._
import scala.concurrent.{ Await, Promise }

object UdpIntegrationSpec extends SimpleTestSuite {
  implicit val ctx = monix.execution.Scheduler.Implicits.global

  test("send and receive UDP packets successfully") {
    val data = Array.fill(8)("monix")

    val writes = (ch: TaskDatagramChannel, to: InetSocketAddress) => Observable
      .fromIterable(data)
      .mapEval(data => ch.send(Packet(data.getBytes, to)))

    val readsPromise = Promise[String]()
    val recv = new StringBuilder("")
    val reads = (ch: TaskDatagramChannel, maxSize: Int) => Observable
      .repeatEval(ch.receive(maxSize, 2.seconds))
      .mapEval(t => t)
      .map { packet =>
        packet.foreach(p => recv.append(new String(p.data)))
        packet
      }
      .guaranteeCase(_ => Task(readsPromise.success(recv.mkString)))
      .subscribe(_.fold[Ack](Stop)(_ => Continue))

    val program = for {
      ch <- bind("localhost", 2115).map { ch =>
        reads(ch, 64)
        ch
      }
      sent <- writes(ch, new InetSocketAddress("localhost", 2115)).sumL
      received <- Task.fromFuture(readsPromise.future)
      _ <- ch.close()
    } yield sent == 40 & received == data.mkString("")

    val result = Await.result(program.runToFuture, 10.seconds)
    assert(result)
  }
}
Example 35
Source File: SimpleWebSocketActor.scala From monix-sample with Apache License 2.0
package engine

import akka.actor.{Actor, ActorRef, Props}
import engine.SimpleWebSocketActor.Next
import monix.execution.Scheduler
import monix.reactive.Observable
import monix.execution.Ack.Continue
import monix.execution.cancelables.CompositeCancelable
import org.joda.time.{DateTime, DateTimeZone}
import play.api.libs.json.{JsValue, Json, Writes}
import scala.concurrent.duration._
import engine.BackPressuredWebSocketActor._

class SimpleWebSocketActor[T: Writes]
  (producer: Observable[T], out: ActorRef)(implicit s: Scheduler)
  extends Actor {

  def receive: Receive = {
    case Next(jsValue) =>
      out ! jsValue
  }

  private[this] val subscription = CompositeCancelable()

  override def preStart(): Unit = {
    super.preStart()

    val source = {
      val initial = Observable.evalOnce(initMessage(now()))
      val obs = initial ++ producer.map(x => Json.toJson(x))
      val timeout = obs.debounce(3.seconds).map(_ => keepAliveMessage(now()))

      Observable.merge(obs, timeout)
    }

    subscription += source.subscribe { jsValue =>
      self ! Next(jsValue)
      Continue
    }
  }

  override def postStop(): Unit = {
    subscription.cancel()
    super.postStop()
  }

  def now(): Long =
    DateTime.now(DateTimeZone.UTC).getMillis
}

object SimpleWebSocketActor {
  case class Next(value: JsValue)
}
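The keep-alive wiring above is reusable on its own: debounce fires only after the source has been silent for the given period, and merge interleaves the keep-alives with the real data. A sketch with strings standing in for the JsValue messages:

import monix.execution.Scheduler.Implicits.global
import monix.reactive.Observable
import scala.concurrent.Await
import scala.concurrent.duration._

object KeepAliveSketch extends App {
  // A producer with a long quiet gap between its two emissions.
  val producer = Observable("tick-1") ++ Observable.evalDelayed(5.seconds, "tick-2")
  // While the producer is silent for 2 seconds, signal a keep-alive.
  val keepAlive = producer.debounce(2.seconds).map(_ => "keep-alive")
  val out = Observable.merge(producer, keepAlive)
  println(Await.result(out.toListL.runToFuture, 30.seconds))
}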
Example 36
Source File: DataConsumer.scala From monix-sample with Apache License 2.0
package client

import monix.execution.Cancelable
import monix.reactive.Observable
import monix.reactive.OverflowStrategy.DropNew
import monix.reactive.observers.Subscriber
import org.scalajs.dom
import shared.models.{Event, OverflowEvent, Signal}
import scala.concurrent.duration.FiniteDuration
import scala.scalajs.js.Dynamic.global

final class DataConsumer(interval: FiniteDuration, seed: Long, doBackPressure: Boolean)
  extends Observable[Event] {

  override def unsafeSubscribeFn(subscriber: Subscriber[Event]): Cancelable = {
    val host = dom.window.location.host
    val protocol = if (dom.document.location.protocol == "https:") "wss:" else "ws:"

    val source = if (doBackPressure) {
      val url = s"$protocol//$host/back-pressured-stream?periodMillis=${interval.toMillis}&seed=$seed"
      BackPressuredWebSocketClient(url)
    } else {
      val url = s"$protocol//$host/simple-stream?periodMillis=${interval.toMillis}&seed=$seed"
      SimpleWebSocketClient(url, DropNew(1000))
    }

    source
      .collect { case IsEvent(e) => e }
      .unsafeSubscribeFn(subscriber)
  }

  object IsEvent {
    def unapply(message: String) = {
      val json = global.JSON.parse(message)

      json.event.asInstanceOf[String] match {
        case "point" =>
          Some(Signal(
            value = json.value.asInstanceOf[Number].doubleValue(),
            timestamp = json.timestamp.asInstanceOf[Number].longValue()
          ))
        case "overflow" =>
          Some(OverflowEvent(
            dropped = json.dropped.asInstanceOf[Number].longValue(),
            timestamp = json.timestamp.asInstanceOf[Number].longValue()
          ))
        case "error" =>
          val errorType = json.`type`.asInstanceOf[String]
          val message = json.message.asInstanceOf[String]
          throw new BackPressuredWebSocketClient.Exception(
            s"Server-side error throw - $errorType: $message")
        case _ =>
          None
      }
    }
  }
}
Example 37
Source File: MonixSampleClient.scala From monix-sample with Apache License 2.0
package client

import monix.execution.Scheduler.Implicits.global
import monix.reactive.Observable
import shared.models.Signal
import scala.concurrent.duration._
import scala.scalajs.js

object MonixSampleClient extends js.JSApp {
  def main(): Unit = {
    val line1 = new DataConsumer(200.millis, 1274028492832L, doBackPressure = true)
      .collect { case s: Signal => s }
    val line2 = new DataConsumer(200.millis, 9384729038472L, doBackPressure = true)
      .collect { case s: Signal => s }
    val line3 = new DataConsumer(200.millis, -2938472934842L, doBackPressure = false)
      .collect { case s: Signal => s }
    val line4 = new DataConsumer(200.millis, -9826395057397L, doBackPressure = false)
      .collect { case s: Signal => s }

    Observable
      .combineLatest4(line1, line2, line3, line4)
      .subscribe(new Graph("lineChart"))
  }
}
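combineLatest4 emits a fresh 4-tuple whenever any of the four inputs produces a value, so the subscriber always sees the latest sample of each line. The operator in isolation, with plain number streams instead of DataConsumer:

import monix.execution.Scheduler.Implicits.global
import monix.reactive.Observable
import scala.concurrent.Await
import scala.concurrent.duration._

object CombineLatestSketch extends App {
  // Four ticking sources, distinguishable by their offsets.
  def line(offset: Int) =
    Observable.intervalAtFixedRate(100.millis, 100.millis).map(n => offset + n)
  val latest = Observable
    .combineLatest4(line(1000), line(2000), line(3000), line(4000))
    .take(5)
  Await.result(latest.foreach(println), 10.seconds)
}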
Example 38
Source File: DynamoDbStreamsMonixClient.scala From reactive-aws-clients with MIT License
// Auto-Generated
package com.github.j5ik2o.reactive.aws.dynamodb.streams.monix

import com.github.j5ik2o.reactive.aws.dynamodb.streams.{ DynamoDbStreamsAsyncClient, DynamoDbStreamsClient }
import monix.eval.Task
import monix.reactive.Observable
import software.amazon.awssdk.services.dynamodb.model._

object DynamoDbStreamsMonixClient {

  def apply(underlying: DynamoDbStreamsAsyncClient): DynamoDbStreamsMonixClient =
    new DynamoDbStreamsMonixClientImpl(underlying)

}

trait DynamoDbStreamsMonixClient extends DynamoDbStreamsClient[Task] {

  val underlying: DynamoDbStreamsAsyncClient

  override def describeStream(describeStreamRequest: DescribeStreamRequest): Task[DescribeStreamResponse] =
    Task.deferFuture {
      underlying.describeStream(describeStreamRequest)
    }

  def describeStreamPaginator(describeStreamRequest: DescribeStreamRequest): Observable[DescribeStreamResponse] =
    Observable.fromReactivePublisher(underlying.describeStreamPaginator(describeStreamRequest))

  override def getRecords(getRecordsRequest: GetRecordsRequest): Task[GetRecordsResponse] =
    Task.deferFuture {
      underlying.getRecords(getRecordsRequest)
    }

  override def getShardIterator(getShardIteratorRequest: GetShardIteratorRequest): Task[GetShardIteratorResponse] =
    Task.deferFuture {
      underlying.getShardIterator(getShardIteratorRequest)
    }

  override def listStreams(listStreamsRequest: ListStreamsRequest): Task[ListStreamsResponse] =
    Task.deferFuture {
      underlying.listStreams(listStreamsRequest)
    }

  override def listStreams(): Task[ListStreamsResponse] =
    Task.deferFuture {
      underlying.listStreams()
    }

  def listStreamsPaginator(): Observable[ListStreamsResponse] =
    Observable.fromReactivePublisher(underlying.listStreamsPaginator())

  def listStreamsPaginator(listStreamsRequest: ListStreamsRequest): Observable[ListStreamsResponse] =
    Observable.fromReactivePublisher(underlying.listStreamsPaginator(listStreamsRequest))

}
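Every method in this generated wrapper uses one of two bridges: Task.deferFuture for single-response calls and Observable.fromReactivePublisher for paginated ones. The shape without the AWS SDK (both stubs below are stand-ins for the real client calls):

import monix.eval.Task
import monix.execution.Scheduler.Implicits.global
import monix.reactive.Observable
import org.reactivestreams.Publisher
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._

object BridgeSketch extends App {
  // Stand-in for an SDK async call that returns a Future.
  def describeAsync(): Future[String] = Future.successful("stream-description")
  // Stand-in for an SDK paginator exposing a Reactive Streams Publisher.
  def listPaginator(): Publisher[Int] =
    Observable.range(1, 4).map(_.toInt).toReactivePublisher

  val oneShot: Task[String] = Task.deferFuture(describeAsync())
  val pages: Observable[Int] = Observable.fromReactivePublisher(listPaginator())

  println(Await.result(oneShot.runToFuture, 5.seconds))
  println(Await.result(pages.toListL.runToFuture, 5.seconds))
}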
Example 39
Source File: protocol.scala From frees-rpc-workshop with Apache License 2.0
package scalaexchange
package services

import freestyle.rpc.protocol._
import monix.reactive.Observable

object protocol {

  final case class Segment(
      title: String,
      minRecency: Int,
      maxRecency: Int,
      minFrequency: Int,
      maxFrequency: Int,
      minMonetary: Int,
      maxMonetary: Int
  )

  final case class SegmentList(list: List[Segment])

  final case class Ack(result: String)

  @service
  trait RFMAnalysisService[F[_]] {

    @rpc(Avro) def segments(empty: Empty.type): F[SegmentList]

    @rpc(Avro)
    @stream[ResponseStreaming.type]
    def userEvents(empty: Empty.type): F[Observable[UserEvent]]

    @rpc(Avro)
    @stream[RequestStreaming.type]
    def orderStream(orders: Observable[Order]): F[Ack]
  }

}
Example 40
Source File: AppRFMClient.scala From frees-rpc-workshop with Apache License 2.0
package scalaexchange
package app

import cats.effect.IO
import freestyle.rpc.protocol.Empty
import freestyle.asyncCatsEffect.implicits._
import freestyle.rpc.client.implicits._
import monix.reactive.Observable

import scala.concurrent.Await
import scala.concurrent.duration._
import scalaexchange.services.protocol._

object AppRFMClient extends Implicits {

  def main(args: Array[String]): Unit = {

    implicit val rfmClient: RFMAnalysisService.Client[IO] =
      RFMAnalysisService.client[IO](channel)

    val (segments: IO[SegmentList], stream: Observable[UserEvent], ack: IO[Ack]) = (
      rfmClient.segments(Empty),
      rfmClient.userEvents(Empty),
      rfmClient.orderStream(ordersStreamObs)
    )

    println(s"Segments: \n${segments.unsafeRunSync().list.mkString("\n")}\n")
    println(s"Client Streaming: \n${ack.unsafeRunSync()}\n")

    Await.ready(
      stream
        .map { u =>
          println(u)
          u
        }
        .completedL
        .runAsync,
      Duration.Inf)
  }

  private[this] def ordersStreamObs: Observable[Order] = {
    val orderList: List[Order] = (1 to 1000).map { customerId =>
      import com.fortysevendeg.scalacheck.datetime.GenDateTime
      import org.joda.time.{DateTime, Period}
      import org.scalacheck._
      import com.fortysevendeg.scalacheck.datetime.instances.joda.jodaForPeriod

      (for {
        date <- GenDateTime.genDateTimeWithinRange(DateTime.parse("2017-12-01"), Period.days(22))
        orderId <- Gen.uuid
        total <- Gen.choose[Int](5, 200)
      } yield Order(
        customerId,
        CustomerData(date.toString, orderId.toString, total)
      )).sample.get
    }.toList

    Observable.fromIterable(orderList)
  }

}
Example 41
Source File: UserEventProducer.scala From frees-rpc-workshop with Apache License 2.0
package scalaexchange
package datagenerator

import com.fortysevendeg.scalacheck.datetime.GenDateTime
import com.fortysevendeg.scalacheck.datetime.instances.joda.jodaForPeriod
import monix.execution.Cancelable
import monix.reactive.Observable
import monix.reactive.observers.Subscriber
import org.joda.time.{DateTime, Period}

import scala.concurrent.duration.FiniteDuration

class UserEventProducer(interval: FiniteDuration) extends Observable[UserEvent] {

  override def unsafeSubscribeFn(subscriber: Subscriber[UserEvent]): Cancelable = {

    val userEventRandom: Observable[UserEvent] =
      Observable
        .fromStateAction(eventsGen)(Nil)
        .flatMap { a =>
          Observable.now(a).delaySubscription(interval)
        }

    userEventRandom.drop(1).unsafeSubscribeFn(subscriber)
  }

  private[this] def eventsGen(initialState: List[UserEvent]): (UserEvent, List[UserEvent]) = {
    import org.scalacheck._

    val dataGen: Arbitrary[UserEvent] = Arbitrary {
      import Gen._
      for {
        id <- choose(1, 100)
        eventType <- Gen.oneOf(List(ProcessedCheckout, UnprocessedCheckout, Login))
        date <- GenDateTime.genDateTimeWithinRange(DateTime.parse("2017-12-01"), Period.days(22))
      } yield UserEvent(id, eventType, date.toString())
    }

    val newEvent: UserEvent = dataGen.arbitrary.sample
      .getOrElse(throw DataGenerationException("Exception creating new random event"))

    (newEvent, initialState :+ newEvent)
  }
}
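Both this producer and the DataProducer example further down are built on the same primitive: Observable.fromStateAction threads a state value through a pure step function, while delaySubscription inside flatMap spaces the emissions out. Stripped to its core:

import monix.execution.Scheduler.Implicits.global
import monix.reactive.Observable
import scala.concurrent.Await
import scala.concurrent.duration._

object StateActionSketch extends App {
  // Step function: emit the current counter and advance the state.
  def step(n: Int): (Int, Int) = (n, n + 1)
  val ticks = Observable
    .fromStateAction(step)(0)
    .flatMap(n => Observable.now(n).delaySubscription(100.millis))
    .take(5)
  println(Await.result(ticks.toListL.runToFuture, 10.seconds))
}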
Example 42
Source File: CodecTest.scala From monix-nio with Apache License 2.0
package monix.nio.file

import java.nio.file.{ Files, Paths }
import java.util

import minitest.SimpleTestSuite
import monix.eval.Task
import monix.execution.Callback
import monix.execution.Scheduler.Implicits.{ global => ctx }
import monix.nio.file
import monix.nio.text.UTF8Codec.{ utf8Decode, utf8Encode }
import monix.reactive.Observable

import scala.concurrent.duration._
import scala.concurrent.{ Await, Promise }

object CodecTest extends SimpleTestSuite {
  test("decode file utf8") {
    val from = Paths.get(this.getClass.getResource("/testFiles/specialChars.txt").toURI)

    val p = Promise[Seq[Byte]]()
    val callback = new Callback[Throwable, List[Array[Byte]]] {
      override def onSuccess(value: List[Array[Byte]]): Unit = p.success(value.flatten)
      override def onError(ex: Throwable): Unit = p.failure(ex)
    }

    readAsync(from, 3)
      .pipeThrough(utf8Decode)
      .pipeThrough(utf8Encode)
      .toListL
      .runAsync(callback)

    val result = Await.result(p.future, 3.second)
    val f1 = Files.readAllBytes(from)
    val f2 = result
    assert(util.Arrays.equals(f1, f2.toArray))
  }

  test("decode special chars") {
    val strSeq = Seq("A", "\u0024", "\u00A2", "\u20AC", new String(Array(0xF0, 0x90, 0x8D, 0x88).map(_.toByte)), "B")

    for (grouping <- 1 to 12) {
      val obsSeq = Observable
        .fromIterator(Task(strSeq.flatMap(_.getBytes).grouped(grouping).map(_.toArray)))
        .pipeThrough(utf8Decode)

      val p = Promise[Boolean]()
      val callback = new Callback[Throwable, List[String]] {
        override def onSuccess(value: List[String]): Unit = {
          p.success(if (value.mkString == strSeq.mkString) true else false)
        }
        override def onError(ex: Throwable): Unit = p.failure(ex)
      }

      obsSeq.toListL.runAsync(callback)

      val result = Await.result(p.future, 3.second)
      assert(result)
    }
  }

  test("copy file utf8") {
    val from = Paths.get(this.getClass.getResource("/testFiles/specialChars.txt").toURI)
    val to = Paths.get("src/test/resources/res.txt")
    val consumer = file.writeAsync(to)

    val p = Promise[Long]()
    val callback = new Callback[Throwable, Long] {
      override def onSuccess(value: Long): Unit = p.success(value)
      override def onError(ex: Throwable): Unit = p.failure(ex)
    }

    readAsync(from, 3)
      .pipeThrough(utf8Decode)
      .map { str =>
        //Console.println(str)
        str
      }
      .pipeThrough(utf8Encode)
      .consumeWith(consumer)
      .runAsync(callback)

    val result = Await.result(p.future, 3.second)
    val f1 = Files.readAllBytes(from)
    val f2 = result
    Files.delete(to)

    assertEquals(f1.size, f2)
  }
}
Example 43
Source File: BackPressuredWebSocketActor.scala From monix-sample with Apache License 2.0
package engine

import akka.actor.{Actor, ActorRef, Props}
import com.typesafe.scalalogging.LazyLogging
import engine.BackPressuredWebSocketActor._
import monix.execution.Scheduler
import monix.execution.rstreams.SingleAssignmentSubscription
import monix.reactive.Observable
import org.reactivestreams.{Subscriber, Subscription}
import play.api.libs.json._

import scala.concurrent.duration._
import scala.util.Try

class BackPressuredWebSocketActor[T: Writes]
  (producer: Observable[T], out: ActorRef)(implicit s: Scheduler)
  extends Actor with LazyLogging {

  def receive: Receive = {
    case JsNumber(nr) if nr > 0 =>
      Try(nr.toLongExact).foreach(subscription.request)
  }

  private[this] val subscription = SingleAssignmentSubscription()

  def now(): Long =
    System.currentTimeMillis()

  override def preStart(): Unit = {
    super.preStart()

    val source = {
      val initial = Observable.evalOnce(initMessage(now()))
      val obs = initial ++ producer.map(x => Json.toJson(x))
      val timeout = obs.debounceRepeated(5.seconds).map(_ => keepAliveMessage(now()))

      Observable
        .merge(obs, timeout)
        .whileBusyDropEventsAndSignal(nr => onOverflow(nr, now()))
    }

    source.toReactivePublisher.subscribe(new Subscriber[JsValue] {
      def onSubscribe(s: Subscription): Unit = {
        subscription := s
      }

      def onNext(json: JsValue): Unit = {
        out ! json
      }

      def onError(t: Throwable): Unit = {
        logger.warn(s"Error while serving a web-socket stream", t)
        out ! Json.obj(
          "event" -> "error",
          "type" -> t.getClass.getName,
          "message" -> t.getMessage,
          "timestamp" -> now())
        context.stop(self)
      }

      def onComplete(): Unit = {
        out ! Json.obj("event" -> "complete", "timestamp" -> now())
        context.stop(self)
      }
    })
  }

  override def postStop(): Unit = {
    subscription.cancel()
    super.postStop()
  }
}

object BackPressuredWebSocketActor {
  def initMessage(now: Long) = {
    Json.obj("event" -> "init", "timestamp" -> now)
  }
}
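The actor bridges the merged Observable to a Reactive Streams Subscriber with toReactivePublisher, so downstream demand (here, the JsNumber messages) drives the flow. The bridge itself, without Akka or Play; this subscriber simply requests one element at a time:

import java.util.concurrent.CountDownLatch
import monix.execution.Scheduler.Implicits.global
import monix.reactive.Observable
import org.reactivestreams.{Subscriber, Subscription}

object ReactiveBridgeSketch extends App {
  val done = new CountDownLatch(1)
  Observable.range(0, 3).toReactivePublisher.subscribe(new Subscriber[Long] {
    private var s: Subscription = _
    def onSubscribe(sub: Subscription): Unit = { s = sub; s.request(1) }
    def onNext(n: Long): Unit = { println(s"got $n"); s.request(1) } // pull-based demand
    def onError(t: Throwable): Unit = done.countDown()
    def onComplete(): Unit = done.countDown()
  })
  done.await()
}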
Example 44
Source File: AsyncChannelObservable.scala From monix-nio with Apache License 2.0
package monix.nio

import java.nio.ByteBuffer

import monix.eval.Task
import monix.execution.Ack.{ Continue, Stop }
import monix.execution.{ Callback, Cancelable, Scheduler }
import monix.execution.atomic.Atomic
import monix.execution.cancelables.SingleAssignCancelable
import monix.execution.exceptions.APIContractViolationException
import monix.nio.internal.{ Bytes, EmptyBytes, NonEmptyBytes }
import monix.reactive.Observable
import monix.reactive.observers.Subscriber

import scala.concurrent.Future
import scala.util.control.NonFatal

private[nio] abstract class AsyncChannelObservable extends Observable[Array[Byte]] {
  def bufferSize: Int
  def channel: Option[AsyncChannel]
  def init(subscriber: Subscriber[Array[Byte]]): Future[Unit] =
    Future.successful(())

  private[this] val wasSubscribed = Atomic(false)

  override def unsafeSubscribeFn(subscriber: Subscriber[Array[Byte]]): Cancelable = {
    import subscriber.scheduler
    if (wasSubscribed.getAndSet(true)) {
      subscriber.onError(APIContractViolationException(this.getClass.getName))
      Cancelable.empty
    } else try startReading(subscriber) catch {
      case NonFatal(e) =>
        subscriber.onError(e)
        closeChannel()
        Cancelable.empty
    }
  }

  private def startReading(subscriber: Subscriber[Array[Byte]]): Cancelable = {
    import subscriber.scheduler
    val taskCallback = new Callback[Throwable, Array[Byte]]() {
      override def onSuccess(value: Array[Byte]): Unit = {
        channel.collect { case sc if sc.closeOnComplete => closeChannel() }
      }
      override def onError(ex: Throwable): Unit = {
        closeChannel()
        subscriber.onError(ex)
      }
    }

    val cancelable = Task
      .fromFuture(init(subscriber))
      .flatMap { _ =>
        loop(subscriber, 0)
      }
      .executeWithOptions(_.enableAutoCancelableRunLoops)
      .runAsync(taskCallback)

    val extraCancelable = Cancelable(() => {
      cancelable.cancel()
      closeChannel()
    })
    SingleAssignCancelable.plusOne(extraCancelable)
  }

  private[this] val buffer = ByteBuffer.allocate(bufferSize)

  private def loop(subscriber: Subscriber[Array[Byte]], position: Long)(implicit scheduler: Scheduler): Task[Array[Byte]] = {
    buffer.clear()
    channel.map { ch =>
      ch
        .read(buffer, position)
        .doOnCancel(Task.defer(ch.close()))
        .flatMap { result =>
          val bytes = Bytes(buffer, result)
          bytes match {
            case EmptyBytes =>
              subscriber.onComplete()
              Task.now(Bytes.emptyBytes)
            case NonEmptyBytes(arr) =>
              Task.fromFuture(subscriber.onNext(arr)).flatMap {
                case Continue =>
                  loop(subscriber, position + result)
                case Stop =>
                  Task.now(Bytes.emptyBytes)
              }
          }
        }
    }.getOrElse(Task.now(Bytes.emptyBytes))
  }

  private[nio] final def closeChannel()(implicit scheduler: Scheduler) =
    channel.foreach(_.close().runToFuture)
}
Example 45
Source File: WatchServiceObservable.scala From monix-nio with Apache License 2.0
package monix.nio

import java.nio.file.WatchEvent

import monix.eval.Task
import monix.execution.Ack.{ Continue, Stop }
import monix.execution.atomic.Atomic
import monix.execution.cancelables.SingleAssignCancelable
import monix.execution.exceptions.APIContractViolationException
import monix.execution.{ Callback, Cancelable, Scheduler }
import monix.reactive.Observable
import monix.reactive.observers.Subscriber

import scala.concurrent.Future
import scala.util.control.NonFatal

abstract class WatchServiceObservable extends Observable[Array[WatchEvent[_]]] {
  def watchService: Option[WatchService]

  private[this] val wasSubscribed = Atomic(false)

  override def unsafeSubscribeFn(subscriber: Subscriber[Array[WatchEvent[_]]]): Cancelable = {
    if (wasSubscribed.getAndSet(true)) {
      subscriber.onError(APIContractViolationException(this.getClass.getName))
      Cancelable.empty
    } else try startPolling(subscriber) catch {
      case NonFatal(e) =>
        subscriber.onError(e)
        Cancelable.empty
    }
  }

  def init(subscriber: Subscriber[Array[WatchEvent[_]]]): Future[Unit] =
    Future.successful(())

  private def startPolling(subscriber: Subscriber[Array[WatchEvent[_]]]): Cancelable = {
    import subscriber.scheduler
    val taskCallback = new Callback[Throwable, Array[WatchEvent[_]]]() {
      override def onSuccess(value: Array[WatchEvent[_]]): Unit = {}
      override def onError(ex: Throwable): Unit = {
        subscriber.onError(ex)
      }
    }

    val cancelable = Task
      .fromFuture(init(subscriber))
      .flatMap { _ =>
        loop(subscriber)
      }
      .executeWithOptions(_.enableAutoCancelableRunLoops)
      .runAsync(taskCallback)

    val extraCancelable = Cancelable(() => {
      cancelable.cancel()
    })
    SingleAssignCancelable.plusOne(extraCancelable)
  }

  private def loop(subscriber: Subscriber[Array[WatchEvent[_]]])(implicit scheduler: Scheduler): Task[Array[WatchEvent[_]]] = {
    import collection.JavaConverters._
    watchService.map { ws =>
      ws.take()
        .doOnCancel(Task.defer(ws.close()))
        .flatMap { key =>
          val events = key.pollEvents().asScala.toArray
          key.reset()
          Task.fromFuture(subscriber.onNext(events)).flatMap {
            case Continue => loop(subscriber)
            case Stop => emptyTask
          }
        }
    }
  }.getOrElse(emptyTask)

  private val emptyTask = Task.create[Array[WatchEvent[_]]]((_, _) => Cancelable.empty)
}
Example 46
Source File: BaseProtocolMessage.scala From lsp4s with Apache License 2.0
package scala.meta.jsonrpc

import java.io.InputStream
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.util

import io.circe.Json
import io.circe.syntax._
import monix.reactive.Observable
import scribe.LoggerSupport

final class BaseProtocolMessage(
    val header: Map[String, String],
    val content: Array[Byte]
) {

  override def equals(obj: scala.Any): Boolean =
    this.eq(obj.asInstanceOf[Object]) || {
      obj match {
        case m: BaseProtocolMessage =>
          header.equals(m.header) &&
            util.Arrays.equals(content, m.content)
      }
    }

  override def toString: String = {
    val bytes = MessageWriter.write(this)
    StandardCharsets.UTF_8.decode(bytes).toString
  }
}

object BaseProtocolMessage {
  val ContentLen = "Content-Length"

  def apply(msg: Message): BaseProtocolMessage =
    fromJson(msg.asJson)

  def fromJson(json: Json): BaseProtocolMessage =
    fromBytes(json.noSpaces.getBytes(StandardCharsets.UTF_8))

  def fromBytes(bytes: Array[Byte]): BaseProtocolMessage =
    new BaseProtocolMessage(
      Map("Content-Length" -> bytes.length.toString),
      bytes
    )

  def fromInputStream(
      in: InputStream,
      logger: LoggerSupport
  ): Observable[BaseProtocolMessage] =
    fromBytes(Observable.fromInputStream(in), logger)

  def fromBytes(
      in: Observable[Array[Byte]],
      logger: LoggerSupport
  ): Observable[BaseProtocolMessage] =
    fromByteBuffers(in.map(ByteBuffer.wrap), logger)

  def fromByteBuffers(
      in: Observable[ByteBuffer],
      logger: LoggerSupport
  ): Observable[BaseProtocolMessage] =
    in.executeAsync.liftByOperator(new BaseProtocolMessageParser(logger))
}
Example 47
Source File: MonixEnrichments.scala From lsp4s with Apache License 2.0
package scala.meta.jsonrpc

import java.io.IOException
import java.io.OutputStream
import java.nio.ByteBuffer

import monix.execution.Ack
import monix.execution.Cancelable
import monix.execution.Scheduler
import monix.reactive.Observable
import monix.reactive.Observer
import scribe.LoggerSupport

object MonixEnrichments {

  class ObservableCurrentValue[+A](obs: Observable[A])(implicit s: Scheduler)
      extends (() => A)
      with Cancelable {
    private var value: Any = _
    private val cancelable = obs.foreach(newValue => value = newValue)
    override def apply(): A = {
      if (value == null) {
        throw new NoSuchElementException(
          "Reading from empty Observable, consider using MulticastStrategy.behavior(initialValue)"
        )
      } else {
        value.asInstanceOf[A]
      }
    }
    override def cancel(): Unit = cancelable.cancel()
  }

  implicit class XtensionObservable[A](val obs: Observable[A]) extends AnyVal {

    def focus[B: cats.Eq](f: A => B): Observable[B] =
      obs.distinctUntilChangedByKey(f).map(f)

    def toFunction0()(implicit s: Scheduler): () => A =
      toObservableCurrentValue()

    def toObservableCurrentValue()(
        implicit s: Scheduler
    ): ObservableCurrentValue[A] =
      new ObservableCurrentValue[A](obs)
  }

  implicit class XtensionObserverCompanion[A](val `_`: Observer.type) extends AnyVal {
    def fromOutputStream(
        out: OutputStream,
        logger: LoggerSupport
    ): Observer.Sync[ByteBuffer] = {
      new Observer.Sync[ByteBuffer] {
        private[this] var isClosed: Boolean = false
        override def onNext(elem: ByteBuffer): Ack = {
          if (isClosed) Ack.Stop
          else {
            try {
              while (elem.hasRemaining) out.write(elem.get())
              out.flush()
              Ack.Continue
            } catch {
              case _: IOException =>
                logger.error("OutputStream closed!")
                isClosed = true
                Ack.Stop
            }
          }
        }
        override def onError(ex: Throwable): Unit = ()
        override def onComplete(): Unit = out.close()
      }
    }
  }
}
Example 48
Source File: OutwatchSpec.scala From outwatch with Apache License 2.0
package outwatch

import scala.concurrent.Future

import cats.effect.ContextShift
import cats.effect.IO
import monix.execution.Ack.Continue
import monix.execution.ExecutionModel.SynchronousExecution
import monix.execution.schedulers.TrampolineScheduler
import monix.execution.{Cancelable, Scheduler}
import monix.reactive.Observable
import org.scalajs.dom.{document, window}
import org.scalatest.BeforeAndAfterEach
import org.scalatest._
import outwatch.Deprecated.IgnoreWarnings.initEvent
import org.scalatest.flatspec.{ AnyFlatSpec, AsyncFlatSpec }
import org.scalatest.matchers.should.Matchers

trait EasySubscribe {

  implicit class Subscriber[T](obs: Observable[T]) {
    def apply(next: T => Unit)(implicit s: Scheduler): Cancelable = obs.subscribe { t =>
      next(t)
      Continue
    }
  }
}

// TODO: We need this mock until localStorage is implemented in jsdom (https://github.com/tmpvar/jsdom/pull/2076)
trait LocalStorageMock {
  import scala.collection.mutable
  import scala.scalajs.js

  if (js.isUndefined(window.localStorage)) {
    val storageObject = new js.Object {
      private val map = new mutable.HashMap[String, String]

      def getItem(key: String): String = map.getOrElse(key, null)

      def setItem(key: String, value: String): Unit = {
        map += key -> value
      }

      def removeItem(key: String): Unit = {
        map -= key
      }

      def clear(): Unit = map.clear()
    }

    js.Dynamic.global.window.updateDynamic("localStorage")(storageObject)
  }

  def dispatchStorageEvent(key: String, newValue: String, oldValue: String): Unit = {
    if (key == null) window.localStorage.clear()
    else window.localStorage.setItem(key, newValue)

    val event = document.createEvent("Events")
    initEvent(event)("storage", canBubbleArg = true, cancelableArg = false)
    event.asInstanceOf[js.Dynamic].key = key
    event.asInstanceOf[js.Dynamic].newValue = newValue
    event.asInstanceOf[js.Dynamic].oldValue = oldValue
    event.asInstanceOf[js.Dynamic].storageArea = window.localStorage
    window.dispatchEvent(event)
    ()
  }
}

trait OutwatchSpec extends Matchers with BeforeAndAfterEach with EasySubscribe with LocalStorageMock { self: Suite =>

  implicit val scheduler: TrampolineScheduler = TrampolineScheduler(Scheduler.global, SynchronousExecution)
  implicit val cs: ContextShift[IO] = IO.contextShift(scheduler)

  override def beforeEach(): Unit = {

    document.body.innerHTML = ""

    window.localStorage.clear()

    // prepare body with <div id="app"></div>
    val root = document.createElement("div")
    root.id = "app"
    document.body.appendChild(root)
    ()
  }

}

abstract class JSDomSpec extends AnyFlatSpec with OutwatchSpec {
  implicit def executionContext = scheduler
}

abstract class JSDomAsyncSpec extends AsyncFlatSpec with OutwatchSpec {
  override def executionContext = scheduler

  implicit def ioAssertionToFutureAssertion(io: IO[Assertion]): Future[Assertion] = io.unsafeToFuture()
}
Example 49
Source File: Cp.scala From benchmarks with Apache License 2.0
package com.rossabaker
package benchmarks

import org.openjdk.jmh.annotations._

@State(Scope.Thread)
@Fork(2)
@Measurement(iterations = 10)
@Warmup(iterations = 10)
@Threads(1)
class Cp extends BenchmarkUtils {
  @Benchmark
  def fs2Sync(): Unit = {
    import _root_.fs2._, Stream._
    import java.nio.file.Paths
    io.file.readAll[Task](Paths.get("testdata/lorem-ipsum.txt"), 4096)
      .to(io.file.writeAll[Task](Paths.get("out/lorem-ipsum.txt")))
      .run
      .unsafeRun
  }

  @Benchmark
  def fs2Async(): Unit = {
    import _root_.fs2._, Stream._
    import java.nio.file.Paths
    io.file.readAllAsync[Task](Paths.get("testdata/lorem-ipsum.txt"), 4096)
      .to(io.file.writeAllAsync[Task](Paths.get("out/lorem-ipsum.txt")))
      .run
      .unsafeRun
  }

  @Benchmark
  def scalazStreamIo(): Unit = {
    import _root_.scalaz.stream._, Process._
    constant(4096)
      .through(io.fileChunkR("testdata/lorem-ipsum.txt"))
      .to(io.fileChunkW("out/lorem-ipsum.txt"))
      .run
      .unsafePerformSync
  }

  @Benchmark
  def scalazStreamNio(): Unit = {
    import _root_.scalaz.stream._, Process._
    constant(4096)
      .through(nio.file.chunkR("testdata/lorem-ipsum.txt"))
      .to(nio.file.chunkW("out/lorem-ipsum.txt"))
      .run
      .unsafePerformSync
  }
}
Example 50
Source File: WavesBlockchainCachingClient.scala From matcher with MIT License
package com.wavesplatform.dex.grpc.integration.clients

import java.net.InetAddress
import java.time.Duration

import com.wavesplatform.dex.domain.account.Address
import com.wavesplatform.dex.domain.asset.Asset
import com.wavesplatform.dex.domain.bytes.ByteStr
import com.wavesplatform.dex.domain.order.Order
import com.wavesplatform.dex.domain.transaction.ExchangeTransaction
import com.wavesplatform.dex.domain.utils.ScorexLogging
import com.wavesplatform.dex.grpc.integration.caches.{AssetDescriptionsCache, FeaturesCache}
import com.wavesplatform.dex.grpc.integration.clients.WavesBlockchainClient.SpendableBalanceChanges
import com.wavesplatform.dex.grpc.integration.dto.BriefAssetDescription
import monix.execution.Scheduler
import monix.reactive.Observable

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}

class WavesBlockchainCachingClient(underlying: WavesBlockchainClient[Future], defaultCacheExpiration: FiniteDuration, monixScheduler: Scheduler)(
    implicit grpcExecutionContext: ExecutionContext)
    extends WavesBlockchainClient[Future]
    with ScorexLogging {

  private val cacheExpiration: Duration = Duration.ofMillis(defaultCacheExpiration.toMillis)

  private val featuresCache = new FeaturesCache(underlying.isFeatureActivated, invalidationPredicate = !_) // we don't keep knowledge about unactivated features
  private val assetDescriptionsCache = new AssetDescriptionsCache(underlying.assetDescription, cacheExpiration)

  // TODO remove after release 2.1.3
  override def spendableBalance(address: Address, asset: Asset): Future[Long] = underlying.spendableBalance(address, asset)

  override def spendableBalanceChanges: Observable[SpendableBalanceChanges] = underlying.spendableBalanceChanges
  override def realTimeBalanceChanges: Observable[WavesBlockchainClient.BalanceChanges] = underlying.realTimeBalanceChanges
  override def spendableBalances(address: Address, assets: Set[Asset]): Future[Map[Asset, Long]] = underlying.spendableBalances(address, assets)
  override def allAssetsSpendableBalance(address: Address): Future[Map[Asset, Long]] = underlying.allAssetsSpendableBalance(address)

  override def isFeatureActivated(id: Short): Future[Boolean] = featuresCache.get(id) map Boolean2boolean

  override def assetDescription(asset: Asset.IssuedAsset): Future[Option[BriefAssetDescription]] = assetDescriptionsCache.get(asset)

  override def hasScript(asset: Asset.IssuedAsset): Future[Boolean] = underlying.hasScript(asset)
  override def runScript(asset: Asset.IssuedAsset, input: ExchangeTransaction): Future[RunScriptResult] = underlying.runScript(asset, input)

  override def hasScript(address: Address): Future[Boolean] = underlying.hasScript(address)
  override def runScript(address: Address, input: Order): Future[RunScriptResult] = underlying.runScript(address, input)

  override def wereForged(txIds: Seq[ByteStr]): Future[Map[ByteStr, Boolean]] = underlying.wereForged(txIds)
  override def broadcastTx(tx: ExchangeTransaction): Future[Boolean] = underlying.broadcastTx(tx)

  override def forgedOrder(orderId: ByteStr): Future[Boolean] = underlying.forgedOrder(orderId)

  override def getNodeAddress: Future[InetAddress] = underlying.getNodeAddress

  override def close(): Future[Unit] = underlying.close()
}
Example 51
Source File: ObservableSpec.scala From gbf-raidfinder with MIT License
package walfie.gbf.raidfinder.util

import monix.eval.Task
import monix.execution.Ack
import monix.execution.schedulers.TestScheduler
import monix.reactive.{Observable, Observer}
import org.scalatest._
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.Matchers._

import scala.concurrent.Future

class ObservableSpec extends FreeSpec with ScalaFutures {
  case class Item(id: Int)

  object ItemRepository {
    def getItems(count: Int, pageNum: Int): Future[Seq[Item]] =
      Future.successful {
        (0 until count).map(i => Item(pageNum * count + i))
      }
  }

  // This was added as [[ObservableUtil.fromAsyncStateAction]] before
  // [[Observable.fromAsyncStateAction]] existed in Monix. Keeping these
  // tests around because why not.
  "fromAsyncStateAction" - {
    implicit val scheduler = TestScheduler()

    "yield an observable" in {
      val itemsPerPage = 5

      val observable = Observable.fromAsyncStateAction { pageNum: Int =>
        val nextPage = pageNum + 1
        val itemsF = ItemRepository.getItems(itemsPerPage, pageNum)
        Task.fromFuture(itemsF).map(_ -> nextPage)
      }(0)

      val resultF = observable.take(3).toListL.runAsync
      scheduler.tick()

      resultF.futureValue shouldBe Seq(
        (0 to 4).map(Item.apply),
        (5 to 9).map(Item.apply),
        (10 to 14).map(Item.apply)
      )
    }

    "stop on error" in {
      implicit val scheduler = TestScheduler()

      // Create an observable counter that errors when it gets to 5
      val error = new RuntimeException("Oh no!")
      val observable = Observable
        .fromAsyncStateAction[Int, Int] { counter: Int =>
          Task.fromFuture {
            if (counter == 5) Future.failed(error)
            else Future.successful(counter -> (counter + 1))
          }
        }(0)

      val observer = new TestObserver[Int]

      observable.take(10).subscribe(observer)
      scheduler.tick()

      observer.received shouldBe (0 to 4)
    }
  }
}
Example 52
Source File: DataProducer.scala From monix-sample with Apache License 2.0
package engine

import monix.execution.Cancelable
import monix.reactive.Observable
import monix.reactive.observers.Subscriber
import shared.models.Signal
import util.Random

import scala.concurrent.duration._

final class DataProducer(interval: FiniteDuration, seed: Long) extends Observable[Signal] {

  override def unsafeSubscribeFn(subscriber: Subscriber[Signal]): Cancelable = {
    import subscriber.{scheduler => s}

    val random = Observable
      .fromStateAction(Random.intInRange(-20, 20))(s.currentTimeMillis() + seed)
      .flatMap { x =>
        Observable.now(x).delaySubscription(interval)
      }

    val generator = random.scan(Signal(0, s.currentTimeMillis())) {
      case (Signal(value, _), rnd) =>
        Signal(value + rnd, s.currentTimeMillis())
    }

    generator
      .drop(1)
      .unsafeSubscribeFn(subscriber)
  }
}
Example 53
Source File: Runner.scala From quill with Apache License 2.0
package io.getquill.context.monix

import io.getquill.context.ContextEffect
import monix.eval.Task
import monix.execution.Scheduler
import monix.reactive.Observable

object Runner {
  def default = new Runner {}

  def using(scheduler: Scheduler) = new Runner {
    override def schedule[T](t: Task[T]): Task[T] = t.executeOn(scheduler, true)
    override def boundary[T](t: Task[T]): Task[T] = t.executeOn(scheduler, true)
    override def scheduleObservable[T](o: Observable[T]): Observable[T] = o.executeOn(scheduler, true)
  }
}

trait Runner extends ContextEffect[Task] {
  override def wrap[T](t: => T): Task[T] = Task(t)
  override def push[A, B](result: Task[A])(f: A => B): Task[B] = result.map(f)
  override def seq[A, B](list: List[Task[A]]): Task[List[A]] = Task.sequence(list)
  def schedule[T](t: Task[T]): Task[T] = t
  def scheduleObservable[T](o: Observable[T]): Observable[T] = o
  def boundary[T](t: Task[T]): Task[T] = t.asyncBoundary
  def wrapClose(t: => Unit): Task[Unit] = Task(t)
}
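All three hooks reduce to standard scheduler operators: executeOn pins a Task (or Observable) to a specific scheduler, and asyncBoundary inserts a fork back to the default one. A tiny demonstration of the difference (the pool name is illustrative, and the printed thread names will vary):

import monix.eval.Task
import monix.execution.Scheduler
import monix.execution.Scheduler.Implicits.global
import scala.concurrent.Await
import scala.concurrent.duration._

object SchedulingSketch extends App {
  val io = Scheduler.io(name = "my-io") // dedicated pool; the name is just a label
  val t = for {
    a <- Task(Thread.currentThread.getName).executeOn(io)
    _ <- Task.unit.asyncBoundary // shift the rest back to the default scheduler
    b <- Task(Thread.currentThread.getName)
  } yield (a, b)
  println(Await.result(t.runToFuture, 5.seconds))
}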
Example 54
Source File: MonixContext.scala From quill with Apache License 2.0
package io.getquill.context.monix

import io.getquill.NamingStrategy
import io.getquill.context.{ Context, StreamingContext }
import monix.eval.Task
import monix.reactive.Observable

trait MonixContext[Idiom <: io.getquill.idiom.Idiom, Naming <: NamingStrategy] extends Context[Idiom, Naming]
  with StreamingContext[Idiom, Naming] {

  override type StreamResult[T] = Observable[T]
  override type Result[T] = Task[T]
  override type RunQueryResult[T] = List[T]
  override type RunQuerySingleResult[T] = T

  // Need explicit return-type annotations due to scala/bug#8356. Otherwise macro system will not understand Result[Long]=Task[Long] etc...
  def executeQuery[T](sql: String, prepare: Prepare = identityPrepare, extractor: Extractor[T] = identityExtractor): Task[List[T]]
  def executeQuerySingle[T](sql: String, prepare: Prepare = identityPrepare, extractor: Extractor[T] = identityExtractor): Task[T]

  protected val effect: Runner
}
Example 55
Source File: CassandraStreamContext.scala From quill with Apache License 2.0
package io.getquill

import com.datastax.driver.core.{ Cluster, ResultSet, Row }
import com.typesafe.config.Config
import io.getquill.context.cassandra.util.FutureConversions._
import io.getquill.util.{ ContextLogger, LoadConfig }
import monix.eval.Task
import monix.execution.Scheduler
import monix.execution.Scheduler.Implicits
import monix.reactive.Observable

import scala.jdk.CollectionConverters._
import scala.util.{ Failure, Success }

class CassandraStreamContext[N <: NamingStrategy](
  naming:                     N,
  cluster:                    Cluster,
  keyspace:                   String,
  preparedStatementCacheSize: Long
)
  extends CassandraClusterSessionContext[N](naming, cluster, keyspace, preparedStatementCacheSize) {

  def this(naming: N, config: CassandraContextConfig) = this(naming, config.cluster, config.keyspace, config.preparedStatementCacheSize)
  def this(naming: N, config: Config) = this(naming, CassandraContextConfig(config))
  def this(naming: N, configPrefix: String) = this(naming, LoadConfig(configPrefix))

  private val logger = ContextLogger(classOf[CassandraStreamContext[_]])

  override type Result[T] = Observable[T]
  override type RunQueryResult[T] = T
  override type RunQuerySingleResult[T] = T
  override type RunActionResult = Unit
  override type RunBatchActionResult = Unit

  protected def page(rs: ResultSet): Task[Iterable[Row]] = Task.defer {
    val available = rs.getAvailableWithoutFetching
    val page = rs.asScala.take(available)

    if (rs.isFullyFetched)
      Task.now(page)
    else
      Task.fromFuture(rs.fetchMoreResults().asScala(Implicits.global)).map(_ => page)
  }

  def executeQuery[T](cql: String, prepare: Prepare = identityPrepare, extractor: Extractor[T] = identityExtractor): Observable[T] = {
    Observable
      .fromTask(prepareRowAndLog(cql, prepare))
      .mapEvalF(p => session.executeAsync(p).asScala(Implicits.global))
      .flatMap(Observable.fromAsyncStateAction((rs: ResultSet) => page(rs).map((_, rs)))(_))
      .takeWhile(_.nonEmpty)
      .flatMap(Observable.fromIterable)
      .map(extractor)
  }

  def executeQuerySingle[T](cql: String, prepare: Prepare = identityPrepare, extractor: Extractor[T] = identityExtractor): Observable[T] =
    executeQuery(cql, prepare, extractor)

  def executeAction[T](cql: String, prepare: Prepare = identityPrepare): Observable[Unit] = {
    Observable
      .fromTask(prepareRowAndLog(cql, prepare))
      .mapEvalF(p => session.executeAsync(p).asScala(Implicits.global))
      .map(_ => ())
  }

  def executeBatchAction(groups: List[BatchGroup]): Observable[Unit] =
    Observable.fromIterable(groups).flatMap {
      case BatchGroup(cql, prepare) =>
        Observable.fromIterable(prepare)
          .flatMap(executeAction(cql, _))
          .map(_ => ())
    }

  private def prepareRowAndLog(cql: String, prepare: Prepare = identityPrepare): Task[PrepareRow] = {
    Task.async0[PrepareRow] { (scheduler, callback) =>
      implicit val executor: Scheduler = scheduler

      super.prepareAsync(cql)
        .map(prepare)
        .onComplete {
          case Success((params, bs)) =>
            logger.logQuery(cql, params)
            callback.onSuccess(bs)
          case Failure(ex) =>
            callback.onError(ex)
        }
    }
  }
}
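The paging pipeline inside executeQuery is a general recipe worth noting: fromAsyncStateAction pulls pages while threading the cursor through as state, takeWhile stops at the first empty page, and flatMap(fromIterable) flattens pages into rows. The same recipe against a fake paged data source:

import monix.eval.Task
import monix.execution.Scheduler.Implicits.global
import monix.reactive.Observable
import scala.concurrent.Await
import scala.concurrent.duration._

object PagingSketch extends App {
  val data = (1 to 10).toVector
  // Fake driver call: fetch up to 4 rows starting at `offset`.
  def fetchPage(offset: Int): Task[Vector[Int]] = Task(data.slice(offset, offset + 4))

  val rows = Observable
    .fromAsyncStateAction((offset: Int) => fetchPage(offset).map(page => (page, offset + page.size)))(0)
    .takeWhile(_.nonEmpty)            // stop once a page comes back empty
    .flatMap(Observable.fromIterable) // flatten pages into rows
  println(Await.result(rows.toListL.runToFuture, 5.seconds))
}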
Example 56
Source File: CassandraMonixContext.scala From quill with Apache License 2.0
package io.getquill

import com.datastax.driver.core.{ Cluster, ResultSet, Row }
import com.typesafe.config.Config
import io.getquill.context.cassandra.CqlIdiom
import io.getquill.context.monix.{ MonixContext, Runner }
import io.getquill.util.{ ContextLogger, LoadConfig }
import io.getquill.context.cassandra.util.FutureConversions._
import monix.eval.Task
import monix.execution.Scheduler
import monix.reactive.Observable

import scala.jdk.CollectionConverters._
import scala.util.{ Failure, Success }

class CassandraMonixContext[N <: NamingStrategy](
  naming:                     N,
  cluster:                    Cluster,
  keyspace:                   String,
  preparedStatementCacheSize: Long
)
  extends CassandraClusterSessionContext[N](naming, cluster, keyspace, preparedStatementCacheSize)
  with MonixContext[CqlIdiom, N] {

  // not using this here
  override val effect = Runner.default

  def this(naming: N, config: CassandraContextConfig) = this(naming, config.cluster, config.keyspace, config.preparedStatementCacheSize)
  def this(naming: N, config: Config) = this(naming, CassandraContextConfig(config))
  def this(naming: N, configPrefix: String) = this(naming, LoadConfig(configPrefix))

  private val logger = ContextLogger(classOf[CassandraMonixContext[_]])

  override type StreamResult[T] = Observable[T]
  override type RunActionResult = Unit
  override type Result[T] = Task[T]
  override type RunQueryResult[T] = List[T]
  override type RunQuerySingleResult[T] = T
  override type RunBatchActionResult = Unit

  protected def page(rs: ResultSet): Task[Iterable[Row]] = Task.defer {
    val available = rs.getAvailableWithoutFetching
    val page = rs.asScala.take(available)

    if (rs.isFullyFetched)
      Task.now(page)
    else
      Task.fromFuture(rs.fetchMoreResults().asScalaWithDefaultGlobal).map(_ => page)
  }

  def streamQuery[T](fetchSize: Option[Int], cql: String, prepare: Prepare = identityPrepare, extractor: Extractor[T] = identityExtractor): Observable[T] = {
    Observable
      .fromTask(prepareRowAndLog(cql, prepare))
      .mapEvalF(p => session.executeAsync(p).asScalaWithDefaultGlobal)
      .flatMap(Observable.fromAsyncStateAction((rs: ResultSet) => page(rs).map((_, rs)))(_))
      .takeWhile(_.nonEmpty)
      .flatMap(Observable.fromIterable)
      .map(extractor)
  }

  def executeQuery[T](cql: String, prepare: Prepare = identityPrepare, extractor: Extractor[T] = identityExtractor): Task[List[T]] = {
    streamQuery[T](None, cql, prepare, extractor)
      .foldLeftL(List[T]())({ case (l, r) => r +: l }).map(_.reverse)
  }

  def executeQuerySingle[T](cql: String, prepare: Prepare = identityPrepare, extractor: Extractor[T] = identityExtractor): Task[T] =
    executeQuery(cql, prepare, extractor).map(handleSingleResult(_))

  def executeAction[T](cql: String, prepare: Prepare = identityPrepare): Task[Unit] = {
    prepareRowAndLog(cql, prepare)
      .flatMap(r => Task.fromFuture(session.executeAsync(r).asScalaWithDefaultGlobal))
      .map(_ => ())
  }

  def executeBatchAction(groups: List[BatchGroup]): Task[Unit] =
    Observable.fromIterable(groups).flatMap {
      case BatchGroup(cql, prepare) =>
        Observable.fromIterable(prepare)
          .flatMap(prep => Observable.fromTask(executeAction(cql, prep)))
          .map(_ => ())
    }.completedL

  private def prepareRowAndLog(cql: String, prepare: Prepare = identityPrepare): Task[PrepareRow] = {
    Task.async0[PrepareRow] { (scheduler, callback) =>
      implicit val executor: Scheduler = scheduler

      super.prepareAsync(cql)
        .map(prepare)
        .onComplete {
          case Success((params, bs)) =>
            logger.logQuery(cql, params)
            callback.onSuccess(bs)
          case Failure(ex) =>
            callback.onError(ex)
        }
    }
  }
}
Example 57
Source File: QueryResultTypeCassandraMonixSpec.scala From quill with Apache License 2.0
package io.getquill.context.cassandra.monix

import io.getquill.context.cassandra.QueryResultTypeCassandraSpec
import monix.eval.Task
import monix.execution.Scheduler.Implicits.global
import monix.reactive.Observable

class QueryResultTypeCassandraMonixSpec extends QueryResultTypeCassandraSpec {

  val context = testMonixDB

  import context._

  def result[T](t: Task[T]) =
    await(t.runToFuture(global))

  def result[T](t: Observable[T]) =
    await(t.foldLeftL(List.empty[T])(_ :+ _).runToFuture)

  override def beforeAll = {
    result(context.run(deleteAll))
    result(context.run(liftQuery(entries).foreach(e => insert(e))))
    ()
  }

  "query" in {
    result(context.run(selectAll)) mustEqual entries
  }

  "stream" in {
    result(context.stream(selectAll)) mustEqual entries
  }

  "querySingle" - {
    "size" in {
      result(context.run(entitySize)) mustEqual 3
    }
    "parametrized size" in {
      result(context.run(parametrizedSize(lift(10000)))) mustEqual 0
    }
  }
}
Example 58
Source File: EncodingSpec.scala From quill with Apache License 2.0
package io.getquill.context.cassandra.streaming

import io.getquill.context.cassandra.EncodingSpecHelper
import monix.reactive.Observable
import io.getquill.Query

class EncodingSpec extends EncodingSpecHelper {
  "encodes and decodes types" - {
    "stream" in {
      import monix.execution.Scheduler.Implicits.global
      import testStreamDB._
      val result =
        for {
          _ <- testStreamDB.run(query[EncodingTestEntity].delete)
          inserts = Observable(insertValues: _*)
          _ <- Observable.fromTask(testStreamDB.run(liftQuery(insertValues).foreach(e => query[EncodingTestEntity].insert(e))).countL)
          result <- testStreamDB.run(query[EncodingTestEntity])
        } yield {
          result
        }
      val f = result.foldLeftL(List.empty[EncodingTestEntity])(_ :+ _).runToFuture
      verify(await(f))
    }
  }

  "encodes collections" - {
    "stream" in {
      import monix.execution.Scheduler.Implicits.global
      import testStreamDB._
      val q = quote {
        (list: Query[Int]) =>
          query[EncodingTestEntity].filter(t => list.contains(t.id))
      }
      val result =
        for {
          _ <- testStreamDB.run(query[EncodingTestEntity].delete)
          inserts = Observable(insertValues: _*)
          _ <- Observable.fromTask(testStreamDB.run(liftQuery(insertValues).foreach(e => query[EncodingTestEntity].insert(e))).countL)
          result <- testStreamDB.run(q(liftQuery(insertValues.map(_.id))))
        } yield {
          result
        }
      val f = result.foldLeftL(List.empty[EncodingTestEntity])(_ :+ _).runToFuture
      verify(await(f))
    }
  }
}
Example 59
Source File: QueryResultTypeCassandraStreamSpec.scala From quill with Apache License 2.0
package io.getquill.context.cassandra.streaming

import io.getquill.context.cassandra.QueryResultTypeCassandraSpec
import monix.execution.Scheduler.Implicits.global
import monix.reactive.Observable

class QueryResultTypeCassandraStreamSpec extends QueryResultTypeCassandraSpec {

  val context = testStreamDB

  import context._

  def result[T](t: Observable[T]) =
    await(t.foldLeftL(List.empty[T])(_ :+ _).runToFuture)

  override def beforeAll = {
    result(context.run(deleteAll))
    result(context.run(liftQuery(entries).foreach(e => insert(e))))
    ()
  }

  "query" in {
    result(context.run(selectAll)) mustEqual entries
  }

  "querySingle" - {
    "size" in {
      result(context.run(entitySize)) mustEqual List(3)
    }
    "parametrized size" in {
      result(context.run(parametrizedSize(lift(10000)))) mustEqual List(0)
    }
  }
}
Example 60
Source File: DecodeNullSpec.scala From quill with Apache License 2.0
package io.getquill.context.cassandra.streaming

import io.getquill._
import monix.reactive.Observable

class DecodeNullSpec extends Spec {

  "no default values when reading null" - {
    "stream" in {
      import monix.execution.Scheduler.Implicits.global
      import testStreamDB._
      val writeEntities = quote(querySchema[DecodeNullTestWriteEntity]("DecodeNullTestEntity"))

      val result =
        for {
          _ <- testStreamDB.run(writeEntities.delete)
          _ <- Observable.fromTask(testStreamDB.run(writeEntities.insert(lift(insertValue))).countL)
          result <- testStreamDB.run(query[DecodeNullTestEntity])
        } yield {
          result
        }
      intercept[IllegalStateException] {
        await {
          result.headL.runToFuture
        }
      }
    }
  }

  case class DecodeNullTestEntity(id: Int, value: Int)

  case class DecodeNullTestWriteEntity(id: Int, value: Option[Int])

  val insertValue = DecodeNullTestWriteEntity(0, None)
}
Example 61
Source File: PeopleMonixSpec.scala From quill with Apache License 2.0
package io.getquill

import io.getquill.context.monix.MonixJdbcContext
import io.getquill.context.sql.PeopleSpec
import monix.execution.Scheduler
import monix.reactive.Observable

trait PeopleMonixSpec extends PeopleSpec {

  implicit val scheduler = Scheduler.global

  val context: MonixJdbcContext[_, _]

  import context._

  def collect[T](o: Observable[T]) =
    o.foldLeft(List[T]())({ case (l, elem) => elem +: l })
      .firstL
      .runSyncUnsafe()

  val `Ex 11 query` = quote(query[Person])
  val `Ex 11 expected` = peopleEntries
}
Example 62
Source File: MainRunner.scala From cornichon with Apache License 2.0
package com.github.agourlay.cornichon.framework

import java.util

import cats.syntax.apply._
import com.github.agourlay.cornichon.CornichonFeature
import com.github.agourlay.cornichon.core.CornichonError
import com.github.agourlay.cornichon.framework.CornichonFeatureRunner._
import com.monovore.decline._
import com.openpojo.reflection.PojoClass
import com.openpojo.reflection.impl.PojoClassFactory
import monix.execution.Scheduler.Implicits.global
import monix.reactive.Observable
import sbt.testing.TestSelector

import scala.jdk.CollectionConverters._
import scala.concurrent.Await
import scala.concurrent.duration.Duration

object MainRunner {

  private val packageToScanOpts = Opts.option[String]("packageToScan", help = "Package containing the feature files.")

  private val reportsOutputDirOpts = Opts.option[String]("reportsOutputDir", help = "Output directory for junit.xml files (default to current).").withDefault(".")

  private val featureParallelismOpts = Opts.option[Int]("featureParallelism", help = "Number of feature running in parallel (default=1).")
    .validate("must be positive")(_ > 0).withDefault(1)

  private val seedOpts = Opts.option[Long]("seed", help = "Seed to use for starting random processes.").orNone

  private val scenarioNameFilterOpts = Opts.option[String]("scenarioNameFilter", help = "Filter scenario to run by name.").orNone

  private val mainRunnerCommand = Command(
    name = "cornichon-test-framework",
    header = "Run your cornichon features without SBT."
  )((packageToScanOpts, reportsOutputDirOpts, featureParallelismOpts, seedOpts, scenarioNameFilterOpts).tupled)

  def main(args: Array[String]): Unit = mainRunnerCommand.parse(args.toSeq, sys.env) match {
    case Left(help) =>
      System.err.println(help)
      sys.exit(1)
    case Right((packageToScan, reportsOutputDir, featureParallelism, explicitSeed, scenarioNameFilter)) =>
      JUnitXmlReporter.checkReportsFolder(reportsOutputDir)
      println("Starting feature classes discovery")
      val classes = discoverFeatureClasses(packageToScan)
      println(s"Found ${classes.size} feature classes")
      val scenarioNameFilterSet = scenarioNameFilter.toSet
      val f = Observable.fromIterable(classes)
        .mapParallelUnordered(featureParallelism) { featureClass =>
          val startedAt = System.currentTimeMillis()
          val featureTypeName = featureClass.getTypeName
          val featureInfo = FeatureInfo(featureTypeName, featureClass, CornichonFingerprint, new TestSelector(featureTypeName))
          val eventHandler = new RecordEventHandler()

          loadAndExecute(featureInfo, eventHandler, explicitSeed, scenarioNameFilterSet)
            .timed
            .map { case (duration, res) =>
              JUnitXmlReporter.writeJunitReport(reportsOutputDir, featureTypeName, duration, startedAt, eventHandler.recorded) match {
                case Left(e) =>
                  println(s"ERROR: Could not generate JUnit xml report for $featureTypeName due to\n${CornichonError.genStacktrace(e)}")
                case Right(_) =>
                  ()
              }
              res
            }
        }
        .foldLeftL(true)(_ && _)
        .runToFuture

      if (Await.result(f, Duration.Inf)) System.exit(0) else System.exit(1)
  }

  // https://stackoverflow.com/questions/492184/how-do-you-find-all-subclasses-of-a-given-class-in-java
  def discoverFeatureClasses(packageToExplore: String): List[Class[_]] = {
    val classes: util.List[PojoClass] = PojoClassFactory.enumerateClassesByExtendingType(packageToExplore, classOf[CornichonFeature], null)
    classes.iterator().asScala.collect { case pojo if pojo.isConcrete => pojo.getClazz }.toList
  }
}
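The runner's concurrency lives in a single operator: mapParallelUnordered executes up to featureParallelism feature runs at once, and foldLeftL(true)(_ && _) collapses the per-feature booleans into one exit status. The skeleton alone, with a dummy test function:

import monix.eval.Task
import monix.execution.Scheduler.Implicits.global
import monix.reactive.Observable
import scala.concurrent.Await
import scala.concurrent.duration._

object ParallelRunSketch extends App {
  // Pretend test: every 7th "feature" fails.
  def runOne(n: Int): Task[Boolean] = Task(n % 7 != 0)
  val allGreen = Observable
    .fromIterable(1 to 20)
    .mapParallelUnordered(parallelism = 4)(runOne)
    .foldLeftL(true)(_ && _)
  println(Await.result(allGreen.runToFuture, 10.seconds))
}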
Example 63
Source File: RepeatConcurrentlyStep.scala From cornichon with Apache License 2.0 | 5 votes |
package com.github.agourlay.cornichon.steps.wrapped

import cats.data.StateT
import cats.instances.list._
import cats.syntax.foldable._
import cats.syntax.either._
import com.github.agourlay.cornichon.core._
import com.github.agourlay.cornichon.core.Done._
import monix.eval.Task
import monix.reactive.Observable

import scala.concurrent.duration.FiniteDuration
import scala.util.control.NonFatal

case class RepeatConcurrentlyStep(times: Int, nested: List[Step], parallelism: Int, maxTime: FiniteDuration) extends WrapperStep {
  require(parallelism > 0, "repeat concurrently block must contain a positive 'parallelism' factor")
  require(times > 0, "repeat concurrently block must contain a positive 'times' factor")
  require(times >= parallelism, "repeat concurrently block must contain a 'parallelism' factor <= the number of repeat 'times'")

  val title = s"Repeat concurrently block '$times' times with parallel factor '$parallelism' and maxTime '$maxTime'"

  override val stateUpdate: StepState = StateT { runState =>
    val nestedRunState = runState.nestedContext
    val initialDepth = runState.depth
    Observable.fromIterable(List.fill(times)(Done))
      .mapParallelUnordered(parallelism)(_ => ScenarioRunner.runStepsShortCircuiting(nested, nestedRunState))
      .takeUntil(Observable.evalDelayed(maxTime, Done))
      .toListL
      .timed
      .flatMap { case (executionTime, results) =>
        if (results.size != times) {
          val error = RepeatConcurrentlyTimeout(times, results.size)
          val errorState = runState.recordLog(failedTitleLog(initialDepth)).recordLog(FailureLogInstruction(error.renderedMessage, initialDepth, Some(executionTime)))
          val failedStep = FailedStep.fromSingle(this, error)
          Task.now(errorState -> failedStep.asLeft)
        } else {
          val failedStepRuns = results.collect { case (s, r @ Left(_)) => (s, r) }
          failedStepRuns.headOption.fold[Task[(RunState, Either[FailedStep, Done])]] {
            val successStepsRun = results.collect { case (s, r @ Right(_)) => (s, r) }
            val allRunStates = successStepsRun.map(_._1)
            // TODO should all logs be merged?
            // all runs were successful, we pick the first one for the logs
            val firstStateLog = allRunStates.head.logStack
            val wrappedLogStack = SuccessLogInstruction(s"Repeat concurrently block succeeded", initialDepth, Some(executionTime)) +: firstStateLog :+ successTitleLog(initialDepth)
            // TODO merge all sessions together - requires diffing Sessions, otherwise it produces a huge map full of duplicates as they all started from the same state.
            val updatedSession = allRunStates.head.session
            // merge all cleanup steps
            val allCleanupSteps = allRunStates.foldMap(_.cleanupSteps)
            val successState = runState.withSession(updatedSession).recordLogStack(wrappedLogStack).registerCleanupSteps(allCleanupSteps)
            Task.now(successState -> rightDone)
          } { case (s, failedStep) =>
            val ratio = s"'${failedStepRuns.size}/$times' run(s)"
            val wrapLogStack = FailureLogInstruction(s"Repeat concurrently block failed for $ratio", initialDepth) +: s.logStack :+ failedTitleLog(initialDepth)
            Task.now(runState.mergeNested(s, wrapLogStack) -> failedStep)
          }
        }
      }.onErrorRecover { case NonFatal(e) =>
        val failedStep = FailedStep.fromSingle(this, RepeatConcurrentlyError(e))
        (runState.recordLog(failedTitleLog(initialDepth)), failedStep.asLeft)
      }
  }
}

case class RepeatConcurrentlyTimeout(times: Int, success: Int) extends CornichonError {
  lazy val baseErrorMessage = s"Repeat concurrently block did not reach completion in time: $success/$times finished"
}

case class RepeatConcurrentlyError(cause: Throwable) extends CornichonError {
  lazy val baseErrorMessage = "Repeat concurrently block has thrown an error"
  override val causedBy = CornichonError.fromThrowable(cause) :: Nil
}
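The timeout handling above works by racing the parallel runs against a delayed stop signal: takeUntil truncates the stream once Observable.evalDelayed fires, and comparing the number of collected results against the expected count reveals whether the deadline was hit. A minimal sketch of this pattern outside cornichon (all names are made up):

import monix.eval.Task
import monix.execution.Scheduler.Implicits.global
import monix.reactive.Observable

import scala.concurrent.duration._

object DeadlineSketch extends App {
  val results = Observable.fromIterable(1 to 10)
    .mapParallelUnordered(2)(i => Task(i).delayExecution((i * 100).millis))
    .takeUntil(Observable.evalDelayed(450.millis, ())) // the deadline signal
    .toListL
    .runSyncUnsafe()

  // Only the tasks that completed before the deadline are kept;
  // results.size != 10 signals a timeout, mirroring the check above.
  println(s"${results.size}/10 completed before the deadline")
}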
Example 64
Source File: ConcurrentlyStep.scala From cornichon with Apache License 2.0 | 5 votes |
package com.github.agourlay.cornichon.steps.wrapped

import cats.data.StateT
import cats.instances.list._
import cats.syntax.foldable._
import com.github.agourlay.cornichon.core._
import com.github.agourlay.cornichon.core.Done._
import monix.eval.Task
import monix.reactive.Observable

import scala.concurrent.duration.FiniteDuration
import scala.util.control.NonFatal

case class ConcurrentlyStep(nested: List[Step], maxTime: FiniteDuration) extends WrapperStep {

  val title = s"Concurrently block with maxTime '$maxTime'"

  override val stateUpdate: StepState = StateT { runState =>
    val nestedRunState = runState.nestedContext
    val initialDepth = runState.depth
    Observable.fromIterable(nested)
      .mapParallelUnordered(nested.size)(s => ScenarioRunner.runStepsShortCircuiting(s :: Nil, nestedRunState))
      .takeUntil(Observable.evalDelayed(maxTime, ()))
      .toListL
      .timed
      .flatMap { case (executionTime, results) =>
        if (results.size != nested.size) {
          val error = ConcurrentlyTimeout(nested.size, results.size)
          val errorState = runState.recordLog(failedTitleLog(initialDepth)).recordLog(FailureLogInstruction(error.renderedMessage, initialDepth, Some(executionTime)))
          val failedStep = FailedStep.fromSingle(this, error)
          Task.now(errorState -> Left(failedStep))
        } else {
          val failedStepRuns = results.collect { case (s, r @ Left(_)) => (s, r) }
          failedStepRuns.headOption.fold[Task[(RunState, Either[FailedStep, Done])]] {
            val successStepsRun = results.collect { case (s, r @ Right(_)) => (s, r) }
            val allRunStates = successStepsRun.map(_._1)
            // TODO should all logs be merged?
            // all runs were successful, we pick the first one for the logs
            val firstStateLog = allRunStates.head.logStack
            val wrappedLogStack = SuccessLogInstruction(s"Concurrently block succeeded", initialDepth, Some(executionTime)) +: firstStateLog :+ successTitleLog(initialDepth)
            // TODO merge all sessions together - requires diffing Sessions, otherwise it produces a huge map full of duplicates as they all started from the same state.
            val updatedSession = allRunStates.head.session
            // merge all cleanup steps
            val allCleanupSteps = allRunStates.foldMap(_.cleanupSteps)
            val successState = runState.withSession(updatedSession).recordLogStack(wrappedLogStack).registerCleanupSteps(allCleanupSteps)
            Task.now((successState, rightDone))
          } { case (s, failedStep) =>
            val ratio = s"'${failedStepRuns.size}/${nested.size}' run(s)"
            val wrapLogStack = FailureLogInstruction(s"Concurrently block failed for $ratio", initialDepth) +: s.logStack :+ failedTitleLog(initialDepth)
            Task.now((runState.mergeNested(s, wrapLogStack), failedStep))
          }
        }
      }.onErrorRecover { case NonFatal(e) =>
        val failedStep = FailedStep.fromSingle(this, ConcurrentlyError(e))
        (runState.recordLog(failedTitleLog(initialDepth)), Left(failedStep))
      }
  }
}

case class ConcurrentlyTimeout(total: Int, success: Int) extends CornichonError {
  lazy val baseErrorMessage = s"Concurrently block did not reach completion in time: $success/$total finished"
}

case class ConcurrentlyError(cause: Throwable) extends CornichonError {
  lazy val baseErrorMessage = "Concurrently block has thrown an error"
  override val causedBy = CornichonError.fromThrowable(cause) :: Nil
}
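Both wrapper steps rely on Task's timed combinator to capture how long the nested runs took alongside their results; that duration then feeds the log instructions. A tiny standalone sketch of timed (the object name is made up):

import monix.eval.Task
import monix.execution.Scheduler.Implicits.global

import scala.concurrent.duration._

object TimedSketch extends App {
  // timed pairs the task's result with its wall-clock execution time.
  val (elapsed, value) = Task("done").delayExecution(200.millis).timed.runSyncUnsafe()
  println(s"computed '$value' in ${elapsed.toMillis}ms")
}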
Example 65
Source File: FeatureRunner.scala From cornichon with Apache License 2.0 | 5 votes |
package com.github.agourlay.cornichon.core

import com.github.agourlay.cornichon.dsl.BaseFeature
import com.github.agourlay.cornichon.matchers.MatcherResolver
import monix.eval.Task
import monix.reactive.Observable

case class FeatureRunner(featureDef: FeatureDef, baseFeature: BaseFeature, explicitSeed: Option[Long]) {

  private val featureContext = FeatureContext(
    beforeSteps = baseFeature.beforeEachScenario.toList,
    finallySteps = baseFeature.afterEachScenario.toList,
    featureIgnored = featureDef.ignored.isDefined,
    focusedScenarios = featureDef.focusedScenarios,
    withSeed = explicitSeed.orElse(baseFeature.seed),
    customExtractors = baseFeature.registerExtractors,
    allMatchers = (MatcherResolver.builtInMatchers ::: baseFeature.registerMatchers).groupBy(_.key)
  )

  final def runScenario(s: Scenario): Task[ScenarioReport] = {
    println(s"Starting scenario '${s.name}'")
    ScenarioRunner.runScenario(Session.newEmpty, featureContext)(s)
  }

  final def runFeature(filterScenario: Scenario => Boolean)(scenarioResultHandler: ScenarioReport => ScenarioReport): Task[List[ScenarioReport]] = {
    val scenariosToRun = featureDef.scenarios.filter(filterScenario)
    if (scenariosToRun.isEmpty) FeatureRunner.noop
    else {
      // Run 'before feature' hooks
      baseFeature.beforeFeature.foreach(f => f())
      // featureParallelism is limited to avoid spawning too much work at once
      val featureParallelism = if (baseFeature.executeScenariosInParallel) Math.min(scenariosToRun.size, FeatureRunner.maxParallelism) else 1
      Observable.fromIterable(scenariosToRun)
        .mapParallelUnordered(featureParallelism)(runScenario(_).map(scenarioResultHandler))
        .toListL
        .map { results =>
          // Run 'after feature' hooks
          baseFeature.afterFeature.foreach(f => f())
          results
        }
    }
  }
}

object FeatureRunner {
  // the tests are mostly IO bound, so we can run slightly more tasks than there are cores
  lazy val maxParallelism: Int = Runtime.getRuntime.availableProcessors() + 1
  private val noop = Task.now(Nil)
}
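The parallelism sizing in runFeature is worth spelling out: scenarios run in parallel only when the feature opts in, and the factor is capped both by the number of scenarios and by cores + 1, since IO-bound work tolerates slight oversubscription. A self-contained sketch of just that sizing logic (the object and method names are made up):

object ParallelismSketch extends App {
  // Same cap as FeatureRunner.maxParallelism: cores + 1 for IO-bound work.
  val maxParallelism: Int = Runtime.getRuntime.availableProcessors() + 1

  def featureParallelism(scenarioCount: Int, inParallel: Boolean): Int =
    if (inParallel) Math.min(scenarioCount, maxParallelism) else 1

  println(featureParallelism(scenarioCount = 100, inParallel = true))  // cores + 1
  println(featureParallelism(scenarioCount = 2, inParallel = true))    // 2
  println(featureParallelism(scenarioCount = 100, inParallel = false)) // 1
}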
Example 66
Source File: TakeWhileInclusiveSuite.scala From tofu with Apache License 2.0 | 5 votes |
package tofu.observable

import monix.catnap.MVar
import monix.eval.Task
import monix.execution.Scheduler.Implicits.global
import monix.reactive.Observable
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

import scala.concurrent.duration._

class TakeWhileInclusiveSuite extends AnyFlatSpec with Matchers {

  private def writeElement[A](mvar: MVar[Task, Vector[A]])(a: A): Task[Unit] =
    mvar.take.flatMap(v => mvar.put(v :+ a))

  private def inclusiveElements[A](obs: Observable[A])(p: A => Boolean): Task[(Vector[A], List[A])] =
    for {
      mvar     <- MVar[Task].of(Vector.empty[A])
      produced <- obs.doOnNext(writeElement(mvar)).takeWhileInclusive(p).toListL
      written  <- mvar.read
    } yield (written, produced)

  "Observable.takeWhileInclusive" should "observe and produce the same elements" in {
    inclusiveElements(Observable.range(1, 100))(_ <= 10)
      .runSyncUnsafe(Duration.Inf) shouldEqual ((Vector.range(1, 12), List.range(1, 12)))
  }
}
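For contrast with the plain takeWhile: takeWhileInclusive also emits the first element that fails the predicate, which is why the test above expects 1 to 11 for the predicate _ <= 10. A minimal sketch, assuming a takeWhileInclusive combinator is in scope on Observable (tofu provides it as the extension under test; recent Monix versions also ship one natively):

import monix.execution.Scheduler.Implicits.global
import monix.reactive.Observable

object InclusiveSketch extends App {
  // takeWhile stops before the first failing element...
  println(Observable.range(1, 100).takeWhile(_ <= 3).toListL.runSyncUnsafe())
  // List(1, 2, 3)

  // ...takeWhileInclusive keeps it (assumed available, see note above).
  println(Observable.range(1, 100).takeWhileInclusive(_ <= 3).toListL.runSyncUnsafe())
  // List(1, 2, 3, 4)
}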