scala.collection.immutable.Queue Scala Examples

The following examples show how to use scala.collection.immutable.Queue. Each example is an excerpt from an open-source project; the source file, project, and license are listed in the header above the code.
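
Before the project excerpts, here is a minimal self-contained sketch of the core Queue operations (enqueue and dequeue on an immutable FIFO structure); the object name and values are illustrative only.

import scala.collection.immutable.Queue

object QueueBasics extends App {
  val q0 = Queue.empty[Int]          // empty immutable queue
  val q1 = q0.enqueue(1).enqueue(2)  // enqueue returns a new queue; q0 is unchanged
  val q2 = q1.enqueue(3)             // Queue(1, 2, 3)

  val (head, rest) = q2.dequeue      // FIFO order: head == 1, rest == Queue(2, 3)
  println(s"dequeued $head, remaining $rest")

  // dequeueOption avoids the exception thrown by dequeue on an empty queue
  println(Queue.empty[Int].dequeueOption) // None
}
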
Example 1
Source File: ExternalVariablesLoader.scala    From incubator-daffodil   with Apache License 2.0
package org.apache.daffodil.externalvars

import scala.xml.parsing.ConstructingParser
import java.io.File
import java.net.URI

import scala.xml.Node
import scala.io.Codec.string2codec
import org.apache.daffodil.processors.{ VariableUtils, VariableMap }
import org.apache.daffodil.exceptions.Assert
import org.apache.daffodil.util.Misc._
import org.apache.daffodil.exceptions.ThrowsSDE

import scala.collection.immutable.Queue


object ExternalVariablesLoader {

  def loadVariables(bindings: Seq[Binding], referringContext: ThrowsSDE, vmap: VariableMap): VariableMap = {
    Assert.usage(referringContext != null, "loadVariables expects 'referringContext' to not be null!")
    VariableUtils.setExternalVariables(vmap, bindings, referringContext)
    vmap
  }

  // The following are methods that retrieve and transform variables into Seq[Binding]

  def mapToBindings(vars: Map[String, String]): Queue[Binding] = {
    val varsKVP = vars.map {
      case (name, value) => {
        Binding(name, value)
      }
    }
    Queue.empty.enqueue(varsKVP)
  }

  def uriToBindings(uri: URI): Queue[Binding] = {
    Assert.usage(uri ne null)
    val file = new File(uri)
    fileToBindings(file)
  }

  def fileToBindings(file: File): Queue[Binding] = {
    Assert.usage(file ne null)
    ExternalVariablesValidator.validate(file) match {
      case Left(ex) => Assert.abort(ex)
      case Right(_) => // Success
    }
    val enc = determineEncoding(file) // The encoding is needed for ConstructingParser
    val input = scala.io.Source.fromURI(file.toURI)(enc)
    val node = ConstructingParser.fromSource(input, true).document.docElem
    nodeToBindings(node)
  }

  def nodeToBindings(node: Node): Queue[Binding] = {
    Assert.usage(node ne null)
    val newBindings = Binding.getBindings(node)
    var res = Queue.empty[Binding]
    // couldn't get the enqueue(iterable) method overload to resolve.
    // So just doing this one by one
    newBindings.foreach{ b => res = res.enqueue(b) }
    res
  }
} 
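
The comment in nodeToBindings above refers to the iterable enqueue overload, which is deprecated in Scala 2.13 in favour of enqueueAll. A minimal sketch of the bulk alternatives, assuming Scala 2.13 for enqueueAll (the object name and values are illustrative):

import scala.collection.immutable.Queue

object BulkEnqueueSketch extends App {
  // enqueueAll is available on Scala 2.13+; Queue(xs: _*) also works on 2.12.
  val xs = Seq("a", "b", "c")
  val viaEnqueueAll = Queue.empty[String].enqueueAll(xs)
  val viaVarargs    = Queue(xs: _*)
  println(viaEnqueueAll == viaVarargs) // true: both are Queue(a, b, c)
}
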
Example 2
Source File: Router.scala    From akka-cluster-manager   with MIT License
package io.orkestra.cluster.routing

import io.orkestra.cluster.protocol.Response.Success.RouteeDeleted
import io.orkestra.cluster.routing.ClusterListener.DeleteRoutee

import scala.collection.immutable.Queue
import akka.actor._
import akka.cluster.{Member, Cluster}

class RouterRR(memberId: String, cluster: Cluster)
    extends Actor
    with ActorLogging {

  import RouterRR._

  var members: Queue[ActorRef] = Queue()

  var quarantineMembers: List[ActorRef] = List.empty[ActorRef]

  def receive = {

    case GetRoutee(role) =>
      sender ! Routee(getMember)

    case GetRoutees =>
      sender ! members.toList

    case RegisterRoutee(path) =>
      if (isQuarantine(path))
        recoverMember(path)
      else
        probeRoutee(path)

    case RemoveRoutee(path) =>
      removeMember(path)

    case DeleteRoutee(role, path) =>
      members = members.filterNot(_.path.toString == path)
      sender ! RouteeDeleted(role, path)

    case QuarantineRoutee(path) =>
      quarantineMember(path)

    case RecoverRoutee(path) =>
      recoverMember(path)

    case CleanQuarantine(path) =>
      quarantineCleaner(path)

    case ActorIdentity(`memberId`, Some(routeeRef)) =>
      registerMember(routeeRef)

    case ActorIdentity(`memberId`, None) =>
      log.warning(s"member with id $memberId not found")

    case Terminated(memberRef) =>
      log.info(s"Member ${memberRef.path} was Terminated")
      removeMember(memberRef.path)
      SupervisorStrategy

  }

  
  def quarantineCleaner(path: ActorPath) = {
    log.debug(s"Quarantine is being cleaned of $path...")
    quarantineMembers.filter(_.path == path).map { m =>
      log.warning(s"Removing quarantined member ${m.path.address}")
      cluster.down(m.path.address)
    }
  }
}

object RouterRR {
  case class RegisterRoutee(x: ActorPath)
  case class RemoveRoutee(x: ActorPath)
  case class QuarantineRoutee(x: ActorPath)
  case class RecoverRoutee(x: ActorPath)
  case class GetRoutee(role: String)
  case object GetRoutees
  case class Routee(ref: Option[ActorRef])
  case class CleanQuarantine(path: ActorPath)
} 
Example 3
Source File: ReactElementContainer.scala    From slinky   with MIT License
package slinky.core

import slinky.core.facade.ReactElement

import scala.collection.immutable.{Iterable, Queue}
import scala.concurrent.Future
import scala.scalajs.js
import scala.util.Try

trait ReactElementContainer[F[_]] extends Any { self =>
  def map[A](fa: F[A])(f: A => ReactElement): F[ReactElement]
}

object ReactElementContainer {
  def apply[F[_]: ReactElementContainer]: ReactElementContainer[F] = implicitly[ReactElementContainer[F]]

  @inline implicit def function0Container: ReactElementContainer[Function0] = new ReactElementContainer[Function0] {
    override def map[A](fa: () => A)(f: A => ReactElement): () => ReactElement = () => f(fa())
  }

  @inline implicit def futureContainer: ReactElementContainer[Future] = new ReactElementContainer[Future] {
    import scala.concurrent.ExecutionContext.Implicits.global
    override def map[A](fa: Future[A])(f: A => ReactElement): Future[ReactElement] = fa.map(f)
  }

  @inline implicit def iterableContainer: ReactElementContainer[Iterable] = new ReactElementContainer[Iterable] {
    override def map[A](fa: Iterable[A])(f: A => ReactElement): Iterable[ReactElement] = fa.map(f)
  }

  @inline implicit def jsUndefOrContainer: ReactElementContainer[js.UndefOr] = new ReactElementContainer[js.UndefOr] {
    override def map[A](fa: js.UndefOr[A])(f: A => ReactElement): js.UndefOr[ReactElement] = fa.map(f)
  }

  @inline implicit def listContainer: ReactElementContainer[List] = new ReactElementContainer[List] {
    override def map[A](fa: List[A])(f: A => ReactElement): List[ReactElement] = fa.map(f)
  }

  @inline implicit def optionContainer: ReactElementContainer[Option] = new ReactElementContainer[Option] {
    override def map[A](fa: Option[A])(f: A => ReactElement): Option[ReactElement] = fa.map(f)
  }

  @inline implicit def queueContainer: ReactElementContainer[Queue] = new ReactElementContainer[Queue] {
    override def map[A](fa: Queue[A])(f: A => ReactElement): Queue[ReactElement] = fa.map(f)
  }

  @inline implicit def seqContainer: ReactElementContainer[Seq] = new ReactElementContainer[Seq] {
    override def map[A](fa: Seq[A])(f: A => ReactElement): Seq[ReactElement] = fa.map(f)
  }

  @inline implicit def setContainer: ReactElementContainer[Set] = new ReactElementContainer[Set] {
    override def map[A](fa: Set[A])(f: A => ReactElement): Set[ReactElement] = fa.map(f)
  }

  @inline implicit def someContainer: ReactElementContainer[Some] = new ReactElementContainer[Some] {
    override def map[A](fa: Some[A])(f: A => ReactElement): Some[ReactElement] = Some(fa.map(f).get)
  }

  @inline implicit def tryContainer: ReactElementContainer[Try] = new ReactElementContainer[Try] {
    override def map[A](fa: Try[A])(f: A => ReactElement): Try[ReactElement] = fa.map(f)
  }

  @inline implicit def vectorContainer: ReactElementContainer[Vector] = new ReactElementContainer[Vector] {
    override def map[A](fa: Vector[A])(f: A => ReactElement): Vector[ReactElement] = fa.map(f)
  }
} 
Example 4
Source File: TrampolineECTests.scala    From cats-effect   with Apache License 2.0
package cats.effect
package internals

import org.scalatest.matchers.should.Matchers
import org.scalatest.funsuite.AnyFunSuite
import cats.effect.internals.TrampolineEC.immediate
import scala.concurrent.ExecutionContext
import cats.effect.internals.IOPlatform.isJVM
import scala.collection.immutable.Queue

class TrampolineECTests extends AnyFunSuite with Matchers with TestUtils {
  implicit val ec: ExecutionContext = immediate

  def executeImmediate(f: => Unit): Unit =
    ec.execute(new Runnable { def run(): Unit = f })

  test("execution should be immediate") {
    var effect = 0

    executeImmediate {
      effect += 1
      executeImmediate {
        effect += 2
        executeImmediate {
          effect += 3
        }
      }
    }

    effect shouldEqual 1 + 2 + 3
  }

  test("concurrent execution") {
    var effect = List.empty[Int]

    executeImmediate {
      executeImmediate { effect = 1 :: effect }
      executeImmediate { effect = 2 :: effect }
      executeImmediate { effect = 3 :: effect }
    }

    effect shouldEqual List(1, 2, 3)
  }

  test("stack safety") {
    var effect = 0
    def loop(n: Int, acc: Int): Unit =
      executeImmediate {
        if (n > 0) loop(n - 1, acc + 1)
        else effect = acc
      }

    val n = if (isJVM) 100000 else 5000
    loop(n, 0)

    effect shouldEqual n
  }

  test("on blocking it should fork") {
    assume(isJVM, "test relevant only for the JVM")
    import scala.concurrent.blocking

    var effects = Queue.empty[Int]
    executeImmediate {
      executeImmediate { effects = effects.enqueue(4) }
      executeImmediate { effects = effects.enqueue(4) }

      effects = effects.enqueue(1)
      blocking { effects = effects.enqueue(2) }
      effects = effects.enqueue(3)
    }

    effects shouldBe Queue(1, 4, 4, 2, 3)
  }

  test("thrown exceptions should get logged to System.err (immediate)") {
    val dummy1 = new RuntimeException("dummy1")
    val dummy2 = new RuntimeException("dummy2")
    var effects = 0

    val output = catchSystemErr {
      executeImmediate {
        executeImmediate(effects += 1)
        executeImmediate(effects += 1)
        executeImmediate {
          executeImmediate(effects += 1)
          executeImmediate(effects += 1)
          throw dummy2
        }
        throw dummy1
      }
    }

    output should include("dummy1")
    output should include("dummy2")
    effects shouldBe 4
  }
} 
Example 5
Source File: AsyncHttpClientPipedFs2WebsocketsTest.scala    From sttp   with Apache License 2.0
package sttp.client.asynchttpclient.fs2

import cats.effect.concurrent.Ref
import cats.effect.IO
import cats.implicits._
import fs2._
import sttp.client._
import sttp.client.asynchttpclient.WebSocketHandler
import sttp.client.impl.cats.CatsTestBase
import sttp.client.impl.fs2.Fs2WebSockets
import sttp.client.testing.ToFutureWrapper
import sttp.client.ws.WebSocket
import sttp.model.ws.WebSocketFrame
import sttp.client.testing.HttpTest.wsEndpoint

import scala.collection.immutable.Queue
import org.scalatest.flatspec.AsyncFlatSpec
import org.scalatest.matchers.should.Matchers

class AsyncHttpClientPipedFs2WebsocketsTest extends AsyncFlatSpec with Matchers with ToFutureWrapper with CatsTestBase {
  implicit val backend: SttpBackend[IO, Nothing, WebSocketHandler] = AsyncHttpClientFs2Backend[IO]().unsafeRunSync()

  def createHandler: Option[Int] => IO[WebSocketHandler[WebSocket[IO]]] = Fs2WebSocketHandler[IO](_)

  it should "run a simple echo pipe" in {
    basicRequest
      .get(uri"$wsEndpoint/ws/echo")
      .openWebsocketF(createHandler(None))
      .product(Ref.of[IO, Queue[String]](Queue.empty))
      .flatMap {
        case (response, results) =>
          Fs2WebSockets.handleSocketThroughTextPipe(response.result) { in =>
            val receive = in.evalMap(m => results.update(_.enqueue(m)))
            val send = Stream("Message 1".asRight, "Message 2".asRight, WebSocketFrame.close.asLeft)
            send merge receive.drain
          } >> results.get.map(_ should contain theSameElementsInOrderAs List("echo: Message 1", "echo: Message 2"))
      }
      .toFuture()
  }

  it should "run a simple read-only client" in {
    basicRequest
      .get(uri"$wsEndpoint/ws/send_and_wait")
      .openWebsocketF(createHandler(None))
      .product(Ref.of[IO, Queue[String]](Queue.empty))
      .flatMap {
        case (response, results) =>
          Fs2WebSockets.handleSocketThroughTextPipe(response.result) { in =>
            in.evalMap(m => results.update(_.enqueue(m)).flatMap(_ => results.get.map(_.size))).flatMap {
              case 2 => Stream(None) // terminating the stream
              case _ => Stream.empty // waiting for more messages
            }.unNoneTerminate
          } >> results.get.map(_ should contain theSameElementsInOrderAs List("test10", "test20"))
      }
      .toFuture()
  }
} 
Example 6
Source File: TcpServiceImpl.scala    From c4proto   with Apache License 2.0
package ee.cone.c4gate_server

import java.net.InetSocketAddress
import java.nio.ByteBuffer
import java.nio.channels.{AsynchronousServerSocketChannel, AsynchronousSocketChannel, CompletionHandler}
import java.util.UUID
import java.util.concurrent.{Executors, ScheduledExecutorService, ScheduledFuture, TimeUnit}

import com.typesafe.scalalogging.LazyLogging
import ee.cone.c4actor._

import scala.collection.concurrent.TrieMap
import scala.collection.immutable.Queue

@SuppressWarnings(Array("org.wartremover.warts.Var")) class ChannelHandler(
  channel: AsynchronousSocketChannel, unregister: ()=>Unit, fail: Throwable=>Unit,
  executor: ScheduledExecutorService, timeout: Long, val compressor: Option[Compressor]
) extends CompletionHandler[Integer,Unit] with SenderToAgent {
  private var queue: Queue[Array[Byte]] = Queue.empty
  private var activeElement: Option[ByteBuffer] = None
  private var purge: Option[ScheduledFuture[_]] = None
  private def startWrite(): Unit =
    queue.dequeueOption.foreach{ case (element,nextQueue) =>
      queue = nextQueue
      activeElement = Option(ByteBuffer.wrap(element))
      channel.write[Unit](activeElement.get, (), this)
    }
  def add(data: Array[Byte]): Unit = synchronized {
    queue = queue.enqueue(data)
    if(activeElement.isEmpty) startWrite()
  }
  def completed(result: Integer, att: Unit): Unit = Trace {
    synchronized {
      if(activeElement.get.hasRemaining) channel.write[Unit](activeElement.get, (), this)
      else {
        purge.foreach(_.cancel(false))
        purge = Option(executor.schedule(new Runnable {
          def run(): Unit = close()
        },timeout,TimeUnit.SECONDS))
        activeElement = None
        startWrite()
      }
    }
  }
  def failed(exc: Throwable, att: Unit): Unit = {
    fail(exc)
    close()
  }
  def close(): Unit = {
    unregister()
    channel.close()  //does close block?
  }
}

class TcpServerImpl(
  port: Int, tcpHandler: TcpHandler, timeout: Long, compressorFactory: StreamCompressorFactory,
  channels: TrieMap[String,ChannelHandler] = TrieMap()
) extends TcpServer with Executable with LazyLogging {
  def getSender(connectionKey: String): Option[SenderToAgent] =
    channels.get(connectionKey)
  def run(): Unit = concurrent.blocking{
    tcpHandler.beforeServerStart()
    val address = new InetSocketAddress(port)
    val listener = AsynchronousServerSocketChannel.open().bind(address)
    val executor = Executors.newScheduledThreadPool(1)
    listener.accept[Unit]((), new CompletionHandler[AsynchronousSocketChannel,Unit] {
      def completed(ch: AsynchronousSocketChannel, att: Unit): Unit = Trace {
        listener.accept[Unit]((), this)
        val key = UUID.randomUUID.toString
        val sender = new ChannelHandler(ch, {() =>
          assert(channels.remove(key).nonEmpty)
          tcpHandler.afterDisconnect(key)
        }, { error =>
          logger.error("channel",error)
        }, executor, timeout, compressorFactory.create())
        assert(channels.put(key,sender).isEmpty)
        tcpHandler.afterConnect(key, sender)
      }
      def failed(exc: Throwable, att: Unit): Unit = logger.error("tcp",exc) //! may be set status-finished
    })
  }
} 
Example 7
Source File: ModelUtils.scala    From DataQuality   with GNU Lesser General Public License v3.0
package dbmodel

import play.api.Logger

import scala.collection.immutable.Queue


object ModelUtils {

  private lazy val separator: String = ","

  def toSeparatedString(seq: Seq[String]): Option[String] = {
    if (seq.isEmpty) None else Some(seq.mkString(separator))
  }

  def parseSeparatedString(str: Option[String]): Seq[String] = {
    str match {
      case Some(kf) => kf.split(",").toSeq
      case None => Seq.empty
    }
  }

  def findOptimalOrder(graph: Map[String, Seq[String]]): Queue[String] = {

    def loop(current: (String, Seq[String]))(gr: Map[String, Seq[String]], o: Queue[String], v: Set[String]): Queue[String] = {
      Logger.debug(s"Processing tuple: $current")
      Logger.debug(s"Current state: v{${v.mkString(",")}} : o{${o.mkString(",")}}")
      if (v.contains(current._1)) throw new IllegalArgumentException("Graph isn't acyclic.")
      else if (o.contains(current._1)) o
      else {
        val resolved: Queue[String] = current._2.foldLeft(o) {
          case (agg, curr) =>
            curr match {
              case x if x == current._1 => throw new IllegalArgumentException("Graph isn't acyclic.")
              case x if !o.contains(x) =>
                gr.get(x) match {
                  case Some(tail) =>
                    Logger.debug(s"Next: ${current._1} -> $x")
                    loop((x, tail))(gr, agg, v + current._1)
                  case None => Queue.empty[String]
                }
              case _ => Queue.empty[String]
            }
        }
        val order: Queue[String] = if (resolved.nonEmpty) resolved :+ current._1 else o :+ current._1
        Logger.debug(s"Put ${current._1} to o: {${order.mkString(",")}}")
        order
      }
    }

    graph.toSeq.foldLeft(Queue.empty[String]) {
      case (ord: Queue[String], curr: (String, Seq[String])) =>
        val r = loop(curr)(graph, ord, Set.empty)
        Logger.debug(s"${curr._1} :: ${r.mkString(", ")}")
        r
    }
  }

} 
Example 8
Source File: DAGProcessorSpec.scala    From DataQuality   with GNU Lesser General Public License v3.0
package backend

import org.scalatestplus.play._

import scala.collection.immutable.Queue



class DAGProcessorSpec extends PlaySpec {

  import models.ModelUtils._
  import play.api.Logger

  "Graph processor" must {

    val tg1: Map[String, Seq[String]] = Map(
      "1" -> Seq("3"),
      "2" -> Seq("1"),
      "3" -> Seq("4", "5"),
      "4" -> Seq("ext1"),
      "5" -> Seq("ext3"),
      "6" -> Seq("7"),
      "7" -> Seq("3"),
      "8" -> Seq("ext2")
    )

    val tg2: Map[String, Seq[String]] = Map(
      "4" -> Seq("2","3"),
      "1" -> Seq("e1"),
      "2" -> Seq("1"),
      "3" -> Seq("1")
    )
    val tg3: Map[String, Seq[String]] = Map(
      "1" -> Seq("2"),
      "2" -> Seq("3"),
      "3" -> Seq("4"),
      "4" -> Seq("e1")
    )

    val atg1: Map[String, Seq[String]] = Map(
      "2" -> Seq("1"),
      "1" -> Seq("2")
    )

    val atg2: Map[String, Seq[String]] = Map(
      "1" -> Seq("3", "6"),
      "2" -> Seq("1"),
      "3" -> Seq("4", "5","1"),
      "4" -> Seq("ext1"),
      "5" -> Seq("ext3"),
      "6" -> Seq("7"),
      "7" -> Seq("3"),
      "8" -> Seq("ext2")
    )

    val atg3: Map[String, Seq[String]] = Map(
      "1" -> Seq("2"),
      "2" -> Seq("3"),
      "3" -> Seq("4"),
      "4" -> Seq("1")
    )

    "tg1: return same size seq" in {
      val res = findOptimalOrder(tg1)
      Logger.info(res.mkString(" > "))
      assert(res.size == tg1.keys.size)
    }
    "tg2: return same size seq" in {
      val res = findOptimalOrder(tg2)
      Logger.info(res.mkString(" > "))
      assert(res.size == tg2.keys.size)
    }
    "tg3: return correct order" in {
      val res: Queue[String] = findOptimalOrder(tg3)
      Logger.info(res.mkString(" > "))
      assert(res == Queue("4","3","2","1"))
    }
    "atg1: should throw error" in {
      assertThrows[IllegalArgumentException](findOptimalOrder(atg1))
    }
    "atg2: should throw error" in {
      assertThrows[IllegalArgumentException](findOptimalOrder(atg2))
    }
    "atg3: should throw error" in {
      assertThrows[IllegalArgumentException](findOptimalOrder(atg3))
    }

  }
} 
Example 9
Source File: ModelUtils.scala    From DataQuality   with GNU Lesser General Public License v3.0
package models

import play.api.Logger

import scala.collection.immutable.Queue


object ModelUtils {

  private lazy val separator: String = ","

  def toSeparatedString(seq: Seq[String]): Option[String] = {
    if (seq.isEmpty) None else Some(seq.mkString(separator))
  }

  def parseSeparatedString(str: Option[String]): Seq[String] = {
    str match {
      case Some(kf) => kf.split(",").toSeq
      case None => Seq.empty
    }
  }

  def findOptimalOrder(graph: Map[String, Seq[String]]): Queue[String] = {

    def loop(current: (String, Seq[String]))(gr: Map[String, Seq[String]], o: Queue[String], v: Set[String]): Queue[String] = {
      Logger.debug(s"Processing tuple: $current")
      Logger.debug(s"Current state: v{${v.mkString(",")}} : o{${o.mkString(",")}}")
      if (v.contains(current._1)) throw new IllegalArgumentException("Graph isn't acyclic.")
      else if (o.contains(current._1)) o
      else {
        val resolved: Queue[String] = current._2.foldLeft(o) {
          case (agg, curr) =>
            curr match {
              case x if x == current._1 => throw new IllegalArgumentException("Graph isn't acyclic.")
              case x if !o.contains(x) =>
                gr.get(x) match {
                  case Some(tail) =>
                    Logger.debug(s"Next: ${current._1} -> $x")
                    loop((x, tail))(gr, agg, v + current._1)
                  case None => Queue.empty[String]
                }
              case _ => Queue.empty[String]
            }
        }
        val order: Queue[String] = if (resolved.nonEmpty) resolved :+ current._1 else o :+ current._1
        Logger.debug(s"Put ${current._1} to o: {${order.mkString(",")}}")
        order
      }
    }

    graph.toSeq.foldLeft(Queue.empty[String]) {
      case (ord: Queue[String], curr: (String, Seq[String])) =>
        val r = loop(curr)(graph, ord, Set.empty)
        Logger.debug(s"${curr._1} :: ${r.mkString(", ")}")
        r
    }
  }

} 
Example 10
Source File: WorkQueue.scala    From akka-iot-mqtt-v2   with GNU Lesser General Public License v3.0
package akkaiot

import scala.collection.immutable.Queue

object WorkQueue {

  def empty: WorkQueue = WorkQueue(
    pendingWork = Queue.empty,
    workInProgress = Map.empty,
    acceptedWorkIds = Set.empty,
    doneWorkIds = Set.empty)

  trait WorkDomainEvent
  case class WorkAccepted(work: Work) extends WorkDomainEvent
  case class WorkStarted(workId: String) extends WorkDomainEvent
  case class WorkCompleted(workId: String, result: WorkResult) extends WorkDomainEvent
  case class WorkerFailed(workId: String) extends WorkDomainEvent
  case class WorkerTimedOut(workId: String) extends WorkDomainEvent

}

case class WorkQueue private (
  private val pendingWork: Queue[Work],
  private val workInProgress: Map[String, Work],
  private val acceptedWorkIds: Set[String],
  private val doneWorkIds: Set[String]) {

  import WorkQueue._

  def hasWork: Boolean = pendingWork.nonEmpty
  def nextWork: Work = pendingWork.head
  def isAccepted(workId: String): Boolean = acceptedWorkIds.contains(workId)
  def isInProgress(workId: String): Boolean = workInProgress.contains(workId)
  def isDone(workId: String): Boolean = doneWorkIds.contains(workId)

  def updated(event: WorkDomainEvent): WorkQueue = event match {
    case WorkAccepted(work) =>
      copy(
        pendingWork = pendingWork enqueue work,
        acceptedWorkIds = acceptedWorkIds + work.workId)

    case WorkStarted(workId) =>
      val (work, rest) = pendingWork.dequeue
      require(workId == work.workId, s"WorkStarted expected workId $workId == ${work.workId}")
      copy(
        pendingWork = rest,
        workInProgress = workInProgress + (workId -> work))

    case WorkCompleted(workId, result) =>
      copy(
        workInProgress = workInProgress - workId,
        doneWorkIds = doneWorkIds + workId)

    case WorkerFailed(workId) =>
      copy(
        pendingWork = pendingWork enqueue workInProgress(workId),
        workInProgress = workInProgress - workId)

    case WorkerTimedOut(workId) =>
      copy(
        pendingWork = pendingWork enqueue workInProgress(workId),
        workInProgress = workInProgress - workId)
  }

} 
Example 11
Source File: DruidQueryHistory.scala    From spark-druid-olap   with Apache License 2.0
package org.sparklinedata.druid.metadata

import org.apache.spark.sql.hive.thriftserver.sparklinedata.HiveThriftServer2

import scala.collection.immutable.Queue
import scala.language.implicitConversions

case class DruidQueryExecutionView(
                         stageId : Int,
                         partitionId : Int,
                         taskAttemptId : Long,
                         druidQueryServer : String,
                         druidSegIntervals : Option[List[(String,String)]],
                         startTime : String,
                         druidExecTime : Long,
                         queryExecTime : Long,
                         numRows : Int,
                         druidQuery : String,
                         sqlStmt: Option[String] = None
                         )

object DruidQueryHistory {

  

  lazy val maxSize = System.getProperty("sparkline.queryhistory.maxsize", "500").toInt


  class FiniteQueue[A](q: Queue[A]) {
    def enqueueFinite[B >: A](elem: B): Queue[B] = {
      var ret = q.enqueue(elem)
      while (ret.size > maxSize) { ret = ret.dequeue._2 }
      ret
    }
  }
  implicit def queue2finitequeue[A](q: Queue[A]) = new FiniteQueue[A](q)


  private var history : Queue[DruidQueryExecutionView] = Queue[DruidQueryExecutionView]()

  def add(dq : DruidQueryExecutionView) : Unit = synchronized {
    history = history.enqueueFinite(
      dq.copy(sqlStmt = HiveThriftServer2.getSqlStmt(dq.stageId))
    )
  }

  def isEmpty = history.isEmpty

  def clear : Unit = synchronized {
    history = Queue[DruidQueryExecutionView]()
  }

  def getHistory : List[DruidQueryExecutionView] = synchronized {
    history.toList
  }
} 
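
FiniteQueue above implements a bounded history: enqueue the new element, then drop elements from the front until the size cap holds. A standalone sketch of the same pattern, with illustrative names:

import scala.collection.immutable.Queue

object BoundedQueueSketch extends App {
  // Keep at most maxSize elements, discarding the oldest ones first.
  def enqueueBounded[A](q: Queue[A], elem: A, maxSize: Int): Queue[A] = {
    var ret = q.enqueue(elem)
    while (ret.size > maxSize) ret = ret.dequeue._2
    ret
  }

  println(enqueueBounded(Queue(1, 2, 3), 4, maxSize = 3)) // Queue(2, 3, 4)
}
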
Example 12
Source File: CollectionConvertersSuite.scala    From pureconfig   with Mozilla Public License 2.0
package pureconfig

import scala.collection.JavaConverters._
import scala.collection.immutable.{ HashSet, ListSet, Queue, TreeSet }

import com.typesafe.config.{ ConfigFactory, ConfigValueFactory, ConfigValueType }
import pureconfig.error.{ ConfigReaderFailures, ConvertFailure, WrongType }

class CollectionConvertersSuite extends BaseSuite {
  implicit override val generatorDrivenConfig = PropertyCheckConfiguration(minSuccessful = 100)

  behavior of "ConfigConvert"

  checkArbitrary[HashSet[String]]

  checkArbitrary[List[Float]]
  checkRead[List[Int]](
    // order of keys maintained
    ConfigValueFactory.fromMap(Map("2" -> 1, "0" -> 2, "1" -> 3).asJava) -> List(2, 3, 1),
    ConfigValueFactory.fromMap(Map("3" -> 2, "1" -> 4).asJava) -> List(4, 2),
    ConfigValueFactory.fromMap(Map("1" -> 1, "a" -> 2).asJava) -> List(1))

  checkFailures[List[Int]](
    ConfigValueFactory.fromMap(Map("b" -> 1, "a" -> 2).asJava) -> ConfigReaderFailures(
      ConvertFailure(WrongType(ConfigValueType.OBJECT, Set(ConfigValueType.LIST)), emptyConfigOrigin, "")),
    ConfigValueFactory.fromMap(Map().asJava) -> ConfigReaderFailures(
      ConvertFailure(WrongType(ConfigValueType.OBJECT, Set(ConfigValueType.LIST)), emptyConfigOrigin, "")))

  checkArbitrary[ListSet[Int]]

  checkArbitrary[Map[String, Int]]
  checkFailures[Map[String, Int]](
    // nested map should fail
    ConfigFactory.parseString("conf.a=1").root() -> ConfigReaderFailures(
      ConvertFailure(WrongType(ConfigValueType.OBJECT, Set(ConfigValueType.NUMBER)), stringConfigOrigin(1), "conf")),
    // wrong value type should fail
    ConfigFactory.parseString("{ a=b }").root() -> ConfigReaderFailures(
      ConvertFailure(WrongType(ConfigValueType.STRING, Set(ConfigValueType.NUMBER)), stringConfigOrigin(1), "a")))

  checkArbitrary[Queue[Boolean]]

  checkArbitrary[Set[Double]]
  checkRead[Set[Int]](
    ConfigValueFactory.fromMap(Map("1" -> 4, "2" -> 5, "3" -> 6).asJava) -> Set(4, 5, 6))

  checkArbitrary[Stream[String]]

  checkArbitrary[TreeSet[Int]]

  checkArbitrary[Vector[Short]]

  checkArbitrary[Option[Int]]

  checkArbitrary[Array[Int]]
} 
Example 13
Source File: BFSAndApps.scala    From algorithmaday   with GNU General Public License v3.0
package org.pfcoperez.dailyalgorithm.applications

import scala.collection.immutable.Queue
import org.pfcoperez.dailyalgorithm.datastructures.graphs.directed.trees.binary._

object BFSAndApps extends App {

  
  def bfsWithAccFunction[T, R](acc: R, h: Int = 0)(
    toVisit: Queue[BinaryTree[T]])(update: (R, Int, T) => R)(
    inLevelOrder: (BinaryTree[T], BinaryTree[T]) => (BinaryTree[T], BinaryTree[T])): R =
    if (toVisit.isEmpty) acc
    else {
      val (currentNode, remToVisit) = toVisit.dequeue
      val (newToVisit: Queue[BinaryTree[T]], newAcc) = currentNode match {
        case Node(left, v, right) =>
          val (a, b) = inLevelOrder(left, right)
          (remToVisit ++ Seq(a, b), update(acc, h, v))
        case _ => remToVisit -> acc
      }
      bfsWithAccFunction[T, R](newAcc, h + 1)(newToVisit)(update)(inLevelOrder)
    }

  val o = Node(Empty, 15, Empty)
  val n = Node(Empty, 14, Empty)
  val m = Node(Empty, 13, Empty)
  val l = Node(Empty, 12, Empty)
  val k = Node(Empty, 11, Empty)
  val j = Node(Empty, 10, Empty)
  val i = Node(Empty, 9, Empty)
  val h = Node(Empty, 8, Empty)

  val g = Node(n, 7, o)
  val f = Node(l, 6, m)
  val e = Node(j, 5, k)
  val d = Node(h, 4, i)

  val b = Node(d, 2, e)
  val c = Node(f, 3, g)

  val a = Node(b, 1, c)

  // `levelOrderTreeTraversal` is not part of this excerpt; the helper below is a
  // reconstruction (an assumption) that accumulates visited values via bfsWithAccFunction.
  def levelOrderTreeTraversal[T](root: BinaryTree[T]): List[T] =
    bfsWithAccFunction[T, List[T]](List.empty[T])(Queue(root))((acc, _, v) => acc :+ v)((l, r) => (l, r))

  println(levelOrderTreeTraversal(a))

} 
Example 14
Source File: InsertOrdSet.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf.data


import scala.collection.immutable.{HashSet, Set, Queue}
import scala.collection.{SetLike, AbstractSet}
import scala.collection.generic.{
  ImmutableSetFactory,
  GenericCompanion,
  CanBuildFrom,
  GenericSetTemplate
}

final class InsertOrdSet[T] private (_items: Queue[T], _hashSet: HashSet[T])
    extends AbstractSet[T]
    with Set[T]
    with SetLike[T, InsertOrdSet[T]]
    with GenericSetTemplate[T, InsertOrdSet]
    with Serializable {
  override def empty: InsertOrdSet[T] = InsertOrdSet.empty
  override def size: Int = _hashSet.size

  def iterator: Iterator[T] =
    _items.reverseIterator

  override def contains(elem: T): Boolean =
    _hashSet.contains(elem)

  override def +(elem: T): InsertOrdSet[T] =
    if (_hashSet.contains(elem))
      this
    else
      new InsertOrdSet(
        elem +: _items,
        _hashSet + elem
      )

  override def -(elem: T): InsertOrdSet[T] =
    new InsertOrdSet(
      _items.filter(elem2 => elem != elem2),
      _hashSet - elem
    )

  override def companion: GenericCompanion[InsertOrdSet] = InsertOrdSet

}

object InsertOrdSet extends ImmutableSetFactory[InsertOrdSet] {
  private val Empty = new InsertOrdSet(Queue.empty, HashSet.empty)
  override def empty[T] = Empty.asInstanceOf[InsertOrdSet[T]]
  def emptyInstance: InsertOrdSet[Any] = empty[Any]

  def fromSeq[T](s: Seq[T]): InsertOrdSet[T] =
    new InsertOrdSet(Queue(s.reverse: _*), HashSet(s: _*))

  implicit def canBuildFrom[A]: CanBuildFrom[Coll, A, InsertOrdSet[A]] =
    setCanBuildFrom[A]

} 
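
A short usage sketch of InsertOrdSet as defined above, assuming it is available on the classpath: iteration follows insertion order while membership checks go through the backing HashSet. The object name and values are illustrative.

import com.daml.lf.data.InsertOrdSet

object InsertOrdSetSketch extends App {
  // Iteration preserves insertion order; duplicates added via + are ignored.
  val s = InsertOrdSet.empty[String] + "b" + "a" + "b" + "c"
  println(s.toList)          // List(b, a, c)
  println(s.contains("a"))   // true
  println((s - "a").toList)  // List(b, c)
}
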
Example 15
Source File: BatchingClient.scala    From eclair   with Apache License 2.0
package fr.acinq.eclair.blockchain.bitcoind.rpc

import akka.actor.{Actor, ActorLogging, ActorRef, Status}
import akka.pattern.pipe
import fr.acinq.eclair.blockchain.bitcoind.rpc.BatchingClient.Pending

import scala.collection.immutable.Queue

class BatchingClient(rpcClient: BasicBitcoinJsonRPCClient) extends Actor with ActorLogging {

  import scala.concurrent.ExecutionContext.Implicits.global

  override def receive: Receive = {
    case request: JsonRPCRequest =>
      // immediately process isolated request
      process(queue = Queue(Pending(request, sender)))
  }

  def waiting(queue: Queue[Pending], processing: Seq[Pending]): Receive = {
    case request: JsonRPCRequest =>
      // there is already a batch in flight, just add this request to the queue
      context become waiting(queue :+ Pending(request, sender), processing)

    case responses: Seq[JsonRPCResponse]@unchecked =>
      log.debug("got {} responses", responses.size)
      // let's send back answers to the requestors
      require(responses.size == processing.size, s"responses=${responses.size} != processing=${processing.size}")
      responses.zip(processing).foreach {
        case (JsonRPCResponse(result, None, _), Pending(_, requestor)) => requestor ! result
        case (JsonRPCResponse(_, Some(error), _), Pending(_, requestor)) => requestor ! Status.Failure(JsonRPCError(error))
      }
      process(queue)

    case s@Status.Failure(t) =>
      log.error(t, s"got exception for batch of ${processing.size} requests")
      // let's fail all requests
      processing.foreach { case Pending(_, requestor) => requestor ! s }
      process(queue)
  }

  def process(queue: Queue[Pending]) = {
    // do we have queued requests?
    if (queue.isEmpty) {
      log.debug("no more requests, going back to idle")
      context become receive
    } else {
      val (batch, rest) = queue.splitAt(BatchingClient.BATCH_SIZE)
      log.debug(s"sending {} request(s): {} (queue={})", batch.size, batch.groupBy(_.request.method).map(e => e._1 + "=" + e._2.size).mkString(" "), queue.size)
      rpcClient.invoke(batch.map(_.request)) pipeTo self
      context become waiting(rest, batch)
    }
  }

}

object BatchingClient {

  val BATCH_SIZE = 50

  case class Pending(request: JsonRPCRequest, requestor: ActorRef)

} 
Example 16
Source File: SqlitePendingRelayDb.scala    From eclair   with Apache License 2.0
package fr.acinq.eclair.db.sqlite

import java.sql.Connection

import fr.acinq.bitcoin.ByteVector32
import fr.acinq.eclair.channel.{Command, HasHtlcId}
import fr.acinq.eclair.db.PendingRelayDb
import fr.acinq.eclair.wire.CommandCodecs.cmdCodec

import scala.collection.immutable.Queue

class SqlitePendingRelayDb(sqlite: Connection) extends PendingRelayDb {

  import SqliteUtils.ExtendedResultSet._
  import SqliteUtils._

  val DB_NAME = "pending_relay"
  val CURRENT_VERSION = 1

  using(sqlite.createStatement(), inTransaction = true) { statement =>
    require(getVersion(statement, DB_NAME, CURRENT_VERSION) == CURRENT_VERSION, s"incompatible version of $DB_NAME DB found") // there is only one version currently deployed
    // note: should we use a foreign key to local_channels table here?
    statement.executeUpdate("CREATE TABLE IF NOT EXISTS pending_relay (channel_id BLOB NOT NULL, htlc_id INTEGER NOT NULL, data BLOB NOT NULL, PRIMARY KEY(channel_id, htlc_id))")
  }

  override def addPendingRelay(channelId: ByteVector32, cmd: Command with HasHtlcId): Unit = {
    using(sqlite.prepareStatement("INSERT OR IGNORE INTO pending_relay VALUES (?, ?, ?)")) { statement =>
      statement.setBytes(1, channelId.toArray)
      statement.setLong(2, cmd.id)
      statement.setBytes(3, cmdCodec.encode(cmd).require.toByteArray)
      statement.executeUpdate()
    }
  }

  override def removePendingRelay(channelId: ByteVector32, htlcId: Long): Unit = {
    using(sqlite.prepareStatement("DELETE FROM pending_relay WHERE channel_id=? AND htlc_id=?")) { statement =>
      statement.setBytes(1, channelId.toArray)
      statement.setLong(2, htlcId)
      statement.executeUpdate()
    }
  }

  override def listPendingRelay(channelId: ByteVector32): Seq[Command with HasHtlcId] = {
    using(sqlite.prepareStatement("SELECT data FROM pending_relay WHERE channel_id=?")) { statement =>
      statement.setBytes(1, channelId.toArray)
      val rs = statement.executeQuery()
      codecSequence(rs, cmdCodec)
    }
  }

  override def listPendingRelay(): Set[(ByteVector32, Long)] = {
    using(sqlite.prepareStatement("SELECT channel_id, htlc_id FROM pending_relay")) { statement =>
      val rs = statement.executeQuery()
      var q: Queue[(ByteVector32, Long)] = Queue()
      while (rs.next()) {
        q = q :+ (rs.getByteVector32("channel_id"), rs.getLong("htlc_id"))
      }
      q.toSet
    }
  }

  // used by mobile apps
  override def close(): Unit = sqlite.close()
} 
Example 17
Source File: Traversal.scala    From Implementing-Graph-Algorithms-using-Scala   with MIT License
package com.packt.demo.section2

import com.packt.demo.section1.Graph

import scala.collection.immutable.Queue

object Traversal {
  def traversalBFS[V](start: V, graph: Graph[V], f: V => Unit): Unit = {
    Stream.iterate((Queue(start), Set[V](start))) { case (q, visited) =>
      val (vertex, rest) = q.dequeue
      val newQueue = rest.enqueue(graph.neighbours(vertex).filterNot(visited.contains))
      val newVisited = graph.neighbours(vertex).toSet ++ visited
      (newQueue, newVisited)
    }
      .takeWhile(t => t._1.nonEmpty).foreach(t => f(t._1.head))
  }

  def traversalDFS[V](start: V, graph: Graph[V], f: V => Unit,
                      visited: Set[V] = Set[V]()): Set[V] = {
    if (visited.contains(start)) visited
    else {
      f(start)
      graph.neighbours(start).foldLeft(visited + start)((allVisited, n) =>
        traversalDFS(n, graph, f, allVisited))
    }
  }

  def traversalDFSAlt[V](start: V, graph: Graph[V], f: V => Unit): Unit = {
    Stream.iterate((List(start), Set[V](start))) { case (stk, visited) =>
        val vertex = stk.head
        val newStack = graph.neighbours(vertex).filterNot(visited.contains) ++ stk.tail
        val newVisited = graph.neighbours(vertex).toSet ++ visited
      (newStack, newVisited)
    }
      .takeWhile(t => t._1.nonEmpty).foreach(t => f(t._1.head))
  }
} 
Example 18
Source File: FlowNetwork.scala    From Implementing-Graph-Algorithms-using-Scala   with MIT License
package com.packt.demo.section6

import scala.collection.immutable.Queue


object FlowNetwork {
  val vertices = List("S", "A", "B", "C", "D", "T")

  // 6x6 capacity matrix in row-major order, one row per vertex in `vertices`.
  // The zero capacities below are placeholders: the original example's values
  // were not preserved in this excerpt.
  val g = Vector(
    0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0)

  val n: Int = vertices.size

  def neighbours(graph: Vector[Int], u: Int): Vector[Int] = graph.slice(u * n, u * n + n)

  def edgeAt(graph: Vector[Int], u: Int, v: Int): Int = graph(u * n + v)

  def update(graph: Vector[Int], u: Int, v: Int, w: Int):Vector[Int] = graph.updated(u * n + v, w)

  private def buildPath(child: Int, parents: Map[Int, Int]): List[(Int, Int)] = {
    parents.get(child).map(p => (p, child) +: buildPath(p, parents))
      .getOrElse(Nil)
  }

  def findPathBFS(s: Int, t: Int, graph: Vector[Int]): List[(Int, Int)] = {
    val sq = Stream.iterate((Queue(s), Set(s), Map[Int, Int]())) {
      case (q, visited, parents) =>
        val (vertex, tail) = q.dequeue
        val nbours = neighbours(graph, vertex).zipWithIndex
          .collect { case (f, id) if f > 0 => id }.toSet -- visited
        val newQueue = tail ++ nbours
        val newVisited = nbours ++ visited
        val newParents = parents ++ nbours.map(_ -> vertex)
        (newQueue, newVisited, newParents)
    }
    val parentsMap = sq.takeWhile(q => q._1.nonEmpty).last._3
    buildPath(t, parentsMap).reverse
  }
} 
Example 19
Source File: Arrays.scala    From finagle-postgres   with Apache License 2.0
package com.twitter.finagle.postgres.values

import scala.collection.immutable.Queue
import scala.util.parsing.combinator.RegexParsers

import com.twitter.util.{Return, Throw, Try}
import io.netty.buffer.ByteBuf
object Arrays {

  object ArrayStringParser extends RegexParsers {

    val value = """([^",}]|\")*""".r | """"([^"]|\")*"""".r
    val valueComma = "," ~ value ^^ { case "," ~ v => v }
    val values = (value ~ valueComma.*) ^^ { case first ~ rest => first :: rest } | value.? ^^ (_.toList)
    val array = "{" ~ values ~ "}" ^^ { case _ ~ vs ~ _ => vs }
    val maybeArrayValue = array | value ^^ (List(_))
    val maybeArrayValueComma = ("," ~ maybeArrayValue) ^^ { case _ ~ v => v}
    val maybeArrayValues =
      (maybeArrayValue ~ maybeArrayValueComma.*) ^^ { case first ~ rest => first ::: rest.flatten } |
        maybeArrayValue.* ^^ (_.flatten)
    val root = "{" ~ maybeArrayValues ~ "}" ^^ {
      case _ ~ vs ~ _ => vs
    }

    def apply(str: String) = parseAll(root, str) match {
      case Success(strings, _) => Return(strings)
      case Failure(_, _) | Error(_, _) => Throw(new Exception("Failed to parse array string"))
    }

  }

  // TODO: this isn't used anywhere, but it would need access to the type map and it would need to receive the elemoid
  def decodeArrayText[T](str: String, elemDecoder: ValueDecoder[T]) = {
    ArrayStringParser(str).flatMap {
      strings => strings.map(str => elemDecoder.decodeText("", str)).foldLeft[Try[Queue[T]]](Return(Queue.empty[T])) {
        (accum, next) => accum.flatMap {
          current => next.map(v => current enqueue v)
        }
      }
    }
  }

  def decodeArrayBinary[T](buf: ByteBuf, elemDecoder: ValueDecoder[T]) = {
    val ndims = buf.readInt()
    val flags = buf.readInt()
    val elemOid = buf.readInt()
  }

} 
Example 20
Source File: package.scala    From finagle-postgres   with Apache License 2.0
package com.twitter.finagle.postgres

import scala.collection.immutable.Queue

import com.twitter.finagle.postgres.generic.enumeration.Enums
import com.twitter.finagle.postgres.values.ValueEncoder
import shapeless.ops.hlist.{LeftFolder, LiftAll, Mapper, ToList, ToTraversable, Zip}
import shapeless._
import shapeless.labelled.FieldType

package object generic extends Enums {

  implicit class ToShapelessClientOps(val client: PostgresClient) extends AnyVal {

    def queryAs[T <: Product](query: String, params: Param[_]*)(implicit
      rowDecoder: RowDecoder[T],
      columnNamer: ColumnNamer
    ) = {
      client.prepareAndQuery(query, params: _*)(row => rowDecoder(row)(columnNamer))
    }

  }

  trait QueryParam {
    def params: Seq[Param[_]]
    def placeholders(start: Int): Seq[String]
  }

  implicit class ToQueryParam[T](v: T)(implicit qp: QueryParams[T]) extends QueryParam {
    @inline final def params: Seq[Param[_]] = qp(v)
    @inline final def placeholders(start: Int): Seq[String] = qp.placeholders(v, start)
  }

  object toParam extends Poly1 {
    implicit def cases[T](implicit encoder: ValueEncoder[T]) = at[T](t => Param(t))
  }

  object toLabelledParam extends Poly1 {
    implicit def cases[K <: Symbol, T](implicit name: Witness.Aux[K], encoder: ValueEncoder[T]) = at[FieldType[K, T]] {
      t => name.value.name -> Param(t: T)
    }
  }

  implicit class QueryContext(val str: StringContext) extends AnyVal {
    def sql(queryParams: QueryParam*) = {
      val parts = if(str.parts.last == "") str.parts.dropRight(1) else str.parts
      val diff = queryParams.length - parts.length
      val pad = if(diff > 0) Seq.fill(diff)("") else Seq.empty
      Query(
        parts ++ pad,
        queryParams,
        identity)
    }
  }

} 
Example 21
Source File: Query.scala    From finagle-postgres   with Apache License 2.0
package com.twitter.finagle.postgres.generic

import com.twitter.concurrent.AsyncStream

import scala.collection.immutable.Queue
import com.twitter.finagle.postgres.{Param, PostgresClient, Row}
import com.twitter.util.Future

import scala.language.existentials

case class Query[T](parts: Seq[String], queryParams: Seq[QueryParam], cont: Row => T) {

  def stream(client: PostgresClient): AsyncStream[T] = {
    val (queryString, params) = impl
    client.prepareAndQueryToStream[T](queryString, params: _*)(cont)
  }

  def run(client: PostgresClient): Future[Seq[T]] =
    stream(client).toSeq

  def exec(client: PostgresClient): Future[Int] = {
    val (queryString, params) = impl
    client.prepareAndExecute(queryString, params: _*)
  }

  def map[U](fn: T => U): Query[U] = copy(cont = cont andThen fn)

  def as[U](implicit rowDecoder: RowDecoder[U], columnNamer: ColumnNamer): Query[U] = {
    copy(cont = row => rowDecoder(row)(columnNamer))
  }

  private def impl: (String, Seq[Param[_]]) = {
    val (last, placeholders, params) = queryParams.foldLeft((1, Queue.empty[Seq[String]], Queue.empty[Param[_]])) {
      case ((start, placeholders, params), next) =>
        val nextPlaceholders = next.placeholders(start)
        val nextParams = Queue(next.params: _*)
        (start + nextParams.length, placeholders enqueue nextPlaceholders, params ++ nextParams)
    }

    val queryString = parts.zipAll(placeholders, "", Seq.empty).flatMap {
      case (part, ph) => Seq(part, ph.mkString(", "))
    }.mkString

    (queryString, params)
  }


}

object Query {
  implicit class RowQueryOps(val self: Query[Row]) extends AnyVal {
    def ++(that: Query[Row]): Query[Row] = Query[Row](
      parts = if(self.parts.length > self.queryParams.length)
        (self.parts.dropRight(1) :+ (self.parts.lastOption.getOrElse("") + that.parts.headOption.getOrElse(""))) ++ that.parts.drop(1)
      else
        self.parts ++ that.parts,
      queryParams = self.queryParams ++ that.queryParams,
      cont = self.cont
    )

    def ++(that: String): Query[Row] = Query[Row](
      parts = if(self.parts.length > self.queryParams.length)
          self.parts.dropRight(1) :+ (self.parts.last + that)
        else
          self.parts :+ that,
      queryParams = self.queryParams,
      cont = self.cont
    )
  }
} 
Example 22
Source File: PlanUtils.scala    From HANAVora-Extensions   with Apache License 2.0
package org.apache.spark.sql.util

import org.apache.spark.sql.catalyst.trees.TreeNode

import scala.collection.immutable.Queue


object PlanUtils {

  // The enclosing object and implicit class were omitted from this excerpt; this wrapper
  // is a reconstruction (names are assumptions) so the method has a TreeNode receiver in scope.
  implicit class TreeNodeOps[A <: TreeNode[A]](val plan: A) {

    def toLevelOrderSeq: Seq[A] = {
      def inner(acc: Queue[A], queue: Queue[A]): Seq[A] = {
        if (queue.isEmpty) {
          acc
        } else {
          val head = queue.head
          inner(acc :+ head, queue.tail ++ head.children)
        }
      }

      inner(Queue.empty, Queue(plan).asInstanceOf[Queue[A]])
    }
  }

  sealed trait TraversalType
  object PreOrder extends TraversalType
  object PostOrder extends TraversalType
  object LevelOrder extends TraversalType
} 
Example 23
Source File: Replica.scala    From Principles-of-Reactive-Programming   with GNU General Public License v3.0
package kvstore

import akka.actor.{ OneForOneStrategy, Props, ActorRef, Actor }
import kvstore.Arbiter._
import scala.collection.immutable.Queue
import akka.actor.SupervisorStrategy.Restart
import scala.annotation.tailrec
import akka.pattern.{ ask, pipe }
import akka.actor.Terminated
import scala.concurrent.duration._
import akka.actor.PoisonPill
import akka.actor.OneForOneStrategy
import akka.actor.SupervisorStrategy
import akka.util.Timeout

object Replica {
  sealed trait Operation {
    def key: String
    def id: Long
  }
  case class Insert(key: String, value: String, id: Long) extends Operation
  case class Remove(key: String, id: Long) extends Operation
  case class Get(key: String, id: Long) extends Operation

  sealed trait OperationReply
  case class OperationAck(id: Long) extends OperationReply
  case class OperationFailed(id: Long) extends OperationReply
  case class GetResult(key: String, valueOption: Option[String], id: Long) extends OperationReply

  def props(arbiter: ActorRef, persistenceProps: Props): Props = Props(new Replica(arbiter, persistenceProps))
}

class Replica(val arbiter: ActorRef, persistenceProps: Props) extends Actor {
  import Replica._
  import Replicator._
  import Persistence._
  import context.dispatcher

  
  val replica: Receive = {
    case _ =>
  }

} 
Example 24
Source File: FutureOps.scala    From matcher   with MIT License
package com.wavesplatform.dex.effect

import scala.collection.immutable.Queue
import scala.concurrent.{ExecutionContext, Future}

object FutureOps {

  final implicit class Implicits(val self: Future.type) extends AnyVal {
    def inSeries[A, B](xs: Iterable[A])(f: A => Future[B])(implicit ec: ExecutionContext): Future[Queue[B]] = {
      xs.foldLeft(Future.successful(Queue.empty[B])) {
        case (r, x) =>
          for {
            xs <- r
            b  <- f(x)
          } yield xs.enqueue(b)
      }
    }
  }

} 
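
A minimal usage sketch of inSeries, assuming the Implicits class above is in scope; the input values and the doubling step are illustrative. Each Future is started only after the previous one completes, and results are collected into a Queue in input order.

import scala.collection.immutable.Queue
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global

import com.wavesplatform.dex.effect.FutureOps.Implicits

object InSeriesSketch extends App {
  // Runs one Future per element, strictly in sequence, keeping the input order.
  val done: Future[Queue[Int]] = Future.inSeries(List(1, 2, 3))(x => Future(x * 2))
  println(Await.result(done, 1.second)) // Queue(2, 4, 6)
}
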
Example 25
Source File: package.scala    From matcher   with MIT License
package com.wavesplatform.dex

import com.wavesplatform.dex.domain.bytes.ByteStr
import com.wavesplatform.dex.domain.model.Price
import com.wavesplatform.dex.domain.order.Order

import scala.collection.immutable.{Queue, TreeMap}

package object model {
  type Level                 = Queue[LimitOrder]
  type OrderBookSideSnapshot = Map[Price, Seq[LimitOrder]]

  type Side = TreeMap[Price, Level]
  implicit class SideExt(val side: Side) extends AnyVal {

    
    def best: Option[(Price, LimitOrder)] = side.headOption.flatMap { case (levelPrice, level) => level.headOption.map(levelPrice -> _) }

    def enqueue(levelPrice: Price, lo: LimitOrder): Side = side.updated(levelPrice, side.getOrElse(levelPrice, Queue.empty).enqueue(lo))

    def unsafeWithoutBest: (Side, Order.Id) = side.headOption match {
      case Some((price, level)) =>
        val updated = if (level.length == 1) side - price else side.updated(price, level.tail)
        (updated, level.head.order.id())
      case None => throw new IllegalArgumentException("Expected side to have at least one order")
    }

    def unsafeUpdateBest(updated: LimitOrder): Side = {
      require(side.nonEmpty, "Cannot replace the best level of an empty side")
      val (price, level) = side.head
      require(level.nonEmpty, "Cannot replace the best element of an empty level")
      val oldHead = level.head
      require(oldHead.order.id() == updated.order.id(), "Expected the same order")
      side.updated(price, updated +: level.tail)
    }

    def unsafeRemove(price: Price, orderId: ByteStr): (Side, LimitOrder) = {
      val (toRemove, toKeep) = side.getOrElse(price, Queue.empty).partition(_.order.id() == orderId)
      require(toRemove.nonEmpty, s"Order $orderId not found at $price")
      val updatedSide = if (toKeep.isEmpty) side - price else side.updated(price, toKeep)
      (updatedSide, toRemove.head)
    }

    def put(price: Price, lo: LimitOrder): Side = side.updated(price, side.getOrElse(price, Queue.empty).enqueue(lo))

    def aggregated: Iterable[LevelAgg] = for { (p, l) <- side.view if l.nonEmpty } yield LevelAgg(l.map(_.amount).sum, p)
    def bestLevel: Option[LevelAgg]    = aggregated.headOption
  }
} 
Example 26
Source File: frontier.scala    From aima-scala   with MIT License
package aima.core.search.uninformed

import aima.core.search.{Frontier, SearchNode}

import scala.collection.immutable.{Queue, Iterable}
import scala.collection.mutable
import scala.util.Try

class FIFOQueueFrontier[State, Action, Node <: SearchNode[State, Action]](queue: Queue[Node], stateSet: Set[State])
    extends Frontier[State, Action, Node] { self =>
  def this(n: Node) = this(Queue(n), Set(n.state))

  def removeLeaf: Option[(Node, Frontier[State, Action, Node])] = queue.dequeueOption.map {
    case (leaf, updatedQueue) => (leaf, new FIFOQueueFrontier[State, Action, Node](updatedQueue, stateSet - leaf.state))
  }
  def addAll(iterable: Iterable[Node]): Frontier[State, Action, Node] =
    new FIFOQueueFrontier(queue.enqueueAll(iterable), stateSet ++ iterable.map(_.state))
  def contains(state: State): Boolean = stateSet.contains(state)

  def replaceByState(node: Node): Frontier[State, Action, Node] = {
    if (contains(node.state)) {
      new FIFOQueueFrontier(queue.filterNot(_.state == node.state).enqueue(node), stateSet)
    } else {
      self
    }
  }
  def getNode(state: State): Option[Node] = {
    if (contains(state)) {
      queue.find(_.state == state)
    } else {
      None
    }
  }

  def add(node: Node): Frontier[State, Action, Node] =
    new FIFOQueueFrontier[State, Action, Node](queue.enqueue(node), stateSet + node.state)
}

class PriorityQueueHashSetFrontier[State, Action, Node <: SearchNode[State, Action]](
    queue: mutable.PriorityQueue[Node],
    stateMap: mutable.Map[State, Node]
) extends Frontier[State, Action, Node] { self =>

  def this(n: Node, costNodeOrdering: Ordering[Node]) =
    this(mutable.PriorityQueue(n)(costNodeOrdering), mutable.Map(n.state -> n))

  def removeLeaf: Option[(Node, Frontier[State, Action, Node])] =
    Try {
      val leaf = queue.dequeue
      stateMap -= leaf.state
      (leaf, self)
    }.toOption

  def addAll(iterable: Iterable[Node]): Frontier[State, Action, Node] = {
    iterable.foreach { costNode =>
      queue += costNode
      stateMap += (costNode.state -> costNode)
    }
    self
  }

  def contains(state: State): Boolean = stateMap.contains(state)

  def replaceByState(node: Node): Frontier[State, Action, Node] = {
    if (contains(node.state)) {
      val updatedElems = node :: queue.toList.filterNot(_.state == node.state)
      queue.clear()
      queue.enqueue(updatedElems: _*)
      stateMap += (node.state -> node)
    }
    self
  }

  def getNode(state: State): Option[Node] = {
    if (contains(state)) {
      queue.find(_.state == state)
    } else {
      None
    }
  }

  def add(node: Node): Frontier[State, Action, Node] = {
    val costNode = node
    queue.enqueue(costNode)
    stateMap += (node.state -> costNode)
    self
  }
}