scala.collection.immutable.VectorBuilder Scala Examples
The following examples show how to use scala.collection.immutable.VectorBuilder.
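VectorBuilder is the mutable builder behind Vector: you append elements with += or ++= and call result() to obtain the immutable Vector, while clear() resets the builder for reuse. Before the project examples, here is a minimal self-contained sketch of that core API:

import scala.collection.immutable.VectorBuilder

object VectorBuilderBasics extends App {
  val builder = new VectorBuilder[Int]

  builder += 1          // append one element
  builder ++= Seq(2, 3) // append a whole collection

  val vec: Vector[Int] = builder.result() // materialize the immutable Vector
  println(vec)                            // prints: Vector(1, 2, 3)

  builder.clear() // empty the builder so it can build another Vector
}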
Example 1
Source File: GroupedWithin.scala from lila-ws (GNU Affero General Public License v3.0)
package lila.ws
package util

import akka.actor.typed.Scheduler
import akka.actor.Cancellable
import scala.collection.immutable.VectorBuilder
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.ExecutionContext

final class GroupedWithin()(implicit scheduler: Scheduler, ec: ExecutionContext) {

  def apply[A](nb: Int, interval: FiniteDuration)(emit: Emit[Vector[A]]) =
    new GroupedWithinStage[A](nb, interval, emit)
}

final class GroupedWithinStage[A](
    nb: Int,
    interval: FiniteDuration,
    emit: Emit[Vector[A]]
)(implicit
    scheduler: Scheduler,
    ec: ExecutionContext
) {

  private val buffer: VectorBuilder[A] = new VectorBuilder

  private var scheduledFlush: Cancellable = scheduler.scheduleOnce(interval, () => flush)

  def apply(elem: A): Unit =
    synchronized {
      buffer += elem
      if (buffer.size >= nb) unsafeFlush
    }

  private def flush(): Unit = synchronized { unsafeFlush }

  private def unsafeFlush(): Unit = {
    if (buffer.nonEmpty) {
      emit(buffer.result)
      buffer.clear
    }
    scheduledFlush.cancel
    scheduledFlush = scheduler.scheduleOnce(interval, () => flush)
  }
}
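For context, here is a minimal, hypothetical driver for this stage. It assumes lila-ws defines Emit[A] as an alias for A => Unit and that the code sits in the lila.ws package where that alias is visible; GroupedWithinExample, batcher, and onEvent are illustrative names, not part of the project.

package lila.ws

import akka.actor.typed.Scheduler
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

// Hypothetical usage sketch; assumes Emit[A] = A => Unit as in lila-ws.
class GroupedWithinExample(implicit scheduler: Scheduler, ec: ExecutionContext) {
  private val grouped = new GroupedWithin()

  // Flush whenever 100 elements have accumulated, or after 1 second at the latest.
  private val batcher = grouped[String](nb = 100, interval = 1.second) { batch =>
    println(s"flushing ${batch.size} buffered elements")
  }

  // apply() on the stage is synchronized, so this is safe to call from any thread.
  def onEvent(e: String): Unit = batcher(e)
}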
Example 2
Source File: CassandraEventLogStore.scala from eventuate (Apache License 2.0)
package com.rbmhtechnology.eventuate.log.cassandra

import java.io.Closeable
import java.lang.{ Long => JLong }

import com.datastax.driver.core._
import com.rbmhtechnology.eventuate._
import com.rbmhtechnology.eventuate.log._

import scala.collection.JavaConverters._
import scala.collection.immutable.{ VectorBuilder, Seq }
import scala.concurrent.{ ExecutionContext, Future }

private[eventuate] class CassandraEventLogStore(cassandra: Cassandra, logId: String) {
  val preparedWriteEventStatement: PreparedStatement = cassandra.prepareWriteEvent(logId)
  val preparedReadEventsStatement: PreparedStatement = cassandra.prepareReadEvents(logId)

  def write(events: Seq[DurableEvent], partition: Long) = cassandra.executeBatch { batch =>
    events.foreach { event =>
      batch.add(preparedWriteEventStatement.bind(partition: JLong, event.localSequenceNr: JLong, cassandra.eventToByteBuffer(event)))
    }
  }

  // forwards with an unlimited scan limit and a pass-all filter
  def readAsync(fromSequenceNr: Long, toSequenceNr: Long, max: Int, fetchSize: Int)(implicit executor: ExecutionContext): Future[BatchReadResult] =
    readAsync(fromSequenceNr, toSequenceNr, max, Int.MaxValue, fetchSize, _ => true)

  def readAsync(fromSequenceNr: Long, toSequenceNr: Long, max: Int, scanLimit: Int, fetchSize: Int, filter: DurableEvent => Boolean)(implicit executor: ExecutionContext): Future[BatchReadResult] =
    Future(read(fromSequenceNr, toSequenceNr, max, scanLimit, fetchSize, filter))

  def read(fromSequenceNr: Long, toSequenceNr: Long, max: Int, scanLimit: Int, fetchSize: Int, filter: DurableEvent => Boolean): BatchReadResult = {
    val iter = eventIterator(fromSequenceNr, toSequenceNr, fetchSize)
    val builder = new VectorBuilder[DurableEvent]

    var lastSequenceNr = fromSequenceNr - 1L
    var scanned = 0
    var filtered = 0

    while (iter.hasNext && filtered < max && scanned < scanLimit) {
      val event = iter.next()
      if (filter(event)) {
        builder += event
        filtered += 1
      }
      scanned += 1
      lastSequenceNr = event.localSequenceNr
    }
    BatchReadResult(builder.result(), lastSequenceNr)
  }

  def eventIterator(fromSequenceNr: Long, toSequenceNr: Long, fetchSize: Int): Iterator[DurableEvent] with Closeable =
    new EventIterator(fromSequenceNr, toSequenceNr, fetchSize)

  private class EventIterator(fromSequenceNr: Long, toSequenceNr: Long, fetchSize: Int) extends Iterator[DurableEvent] with Closeable {
    import cassandra.settings._
    import EventLog._

    var currentSequenceNr = math.max(fromSequenceNr, 1L)
    var currentPartition = partitionOf(currentSequenceNr, partitionSize)

    var currentIter = newIter()
    var read = currentSequenceNr != firstSequenceNr(currentPartition, partitionSize)

    def newIter(): Iterator[Row] =
      if (currentSequenceNr > toSequenceNr) Iterator.empty
      else read(lastSequenceNr(currentPartition, partitionSize) min toSequenceNr).iterator.asScala

    def read(upperSequenceNr: Long): ResultSet =
      cassandra.session.execute(preparedReadEventsStatement.bind(currentPartition: JLong, currentSequenceNr: JLong, upperSequenceNr: JLong).setFetchSize(fetchSize))

    @annotation.tailrec
    final def hasNext: Boolean = {
      if (currentIter.hasNext) {
        true
      } else if (read) {
        // some events read from current partition, try next partition
        currentPartition += 1
        currentSequenceNr = firstSequenceNr(currentPartition, partitionSize)
        currentIter = newIter()
        read = false
        hasNext
      } else {
        // no events read from current partition, we're done
        false
      }
    }

    def next(): DurableEvent = {
      val row = currentIter.next()
      currentSequenceNr = row.getLong("sequence_nr")
      read = true
      cassandra.eventFromByteBuffer(row.getBytes("event"))
    }

    override def close(): Unit = ()
  }
}
Example 3
Source File: GroupedWithinTask.scala from incubator-retired-gearpump (Apache License 2.0)
package org.apache.gearpump.akkastream.task

import org.apache.gearpump.Message
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.streaming.task.TaskContext

import scala.collection.immutable.VectorBuilder
import scala.concurrent.duration.FiniteDuration

class GroupedWithinTask[T](context: TaskContext, userConf: UserConfig)
  extends GraphTask(context, userConf) {

  case object GroupedWithinTrigger

  val buf: VectorBuilder[T] = new VectorBuilder
  val timeWindow = userConf.getValue[FiniteDuration](GroupedWithinTask.TIME_WINDOW)
  val batchSize = userConf.getInt(GroupedWithinTask.BATCH_SIZE)

  override def onNext(msg: Message): Unit = {
  }
}

object GroupedWithinTask {
  val BATCH_SIZE = "BATCH_SIZE"
  val TIME_WINDOW = "TIME_WINDOW"
}
Example 4
Source File: JdbcReadSideQuery.scala from lagom (Apache License 2.0)
package docs.home.scaladsl.persistence

//#imports
import scala.collection.immutable
import scala.collection.immutable.VectorBuilder

import akka.NotUsed
import com.lightbend.lagom.scaladsl.api.Service
import com.lightbend.lagom.scaladsl.api.ServiceCall
import com.lightbend.lagom.scaladsl.persistence.jdbc.JdbcSession
//#imports

trait JdbcReadSideQuery {
  trait BlogService extends Service {
    def getPostSummaries(): ServiceCall[NotUsed, immutable.IndexedSeq[PostSummary]]

    override def descriptor = ???
  }

  //#service-impl
  class BlogServiceImpl(jdbcSession: JdbcSession) extends BlogService {
    import JdbcSession.tryWith

    override def getPostSummaries() = ServiceCall { request =>
      jdbcSession.withConnection { connection =>
        tryWith(connection.prepareStatement("SELECT id, title FROM blogsummary")) { ps =>
          tryWith(ps.executeQuery()) { rs =>
            val summaries = new VectorBuilder[PostSummary]
            while (rs.next()) {
              summaries += PostSummary(rs.getString("id"), rs.getString("title"))
            }
            summaries.result()
          }
        }
      }
    }
    //#service-impl
  }
}
Example 5
Source File: RedisMsgTest.scala from scala-commons (MIT License)
package com.avsystem.commons
package redis.protocol

import akka.util.ByteString
import com.avsystem.commons.redis.protocol.RedisMsgScalacheck._
import org.scalacheck.Gen
import org.scalatest.funsuite.AnyFunSuite
import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks

import scala.collection.immutable.VectorBuilder

class RedisMsgTest extends AnyFunSuite with ScalaCheckPropertyChecks {
  def splitAtIndices(repr: ByteString, indices: Seq[Int]): Seq[ByteString] =
    (indices :+ repr.length).foldLeft((0, Vector.empty[ByteString])) {
      case ((prevIdx, acc), nextIdx) => (nextIdx, acc :+ repr.slice(prevIdx, nextIdx))
    }._2

  test("encoded and then decoded messages should be equal to the original messages") {
    val gen = for {
      redisMsgs <- Gen.buildableOf[Seq[RedisMsg], RedisMsg](redisProtocolMsgGen)
      splitPoints <- Gen.buildableOf[Seq[Double], Double](Gen.choose(0.0, 1.0))
    } yield (redisMsgs, splitPoints)

    forAll(gen) { case (redisMsgs, splitPoints) =>
      val repr = RedisMsg.encode(redisMsgs)
      val splitIndices = splitPoints.map(sp => (sp * (repr.size - 1)).toInt).toSet.toVector.sorted
      val encodedParts = splitAtIndices(repr, splitIndices)
      val decoded = new VectorBuilder[RedisMsg]
      val decoder = new RedisMsg.Decoder
      encodedParts.foreach(bs => decoder.decodeMore(bs)(decoded += _))
      val decodedMsgs = decoded.result()
      assert(decodedMsgs == redisMsgs)
    }
  }

  test("encoded size") {
    forAll(redisProtocolMsgGen) { msg =>
      assert(RedisMsg.encode(msg).length == RedisMsg.encodedSize(msg))
    }
  }

  test("simple string encode") {
    assert(RedisMsg.encode(SimpleStringMsg("asdf")).utf8String == "+asdf\r\n")
  }

  test("error encode") {
    assert(RedisMsg.encode(ErrorMsg("asdf")).utf8String == "-asdf\r\n")
  }

  test("bulk string encode") {
    assert(RedisMsg.encode(BulkStringMsg(ByteString("srsly"))).utf8String == "$5\r\nsrsly\r\n")
  }

  test("null bulk string encode") {
    assert(RedisMsg.encode(NullBulkStringMsg).utf8String == "$-1\r\n")
  }

  test("array encode") {
    assert(RedisMsg.encode(ArrayMsg(Vector(IntegerMsg(42), IntegerMsg(43)))).utf8String == "*2\r\n:42\r\n:43\r\n")
  }

  test("null array encode") {
    assert(RedisMsg.encode(NullArrayMsg).utf8String == "*-1\r\n")
  }

  test("integer encode") {
    assert(RedisMsg.encode(IntegerMsg(-1)).utf8String == ":-1\r\n")
  }
}
Example 6
Source File: AggregationNode.scala from ingraph (Eclipse Public License 1.0)
package ingraph.ire.nodes.unary.aggregation

import ingraph.ire.datatypes.{Tuple, TupleBag}
import ingraph.ire.messages.{ChangeSet, ReteMessage, SingleForwarder}
import ingraph.ire.nodes.unary.UnaryNode

import scala.collection.immutable.VectorBuilder
import scala.collection.mutable

class AggregationNode(override val next: (ReteMessage) => Unit,
                      mask: Vector[Tuple => Any],
                      factories: () => Vector[StatefulAggregate],
                      projection: Vector[Int]) extends UnaryNode with SingleForwarder {

  private val keyCount = mutable.Map[Tuple, Int]().withDefault(f => 0)
  private val data = mutable.Map[Tuple, Vector[StatefulAggregate]]().withDefault(f => factories())

  override def onChangeSet(changeSet: ChangeSet): Unit = {
    val oldValues = mutable.Map[Tuple, (Tuple, Int)]()

    for ((key, tuples) <- changeSet.positive.groupBy(t => mask.map(m => m(t)))) {
      val aggregators = data.getOrElseUpdate(key, factories())
      oldValues.getOrElseUpdate(key, (aggregators.map(_.value()), keyCount(key)))
      for (aggregator <- aggregators)
        aggregator.maintainPositive(tuples)
      keyCount(key) += tuples.size
    }

    for ((key, tuples) <- changeSet.negative.groupBy(t => mask.map(m => m(t)))) {
      val aggregators = data.getOrElseUpdate(key, factories())
      oldValues.getOrElseUpdate(key, (aggregators.map(_.value()), keyCount(key)))
      for (aggregator <- aggregators)
        aggregator.maintainNegative(tuples)
      keyCount(key) -= tuples.size
    }

    val positive = new VectorBuilder[Tuple]
    val negative = new VectorBuilder[Tuple]

    for ((key, (oldValues, oldCount)) <- oldValues) {
      val newValues = data(key).map(_.value())
      if (oldValues != newValues || oldCount == 0) {
        if (keyCount(key) != 0)
          positive += key ++ newValues
        if (oldCount != 0)
          negative += key ++ oldValues
      }
    }

    val positiveBag: TupleBag = positive.result().map(t => projection.map(t))
    val negativeBag: TupleBag = negative.result().map(t => projection.map(t))

    forward(ChangeSet(positive = positiveBag, negative = negativeBag))
  }

  override def onSizeRequest(): Long = ???
}
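The factories parameter supplies a fresh set of aggregators per group key. As a rough illustration, here is a hypothetical counting aggregate, inferred only from the three members used above (maintainPositive, maintainNegative, value); the actual ingraph StatefulAggregate trait may declare different parameter types.

import ingraph.ire.datatypes.{Tuple, TupleBag}

// Hypothetical aggregate sketch, not part of ingraph.
class CountAggregate extends StatefulAggregate {
  private var count: Long = 0
  override def maintainPositive(tuples: TupleBag): Unit = count += tuples.size // tuples added to the group
  override def maintainNegative(tuples: TupleBag): Unit = count -= tuples.size // tuples retracted from the group
  override def value(): Any = count
}

// Wiring it into the node, one counter per group key:
// new AggregationNode(next, mask, () => Vector(new CountAggregate), projection)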
Example 7
Source File: SortAndTopNode.scala from ingraph (Eclipse Public License 1.0)
package ingraph.ire.nodes.unary

import java.util.Comparator

import ingraph.ire.datatypes.Tuple
import ingraph.ire.math.GenericMath
import ingraph.ire.messages.{ChangeSet, ReteMessage, SingleForwarder}
import ingraph.ire.util.SizeCounter

import scala.collection.immutable.VectorBuilder

class SortAndTopNode(override val next: (ReteMessage) => Unit,
                     tupleLength: Int,
                     selectionMask: Vector[(Tuple) => Any],
                     skip: Option[Long],
                     limit: Option[Long],
                     ascendingOrder: Vector[Boolean]) extends UnaryNode with SingleForwarder {

  val longSkip = skip.getOrElse(0L)
  val longLimit = limit.getOrElse(Long.MaxValue / 2L)

  //implicit val order = new Ordering[Tuple] {
  val comparator = new Comparator[Tuple] {
    override def compare(x: Tuple, y: Tuple): Int = {
      if (x == y) return 0
      for ((x, y, ascending) <- (keyLookup(x), keyLookup(y), ascendingOrder).zipped) {
        val cmp = GenericMath.compare(x, y)
        if (cmp != 0) {
          return if (ascending) cmp else -cmp
        }
      }
      // Treemap uses the ordering function equality for detecting duplicate keys, so we need to make sure
      // that tuples with same keys are compared as different
      GenericMath.compare(x, y)
    }
  }

  // we can use Scala Tree once we migrate to 2.12
  val data: java.util.Map[Tuple, Int] = new java.util.TreeMap(comparator) //null //mutable.TreeMap[Tuple, Int]().withDefault(t => 0)

  def keyLookup(t: Tuple): Vector[Any] = selectionMask.map(m => m(t))

  def getTuplesInOrder: Vector[Tuple] = {
    var total: Long = 0
    val iterator = data.entrySet.iterator
    val builder = new VectorBuilder[Tuple]
    while (total < longLimit + longSkip && iterator.hasNext) {
      val entry = iterator.next
      val tuple = entry.getKey
      val count = entry.getValue
      for (i <- 0L until math.min(count, longLimit + longSkip - total))
        builder += tuple
      total += count
    }
    builder.result().drop(longSkip.toInt)
  }

  override def onChangeSet(changeSet: ChangeSet): Unit = {
    // TODO maybe checking the changed elements against the lowest forwarded element would speed things up
    val prevTop = getTuplesInOrder
    for (tuple <- changeSet.positive) {
      val count: Int = Some(data.get(tuple)).getOrElse(0)
      data.put(tuple, count + 1)
    }
    for (tuple <- changeSet.negative) {
      val count = data.get(tuple)
      if (count > 1) {
        data.put(tuple, count - 1)
      } else {
        data.remove(tuple)
      }
    }
    val topN = getTuplesInOrder
    if (topN != prevTop) {
      forward(ChangeSet(positive = topN, negative = prevTop))
    }
  }

  // TODO we can simplify this in 2.12
  override def onSizeRequest(): Long = SizeCounter.count(data.keySet)
}
Example 8
Source File: TestStage.scala from swave (Mozilla Public License 2.0)
package swave.core.internal.testkit

import scala.collection.immutable.VectorBuilder
import scala.concurrent.{Future, Promise}
import swave.core.impl.stages.StageImpl

private[testkit] trait TestStage extends StageImpl {

  private[this] val resultBuilder = new VectorBuilder[AnyRef]
  private[this] var _resultSize = 0
  private[this] var _fixtureState: TestFixture.State = TestFixture.State.Starting
  private[this] val _finishedState = Promise[TestFixture.State.Finished]()
  private[this] var onElem: AnyRef ⇒ Unit = x ⇒ ()

  def fixtureState: TestFixture.State = _fixtureState
  def fixtureState_=(value: TestFixture.State): Unit = {
    value match {
      case TestFixture.State.Cancelled ⇒ _finishedState.success(TestFixture.State.Cancelled)
      case TestFixture.State.Completed ⇒ _finishedState.success(TestFixture.State.Completed)
      case TestFixture.State.Error(e)  ⇒ _finishedState.failure(e)
      case _                           ⇒
    }
    _fixtureState = value
  }

  def finishedState: Future[TestFixture.State.Finished] = _finishedState.future

  private[testkit] final def result[T]: Vector[T] = resultBuilder.result().asInstanceOf[Vector[T]]
  private[testkit] final def resultSize: Int = _resultSize

  protected final def recordElem(elem: AnyRef): Unit = {
    resultBuilder += elem
    _resultSize += 1
    onElem(elem)
  }

  def appendElemHandler(f: AnyRef ⇒ Unit): Unit = {
    val prev = onElem
    onElem = { elem ⇒
      prev(elem)
      f(elem)
    }
  }

  def id: Int

  def formatLong: String

  def scriptedSize: Int
}
Example 9
Source File: LineRendering.scala from swave (Mozilla Public License 2.0)
package swave.core.graph.impl

import java.lang.StringBuilder
import swave.core.graph.{Digraph, GlyphSet}
import scala.collection.immutable.VectorBuilder
import Infrastructure._

private[graph] object LineRendering {

  def renderLines[V](nodes: Vector[Node], glyphSet: GlyphSet): Digraph.Rendering[V] = {
    var charBuf: Array[Char] = Array.emptyCharArray
    val lines = new VectorBuilder[String]
    val sb = new StringBuilder
    var maxLineLength = 0

    val vertexRenderings =
      for (node ← nodes) yield {
        val maxChars = node.glyphs.size * glyphSet.columns * 2
        if (charBuf.length < maxChars) charBuf = new Array[Char](maxChars)
        lines.clear()

        for (glyphRow ← 0 until glyphSet.rows) {
          val endIx = node.glyphs.foldLeft(0)((ix, glyph) ⇒ glyphSet.place(glyph, glyphRow, charBuf, ix))
          sb.append(charBuf, 0, endIx)

          // trim whitespace at line end
          while (sb.length > 0 && Character.isWhitespace(sb.charAt(sb.length - 1))) sb.setLength(sb.length - 1)

          // if (glyphRow == glyphSet.rows / 2) sb.append(' ').append(node.xRank.level)

          val line = sb.toString
          maxLineLength = math.max(maxLineLength, line.length)
          lines += line
          sb.setLength(0)
        }

        Digraph.VertexRendering(node.vertex.asInstanceOf[V], lines.result())
      }

    Digraph.Rendering(glyphSet, maxLineLength, vertexRenderings)
  }
}