scala.collection.immutable.Iterable Scala Examples
The following examples show how to use scala.collection.immutable.Iterable.
Example 1
Source File: MultiFixtureBase.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testing.utils

import java.util.concurrent.{Executors, ScheduledExecutorService, TimeUnit}

import com.daml.dec.DirectExecutionContext
import org.scalatest._
import org.scalatest.concurrent.{AsyncTimeLimitedTests, ScaledTimeSpans}
import org.scalatest.exceptions.TestCanceledException
import org.scalatest.time.Span

import scala.collection.immutable.Iterable
import scala.concurrent.duration.DurationInt
import scala.concurrent.{Future, Promise, TimeoutException}
import scala.util.control.{NoStackTrace, NonFatal}

trait MultiFixtureBase[FixtureId, TestContext]
    extends Assertions
    with BeforeAndAfterAll
    with ScaledTimeSpans
    with AsyncTimeLimitedTests {
  self: AsyncTestSuite =>

  private var es: ScheduledExecutorService = _

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    es = Executors.newScheduledThreadPool(1)
  }

  override protected def afterAll(): Unit = {
    es.shutdownNow()
    super.afterAll()
  }

  protected class TestFixture(val id: FixtureId, createContext: () => TestContext) {
    def context(): TestContext = createContext()
  }

  def timeLimit: Span = scaled(30.seconds)

  object TestFixture {
    def apply(id: FixtureId, createContext: () => TestContext): TestFixture =
      new TestFixture(id, createContext)

    def unapply(testFixture: TestFixture): Option[(FixtureId, TestContext)] =
      Some((testFixture.id, testFixture.context()))
  }

  protected def fixtures: Iterable[TestFixture]

  protected def allFixtures(runTest: TestContext => Future[Assertion]): Future[Assertion] =
    forAllFixtures(fixture => runTest(fixture.context))

  protected def forAllFixtures(runTest: TestFixture => Future[Assertion]): Future[Assertion] = {
    forAllMatchingFixtures { case f => runTest(f) }
  }

  // NOTE: parallelExecution, runTestAgainstFixture and foldAssertions are
  // referenced below but their definitions are elided from this excerpt of
  // the original source file.
  protected def forAllMatchingFixtures(
      runTest: PartialFunction[TestFixture, Future[Assertion]]): Future[Assertion] = {
    if (parallelExecution) {
      val results = fixtures.map(
        fixture =>
          if (runTest.isDefinedAt(fixture))
            runTestAgainstFixture(fixture, runTest)
          else
            Future.successful(succeed))
      Future.sequence(results).map(foldAssertions)
    } else {
      fixtures.foldLeft(Future.successful(succeed)) {
        case (resultSoFar, thisFixture) =>
          resultSoFar.flatMap {
            case Succeeded => runTestAgainstFixture(thisFixture, runTest)
            case other => Future.successful(other)
          }
      }
    }
  }
}
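The trait leaves fixtures abstract, so a concrete suite only has to supply them. A minimal sketch of such a suite (not from the daml sources; the String ids, Int contexts, and the ScalaTest 3.1-style AsyncFlatSpec import are all assumptions, and it relies on the trait members elided from the excerpt above):

import org.scalatest.flatspec.AsyncFlatSpec
import scala.collection.immutable.Iterable
import scala.concurrent.Future

class TwoFixtureSpec extends AsyncFlatSpec with MultiFixtureBase[String, Int] {
  // Two hypothetical fixtures, each recreating a fresh Int context on demand.
  override protected def fixtures: Iterable[TestFixture] =
    List(TestFixture("small", () => 1), TestFixture("large", () => 1000))

  // allFixtures runs the assertion against the context of every fixture.
  "every fixture" should "yield a positive context" in allFixtures { ctx =>
    Future.successful(assert(ctx > 0))
  }
}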
Example 2
Source File: frontier.scala From aima-scala with MIT License
package aima.core.search.uninformed

import aima.core.search.{Frontier, SearchNode}

import scala.collection.immutable.{Queue, Iterable}
import scala.collection.mutable
import scala.util.Try

// Immutable FIFO frontier: the queue ordering makes this the natural frontier
// for breadth-first search. Every operation returns a new frontier value.
class FIFOQueueFrontier[State, Action, Node <: SearchNode[State, Action]](
    queue: Queue[Node],
    stateSet: Set[State])
    extends Frontier[State, Action, Node] { self =>
  def this(n: Node) = this(Queue(n), Set(n.state))

  def removeLeaf: Option[(Node, Frontier[State, Action, Node])] = queue.dequeueOption.map {
    case (leaf, updatedQueue) =>
      (leaf, new FIFOQueueFrontier[State, Action, Node](updatedQueue, stateSet - leaf.state))
  }

  def addAll(iterable: Iterable[Node]): Frontier[State, Action, Node] =
    new FIFOQueueFrontier(queue.enqueueAll(iterable), stateSet ++ iterable.map(_.state))

  def contains(state: State): Boolean = stateSet.contains(state)

  def replaceByState(node: Node): Frontier[State, Action, Node] = {
    if (contains(node.state)) {
      new FIFOQueueFrontier(queue.filterNot(_.state == node.state).enqueue(node), stateSet)
    } else {
      self
    }
  }

  def getNode(state: State): Option[Node] = {
    if (contains(state)) {
      queue.find(_.state == state)
    } else {
      None
    }
  }

  def add(node: Node): Frontier[State, Action, Node] =
    new FIFOQueueFrontier[State, Action, Node](queue.enqueue(node), stateSet + node.state)
}

// Mutable priority frontier backed by a PriorityQueue plus a state index, as
// used for cost-ordered searches (uniform-cost, A*). Operations mutate the
// underlying structures and return the same frontier instance.
class PriorityQueueHashSetFrontier[State, Action, Node <: SearchNode[State, Action]](
    queue: mutable.PriorityQueue[Node],
    stateMap: mutable.Map[State, Node])
    extends Frontier[State, Action, Node] { self =>

  def this(n: Node, costNodeOrdering: Ordering[Node]) =
    this(mutable.PriorityQueue(n)(costNodeOrdering), mutable.Map(n.state -> n))

  def removeLeaf: Option[(Node, Frontier[State, Action, Node])] =
    Try {
      val leaf = queue.dequeue
      stateMap -= leaf.state
      (leaf, self)
    }.toOption

  def addAll(iterable: Iterable[Node]): Frontier[State, Action, Node] = {
    iterable.foreach { costNode =>
      queue += costNode
      stateMap += (costNode.state -> costNode)
    }
    self
  }

  def contains(state: State): Boolean = stateMap.contains(state)

  def replaceByState(node: Node): Frontier[State, Action, Node] = {
    if (contains(node.state)) {
      val updatedElems = node :: queue.toList.filterNot(_.state == node.state)
      queue.clear()
      queue.enqueue(updatedElems: _*)
      stateMap += (node.state -> node)
    }
    self
  }

  def getNode(state: State): Option[Node] = {
    if (contains(state)) {
      queue.find(_.state == state)
    } else {
      None
    }
  }

  def add(node: Node): Frontier[State, Action, Node] = {
    val costNode = node
    queue.enqueue(costNode)
    stateMap += (node.state -> costNode)
    self
  }
}
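Both frontiers expose the same handful of operations, which is enough to express the uniform-cost "insert or replace" step without committing to either implementation. A minimal sketch against that shared API (isBetter is a hypothetical caller-supplied predicate, typically a path-cost comparison; it is not part of the aima-scala API):

import aima.core.search.{Frontier, SearchNode}

object FrontierOps {
  // Add a successor if its state is new; otherwise let the caller decide
  // whether the node already queued for that state should be replaced.
  def relax[S, A, N <: SearchNode[S, A]](
      frontier: Frontier[S, A, N],
      successor: N,
      isBetter: (N, Option[N]) => Boolean): Frontier[S, A, N] =
    if (!frontier.contains(successor.state))
      frontier.add(successor)
    else if (isBetter(successor, frontier.getNode(successor.state)))
      frontier.replaceByState(successor)
    else
      frontier
}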
Example 3
Source File: FrontierSearch.scala From aima-scala with MIT License
package aima.core.search

import scala.collection.immutable.Iterable

trait Frontier[State, Action, Node <: SearchNode[State, Action]] {
  def replaceByState(childNode: Node): Frontier[State, Action, Node]
  def getNode(state: State): Option[Node]
  def removeLeaf: Option[(Node, Frontier[State, Action, Node])]
  def add(node: Node): Frontier[State, Action, Node]
  def addAll(iterable: Iterable[Node]): Frontier[State, Action, Node]
  def contains(state: State): Boolean
}

trait FrontierSearch[State, Action, Node <: SearchNode[State, Action]] {
  def newFrontier(state: State, noAction: Action): Frontier[State, Action, Node]
}
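Since Frontier is a pure interface, a complete search loop can be written against it alone. A sketch of a goal-test drain over any Frontier (assumptions: expand yields successor nodes and isGoal recognises goal states, both supplied by the caller):

import aima.core.search.{Frontier, SearchNode}

import scala.annotation.tailrec
import scala.collection.immutable.Iterable

object FrontierLoop {
  @tailrec
  def run[S, A, N <: SearchNode[S, A]](
      frontier: Frontier[S, A, N],
      isGoal: S => Boolean,
      expand: N => Iterable[N]): Option[N] =
    frontier.removeLeaf match {
      case None => None // frontier exhausted: no solution
      case Some((leaf, _)) if isGoal(leaf.state) => Some(leaf)
      case Some((leaf, rest)) =>
        // Enqueue only successors whose state is not already queued.
        run(rest.addAll(expand(leaf).filterNot(n => rest.contains(n.state))), isGoal, expand)
    }
}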
Example 4
Source File: MemoryVectorStoreIndexer.scala From dbpedia-spotlight-model with Apache License 2.0
package org.dbpedia.spotlight.db

import java.io.File

import breeze.linalg.DenseMatrix
import org.dbpedia.spotlight.db.model.{ResourceStore, TokenTypeStore}
import org.dbpedia.spotlight.exceptions.DBpediaResourceNotFoundException

import scala.collection.immutable.Iterable
import scala.io.Source
import java.util

import org.dbpedia.spotlight.db.memory.{MemoryStore, MemoryVectorStore}
import org.dbpedia.spotlight.model.{TokenType, DBpediaResource, TokenOccurrenceIndexer}

class MemoryVectorStoreIndexer(modelPath: File, dictPath: File) {
  lazy val contextStore = new MemoryVectorStore()
  var dict: Map[String, Int] = null

  def loadVectorDict(tokenTypeStore: TokenTypeStore, resourceStore: ResourceStore) = {
    println("Loading vector dictionary!")
    dict = Source.fromFile(dictPath, "UTF-8").getLines().map { line =>
      val contents = line.split("\t")
      (contents(0), contents(1).toInt)
    }.toMap

    var resources: collection.mutable.Map[Int, Int] = collection.mutable.HashMap[Int, Int]()
    var tokens: collection.mutable.Map[Int, Int] = collection.mutable.HashMap[Int, Int]()

    // TODO: error handling if we can't find the token or resource
    var failedResources = 0
    var succeededResources = 0
    var failedTokens = 0
    var succeededTokens = 0

    dict.foreach { case (key, value) =>
      if (key.startsWith("DBPEDIA_ID/")) {
        try {
          val resource = resourceStore.getResourceByName(key.replace("DBPEDIA_ID/", ""))
          resources += (resource.id -> value)
          succeededResources += 1
        } catch {
          case e: DBpediaResourceNotFoundException =>
            failedResources += 1
            if (failedResources % 1000 == 0) {
              println("Can't find resource: " + key.replace("DBPEDIA_ID/", ""))
            }
        }
      } else {
        val token = tokenTypeStore.getTokenType(key)
        if (token == TokenType.UNKNOWN) {
          failedTokens += 1
          if (failedTokens % 1000 == 0) {
            println("Can't find token: " + key)
          }
        } else {
          tokens += (token.id -> value)
          succeededTokens += 1
        }
      }
    }

    println("Failed on " + failedResources + " entities, succeeded on " + succeededResources)
    println("Failed on " + failedTokens + " tokens, succeeded on " + succeededTokens)

    contextStore.resourceIdToVectorIndex = resources.toMap
    contextStore.tokenTypeIdToVectorIndex = tokens.toMap
    println("Done loading dict.")
  }

  def loadVectorsAndWriteToStore(outputFile: File) = {
    println("Loading vectors..")
    val matrixSource = Source.fromFile(modelPath)
    val lines = matrixSource.getLines()
    val rows = lines.next().substring(2).toInt
    val cols = lines.next().substring(2).toInt
    contextStore.vectors = new DenseMatrix[Float](rows, cols)

    println("Reading CSV and writing to store...")
    lines.zipWithIndex.foreach { case (row_str, row_idx) =>
      if (row_idx % 10000 == 0) println("At row " + row_idx)
      val values = row_str.split(",").map(_.trim).map(_.toDouble)
      values.zipWithIndex.foreach { case (value, col_idx) =>
        contextStore.vectors(row_idx, col_idx) = value.toFloat
      }
    }
    matrixSource.close()

    println("Done, dumping..")
    MemoryStore.dump(contextStore, outputFile)
  }
}
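The dictionary file driving loadVectorDict is one tab-separated key-index pair per line, with entity keys carrying a DBPEDIA_ID/ prefix. A standalone sketch of just that parsing and partitioning step (the sample lines are made up):

object DictFormatDemo extends App {
  val sampleLines = List(
    "DBPEDIA_ID/Berlin\t0", // entity row -> resource vector index
    "berlin\t1"             // plain token row -> token vector index
  )

  // Same parse as loadVectorDict: split on tab, key mapped to vector index.
  val dict: Map[String, Int] = sampleLines.map { line =>
    val contents = line.split("\t")
    (contents(0), contents(1).toInt)
  }.toMap

  val (entities, tokens) = dict.partition(_._1.startsWith("DBPEDIA_ID/"))
  println(s"entities: $entities")
  println(s"tokens:   $tokens")
}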
Example 5
Source File: TextReport.scala From sbt-flaky with Apache License 2.0
package flaky.report

import flaky.FlakyTestReport

import scala.collection.immutable.Iterable

object TextReport {

  def render(report: FlakyTestReport): String = {
    val projectName = report.projectName
    val flaky = report.flakyTests
    import scala.Console._
    val sb = new StringBuilder
    sb.append(s"$CYAN Flaky tests result for $BOLD $projectName\n")
      .append(GREEN)
      .append("Healthy tests:\n")

    flaky
      .filter(_.failures == 0)
      .foreach { healthy =>
        sb.append(s"$GREEN${healthy.test}\n")
      }

    sb.append("\n")
      .append(RED)
      .append("Flaky tests:\n")

    val flakyTesRuns = flaky
      .filter(_.failures > 0)
      .sortBy(_.failures())
      .reverse

    flakyTesRuns
      .foreach { flaky =>
        sb.append(f"$RED${flaky.test} ${flaky.failures * 100f / flaky.totalRun}%.2f%%\n")
      }

    sb.append(s"\n${CYAN}Details:\n")

    val failedDetails: Iterable[String] = report.groupFlakyCases()
      .map { case (testClass, flakyTestCases) =>
        val flakyTestsDescription: String = flakyTestCases
          .sortBy(_.runNames.size)
          .map { fc =>
            val test = fc.test
            val message = fc.message.getOrElse("?")
            val runNames = fc.runNames.sorted.mkString(", ")
            val text =
              s"""| [${fc.runNames.size} times] $RED$test$RESET
                  | In following test runs: $runNames
                  | Message: $RED$message$RESET
                  | ${fc.stacktrace}
                  | """.stripMargin
            text
          }.mkString("\n")

        s"""
           | $RED$testClass
           |$flakyTestsDescription$RESET
           | """.stripMargin
      }

    failedDetails.foreach(sb.append)

    if (flakyTesRuns.isEmpty) {
      sb.append(s"${GREEN}No flaky test detected")
    }
    sb.append(RESET)
    sb.toString
  }
}
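The per-test percentage printed in the "Flaky tests" block is failures * 100 / totalRun, rendered with Scala's f interpolator. A tiny standalone check of that formatting (the counts are made up):

object FailureRateDemo extends App {
  val failures = 3
  val totalRun = 40
  // Same format as in render: two decimals followed by a literal percent sign.
  println(f"example.Test ${failures * 100f / totalRun}%.2f%%") // example.Test 7.50%
}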
Example 6
Source File: Example1.scala From tepkin with Apache License 2.0
package net.fehmicansaglam.tepkin.examples

import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
import akka.util.Timeout
import net.fehmicansaglam.bson.BsonDocument
import net.fehmicansaglam.bson.BsonDsl._
import net.fehmicansaglam.tepkin.MongoClient

import scala.collection.immutable.Iterable
import scala.concurrent.Await
import scala.concurrent.duration._

object Example1 extends App {
  val begin = System.currentTimeMillis()

  // Connect to Mongo client
  val client = MongoClient("mongodb://localhost")

  // Use client's execution context for async operations
  import client.{context, ec}

  // Obtain reference to database "tepkin" using client
  val db = client("tepkin")

  // Obtain reference to the collection "collection1" using database
  val collection1 = db("collection1")

  // Obtain reference to the collection "collection2" using database
  val collection2 = db("collection2")

  implicit val timeout: Timeout = 30.seconds
  implicit val mat = ActorMaterializer()

  // Batch document source
  def documents(n: Int): Source[List[BsonDocument], akka.NotUsed] = Source {
    Iterable.tabulate(n) { _ =>
      (1 to 1000).map(i => $document("name" := s"fehmi$i")).toList
    }
  }

  // Insert 3M documents and then read them all.
  val futureResult = for {
    delete1 <- collection1.drop()
    delete2 <- collection2.drop()
    insert1 <- collection1.insertFromSource(documents(1000)).runForeach(_ => ())
    insert2 <- collection2.insertFromSource(documents(2000)).runForeach(_ => ())
    source1 = collection1.find(BsonDocument.empty, batchMultiplier = 10000)
    source2 = collection2.find(BsonDocument.empty, batchMultiplier = 10000)
    fold1 = source1.runFold(0) { (total, documents) => total + documents.size }
    fold2 = source2.runFold(0) { (total, documents) => total + documents.size }
    result1 <- fold1
    result2 <- fold2
  } yield (result1, result2)

  val result = Await.result(futureResult, 90.seconds)

  println(s"collection1: ${result._1}")
  println(s"collection2: ${result._2}")
  println(s"Elapsed: ${System.currentTimeMillis() - begin}ms")

  // Drop created collections
  Await.ready(collection1.drop(), 10.seconds)
  Await.ready(collection2.drop(), 10.seconds)

  client.shutdown()
}
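Source(...) is why these examples import scala.collection.immutable.Iterable explicitly: akka-stream's Source.apply accepts only an immutable Iterable, so the general scala.collection.Iterable would be rejected at compile time. A Mongo-free sketch of the same batch shape, with small made-up numbers so it prints quickly (ActorMaterializer matches the tepkin-era Akka used above; newer Akka materializes from the ActorSystem directly):

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

import scala.collection.immutable.Iterable

object BatchDemo extends App {
  implicit val system: ActorSystem = ActorSystem("batch-demo")
  implicit val mat: ActorMaterializer = ActorMaterializer()
  import system.dispatcher

  // Each stream element is a whole batch (a List), mirroring documents(n).
  val batches: Source[List[Int], akka.NotUsed] = Source {
    Iterable.tabulate(3) { batch => (1 to 4).map(i => batch * 10 + i).toList }
  }

  batches.runWith(Sink.foreach(println)).onComplete(_ => system.terminate())
}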
Example 7
Source File: SinkExample.scala From tepkin with Apache License 2.0
package net.fehmicansaglam.tepkin.examples

import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
import net.fehmicansaglam.bson.BsonDsl._
import net.fehmicansaglam.bson.{BsonDocument, Bulk}
import net.fehmicansaglam.tepkin.MongoClient

import scala.collection.immutable.Iterable

object SinkExample extends App {

  // Connect to Mongo client
  val client = MongoClient("mongodb://localhost")

  import client.context

  // Obtain reference to database "tepkin" using client
  val db = client("tepkin")

  // Obtain reference to the collection "collection1" using database
  val collection1 = db("collection1")

  // Obtain reference to the collection "collection2" using database
  val collection2 = db("collection2")

  implicit val mat = ActorMaterializer()

  // Batch document source
  def documents(n: Int): Source[List[BsonDocument], akka.NotUsed] = Source {
    Iterable.tabulate(n) { _ =>
      (1 to 1000).map(i => $document("name" := s"fehmi$i")).toList
    }
  }

  val ref1 = documents(1000).map(Bulk).runWith(collection1.sink())
  val ref2 = documents(2000).map(Bulk).runWith(collection2.sink())

  client.shutdown(ref1, ref2)
}
Example 8
Source File: ProgressBarUtil.scala From Argus-SAF with Apache License 2.0
package org.argus.jawa.core.util

import hu.ssh.progressbar.ProgressBar

import scala.collection.immutable.Iterable

object ProgressBarUtil {
  def withProgressBar[T, R](msg: String, pb: ProgressBar)(tasks: Iterable[T], f: T => R): Iterable[R] = {
    println(msg + " Total: " + tasks.size)
    if (tasks.isEmpty) return isetEmpty
    val progressBar = pb.withTotalSteps(tasks.size)
    progressBar.start()
    val result = tasks.map { task =>
      progressBar.tickOne()
      f(task)
    }
    progressBar.complete()
    result
  }
}
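A hedged usage sketch: given some already-configured ProgressBar (construction is specific to the hu.ssh progressbar library and omitted here), withProgressBar maps a function over the tasks while ticking the bar once per element. ProgressBarDemo and squareAll are hypothetical names:

import hu.ssh.progressbar.ProgressBar
import org.argus.jawa.core.util.ProgressBarUtil

import scala.collection.immutable.Iterable

object ProgressBarDemo {
  // `pb` is assumed to be built elsewhere, e.g. from the library's console bar.
  def squareAll(pb: ProgressBar): Iterable[Int] =
    ProgressBarUtil.withProgressBar("Squaring.", pb)((1 to 100).toList, i => i * i)
}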
Example 9
Source File: ClusteringEvaluator.scala From fotm-info with MIT License
package info.fotm.clustering

import info.fotm.clustering.ClusteringEvaluatorData.DataPoint
import info.fotm.domain.Domain._
import info.fotm.domain._
import info.fotm.util.Statistics.Metrics
import info.fotm.util.{MathVector, Statistics}

import scala.collection.breakOut
import scala.collection.immutable.Iterable
import scala.util.Random

class ClusteringEvaluator(features: List[Feature[CharacterStatsUpdate]]) extends App {
  type Bucket = Set[CharacterStatsUpdate]
  type BucketFilter = (Bucket => Bucket)

  def findTeamsInUpdate(
      ladderUpdate: LadderUpdate,
      clusterer: RealClusterer,
      bucketFilter: BucketFilter = identity): Set[Team] =
    (for {
      bucket <- splitIntoBuckets(ladderUpdate)
      team <- findTeamsInBucket(bucket, ladderUpdate.current.axis.bracket.size, clusterer, bucketFilter)
    } yield team)(breakOut)

  def splitIntoBuckets(ladderUpdate: LadderUpdate): Iterable[Set[CharacterStatsUpdate]] =
    for {
      (_, factionUpdates) <- ladderUpdate.statsUpdates.groupBy(u => ladderUpdate.current.rows(u.id).view.factionId)
      (winners, losers) = factionUpdates.partition(u =>
        ladderUpdate.current(u.id).season.wins > ladderUpdate.previous(u.id).season.wins)
      bucket <- Seq(winners, losers)
    } yield bucket

  def findTeamsInBucket(
      inputBucket: Set[CharacterStatsUpdate],
      teamSize: Int,
      clusterer: RealClusterer,
      bucketFilter: BucketFilter = identity): Set[Team] = {
    val bucket = bucketFilter(inputBucket).toSeq // NB! do not remove .toSeq here or .zip below won't work

    if (bucket.isEmpty) Set()
    else {
      val featureVectors: Seq[MathVector] = Feature.normalize(features, bucket)
      val featureMap: Map[CharacterId, MathVector] = bucket.map(_.id).zip(featureVectors)(breakOut)
      val clusters = clusterer.clusterize(featureMap, teamSize)
      clusters.map(ps => Team(ps.toSet))
    }
  }

  def noiseFilter(nLost: Int): BucketFilter =
    bucket => Random.shuffle(bucket.toSeq).drop(nLost / 2).dropRight(nLost / 2).toSet

  def evaluateStep(
      clusterer: RealClusterer,
      ladderUpdate: LadderUpdate,
      games: Set[Game],
      nLost: Int = 0): Statistics.Metrics = {
    print(".")
    val actualTeamsPlayed: Set[Team] = games.flatMap(g => Seq(g._1, g._2))
    val bucketFilter: BucketFilter = noiseFilter(nLost)
    val teamsFound: Set[Team] = findTeamsInUpdate(ladderUpdate, clusterer, bucketFilter)
    Statistics.calcMetrics(teamsFound, actualTeamsPlayed) // TODO: add noise filtering
  }

  def evaluate(clusterer: RealClusterer, data: Stream[DataPoint]): Double = {
    val stats: Seq[Metrics] = for {
      (ladderUpdate, games) <- data
      noise = 2 * games.head._1.members.size - 1
    } yield evaluateStep(clusterer, ladderUpdate, games, noise)

    val combinedMetrics: Metrics = stats.reduce(_ + _)
    println(s"\n$combinedMetrics")

    Statistics.fScore(0.5)(combinedMetrics)
  }
}
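evaluate collapses the per-step metrics and scores them with Statistics.fScore(0.5), an F-score weighted toward precision. For reference, a self-contained version of the standard F-beta formula such a call presumably implements (the exact fotm-info definition lives in info.fotm.util.Statistics; the numbers below are made up):

object FScoreDemo extends App {
  // Standard weighted F-score: F_beta = (1 + b^2) * P * R / (b^2 * P + R).
  def fScore(beta: Double)(precision: Double, recall: Double): Double = {
    val b2 = beta * beta
    (1 + b2) * precision * recall / (b2 * precision + recall)
  }

  // beta = 0.5 rewards precision over recall.
  println(fScore(0.5)(precision = 0.9, recall = 0.6)) // ~0.8182
}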
Example 10
Source File: ReactElementContainer.scala From slinky with MIT License
package slinky.core

import slinky.core.facade.ReactElement

import scala.collection.immutable.{Iterable, Queue}
import scala.concurrent.Future
import scala.scalajs.js
import scala.util.Try

trait ReactElementContainer[F[_]] extends Any { self =>
  def map[A](fa: F[A])(f: A => ReactElement): F[ReactElement]
}

object ReactElementContainer {
  def apply[F[_]: ReactElementContainer]: ReactElementContainer[F] = implicitly[ReactElementContainer[F]]

  @inline implicit def function0Container: ReactElementContainer[Function0] =
    new ReactElementContainer[Function0] {
      override def map[A](fa: () => A)(f: A => ReactElement): () => ReactElement = () => f(fa())
    }

  @inline implicit def futureContainer: ReactElementContainer[Future] = new ReactElementContainer[Future] {
    import scala.concurrent.ExecutionContext.Implicits.global
    override def map[A](fa: Future[A])(f: A => ReactElement): Future[ReactElement] = fa.map(f)
  }

  @inline implicit def iterableContainer: ReactElementContainer[Iterable] = new ReactElementContainer[Iterable] {
    override def map[A](fa: Iterable[A])(f: A => ReactElement): Iterable[ReactElement] = fa.map(f)
  }

  @inline implicit def jsUndefOrContainer: ReactElementContainer[js.UndefOr] = new ReactElementContainer[js.UndefOr] {
    override def map[A](fa: js.UndefOr[A])(f: A => ReactElement): js.UndefOr[ReactElement] = fa.map(f)
  }

  @inline implicit def listContainer: ReactElementContainer[List] = new ReactElementContainer[List] {
    override def map[A](fa: List[A])(f: A => ReactElement): List[ReactElement] = fa.map(f)
  }

  @inline implicit def optionContainer: ReactElementContainer[Option] = new ReactElementContainer[Option] {
    override def map[A](fa: Option[A])(f: A => ReactElement): Option[ReactElement] = fa.map(f)
  }

  @inline implicit def queueContainer: ReactElementContainer[Queue] = new ReactElementContainer[Queue] {
    override def map[A](fa: Queue[A])(f: A => ReactElement): Queue[ReactElement] = fa.map(f)
  }

  @inline implicit def seqContainer: ReactElementContainer[Seq] = new ReactElementContainer[Seq] {
    override def map[A](fa: Seq[A])(f: A => ReactElement): Seq[ReactElement] = fa.map(f)
  }

  @inline implicit def setContainer: ReactElementContainer[Set] = new ReactElementContainer[Set] {
    override def map[A](fa: Set[A])(f: A => ReactElement): Set[ReactElement] = fa.map(f)
  }

  @inline implicit def someContainer: ReactElementContainer[Some] = new ReactElementContainer[Some] {
    override def map[A](fa: Some[A])(f: A => ReactElement): Some[ReactElement] = Some(fa.map(f).get)
  }

  @inline implicit def tryContainer: ReactElementContainer[Try] = new ReactElementContainer[Try] {
    override def map[A](fa: Try[A])(f: A => ReactElement): Try[ReactElement] = fa.map(f)
  }

  @inline implicit def vectorContainer: ReactElementContainer[Vector] = new ReactElementContainer[Vector] {
    override def map[A](fa: Vector[A])(f: A => ReactElement): Vector[ReactElement] = fa.map(f)
  }
}
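Because every instance reduces to a map into F[ReactElement], callers can stay generic over the container. A small sketch of that use (renderAll and ContainerUsage are hypothetical names, not part of slinky; the typeclass calls match the definitions above):

import slinky.core.ReactElementContainer
import slinky.core.facade.ReactElement

object ContainerUsage {
  // Works for any F[_] with an instance above: List, Option, Future, js.UndefOr, ...
  def renderAll[F[_]: ReactElementContainer, A](fa: F[A])(view: A => ReactElement): F[ReactElement] =
    ReactElementContainer[F].map(fa)(view)

  // e.g. renderAll(List(1, 2, 3))(n => h1(n.toString)) resolves listContainer,
  // and renderAll(Option("hi"))(s => h1(s)) resolves optionContainer
  // (h1 from slinky.web.html).
}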