scala.collection.immutable.HashMap Scala Examples

The following examples show how to use scala.collection.immutable.HashMap. They are drawn from a range of open-source projects; the source file, project, and license are noted above each example.
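As a starting point, here is a minimal sketch of the basic operations on an immutable HashMap (construction, lookup, and copy-on-write updates) that the examples below build on:

import scala.collection.immutable.HashMap

object HashMapBasics extends App {
  val m = HashMap("a" -> 1, "b" -> 2)

  // Lookups: apply throws on a missing key; get and getOrElse are safe.
  println(m("a"))              // 1
  println(m.get("missing"))    // None
  println(m.getOrElse("b", 0)) // 2

  // Updates return a new map; the original is never mutated.
  val updated = m + ("c" -> 3)
  val removed = updated - "a"
  println(m)       // Map(a -> 1, b -> 2)
  println(removed) // Map(b -> 2, c -> 3)
}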
Example 1
Source File: BodyMapFactory.scala    From apalache   with Apache License 2.0
package at.forsyte.apalache.tla.lir.storage

import at.forsyte.apalache.tla.lir.{TlaDecl, TlaOperDecl}

import scala.collection.immutable.HashMap

// [email protected]: why is it an object, not a class? You even have a constructor here, called newMap.
// TODO: refactor into a class.
// TODO: refactor the map to Map[String, TlaOperDecl], remove unnecessary generalization
object BodyMapFactory {
  def newMap: BodyMap = new HashMap[BodyMapKey,BodyMapVal]

  def makeFromDecl( decl : TlaDecl, initial : BodyMap = newMap ) : BodyMap =
    decl match {
      case decl : TlaOperDecl if !initial.contains( decl.name ) =>
        initial + (decl.name -> (decl.formalParams, decl.body))
      case _ => initial
    }

  def makeFromDecls( decls : Traversable[TlaDecl], initial : BodyMap = newMap ) : BodyMap =
    decls.foldLeft( initial ) { case (db, decl) => makeFromDecl( decl, db ) }
} 
Example 2
Source File: Rule.scala    From flamy   with Apache License 2.0
package com.flaminem.flamy.parsing.hive.ast

import com.flaminem.flamy.model.exceptions.{FlamyException, UnexpectedBehaviorException}
import com.flaminem.flamy.parsing.hive.HiveParserUtils.drawTree
import org.apache.hadoop.hive.ql.parse.ASTNode

import scala.collection.immutable.HashMap
import scala.util.control.NonFatal


trait Rule[-Context, +Out] {

  def apply(pt: ASTNode, context: Context): Out

}


class RuleSet[Context, Out](
  val defaultRule: Rule[Context, Out],
  val map: Map[Int, Rule[Context, Out]] = HashMap()
) extends Rule[Context, Out] {

  def this(defaultRule: Rule[Context, Out], singleTransformers: SingleRule[Context, Out]*) {
    this(defaultRule, singleTransformers.map{t => (t.tokenType, t)}.toMap)
  }

  def apply(pt: ASTNode, context: Context): Out = {
    try{
      map.getOrElse(pt.getType, defaultRule).apply(pt, context)
    }
    catch{
      case e: FlamyException if !e.isInstanceOf[UnexpectedBehaviorException] => throw e
      case NonFatal(e) =>
        throw new UnexpectedBehaviorException(s"Tree is:\n${drawTree(pt)}", e)
    }
  }
} 
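RuleSet above dispatches on an ASTNode token type through a HashMap and falls back to defaultRule via getOrElse. The same dispatch idiom, reduced to a self-contained sketch with hypothetical handlers (no Hive dependency):

import scala.collection.immutable.HashMap

object DispatchSketch extends App {
  // Hypothetical handlers keyed by an Int token type, mirroring RuleSet's map.
  val handlers: Map[Int, String => String] = HashMap(
    1 -> ((s: String) => s"number($s)"),
    2 -> ((s: String) => s"string($s)")
  )
  val defaultHandler: String => String = s => s"unhandled($s)"

  def run(tokenType: Int, payload: String): String =
    handlers.getOrElse(tokenType, defaultHandler)(payload)

  println(run(1, "42"))   // number(42)
  println(run(9, "oops")) // unhandled(oops)
}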
Example 3
Source File: SparseVectorTest.scala    From flink-parameter-server   with Apache License 2.0
package hu.sztaki.ilab.ps.passive.aggressive.entities

import org.scalatest.{FlatSpec, Matchers}

import scala.collection.immutable.HashMap

class SparseVectorTest extends FlatSpec with Matchers {

  "SparseVectorBuilder and equal methode" should "be work" in {
    val SVortodox = new SparseVector(HashMap(1L -> "one", 2L -> "two"), 5)
    val SVmap = SparseVector.build(5, HashMap(1L -> "one", 2L -> "two"))
    val SVtraversal = SparseVector.build(5, List(1L -> "one", 2L -> "two"))
    val SVseq = SparseVector.build(5, 1L -> "one", 2L -> "two")
    SVortodox should be(SVmap)
    SVortodox should be(SVtraversal)
    SVortodox should be(SVseq)
  }

  "test" should "be work" in {
    import breeze.linalg._
    val x = DenseVector.zeros[Double](5)
    x(1) = 2
    val y = breeze.linalg.SparseVector.zeros[Double](5)
    y(2) = 3
    val q = x + y
    val w = q :* y

    println(x)
    println(y)
    println(q)
    println(w)
  }

} 
Example 4
Source File: PassiveAggressiveFilter.scala    From flink-parameter-server   with Apache License 2.0
package hu.sztaki.ilab.ps.passive.aggressive.algorithm.binary

import hu.sztaki.ilab.ps.passive.aggressive.entities.SparseVector

import scala.collection.immutable.HashMap

object PassiveAggressiveFilter {
    def buildPAF(): PassiveAggressiveFilter  = new PassiveAggressiveFilterImp()
    def buildPAFI(Con :Int): PassiveAggressiveFilter = new PassiveAggressiveFilterImpI(Con)
    def buildPAFII(Con :Int): PassiveAggressiveFilter = new PassiveAggressiveFilterImpII(Con)
}

abstract class PassiveAggressiveFilter(C :Int) extends Serializable {
  
  protected def Const = C

  protected def getTau(data: SparseVector[Double], l: Double) : Double

  def delta(data: SparseVector[Double], model: HashMap[Long, Double], label: Int) = {
    assert(Set(1, -1) contains label)
    assert(data.getIndexes == model.keySet)
//    suffer loss:
    val l = math.max(0, 1 - label * model.merged(data.getValues)({ case ((k,v1),(_,v2)) => (k,v1*v2) }).values.sum)
    val multiplier = getTau(data, l) * label
    data.getValues map {case (key, value) => (key, value * multiplier)}
  }

  def predict(data: SparseVector[Double], model: HashMap[Long, Double]) =
    Math.signum(model.merged(data.getValues)({ case ((k,v1),(_,v2)) => (k,v1*v2) }).values.sum).toInt

  protected def quotient(data: SparseVector[Double], l: Double, denominatorConst: Double) = {
    val normSquare = data.getValues.values.map(math.pow(_,2)).sum
    if (denominatorConst == 0) l / normSquare
    else l / (normSquare + denominatorConst)
  }
}

class PassiveAggressiveFilterImp extends PassiveAggressiveFilter(0) {
  override def getTau(data: SparseVector[Double], l: Double): Double = quotient(data, l, 0)

}

class PassiveAggressiveFilterImpI(Con :Int) extends PassiveAggressiveFilter(Con) {
  override def getTau(data: SparseVector[Double], l: Double): Double = Math.min(Const, quotient(data, l, 0))
}

class PassiveAggressiveFilterImpII(Con :Int) extends PassiveAggressiveFilter(Con) {
  override def getTau(data: SparseVector[Double], l: Double): Double = quotient(data, l, 1 / (2 * Const))
} 
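Both delta and predict above compute a sparse dot product by pairing model weights with feature values through HashMap.merged and summing the products. A minimal sketch of that step on plain HashMaps (both maps are assumed to share the same key set, as the assertion in delta enforces):

import scala.collection.immutable.HashMap

object MergedDotProduct extends App {
  val model  = HashMap(1L -> 0.5, 2L -> -1.0)
  val sample = HashMap(1L -> 2.0, 2L -> 3.0)

  // For keys present in both maps, merged applies the collision function to the two entries.
  val dot = model.merged(sample) { case ((k, w), (_, x)) => (k, w * x) }.values.sum
  println(dot) // 0.5 * 2.0 + (-1.0) * 3.0 = -2.0
}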
Example 5
Source File: SONAMetadataUtils.scala    From sona   with Apache License 2.0
package org.apache.spark.sql.util

import com.tencent.angel.sona.ml.attribute._
import org.apache.spark.linalg.VectorUDT
import org.apache.spark.sql.types.StructField

import scala.collection.immutable.HashMap


/**
 * Helper utilities for algorithms using ML metadata
 */
object SONAMetadataUtils {

  /**
   * Examine a schema to identify the number of classes in a label column.
   * Returns None if the number of labels is not specified, or if the label column is continuous.
   */
  def getNumClasses(labelSchema: StructField): Option[Int] = {
    Attribute.fromStructField(labelSchema) match {
      case binAttr: BinaryAttribute => Some(2)
      case nomAttr: NominalAttribute => nomAttr.getNumValues
      case _: NumericAttribute | UnresolvedAttribute => None
    }
  }

  /**
   * Examine a schema to identify categorical (Binary and Nominal) features.
   *
   * @param featuresSchema  Schema of the features column.
   *                        If a feature does not have metadata, it is assumed to be continuous.
   *                        If a feature is Nominal, then it must have the number of values
   *                        specified.
   * @return  Map: feature index to number of categories.
   *          The map's set of keys will be the set of categorical feature indices.
   */
  def getCategoricalFeatures(featuresSchema: StructField): Map[Int, Int] = {
    val metadata = AttributeGroup.fromStructField(featuresSchema)
    if (metadata.attributes.isEmpty) {
      HashMap.empty[Int, Int]
    } else {
      metadata.attributes.get.zipWithIndex.flatMap { case (attr, idx) =>
        if (attr == null) {
          Iterator()
        } else {
          attr match {
            case _: NumericAttribute | UnresolvedAttribute => Iterator()
            case binAttr: BinaryAttribute => Iterator(idx -> 2)
            case nomAttr: NominalAttribute =>
              nomAttr.getNumValues match {
                case Some(numValues: Int) => Iterator(idx -> numValues)
                case None => throw new IllegalArgumentException(s"Feature $idx is marked as" +
                  " Nominal (categorical), but it does not have the number of values specified.")
              }
          }
        }
      }.toMap
    }
  }

  /**
   * Takes a Vector column and a list of feature names, and returns the corresponding list of
   * feature indices in the column, in order.
   * @param col  Vector column which must have feature names specified via attributes
   * @param names  List of feature names
   */
  def getFeatureIndicesFromNames(col: StructField, names: Array[String]): Array[Long] = {
    require(col.dataType.isInstanceOf[VectorUDT], s"getFeatureIndicesFromNames expected column $col"
      + s" to be Vector type, but it was type ${col.dataType} instead.")
    val inputAttr = AttributeGroup.fromStructField(col)
    names.map { name =>
      require(inputAttr.hasAttr(name),
        s"getFeatureIndicesFromNames found no feature with name $name in column $col.")
      inputAttr.getAttr(name).index.get
    }
  }
} 
Example 6
Source File: ProtocolUtil.scala    From marvin-engine-executor   with Apache License 2.0
package org.marvin.util

import io.jvm.uuid.UUID
import org.marvin.model.EngineMetadata

import scala.collection.immutable.HashMap

object ProtocolUtil {

  def generateProtocol(actionName:String): String ={
    s"${actionName}_${UUID.randomString}"
  }

  def splitProtocol(protocol: String, metadata: EngineMetadata): HashMap[String, String] = {
    var splitedProtocols = new HashMap[String, String]()

    for (_p <- protocol.split(",")){
      val _action = _p.substring(0, _p.indexOf("_"))

      if (_action != "pipeline") {
        for (_artifact <- metadata.actionsMap(_action).artifactsToPersist) splitedProtocols += (_artifact -> _p)
      }
      else{
        for (_paction <- metadata.pipelineActions) for (_artifact <- metadata.actionsMap(_paction).artifactsToPersist) splitedProtocols += (_artifact -> _p)
      }
    }

    splitedProtocols
  }

} 
Example 7
Source File: MetadataUtils.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.ml.util

import scala.collection.immutable.HashMap

import org.apache.spark.ml.attribute._
import org.apache.spark.mllib.linalg.VectorUDT
import org.apache.spark.sql.types.StructField



object MetadataUtils {

  def getFeatureIndicesFromNames(col: StructField, names: Array[String]): Array[Int] = {
    require(col.dataType.isInstanceOf[VectorUDT], s"getFeatureIndicesFromNames expected column $col"
      + s" to be Vector type, but it was type ${col.dataType} instead.")
    val inputAttr = AttributeGroup.fromStructField(col)
    names.map { name =>
      require(inputAttr.hasAttr(name),
        s"getFeatureIndicesFromNames found no feature with name $name in column $col.")
      inputAttr.getAttr(name).index.get
    }
  }
} 
Example 8
Source File: KinesisPublisherBatchResult.scala    From gfc-aws-kinesis   with Apache License 2.0
package com.gilt.gfc.aws.kinesis.client


import scala.collection.immutable.HashMap
import scala.language.postfixOps



object KinesisPublisherBatchResult {

  // Companion object; the KinesisPublisherBatchResult case class itself (the fields
  // named in the apply call below) is not included in this snippet.
  private[client]
  def apply( callResults: KinesisPublisherPutRecordsCallResults
           ): KinesisPublisherBatchResult = {

    val failedResultEntries = callResults.failures.map(_._2).flatten
    val errorCodes = failedResultEntries.map(_.getErrorCode).groupBy(identity _).mapValues(_.size).toSeq

    val shardRecordCounts =
      callResults.successes.map(_._2).flatten.
      map(r => Option(r.getShardId)).flatten.
      groupBy(identity _).mapValues(_.size).toSeq

    KinesisPublisherBatchResult(
      successRecordCount = callResults.successes.size
    , failureRecordCount = callResults.hardFailures.size
    , serviceErrorCount = if (callResults.isGenericServerError) 1 else 0
    , attemptCount = 1
    , errorCodes = HashMap(errorCodes: _*)
    , shardRecordCounts = HashMap(shardRecordCounts: _*)
    )
  }
} 
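errorCodes and shardRecordCounts above are produced by counting occurrences with groupBy followed by mapValues(_.size), then passing the resulting pairs to the HashMap varargs constructor. The counting idiom in isolation:

import scala.collection.immutable.HashMap

object CountBySketch extends App {
  val codes = Seq("Throttled", "InternalFailure", "Throttled")

  // Count how often each element occurs, then build an immutable HashMap from the pairs.
  val counts: Seq[(String, Int)] = codes.groupBy(identity).mapValues(_.size).toSeq
  val asMap = HashMap(counts: _*)

  println(asMap) // e.g. Map(Throttled -> 2, InternalFailure -> 1)
}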
Example 9
Source File: MetadataUtils.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.ml.util

import scala.collection.immutable.HashMap

import org.apache.spark.ml.attribute._
import org.apache.spark.ml.linalg.VectorUDT
import org.apache.spark.sql.types.StructField



object MetadataUtils {

  def getFeatureIndicesFromNames(col: StructField, names: Array[String]): Array[Int] = {
    require(col.dataType.isInstanceOf[VectorUDT], s"getFeatureIndicesFromNames expected column $col"
      + s" to be Vector type, but it was type ${col.dataType} instead.")
    val inputAttr = AttributeGroup.fromStructField(col)
    names.map { name =>
      require(inputAttr.hasAttr(name),
        s"getFeatureIndicesFromNames found no feature with name $name in column $col.")
      inputAttr.getAttr(name).index.get
    }
  }
} 
Example 10
Source File: MessageSerializer.scala    From aecor   with MIT License
package aecor.runtime.akkageneric.serialization

import aecor.runtime.akkageneric.GenericAkkaRuntime.KeyedCommand
import aecor.runtime.akkageneric.GenericAkkaRuntimeActor.{ Command, CommandResult }
import akka.actor.ExtendedActorSystem
import akka.serialization.{ BaseSerializer, SerializerWithStringManifest }
import com.google.protobuf.ByteString
import scodec.bits.BitVector

import scala.collection.immutable.HashMap

class MessageSerializer(val system: ExtendedActorSystem)
    extends SerializerWithStringManifest
    with BaseSerializer {

  val KeyedCommandManifest = "A"
  val CommandManifest = "B"
  val CommandResultManifest = "C"

  private val fromBinaryMap =
    HashMap[String, Array[Byte] => AnyRef](
      KeyedCommandManifest -> keyedCommandFromBinary,
      CommandManifest -> commandFromBinary,
      CommandResultManifest -> commandResultFromBinary
    )

  override def manifest(o: AnyRef): String = o match {
    case KeyedCommand(_, _) => KeyedCommandManifest
    case Command(_)         => CommandManifest
    case CommandResult(_)   => CommandResultManifest
    case x                  => throw new IllegalArgumentException(s"Serialization of [$x] is not supported")
  }

  override def toBinary(o: AnyRef): Array[Byte] = o match {
    case Command(bytes) =>
      bytes.toByteArray
    case CommandResult(bytes) =>
      bytes.toByteArray
    case x @ KeyedCommand(_, _) =>
      entityCommandToBinary(x)
    case x => throw new IllegalArgumentException(s"Serialization of [$x] is not supported")
  }

  override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef =
    fromBinaryMap.get(manifest) match {
      case Some(f) => f(bytes)
      case other   => throw new IllegalArgumentException(s"Unknown manifest [$other]")
    }

  private def entityCommandToBinary(a: KeyedCommand): Array[Byte] =
    msg.KeyedCommand(a.key, ByteString.copyFrom(a.bytes.toByteBuffer)).toByteArray

  private def keyedCommandFromBinary(bytes: Array[Byte]): KeyedCommand =
    msg.KeyedCommand.parseFrom(bytes) match {
      case msg.KeyedCommand(key, commandBytes) =>
        KeyedCommand(key, BitVector(commandBytes.asReadOnlyByteBuffer()))
    }

  private def commandFromBinary(bytes: Array[Byte]): Command =
    Command(BitVector(bytes))

  private def commandResultFromBinary(bytes: Array[Byte]): CommandResult =
    CommandResult(BitVector(bytes))
} 
Example 11
Source File: MetadataUtils.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.ml.util

import scala.collection.immutable.HashMap

import org.apache.spark.ml.attribute._
import org.apache.spark.mllib.linalg.VectorUDT
import org.apache.spark.sql.types.StructField



object MetadataUtils {

  def getFeatureIndicesFromNames(col: StructField, names: Array[String]): Array[Int] = {
    require(col.dataType.isInstanceOf[VectorUDT], s"getFeatureIndicesFromNames expected column $col"
      + s" to be Vector type, but it was type ${col.dataType} instead.")
    val inputAttr = AttributeGroup.fromStructField(col)
    names.map { name =>
      require(inputAttr.hasAttr(name),
        s"getFeatureIndicesFromNames found no feature with name $name in column $col.")
      inputAttr.getAttr(name).index.get
    }
  }
} 
Example 12
Source File: MetadataUtils.scala    From sparkxgboost   with Apache License 2.0
package rotationsymmetry.sxgboost

import org.apache.spark.ml.attribute._
import org.apache.spark.sql.types.StructField

import scala.collection.immutable.HashMap


private[sxgboost] object MetadataUtils {
  
  def getCategoricalFeatures(featuresSchema: StructField): Map[Int, Int] = {
    val metadata = AttributeGroup.fromStructField(featuresSchema)
    if (metadata.attributes.isEmpty) {
      HashMap.empty[Int, Int]
    } else {
      metadata.attributes.get.zipWithIndex.flatMap { case (attr, idx) =>
        if (attr == null) {
          Iterator()
        } else {
          attr match {
            case _: NumericAttribute | UnresolvedAttribute => Iterator()
            case binAttr: BinaryAttribute => Iterator(idx -> 2)
            case nomAttr: NominalAttribute =>
              nomAttr.getNumValues match {
                case Some(numValues: Int) => Iterator(idx -> numValues)
                case None => throw new IllegalArgumentException(s"Feature $idx is marked as" +
                  " Nominal (categorical), but it does not have the number of values specified.")
              }
          }
        }
      }.toMap
    }
  }
} 
Example 13
Source File: MetadataUtils.scala    From iolap   with Apache License 2.0
package org.apache.spark.ml.util

import scala.collection.immutable.HashMap

import org.apache.spark.ml.attribute._
import org.apache.spark.sql.types.StructField



object MetadataUtils {

  def getCategoricalFeatures(featuresSchema: StructField): Map[Int, Int] = {
    val metadata = AttributeGroup.fromStructField(featuresSchema)
    if (metadata.attributes.isEmpty) {
      HashMap.empty[Int, Int]
    } else {
      metadata.attributes.get.zipWithIndex.flatMap { case (attr, idx) =>
        if (attr == null) {
          Iterator()
        } else {
          attr match {
            case _: NumericAttribute | UnresolvedAttribute => Iterator()
            case binAttr: BinaryAttribute => Iterator(idx -> 2)
            case nomAttr: NominalAttribute =>
              nomAttr.getNumValues match {
                case Some(numValues: Int) => Iterator(idx -> numValues)
                case None => throw new IllegalArgumentException(s"Feature $idx is marked as" +
                  " Nominal (categorical), but it does not have the number of values specified.")
              }
          }
        }
      }.toMap
    }
  }

} 
Example 14
Source File: MetadataUtils.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.ml.util

import scala.collection.immutable.HashMap

import org.apache.spark.ml.attribute._
import org.apache.spark.ml.linalg.VectorUDT
import org.apache.spark.sql.types.StructField



object MetadataUtils {

  def getFeatureIndicesFromNames(col: StructField, names: Array[String]): Array[Int] = {
    require(col.dataType.isInstanceOf[VectorUDT], s"getFeatureIndicesFromNames expected column $col"
      + s" to be Vector type, but it was type ${col.dataType} instead.")
    val inputAttr = AttributeGroup.fromStructField(col)
    names.map { name =>
      require(inputAttr.hasAttr(name),
        s"getFeatureIndicesFromNames found no feature with name $name in column $col.")
      inputAttr.getAttr(name).index.get
    }
  }
} 
Example 15
Source File: Utils.scala    From HAT2.0   with GNU Affero General Public License v3.0
package org.hatdex.hat.utils

import play.api.Logger

import scala.collection.immutable.HashMap
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.{ Failure, Success, Try }

object Utils {
  def flatten[T](xs: Seq[Try[T]]): Try[Seq[T]] = {
    val (ss: Seq[Success[T]] @unchecked, fs: Seq[Failure[T]] @unchecked) =
      xs.partition(_.isSuccess)

    if (fs.isEmpty) Success(ss map (_.get))
    else Failure[Seq[T]](fs(0).exception) // Only keep the first failure
  }

  // Utility function to return None for empty sequences
  def seqOption[T](seq: Seq[T]): Option[Seq[T]] = {
    if (seq.isEmpty)
      None
    else
      Some(seq)
  }

  def reverseOptionTry[T](a: Option[Try[T]]): Try[Option[T]] = {
    a match {
      case None =>
        Success(None)
      case Some(Success(b)) =>
        Success(Some(b))
      case Some(Failure(e)) =>
        Failure(e)
    }
  }

  def mergeMap[A, B](ms: Iterable[HashMap[A, B]])(f: (B, B) => B): HashMap[A, B] =
    (for (m <- ms; kv <- m) yield kv).foldLeft(HashMap[A, B]()) { (a, kv) =>
      a + (if (a.contains(kv._1)) kv._1 -> f(a(kv._1), kv._2) else kv)
    }

  def time[R](name: String, logger: Logger)(block: => R): R = {
    val t0 = System.nanoTime()
    val result = block // call-by-name
    val t1 = System.nanoTime()
    logger.info(s"[$name] Elapsed time: ${(t1 - t0) / 1000000.0}ms")
    result
  }

  def timeFuture[R](name: String, logger: Logger)(block: => Future[R])(implicit ec: ExecutionContext): Future[R] = {
    val t0 = System.nanoTime()
    block // call-by-name
      .andThen {
        case Success(_) =>
          val t1 = System.nanoTime()
          logger.info(s"[$name] Elapsed time: ${(t1 - t0) / 1000000.0}ms")
      }
  }

} 
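mergeMap above folds any collection of HashMaps into a single one, combining the values of duplicate keys with the supplied function. A short usage sketch, assuming the Utils object above is on the classpath:

import org.hatdex.hat.utils.Utils

import scala.collection.immutable.HashMap

object MergeMapUsage extends App {
  val a = HashMap("x" -> 1L, "y" -> 2L)
  val b = HashMap("y" -> 3L, "z" -> 4L)

  // Values stored under the same key are combined with the function, here addition.
  val merged = Utils.mergeMap(Seq(a, b))(_ + _)
  println(merged) // e.g. Map(x -> 1, y -> 5, z -> 4)
}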
Example 16
Source File: JsonStatsService.scala    From HAT2.0   with GNU Affero General Public License v3.0
package org.hatdex.hat.api.service.monitoring

import org.hatdex.hat.api.models.{ EndpointData, EndpointStats }
import org.hatdex.hat.utils.Utils
import play.api.libs.json._

import scala.collection.immutable.HashMap

object JsonStatsService {
  protected[service] def countJsonPaths(data: JsValue, path: Seq[String] = Seq()): HashMap[String, Long] = {
    data match {
      case v: JsArray =>
        val newPath = path.dropRight(1) :+ (path.lastOption.getOrElse("") + "[]")
        Utils.mergeMap(v.value.map(countJsonPaths(_, newPath)))((v1, v2) => v1 + v2)

      case v: JsObject =>
        val temp = v.fields map {
          case (key, value) =>
            countJsonPaths(value, path :+ key)
        }
        Utils.mergeMap(temp)((v1, v2) => v1 + v2)

      case _: JsValue => HashMap(path.mkString(".") -> 1L)
    }
  }

  protected[service] def countEndpointData(data: EndpointData): HashMap[String, HashMap[String, Long]] = {
    val counts = HashMap(data.endpoint -> countJsonPaths(data.data))
    val linkedCounts = data.links map { links =>
      links.map(countEndpointData)
    }
    val allCounts = linkedCounts.getOrElse(Seq()) :+ counts
    Utils.mergeMap(allCounts)((v1, v2) => Utils.mergeMap(Seq(v1, v2))((v1, v2) => v1 + v2))
  }

  def endpointDataCounts(data: Seq[EndpointData]): Iterable[EndpointStats] = {
    val counts = data.map(countEndpointData)
    val combined = Utils.mergeMap(counts)((v1, v2) => Utils.mergeMap(Seq(v1, v2))((v1, v2) => v1 + v2))
    combined map {
      case (endpoint, eCounts) => EndpointStats(endpoint, eCounts)
    }
  }
} 
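countJsonPaths above walks a JsValue and tallies every leaf under its dotted path, with array elements folded into a single path ending in "[]". A sketch of what that produces for a small document, placed in the same package so the protected[service] method is visible:

package org.hatdex.hat.api.service.monitoring

import play.api.libs.json.Json

object CountJsonPathsSketch extends App {
  val data = Json.parse("""{ "name": "hat", "scores": [1, 2, 3], "nested": { "flag": true } }""")

  // Leaves are counted per dotted path; the three array elements all land under "scores[]".
  println(JsonStatsService.countJsonPaths(data))
  // e.g. Map(name -> 1, scores[] -> 3, nested.flag -> 1)
}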
Example 17
Source File: TrackerMap.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.services.tracking

import java.util.concurrent.atomic.AtomicReference

import com.daml.dec.DirectExecutionContext
import com.daml.ledger.api.v1.command_service.SubmitAndWaitRequest
import com.daml.ledger.api.v1.completion.Completion
import com.daml.logging.{ContextualizedLogger, LoggingContext}
import org.slf4j.LoggerFactory

import scala.collection.immutable.HashMap
import scala.concurrent.duration.{FiniteDuration, _}
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}


object TrackerMap {

  // Lifecycle states for AsyncResource, reconstructed from their use below:
  // Waiting => Ready => Closed, or Waiting => Closed.
  sealed trait AsyncResourceState[+T]
  case object Waiting extends AsyncResourceState[Nothing]
  case object Closed extends AsyncResourceState[Nothing]
  final case class Ready[T](t: T) extends AsyncResourceState[T]

  final class AsyncResource[T <: AutoCloseable](future: Future[T]) {
    private val logger = LoggerFactory.getLogger(this.getClass)

    // Must progress Waiting => Ready => Closed or Waiting => Closed.
    val state: AtomicReference[AsyncResourceState[T]] = new AtomicReference(Waiting)

    future.andThen({
      case Success(t) =>
        if (!state.compareAndSet(Waiting, Ready(t))) {
          // This is the punch line of AsyncResource.
          // If we've been closed in the meantime, we must close the underlying resource also.
          // This "on-failure-to-complete" behavior is not present in scala or java Futures.
          t.close()
        }
      // Someone should be listening to this failure downstream
      // TODO(mthvedt): Refactor so at least one downstream listener is always present,
      // and exceptions are never dropped.
      case Failure(ex) =>
        logger.error("failure to get async resource", ex)
        state.set(Closed)
    })(DirectExecutionContext)

    def flatMap[U](f: T => Future[U])(implicit ex: ExecutionContext): Future[U] = {
      state.get() match {
        case Waiting => future.flatMap(f)
        case Closed => throw new IllegalStateException()
        case Ready(t) => f(t)
      }
    }

    def map[U](f: T => U)(implicit ex: ExecutionContext): Future[U] =
      flatMap(t => Future.successful(f(t)))

    def ifPresent[U](f: T => U): Option[U] = state.get() match {
      case Ready(t) => Some(f(t))
      case _ => None
    }

    def close(): Unit = state.getAndSet(Closed) match {
      case Ready(t) => t.close()
      case _ =>
    }
  }

  def apply(retentionPeriod: FiniteDuration)(implicit logCtx: LoggingContext): TrackerMap =
    new TrackerMap(retentionPeriod)
} 
Example 18
Source File: RadixTreeBench.scala    From radixtree   with Apache License 2.0
package com.rklaehn.radixtree

import cats.kernel.Hash
import cats.kernel.instances.unit._
import ichi.bench.Thyme

import scala.collection.immutable.{HashMap, SortedMap}
import scala.io.Source
import scala.util.hashing.Hashing
import Instances._

object RadixTreeBench extends App {
  val names = Source.fromURL("http://www-01.sil.org/linguistics/wordlists/english/wordlist/wordsEn.txt").getLines.toArray
  println(names.length)
  println(names.take(10).mkString("\n"))

  implicit object EqHashing extends Hashing[Unit] {

    override def hash(x: Unit): Int = 0
  }

  lazy val th = Thyme.warmed(verbose = println, warmth = Thyme.HowWarm.BenchOff)

  val kvs = names.map(s => s -> (()))

  val kvsc = names.map(s => s.toCharArray -> (()))

  val radixTree = RadixTree(kvs: _*).packed

  val radixTreeC = RadixTree(kvsc: _*).packed

  val sortedMap = SortedMap(kvs: _*)

  val hashMap = HashMap(kvs: _*)

  def create0[K: Ordering, V](kvs: Array[(K, V)]): Int = {
    SortedMap(kvs: _*).size
  }

  def create1[K, V](kvs: Array[(K, V)])(implicit f:RadixTree.Key[K]): Int = {
    RadixTree[K,V](kvs: _*).count
  }

  def lookup0(): Boolean = {
    kvs.forall {
      case (k,v) => radixTree.contains(k)
    }
  }

  def lookup1(): Boolean = {
    kvs.forall {
      case (k,v) => hashMap.contains(k)
    }
  }

  def lookup2(): Boolean = {
    kvs.forall {
      case (k,v) => sortedMap.contains(k)
    }
  }

  def filterPrefixS(): AnyRef = {
    sortedMap.filter { case (k,v) => k.startsWith("one") }
  }

  def filterPrefixH(): AnyRef = {
    hashMap.filter { case (k,v) => k.startsWith("one") }
  }

  def filterPrefixR(): AnyRef = {
    radixTree.filterPrefix("one")
  }

  def filterContainsS(): AnyRef = {
    sortedMap.filter { case (k,v) => k.contains("one") }
  }

  def filterContainsH(): AnyRef = {
    hashMap.filter { case (k,v) => k.contains("one") }
  }

  def filterContainsR(): AnyRef = {
    radixTree.filterKeysContaining("one")
  }

  th.pbenchOffWarm("Create 1000 SortedMap vs. RadixTree")(th.Warm(create0(kvs)))(th.Warm(create1(kvs)))
  th.pbenchOffWarm("Lookup 1000 SortedMap vs. RadixTree")(th.Warm(lookup0()))(th.Warm(lookup1()))

  th.pbenchOffWarm("FilterPrefix HashMap vs. RadixTree")(th.Warm(filterPrefixH()))(th.Warm(filterPrefixR()))
  th.pbenchOffWarm("FilterPrefix SortedMap vs. RadixTree")(th.Warm(filterPrefixS()))(th.Warm(filterPrefixR()))

  th.pbenchOffWarm("FilterContains HashMap vs. RadixTree")(th.Warm(filterContainsH()))(th.Warm(filterContainsR()))
  th.pbenchOffWarm("FilterContains SortedMap vs. RadixTree")(th.Warm(filterContainsS()))(th.Warm(filterContainsR()))
} 
Example 19
Source File: ImplicitsRecursionGuard.scala    From intellij-lsp   with Apache License 2.0
package org.jetbrains.plugins.scala.lang.psi.implicits

import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.lang.psi.types.ScType

import scala.collection.immutable.HashMap


object ImplicitsRecursionGuard {

  type RecursionMap = Map[PsiElement, List[ScType]]
  private val recursionMap: ThreadLocal[RecursionMap] =
    new ThreadLocal[RecursionMap] {
      override def initialValue(): RecursionMap =
        new HashMap[PsiElement, List[ScType]]
    }

  def currentMap: RecursionMap = recursionMap.get()

  def setRecursionMap(map: Map[PsiElement, List[ScType]]): Unit = recursionMap.set(map)

  def isRecursive(element: PsiElement, tp: ScType, checkRecursive: (ScType, Seq[ScType]) => Boolean): Boolean =
    checkRecursive(tp, getSearches(element))

  def beforeComputation(element: PsiElement, tp: ScType): Unit = addLast(element, tp)

  def afterComputation(element: PsiElement): Unit = removeLast(element)

  private def getSearches(element: PsiElement): List[ScType] = {
    recursionMap.get().get(element) match {
      case Some(buffer) => buffer
      case _ => List.empty
    }
  }

  private def addLast(element: PsiElement, tp: ScType) {
    recursionMap.get().get(element) match {
      case Some(list) =>
        recursionMap.set(recursionMap.get().updated(element, tp :: list))
      case _ =>
        recursionMap.set(recursionMap.get() + (element -> List(tp)))
    }
  }

  private def removeLast(element: PsiElement) {
    recursionMap.get().get(element) match {
      case Some(list) =>
        list match {
          case _ :: tl => recursionMap.set(recursionMap.get().updated(element, tl))
          case _ => recursionMap.set(recursionMap.get() - element)
        }
      case _ => throw new RuntimeException("Match is not exhaustive")
    }
  }
} 
Example 20
Source File: Normalized.scala    From Adenium   with Apache License 2.0
package com.adenium.common

import com.adenium.utils.May._

import scala.collection.immutable.HashMap



object Normalized {

  // TAB, versionHeader and the Parsed type are defined elsewhere in the original source.
  def TSV2Normalized( TSV: String ): Array[ (Int, String)] = {

    val arr = TSV.split( TAB)
    val ret =
      if ( arr.headOption.contains( versionHeader) ) {
        arr
          .drop(1)
          .grouped(2)
          .flatMap ( ar => maybeWarn {
            val idx = ar(0).toInt
            val value = ar(1)
            (idx, value)
          })
          .toArray
      }
      else
        arr.zipWithIndex.map { case (v, i) => ( i, v) }
    ret
  }

  def parsed2TSV( parsed: Parsed): String = {
    val TSV: String = warn {
      parsed.fields.map ( _.flatMap { _.valueWithId }.mkString( TAB) )
    }("[ Normalized ] : toTSV = Fields is empty.").getOrElse( "")

    versionHeader + TAB + TSV
  }
} 
Example 21
Source File: exercise10.scala    From scala-for-the-Impatient   with MIT License
import scala.collection.immutable.HashMap

val str = "abdcsdcd"
val frequencies = str.par.aggregate(HashMap[Char,Int]())(
  {
    (a,b) =>
      a + (b -> (a.getOrElse(b,0) + 1))
  }
  ,
  {
    (map1,map2) =>
      (map1.keySet ++ map2.keySet).foldLeft( HashMap[Char,Int]() ) {
        (result,k) =>
          result + ( k -> ( map1.getOrElse(k,0 ) + map2.getOrElse(k,0) ) )
      }
  }
) 
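For comparison, the same character-frequency count built sequentially with a plain foldLeft; this is exactly the per-element step that the parallel aggregate above applies within each partition:

import scala.collection.immutable.HashMap

val str = "abdcsdcd"
val frequencies = str.foldLeft(HashMap[Char, Int]()) { (acc, c) =>
  acc + (c -> (acc.getOrElse(c, 0) + 1))
}
// e.g. Map(a -> 1, b -> 1, d -> 3, c -> 2, s -> 1)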
Example 22
Source File: MetadataUtils.scala    From sparkoscope   with Apache License 2.0
package org.apache.spark.ml.util

import scala.collection.immutable.HashMap

import org.apache.spark.ml.attribute._
import org.apache.spark.ml.linalg.VectorUDT
import org.apache.spark.sql.types.StructField



object MetadataUtils {

  def getFeatureIndicesFromNames(col: StructField, names: Array[String]): Array[Int] = {
    require(col.dataType.isInstanceOf[VectorUDT], s"getFeatureIndicesFromNames expected column $col"
      + s" to be Vector type, but it was type ${col.dataType} instead.")
    val inputAttr = AttributeGroup.fromStructField(col)
    names.map { name =>
      require(inputAttr.hasAttr(name),
        s"getFeatureIndicesFromNames found no feature with name $name in column $col.")
      inputAttr.getAttr(name).index.get
    }
  }
} 
Example 23
Source File: RedisLshTable.scala    From lsh-scala   with Apache License 2.0
package io.krom.lsh

import breeze.linalg.DenseVector
import com.lambdaworks.jacks.JacksMapper
import com.redis.RedisClient

import scala.collection.immutable.HashMap

class RedisLshTable(redisdb: RedisClient, prefix: Option[String] = None) extends LshTable(prefix) {

  override def put(hash: String, label: String, point: DenseVector[Double]): Unit = {
    val key = createKey(hash)
    val value = (label, key, point.toArray)

    redisdb.pipeline { pipe =>
      pipe.sadd(key, label)
      pipe.set(label, JacksMapper.writeValueAsString(value))
    }
  }

  override def update(hash: String, label: String, point: DenseVector[Double]): Unit = {
    val key = createKey(hash)

    val item = redisdb.get(label) match {
      case None => return
      case Some(x:String) => JacksMapper.readValue[(String, String, Array[Double])](x)
    }
    val oldKey = item._2

    val value = (label, key, point.toArray)

    redisdb.pipeline { pipe =>
      pipe.set(label, JacksMapper.writeValueAsString(value))
      if (key != oldKey) pipe.srem(oldKey, label)
      pipe.sadd(key, label)
    }
  }

  override def get(hash: String): List[(String, String, DenseVector[Double])] = {
    val key = createKey(hash)
    val items = redisdb.smembers(key)

    val itemDetails = redisdb.pipeline { pipe =>
      for {
        item <- items.get
        if item.isDefined
      } pipe.get(item.get)
    }

    for {
      item <- itemDetails.get
      newItem = item match {
        case Some(x:String) => Some(JacksMapper.readValue[(String, String, Array[Double])](x))
        case None => None
      }
      if newItem.isDefined
    } yield ( newItem.get._1, newItem.get._2, DenseVector(newItem.get._3) )
  }
}

object RedisLshTable {
  def createTables(numTables: Int, redisConf: HashMap[String, String], prefix: Option[String] = None): IndexedSeq[LshTable] = {
    val redisHost = if (redisConf.contains("host")) redisConf("host") else "localhost"
    val redisPort = if (redisConf.contains("port")) Integer.parseInt(redisConf("port")) else 6379
    for {
      redisDb <- 0 until numTables
    } yield new RedisLshTable(new RedisClient(redisHost, redisPort, redisDb), prefix)
  }
} 
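A usage sketch for the factory above, passing the connection settings as an immutable HashMap; the host and port values are illustrative and a reachable Redis server is assumed:

import io.krom.lsh.RedisLshTable

import scala.collection.immutable.HashMap

object RedisLshTableUsage extends App {
  val conf = HashMap("host" -> "localhost", "port" -> "6379")

  // One RedisLshTable is created per Redis database index, 0 until numTables.
  val tables = RedisLshTable.createTables(numTables = 4, redisConf = conf, prefix = Some("lsh"))
  println(tables.size) // 4
}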
Example 24
Source File: WildcardIndex.scala    From perf_tester   with Apache License 2.0
package akka.util

import scala.annotation.tailrec
import scala.collection.immutable.HashMap

private[akka] final case class WildcardIndex[T](wildcardTree: WildcardTree[T] = WildcardTree[T](), doubleWildcardTree: WildcardTree[T] = WildcardTree[T]()) {

  def insert(elems: Array[String], d: T): WildcardIndex[T] = elems.lastOption match {
    case Some("**") ⇒ copy(doubleWildcardTree = doubleWildcardTree.insert(elems.iterator, d))
    case Some(_) ⇒ copy(wildcardTree = wildcardTree.insert(elems.iterator, d))
    case _ ⇒ this
  }

  def find(elems: Iterable[String]): Option[T] =
    (if (wildcardTree.isEmpty) {
      if (doubleWildcardTree.isEmpty) {
        WildcardTree[T]() // empty
      } else {
        doubleWildcardTree.findWithTerminalDoubleWildcard(elems.iterator)
      }
    } else {
      val withSingleWildcard = wildcardTree.findWithSingleWildcard(elems.iterator)
      if (withSingleWildcard.isEmpty) {
        doubleWildcardTree.findWithTerminalDoubleWildcard(elems.iterator)
      } else {
        withSingleWildcard
      }
    }).data

  def isEmpty: Boolean = wildcardTree.isEmpty && doubleWildcardTree.isEmpty

}

private[akka] object WildcardTree {
  private val empty = new WildcardTree[Nothing]()
  def apply[T](): WildcardTree[T] = empty.asInstanceOf[WildcardTree[T]]
}

private[akka] final case class WildcardTree[T](data: Option[T] = None, children: Map[String, WildcardTree[T]] = HashMap[String, WildcardTree[T]]()) {

  def isEmpty: Boolean = data.isEmpty && children.isEmpty

  def insert(elems: Iterator[String], d: T): WildcardTree[T] =
    if (!elems.hasNext) {
      copy(data = Some(d))
    } else {
      val e = elems.next()
      copy(children = children.updated(e, children.getOrElse(e, WildcardTree[T]()).insert(elems, d)))
    }

  @tailrec def findWithSingleWildcard(elems: Iterator[String]): WildcardTree[T] =
    if (!elems.hasNext) this
    else {
      children.get(elems.next()) match {
        case Some(branch) ⇒ branch.findWithSingleWildcard(elems)
        case None ⇒ children.get("*") match {
          case Some(branch) ⇒ branch.findWithSingleWildcard(elems)
          case None ⇒ WildcardTree[T]()
        }
      }
    }

  @tailrec def findWithTerminalDoubleWildcard(elems: Iterator[String], alt: WildcardTree[T] = WildcardTree[T]()): WildcardTree[T] = {
    if (!elems.hasNext) this
    else {
      val newAlt = children.getOrElse("**", alt)
      children.get(elems.next()) match {
        case Some(branch) ⇒ branch.findWithTerminalDoubleWildcard(elems, newAlt)
        case None ⇒ children.get("*") match {
          case Some(branch) ⇒ branch.findWithTerminalDoubleWildcard(elems, newAlt)
          case None ⇒ newAlt
        }
      }
    }
  }
} 
Example 25
Source File: MapCreateAccessBench.scala    From abc   with Apache License 2.0
package com.rklaehn.abc

import cats.kernel.instances.all._
import ichi.bench.Thyme
import ichi.bench.Thyme.HowWarm

import scala.collection.immutable.HashMap

object MapCreateAccessBench extends App {

  val th = Thyme.warmed(verbose = println, warmth = HowWarm.BenchOff)

  def create(): Unit = {
    for (n ← Array(1, 10, 100, 1000, 10000, 100000, 1000000)) {
      val elements = (0 until n).toArray.map(x ⇒ x → x)
      def s0 = HashMap(elements:_*)
      def s1 = ArrayMap(elements:_*)
      th.pbenchOffWarm(s"Create HashMap[Int, Int] vs ArrayMap[Int, Int] $n")(
        th.Warm(s0.asInstanceOf[AnyRef]))(
          th.Warm(s1.asInstanceOf[AnyRef]))
    }
  }

  def access(): Unit = {
    for (n ← Array(1, 10, 100, 1000, 10000, 100000, 1000000)) {
      val elements = (0 until n).toArray.map(x ⇒ x → x)
      val s0 = HashMap(elements:_*)
      val s1 = ArrayMap(elements:_*)
      val x = n/2
      val r = th.pbenchOffWarm(s"Access HashMap[Int, Int] vs ArrayMap[Int, Int] $n")(
        th.Warm(s0(x)))(
          th.Warm(s1.apply0(x)))
    }
  }

  def createIntString(): Unit = {
    for (n ← Array(1, 10, 100, 1000, 10000, 100000)) {
      val elements = (0 until n).toArray.map(x ⇒ x → x.toString)
      def s0 = HashMap(elements:_*)
      def s1 = ArrayMap(elements:_*)
      th.pbenchOffWarm(s"Create HashMap[Int, String] vs ArrayMap[Int, String] $n")(
        th.Warm(s0.asInstanceOf[AnyRef]))(
          th.Warm(s1.asInstanceOf[AnyRef]))
    }
  }

  def accessIntString(): Unit = {
    for (n ← Array(1, 10, 100, 1000, 10000, 100000)) {
      val elements = (0 until n).toArray.map(x ⇒ x → x.toString)
      val s0 = HashMap(elements:_*)
      val s1 = ArrayMap(elements:_*)
      val x = n/2
      val r = th.pbenchOffWarm(s"Access HashMap[Int, String] vs ArrayMap[Int, String] $n")(
        th.Warm(s0(x)))(
          th.Warm(s1.apply0(x)))
    }
  }

  create()
  access()
  createIntString()
  accessIntString()
} 
Example 26
Source File: ModelDataReader.scala    From spark-ml-serving   with Apache License 2.0
package io.hydrosphere.spark_ml_serving.common

import io.hydrosphere.spark_ml_serving.common.reader._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import parquet.format.converter.ParquetMetadataConverter.NO_FILTER
import parquet.hadoop.{ParquetFileReader, ParquetReader}
import parquet.schema.MessageType

import scala.collection.immutable.HashMap
import scala.collection.mutable

object ModelDataReader {

  def parse(source: ModelSource, path: String): LocalData = {
    source.findFile(path, recursive = true, _.endsWith(".parquet")) match {
      case Some(p) => readData(p)
      case None    => LocalData.empty
    }
  }

  private def readData(p: Path): LocalData = {
    val conf: Configuration = new Configuration()
    val metaData            = ParquetFileReader.readFooter(conf, p, NO_FILTER)
    val schema: MessageType = metaData.getFileMetaData.getSchema

    val reader = ParquetReader.builder[SimpleRecord](new SimpleReadSupport(), p.getParent).build()
    var result = LocalData.empty

    try {
      var value = reader.read()
      while (value != null) {
        val valMap = value.struct(HashMap.empty[String, Any], schema)
        result = mergeMaps(result, valMap)
        value  = reader.read()
      }
      result
    } finally {
      if (reader != null) {
        reader.close()
      }
    }
  }

  private def mergeMaps(acc: LocalData, map: HashMap[String, Any]) = {
    var result = acc
    map.foreach {
      case (k, v) => result = result.appendToColumn(k, List(v))
    }
    result
  }
} 
Example 27
Source File: WordCount.scala    From spark-solr   with Apache License 2.0
package com.lucidworks.spark.example.query

import com.lucidworks.spark.SparkApp.RDDProcessor
import com.lucidworks.spark.rdd.{SelectSolrRDD, SolrRDD}
import com.lucidworks.spark.util.ConfigurationConstants._
import org.apache.commons.cli.{CommandLine, Option}
import org.apache.solr.common.SolrDocument
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.immutable.HashMap


class WordCount extends RDDProcessor{
  def getName: String = "word-count"

  def getOptions: Array[Option] = {
    Array(
    Option.builder()
          .argName("QUERY")
          .longOpt("query")
          .hasArg
          .required(false)
          .desc("URL encoded Solr query to send to Solr")
          .build()
    )
  }

  def run(conf: SparkConf, cli: CommandLine): Int = {
    val zkHost = cli.getOptionValue("zkHost", "localhost:9983")
    val collection = cli.getOptionValue("collection", "collection1")
    val queryStr = cli.getOptionValue("query", "*:*")

    val sc = SparkContext.getOrCreate(conf)
    val solrRDD: SelectSolrRDD = new SelectSolrRDD(zkHost, collection, sc)
    val rdd: RDD[SolrDocument]  = solrRDD.query(queryStr)

    val words: RDD[String] = rdd.map(doc => if (doc.containsKey("text_t")) doc.get("text_t").toString else "")
    val pWords: RDD[String] = words.flatMap(s => s.toLowerCase.replaceAll("[.,!?\n]", " ").trim().split(" "))

    val wordsCountPairs: RDD[(String, Int)] = pWords.map(s => (s, 1))
                                                    .reduceByKey((a,b) => a+b)
                                                    .map(item => item.swap)
                                                    .sortByKey(false)
                                                    .map(item => item.swap)

    wordsCountPairs.take(20).iterator.foreach(println)

    val sparkSession: SparkSession = SparkSession.builder().config(conf).getOrCreate()
    // Now use schema information in Solr to build a queryable SchemaRDD

    // Pro Tip: SolrRDD will figure out the schema if you don't supply a list of field names in your query
    val options = HashMap[String, String](
      SOLR_ZK_HOST_PARAM -> zkHost,
      SOLR_COLLECTION_PARAM -> collection,
      SOLR_QUERY_PARAM -> queryStr
      )

    val df: DataFrame = sparkSession.read.format("solr").options(options).load()
    val numEchos = df.filter(df.col("type_s").equalTo("echo")).count()
    println("numEchos >> " + numEchos)

    sc.stop()
    0
  }
} 
Example 28
Source File: MetadataUtils.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.ml.util

import scala.collection.immutable.HashMap

import org.apache.spark.ml.attribute._
import org.apache.spark.ml.linalg.VectorUDT
import org.apache.spark.sql.types.StructField



object MetadataUtils {

  def getFeatureIndicesFromNames(col: StructField, names: Array[String]): Array[Int] = {
    require(col.dataType.isInstanceOf[VectorUDT], s"getFeatureIndicesFromNames expected column $col"
      + s" to be Vector type, but it was type ${col.dataType} instead.")
    val inputAttr = AttributeGroup.fromStructField(col)
    names.map { name =>
      require(inputAttr.hasAttr(name),
        s"getFeatureIndicesFromNames found no feature with name $name in column $col.")
      inputAttr.getAttr(name).index.get
    }
  }
} 
Example 29
Source File: GraphsSpec.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf.language

import org.scalatest.prop.TableDrivenPropertyChecks
import org.scalatest.{Matchers, WordSpec}

import scala.collection.immutable.HashMap

class GraphsSpec extends WordSpec with Matchers with TableDrivenPropertyChecks {

  import Graphs._

  "topoSort" should {

    val dags = Table[Graph[Int]](
      "directed acyclic graphs",
      HashMap.empty,
      HashMap(
        3 -> Set(8, 10),
        5 -> Set(11),
        7 -> Set(8, 11),
        8 -> Set(9),
        11 -> Set(2, 9, 10)
      )
    )

    val dcgs = Table[Graph[String]](
      "directed cyclic graphs",
      HashMap("1" -> Set("1")),
      HashMap("A" -> Set("B"), "B" -> Set("A")),
      HashMap(
        "A" -> Set("B", "C", "E"),
        "B" -> Set("C", "E"),
        "C" -> Set("D"),
        "D" -> Set("B", "E"),
        "E" -> Set("E"),
      )
    )

    "successfully sort all edges of directed acyclic graph" in {
      dags.forEvery { dag =>
        val result = topoSort(dag)
        result shouldBe 'right

        val Right(sortedEdges) = result

        val allEdges = dag.values.foldLeft(dag.keySet)(_ | _)
        sortedEdges.toSet shouldBe allEdges

        val edgeRank = sortedEdges.zipWithIndex.toMap
        for {
          e <- dag.keys
          e_ <- dag(e)
        } edgeRank(e_) should be < edgeRank(e)
      }
    }

    "fail on cyclic graph and return a proper cycle" in {
      dcgs.forEvery { dcg =>
        val result = topoSort(dcg)
        result shouldBe 'left

        val Left(Cycle(loop)) = result

        ((loop.last :: loop) zip loop).foreach {
          case (e, e_) =>
            dcg(e) should contain(e_)
        }
      }
    }
  }

} 
Example 30
Source File: SortedLookupList.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf.data

import ScalazEqual.{equalBy, orderBy}

import scala.language.higherKinds
import scalaz.{Applicative, Equal, Order, Traverse}
import scalaz.std.tuple._
import scalaz.std.string._
import scalaz.syntax.traverse._

import scala.collection.immutable.HashMap


// Note that keys are ordered using Utf8 ordering
final class SortedLookupList[+X] private (entries: ImmArray[(String, X)]) extends Equals {

  def mapValue[Y](f: X => Y) = new SortedLookupList(entries.map { case (k, v) => k -> f(v) })

  def toImmArray: ImmArray[(String, X)] = entries

  def keys: ImmArray[String] = entries.map(_._1)

  def values: ImmArray[X] = entries.map(_._2)

  def iterator: Iterator[(String, X)] = entries.iterator

  def toHashMap: HashMap[String, X] = HashMap(entries.toSeq: _*)

  def foreach(f: ((String, X)) => Unit): Unit = entries.foreach(f)

  override def canEqual(that: Any): Boolean = that.isInstanceOf[SortedLookupList[_]]

  override def equals(obj: Any): Boolean = {
    obj match {
      case other: SortedLookupList[X] if other canEqual this => other.toImmArray == entries
      case _ => false
    }
  }

  override def hashCode(): Int = entries.hashCode()

  override def toString: String =
    s"SortedLookupList(${entries.map { case (k, v) => k -> v }.toSeq.mkString(",")})"
}

object SortedLookupList extends SortedLookupListInstances {

  def fromImmArray[X](entries: ImmArray[(String, X)]): Either[String, SortedLookupList[X]] = {
    entries.toSeq
      .groupBy(_._1)
      .collectFirst {
        case (k, l) if l.size > 1 => s"key $k duplicated when trying to build map"
      }
      .toLeft(new SortedLookupList(entries.toSeq.sortBy(_._1)(Utf8.Ordering).toImmArray))
  }

  def fromSortedImmArray[X](entries: ImmArray[(String, X)]): Either[String, SortedLookupList[X]] = {
    entries
      .map(_._1)
      .toSeq
      .sliding(2)
      .collectFirst {
        case Seq(k1, k2) if Utf8.Ordering.gteq(k1, k2) => s"the list $entries is not sorted by key"
      }
      .toLeft(new SortedLookupList(entries))
  }

  def apply[X](entries: Map[String, X]): SortedLookupList[X] =
    new SortedLookupList(ImmArray(entries.toSeq.sortBy(_._1)))

  def empty[X]: SortedLookupList[X] = new SortedLookupList(ImmArray.empty)

  implicit def `SLL Order instance`[X: Order]: Order[SortedLookupList[X]] =
    orderBy(_.toImmArray, true)

  implicit val `SLL covariant instance`: Traverse[SortedLookupList] =
    new Traverse[SortedLookupList] {
      override def traverseImpl[G[_]: Applicative, A, B](fa: SortedLookupList[A])(
          f: A => G[B]): G[SortedLookupList[B]] =
        fa.toImmArray traverse (_ traverse f) map (new SortedLookupList(_))
    }

}

sealed abstract class SortedLookupListInstances {
  implicit def `SLL Equal instance`[X: Equal]: Equal[SortedLookupList[X]] =
    equalBy(_.toImmArray, true)
} 
Example 31
Source File: TransactionVersionSpec.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf
package transaction

import data.ImmArray
import value.{Value, ValueVersion, ValueVersions}
import Value.{ContractId, ValueOptional, VersionedValue}
import org.scalatest.{Matchers, WordSpec}

import scala.collection.immutable.HashMap

class TransactionVersionSpec extends WordSpec with Matchers {
  import TransactionVersionSpec._

  import VersionTimeline.maxVersion

  private[this] val supportedValVersions =
    ValueVersions.SupportedDevVersions.copy(min = ValueVersion("1"))
  private[this] val supportedTxVersions =
    TransactionVersions.SupportedDevVersions.copy(min = TransactionVersion("1"))

  "assignVersion" should {
    "prefer picking an older version" in {
      assignTxVersion(assignValueVersions(dummyCreateTransaction)) shouldBe Right(
        maxVersion(supportedTxVersions.min, TransactionVersion("1")))
    }

    "pick version 2 when confronted with newer data" in {
      val usingOptional =
        dummyCreateTransaction map3 (identity, identity, v => ValueOptional(Some(v)))
      assignTxVersion(assignValueVersions(usingOptional)) shouldBe Right(
        maxVersion(supportedTxVersions.min, TransactionVersion("2")))
    }

    "pick version 7 when confronted with exercise result" in {
      val hasExerciseResult =
        dummyExerciseWithResultTransaction map3 (identity, identity, v => ValueOptional(Some(v)))
      assignTxVersion(assignValueVersions(hasExerciseResult)) shouldBe Right(
        maxVersion(supportedTxVersions.min, TransactionVersion("7")))
    }

    "pick version 2 when confronted with exercise result" in {
      val hasExerciseResult =
        dummyExerciseTransaction map3 (identity, identity, v => ValueOptional(Some(v)))
      assignTxVersion(assignValueVersions(hasExerciseResult)) shouldBe Right(
        maxVersion(supportedTxVersions.min, TransactionVersion("2")))
    }

    "crash the picked version is more recent that the maximal supported version" in {
      val supportedVersions = VersionRange(TransactionVersion("1"), TransactionVersion("5"))
      val hasExerciseResult =
        dummyExerciseWithResultTransaction map3 (identity, identity, v => ValueOptional(Some(v)))
      TransactionVersions.assignVersion(assignValueVersions(hasExerciseResult), supportedVersions) shouldBe 'left
    }

  }

  private[this] def assignValueVersions[Nid, Cid <: ContractId](
      t: GenTransaction[Nid, Cid, Value[Cid]],
  ): GenTransaction[Nid, Cid, VersionedValue[Cid]] =
    t map3 (identity, identity, ValueVersions.assertAsVersionedValue(_, supportedValVersions))

  private[this] def assignTxVersion[Nid, Cid <: ContractId](
      t: GenTransaction[Nid, Cid, VersionedValue[Cid]],
  ): Either[String, TransactionVersion] =
    TransactionVersions.assignVersion(t, supportedTxVersions)

}

object TransactionVersionSpec {

  import TransactionSpec._

  private[this] val singleId = Value.NodeId(0)
  private val dummyCreateTransaction =
    mkTransaction(HashMap(singleId -> dummyCreateNode("cid1")), ImmArray(singleId))
  private val dummyExerciseWithResultTransaction =
    mkTransaction(
      HashMap(singleId -> dummyExerciseNode("cid2", ImmArray.empty)),
      ImmArray(singleId))
  private val dummyExerciseTransaction =
    mkTransaction(
      HashMap(singleId -> dummyExerciseNode("cid3", ImmArray.empty, false)),
      ImmArray(singleId),
    )

}