scala.collection.concurrent.TrieMap Scala Examples

The following examples show how to use scala.collection.concurrent.TrieMap. Each snippet is taken from an open-source project; the project and source-file reference above each example tells you where it comes from.
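
A note on the collection itself: TrieMap is the standard library's lock-free, thread-safe mutable map (a concurrent hash-trie), and the atomic operations of scala.collection.concurrent.Map — putIfAbsent, replace, remove(key, value), getOrElseUpdate — are what most of the examples below lean on. As a quick orientation, here is a minimal sketch of those operations (our own snippet, not from any of the projects below):

import scala.collection.concurrent.TrieMap

object TrieMapBasics extends App {
  val cache = TrieMap.empty[String, Int]

  cache.put("a", 1)                 // Option[Int]: the previous value, if any
  cache.putIfAbsent("a", 2)         // Some(1): the existing mapping wins
  cache.getOrElseUpdate("b", 42)    // inserts 42 only if "b" is absent
  cache.replace("a", 1, 10)         // CAS: true iff "a" was still mapped to 1
  cache.remove("a")                 // Option[Int]: the removed value

  // Iteration is snapshot-based, so concurrent writers cannot corrupt a traversal.
  println(cache.readOnlySnapshot().toMap)
}
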
Example 1
Source File: MetricsInterceptor.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver

import com.codahale.metrics.Timer
import com.daml.metrics.Metrics
import io.grpc.ForwardingServerCall.SimpleForwardingServerCall
import io.grpc._

import scala.collection.concurrent.TrieMap


final class MetricsInterceptor(metrics: Metrics) extends ServerInterceptor {

  // Cache the result of calling MetricsInterceptor.nameFor, which practically has a
  // limited co-domain and whose cost we don't want to pay every time an endpoint is hit
  private val fullServiceToMetricNameCache = TrieMap.empty[String, Timer]

  override def interceptCall[ReqT, RespT](
      call: ServerCall[ReqT, RespT],
      headers: Metadata,
      next: ServerCallHandler[ReqT, RespT],
  ): ServerCall.Listener[ReqT] = {
    val fullMethodName = call.getMethodDescriptor.getFullMethodName
    val timer = fullServiceToMetricNameCache.getOrElseUpdate(
      fullMethodName,
      metrics.daml.lapi.forMethod(MetricsNaming.nameFor(fullMethodName)))
    val timerCtx = timer.time
    next.startCall(new TimedServerCall(call, timerCtx), headers)
  }

  private final class TimedServerCall[ReqT, RespT](
      delegate: ServerCall[ReqT, RespT],
      timer: Timer.Context,
  ) extends SimpleForwardingServerCall[ReqT, RespT](delegate) {
    override def close(status: Status, trailers: Metadata): Unit = {
      delegate.close(status, trailers)
      timer.stop()
      ()
    }
  }

} 
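
The idiom above is worth calling out: getOrElseUpdate turns a TrieMap into a concurrent memoization cache. Under a race, two threads may both evaluate the default expression with only one result kept, so the cached computation should be cheap and side-effect free (true for a pure name derivation like the one cached here). A minimal sketch of the same idiom, with hypothetical names:

import scala.collection.concurrent.TrieMap

object MemoCache {
  private val cache = TrieMap.empty[String, String]

  // Stands in for a pure, idempotent derivation (like MetricsNaming.nameFor above):
  // a duplicated evaluation under contention is wasteful but harmless.
  private def derive(key: String): String = key.toLowerCase

  def cached(key: String): String = cache.getOrElseUpdate(key, derive(key))
}
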
Example 2
Source File: InMemoryRepository.scala    From metronome   with Apache License 2.0
package dcos.metronome
package repository.impl

import dcos.metronome.repository.Repository
import mesosphere.marathon.StoreCommandFailedException

import scala.collection.concurrent.TrieMap
import scala.concurrent.Future
import scala.util.control.NonFatal


class InMemoryRepository[Id, Model] extends Repository[Id, Model] {

  val models: TrieMap[Id, Model] = TrieMap.empty[Id, Model]

  override def ids(): Future[Iterable[Id]] = Future.successful(models.keys)

  override def update(id: Id, change: Model => Model): Future[Model] = {
    models.get(id) match {
      case Some(model) =>
        try {
          val changed = change(model)
          models.update(id, changed)
          Future.successful(changed)
        } catch {
          case NonFatal(ex) => Future.failed(ex)
        }
      case None =>
        Future.failed(PersistenceFailed(id.toString, "No model with this id"))
    }
  }

  override def get(id: Id): Future[Option[Model]] = Future.successful(models.get(id))

  override def delete(id: Id): Future[Boolean] = Future.successful(models.remove(id).isDefined)

  override def create(id: Id, create: Model): Future[Model] = {
    models.get(id) match {
      case Some(_) =>
        Future.failed(new StoreCommandFailedException("Model with id already exists."))
      case None =>
        models += id -> create
        Future.successful(create)
    }
  }
} 
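
One caveat with this repository: update is a read-modify-write in two separate steps, so two concurrent updates to the same id can interleave and one change can be lost. Where lost updates matter, TrieMap's compare-and-swap replace supports a lock-free retry loop; a sketch of that variant (ours, reduced to the one method):

import scala.annotation.tailrec
import scala.collection.concurrent.TrieMap

class AtomicUpdates[Id, Model] {
  val models: TrieMap[Id, Model] = TrieMap.empty

  @tailrec
  final def update(id: Id, change: Model => Model): Option[Model] =
    models.get(id) match {
      case Some(old) =>
        val changed = change(old)
        // Succeeds only if the entry is still `old`; otherwise another writer
        // got in first, so re-read and retry.
        if (models.replace(id, old, changed)) Some(changed) else update(id, change)
      case None => None
    }
}
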
Example 3
Source File: DeferredExecutionManager.scala    From incubator-toree   with Apache License 2.0
package org.apache.toree.kernel.protocol.v5.client.execution

import org.apache.toree.kernel.protocol.v5.UUID
import org.apache.toree.utils.LogLike

import scala.collection.concurrent.{Map, TrieMap}

object DeferredExecutionManager extends LogLike{
  private val executionMap: Map[UUID, DeferredExecution] = TrieMap[UUID, DeferredExecution]()
  
  def add(id: UUID, de: DeferredExecution): Unit = executionMap += (id -> de)

  def get(id: UUID): Option[DeferredExecution] = executionMap.get(id)

  def remove(de: DeferredExecution): Unit = {
    val optionalDE: Option[(UUID, DeferredExecution)] = executionMap.find {
      case (id: UUID, searchedDe: DeferredExecution) => de.eq(searchedDe)
    }
    optionalDE match {
      case None =>
        logger.warn("Searched and did not find deferred execution!")
      case Some((id, foundDe)) =>
        executionMap.remove(id)
    }
  }

} 
Example 4
Source File: TcpServiceImpl.scala    From c4proto   with Apache License 2.0
package ee.cone.c4gate_server

import java.net.InetSocketAddress
import java.nio.ByteBuffer
import java.nio.channels.{AsynchronousServerSocketChannel, AsynchronousSocketChannel, CompletionHandler}
import java.util.UUID
import java.util.concurrent.{Executors, ScheduledExecutorService, ScheduledFuture, TimeUnit}

import com.typesafe.scalalogging.LazyLogging
import ee.cone.c4actor._

import scala.collection.concurrent.TrieMap
import scala.collection.immutable.Queue

@SuppressWarnings(Array("org.wartremover.warts.Var")) class ChannelHandler(
  channel: AsynchronousSocketChannel, unregister: ()=>Unit, fail: Throwable=>Unit,
  executor: ScheduledExecutorService, timeout: Long, val compressor: Option[Compressor]
) extends CompletionHandler[Integer,Unit] with SenderToAgent {
  private var queue: Queue[Array[Byte]] = Queue.empty
  private var activeElement: Option[ByteBuffer] = None
  private var purge: Option[ScheduledFuture[_]] = None
  private def startWrite(): Unit =
    queue.dequeueOption.foreach{ case (element,nextQueue) =>
      queue = nextQueue
      activeElement = Option(ByteBuffer.wrap(element))
      channel.write[Unit](activeElement.get, (), this)
    }
  def add(data: Array[Byte]): Unit = synchronized {
    queue = queue.enqueue(data)
    if(activeElement.isEmpty) startWrite()
  }
  def completed(result: Integer, att: Unit): Unit = Trace {
    synchronized {
      if(activeElement.get.hasRemaining) channel.write[Unit](activeElement.get, (), this)
      else {
        purge.foreach(_.cancel(false))
        purge = Option(executor.schedule(new Runnable {
          def run(): Unit = close()
        },timeout,TimeUnit.SECONDS))
        activeElement = None
        startWrite()
      }
    }
  }
  def failed(exc: Throwable, att: Unit): Unit = {
    fail(exc)
    close()
  }
  def close(): Unit = {
    unregister()
    channel.close()  //does close block?
  }
}

class TcpServerImpl(
  port: Int, tcpHandler: TcpHandler, timeout: Long, compressorFactory: StreamCompressorFactory,
  channels: TrieMap[String,ChannelHandler] = TrieMap()
) extends TcpServer with Executable with LazyLogging {
  def getSender(connectionKey: String): Option[SenderToAgent] =
    channels.get(connectionKey)
  def run(): Unit = concurrent.blocking{
    tcpHandler.beforeServerStart()
    val address = new InetSocketAddress(port)
    val listener = AsynchronousServerSocketChannel.open().bind(address)
    val executor = Executors.newScheduledThreadPool(1)
    listener.accept[Unit]((), new CompletionHandler[AsynchronousSocketChannel,Unit] {
      def completed(ch: AsynchronousSocketChannel, att: Unit): Unit = Trace {
        listener.accept[Unit]((), this)
        val key = UUID.randomUUID.toString
        val sender = new ChannelHandler(ch, {() =>
          assert(channels.remove(key).nonEmpty)
          tcpHandler.afterDisconnect(key)
        }, { error =>
          logger.error("channel",error)
        }, executor, timeout, compressorFactory.create())
        assert(channels.put(key,sender).isEmpty)
        tcpHandler.afterConnect(key, sender)
      }
      def failed(exc: Throwable, att: Unit): Unit = logger.error("tcp",exc) //! may be set status-finished
    })
  }
} 
Example 5
Source File: OperationExecutionDispatcher.scala    From seahorse   with Apache License 2.0
package ai.deepsense.deeplang

import scala.collection.concurrent.TrieMap
import scala.concurrent.{Future, Promise}

import ai.deepsense.commons.models.Id

class OperationExecutionDispatcher {

  import OperationExecutionDispatcher._

  private val operationEndPromises: TrieMap[OperationId, Promise[Result]] = TrieMap.empty

  def executionStarted(workflowId: Id, nodeId: Id): Future[Result] = {
    val promise: Promise[Result] = Promise()
    require(operationEndPromises.put((workflowId, nodeId), promise).isEmpty)
    promise.future
  }

  def executionEnded(workflowId: Id, nodeId: Id, result: Result): Unit = {
    val promise = operationEndPromises.remove((workflowId, nodeId))
    require(promise.isDefined)
    promise.get.success(result)
  }
}

object OperationExecutionDispatcher {
  type OperationId = (Id, Id)
  type Error = String
  type Result = Either[Error, Unit]
} 
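
A small point about the dispatcher above: it uses the Option results of the concurrent-map operations to assert its protocol invariants — put returns the previous value (so isEmpty means the key was fresh) and remove returns the dropped value (so isDefined means something was actually pending). In isolation (our own snippet):

import scala.collection.concurrent.TrieMap

object PutRemoveSemantics extends App {
  val m = TrieMap.empty[Int, String]
  require(m.put(1, "first").isEmpty)            // no previous mapping existed
  require(m.put(1, "second").contains("first")) // replaced, old value returned
  require(m.remove(1).contains("second"))       // removed, dropped value returned
  require(m.remove(1).isEmpty)                  // already gone
}
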
Example 6
Source File: JVMObjectTracker.scala    From seahorse   with Apache License 2.0
// scalastyle:off

// Imports required by this snippet (the original file header was trimmed by the extractor):
import java.util.concurrent.atomic.AtomicInteger

import org.slf4j.{Logger, LoggerFactory}

import scala.collection.concurrent.TrieMap

private[r] object JVMObjectTracker {
  @transient
  protected lazy val logger: Logger = LoggerFactory.getLogger(getClass.getName)
  private[this] val objMap = new TrieMap[String, Object]
  private[this] val objCounter = new AtomicInteger(0)

  def getObject(id: String): Object = {
    logger.info(s"Get object at $id")
    objMap(id)
  }

  def get(id: String): Option[Object] = {
    logger.info(s"Get object at $id")
    objMap.get(id)
  }

  def put(obj: Object): String = {
    val objId = objCounter.getAndIncrement.toString
    val objName = obj.getClass.getName
    logger.info(s"Puts $objName at $objId ")
    objMap.put(objId, obj)
    objId
  }

  def remove(id: String): Option[Object] = {
    logger.info(s"Removed $id")
    objMap.remove(id)
  }

} 
Example 7
Source File: InMemoryWorkflowStorage.scala    From seahorse   with Apache License 2.0
package ai.deepsense.workflowmanager.storage.inmemory

import scala.collection.concurrent.TrieMap
import scala.concurrent.Future
import scala.language.postfixOps

import org.joda.time.DateTime
import spray.json._

import ai.deepsense.deeplang.CatalogRecorder
import ai.deepsense.models.json.graph.GraphJsonProtocol.GraphReader
import ai.deepsense.models.workflows.Workflow
import ai.deepsense.workflowmanager.storage.{WorkflowFullInfo, WorkflowRaw, WorkflowStorage}


class InMemoryWorkflowStorage extends WorkflowStorage {
  override val graphReader = new GraphReader(CatalogRecorder.resourcesCatalogRecorder.catalogs.operations)

  private val workflows: TrieMap[Workflow.Id, WorkflowRaw] = TrieMap()
  private val now = DateTime.now()

  def createRaw(id: Workflow.Id, workflow: JsValue, ownerId: String, ownerName: String): Future[Unit] = {
    save(id, workflow, Some(ownerId), Some(ownerName))
  }

  override def updateRaw(id: Workflow.Id, workflow: JsValue): Future[Unit] = {
    save(id, workflow, None, None)
  }

  private def save(id: Workflow.Id, workflow: JsValue,
      ownerId: Option[String], ownerName: Option[String]): Future[Unit] = {
    def withNewWorkflow(old: Option[WorkflowRaw]): WorkflowRaw =
      WorkflowRaw(workflow,
        old.map(_.created).getOrElse(DateTime.now),
        old.map(_.updated).getOrElse(DateTime.now),
        ownerId orElse old.map(_.ownerId) get,
        ownerName orElse old.map(_.ownerName) get)

    var oldEntry = workflows.get(id)
    var newEntry = withNewWorkflow(oldEntry)

    // Optimistic concurrency: retry until the compare-and-swap succeeds. When the
    // entry is absent, putIfAbsent performs the initial insert atomically instead of
    // comparing against a null previous value (which would never match an update).
    while (!(oldEntry match {
      case Some(old) => workflows.replace(id, old, newEntry)
      case None => workflows.putIfAbsent(id, newEntry).isEmpty
    })) {
      oldEntry = workflows.get(id)
      newEntry = withNewWorkflow(oldEntry)
    }
    Future.successful(())
  }

  override def get(id: Workflow.Id): Future[Option[WorkflowFullInfo]] = {
    Future.successful(workflows.get(id).map(rawWorkflowToFullWorkflow))
  }

  override def getAllRaw: Future[Map[Workflow.Id, WorkflowRaw]] = {
    Future.successful(workflows.toMap)
  }

  override def delete(id: Workflow.Id): Future[Unit] = {
    Future.successful(workflows.remove(id))
  }
} 
Example 8
Source File: LineBreakpoints.scala    From ncdbg   with BSD 3-Clause "New" or "Revised" License
package com.programmaticallyspeaking.ncd.nashorn

import com.programmaticallyspeaking.ncd.host._
import com.programmaticallyspeaking.ncd.infra.IdGenerator
import com.programmaticallyspeaking.ncd.messaging.{Observable, SerializedSubject}
import org.slf4s.Logging

import scala.collection.concurrent.TrieMap

class LineBreakpoints extends Logging {
  private val breakpointIdGenerator = new IdGenerator("ndb")
  private val byId = TrieMap[String, LineBreakpoint]()
  private val resolvedSubject = new SerializedSubject[BreakpointResolved]()

  def resolvedBreakpoints: Observable[BreakpointResolved] = resolvedSubject

  def addBreakableLocations(script: Script, newLocations: Seq[BreakableLocation]): Unit = {
    // Go through active breakpoints that belong to the script
    // For each BL that matches the active breakpoint, add it
    val lineBreakpointsForScript = byId.values.filter(_.belongsTo(script))
    lineBreakpointsForScript.foreach { bp =>
      val toAdd = newLocations.filter(bp.oughtToContain)
      if (toAdd.nonEmpty) {
        val willBeResolved = bp.isUnresolved
        log.debug(s"Adding ${toAdd.size} breakable locations to breakpoint ${bp.id}")
        bp.addBreakableLocations(toAdd)
        if (willBeResolved) {
          // Hm, can there be more than one location here?
          val first = toAdd.head
          val item = BreakpointResolved(bp.id, LocationInScript(first.script.id, first.scriptLocation))
          log.info(s"Resolving breakpoint ${bp.id} with location ${first.scriptLocation} in script ${script.id}")
          resolvedSubject.onNext(item)
        }
      }
    }
  }

  def forBreakableLocation(bl: BreakableLocation): Option[LineBreakpoint] = {
    byId.values.find(_.contains(bl))
  }

  def onBreakpointHit(activeBreakpoint: LineBreakpoint): Unit = {
    if (activeBreakpoint.isOneOff) {
      log.trace(s"Removing one-off breakpoint with id ${activeBreakpoint.id}")
      removeBreakpoint(activeBreakpoint)
    }
  }

  def removeAll(): Unit = {
    //TODO: Not very atomic, this
    byId.foreach(e => e._2.remove())
    byId.clear()
  }

  
  def removeUniqueForScript(script: Script): Seq[LineBreakpoint] = {
    // TODO: Not atomic...
    val forScript = byId.values.filter(_.belongsUniquelyTo(script))
    forScript.foreach { bp =>
      bp.remove()
      byId -= bp.id
    }
    forScript.toSeq
  }

  def removeById(id: String): Unit = {
    byId.get(id) match {
      case Some(activeBp) =>
        log.info(s"Removing breakpoint with id $id")
        removeBreakpoint(activeBp)
      case None =>
        log.warn(s"Got request to remove an unknown breakpoint with id $id")
    }
  }

  private def removeBreakpoint(activeBreakpoint: LineBreakpoint): Unit = {
    activeBreakpoint.remove()
    byId -= activeBreakpoint.id
  }

  def create(id: ScriptIdentity, location: ScriptLocation, locations: Seq[BreakableLocation], condition: Option[String], oneOff: Boolean): LineBreakpoint = {
    val activeBp = new LineBreakpoint(breakpointIdGenerator.next, locations, condition, id, location, oneOff)
    byId += (activeBp.id -> activeBp)
    activeBp
  }
} 
Example 9
Source File: BreakableLocations.scala    From ncdbg   with BSD 3-Clause "New" or "Revised" License
package com.programmaticallyspeaking.ncd.nashorn

import com.programmaticallyspeaking.ncd.host.{IdBasedScriptIdentity, Script, ScriptIdentity}
import com.sun.jdi.Location
import org.slf4s.Logging

import scala.collection.concurrent.TrieMap

class BreakableLocations(scripts: Scripts) extends Logging {
  import JDIExtensions._

  private val breakableLocationsByScriptUrl = TrieMap[String, Seq[BreakableLocation]]()

  
  def atLine(id: ScriptIdentity, lineNumber: Int): Seq[BreakableLocation] = findScriptUrl(id) match {
    case Some(scriptUrl) =>
      breakableLocationsByScriptUrl.get(scriptUrl).map { breakableLocations =>
        breakableLocations.filter(_.scriptLocation.lineNumber1Based == lineNumber)
      }.getOrElse(Seq.empty)
    case None =>
      id match {
        case IdBasedScriptIdentity(scriptId) =>
          throw new IllegalArgumentException("Unknown script: " + scriptId)
        case _ =>
          // This is not an error since the URL-based ID may match a future script in which case we will
          // emit a BreakpointResolved event at that time.
          Seq.empty
      }
  }

  private def findScriptUrl(id: ScriptIdentity): Option[String] = scripts.byId(id).map(_.url.toString)

  private def gatherBreakableLocations(script: Script, locations: Seq[Location]): Seq[BreakableLocation] = {
    // Find all potentially breakable lines. Create breakable locations from actual locations. Then create
    // candidates for the potentially breakable lines that weren't covered. Such a line may for example belong to
    // a function that hasn't been executed yet.
    locations.map(l => new BreakableLocation(script, l))
  }

} 
Example 10
Source File: FieldMetrics.scala    From graphcool-framework   with Apache License 2.0
package cool.graph

import sangria.execution._
import sangria.schema._
import spray.json.DefaultJsonProtocol._
import spray.json._
import com.typesafe.scalalogging.LazyLogging
import cool.graph.shared.logging.{LogData, LogKey}

import scala.collection.concurrent.TrieMap

class FieldMetricsMiddleware
    extends Middleware[RequestContextTrait]
    with MiddlewareAfterField[RequestContextTrait]
    with MiddlewareErrorField[RequestContextTrait]
    with LazyLogging {

  type QueryVal = TrieMap[String, List[Int]]
  type FieldVal = Long

  def beforeQuery(context: MiddlewareQueryContext[RequestContextTrait, _, _]) =
    TrieMap()
  def afterQuery(queryVal: QueryVal, context: MiddlewareQueryContext[RequestContextTrait, _, _]) = {

    import TimingProtocol._

    val total  = queryVal.foldLeft(0)(_ + _._2.sum)
    val sumMap = queryVal.toMap.mapValues(_.sum) + ("__total" -> total)
//    logger.info(
//      LogData(
//        key = LogKey.RequestMetricsFields,
//        requestId = context.ctx.requestId,
//        clientId = Some(context.ctx.clientId),
//        projectId = context.ctx.projectId,
//        payload = Some(sumMap)
//      ).json)
  }

  def beforeField(queryVal: QueryVal, mctx: MiddlewareQueryContext[RequestContextTrait, _, _], ctx: Context[RequestContextTrait, _]) =
    continue(System.currentTimeMillis())

  def afterField(queryVal: QueryVal,
                 fieldVal: FieldVal,
                 value: Any,
                 mctx: MiddlewareQueryContext[RequestContextTrait, _, _],
                 ctx: Context[RequestContextTrait, _]) = {
    val key  = ctx.parentType.name + "." + ctx.field.name
    val list = queryVal.getOrElse(key, Nil)

    queryVal.update(key, list :+ (System.currentTimeMillis() - fieldVal).toInt)
    None
  }

  def fieldError(queryVal: QueryVal,
                 fieldVal: FieldVal,
                 error: Throwable,
                 mctx: MiddlewareQueryContext[RequestContextTrait, _, _],
                 ctx: Context[RequestContextTrait, _]) = {
    val key    = ctx.parentType.name + "." + ctx.field.name
    val list   = queryVal.getOrElse(key, Nil)
    val errors = queryVal.getOrElse("ERROR", Nil)

    queryVal.update(key, list :+ (System.currentTimeMillis() - fieldVal).toInt)
    queryVal.update("ERROR", errors :+ 1)
  }
} 
Example 11
Source File: RequestContext.scala    From graphcool-framework   with Apache License 2.0
package cool.graph

import cool.graph.client.FeatureMetric.FeatureMetric
import cool.graph.client.MutationQueryWhitelist
import cool.graph.shared.models.Client
import scaldi.{Injectable, Injector}

import scala.collection.concurrent.TrieMap

trait RequestContextTrait {
  val requestId: String
  val requestIp: String
  val clientId: String
  val projectId: Option[String]
  val log: Function[String, Unit]
  var graphcoolHeader: Option[String] = None

  // The console always includes the header `X-GraphCool-Source` with the value `dashboard:[sub section]`
  def isFromConsole = graphcoolHeader.exists(header => header.contains("dashboard") || header.contains("console"))

  val isSubscription: Boolean = false
  val mutationQueryWhitelist  = new MutationQueryWhitelist()

  private val featureMetrics: TrieMap[String, Unit] = TrieMap()

  def addFeatureMetric(featureMetric: FeatureMetric): Unit =
    featureMetrics.put(featureMetric.toString, ()) // stores the unit value; the map acts as a concurrent set of metric names
  def listFeatureMetrics: List[String]                     = featureMetrics.keys.toList

}

trait SystemRequestContextTrait extends RequestContextTrait {
  override val clientId: String = client.map(_.id).getOrElse("")
  val client: Option[Client]
}

case class RequestContext(clientId: String, requestId: String, requestIp: String, log: Function[String, Unit], projectId: Option[String] = None)(
    implicit inj: Injector)
    extends RequestContextTrait
    with Injectable {} 
Example 12
Source File: OffsetSaveSink.scala    From toketi-iothubreact   with MIT License
// Copyright (c) Microsoft. All rights reserved.

package com.microsoft.azure.iot.iothubreact.sinks

import java.util.concurrent.CompletionStage

import akka.Done
import akka.actor.ActorRef
import akka.japi.function.Procedure
import akka.stream.javadsl.{Sink ⇒ JavaSink}
import akka.stream.scaladsl.{Sink ⇒ ScalaSink}
import com.microsoft.azure.iot.iothubreact.checkpointing.CheckpointService.UpdateOffset
import com.microsoft.azure.iot.iothubreact.checkpointing.{CheckpointActorSystem, IOffsetLoader}
import com.microsoft.azure.iot.iothubreact.config.IConfiguration
import com.microsoft.azure.iot.iothubreact.{Logger, MessageFromDevice}

import scala.collection.concurrent.TrieMap
import scala.concurrent.Future

private[iothubreact] final case class OffsetSaveSink(
    config: IConfiguration,
    offsetLoader: IOffsetLoader)
  extends ISink[MessageFromDevice]
    with Logger {

  // The service responsible for writing the offset to the storage
  lazy val checkpointService = (0 until config.connect.iotHubPartitions).map {
    p ⇒
      p → CheckpointActorSystem(config.checkpointing).getCheckpointService(p)
  }(collection.breakOut): Map[Int, ActorRef]

  // The offset stored (value) for each partition (key)
  val current: TrieMap[Int, Long] = TrieMap()

  // Initialize `current` with the offsets in the storage
  offsetLoader.GetSavedOffsets.foreach {
    case (a, c) ⇒ current += a → c.toLong
  }

  def scalaSink(): ScalaSink[MessageFromDevice, scala.concurrent.Future[Done]] = {
    ScalaSink.foreach[MessageFromDevice] {
      doWrite
    }
  }

  def javaSink(): JavaSink[MessageFromDevice, CompletionStage[Done]] = {
    JavaSink.foreach[MessageFromDevice] {
      JavaSinkProcedure
    }
  }

  // Required for Scala 2.11
  private[this] object JavaSinkProcedure extends Procedure[MessageFromDevice] {
    @scala.throws[Exception](classOf[Exception])
    override def apply(m: MessageFromDevice): Unit = {
      doWrite(m)
    }
  }

  private[this] def doWrite(m: MessageFromDevice) = {
    m.runtimeInfo.partitionInfo.partitionNumber.map {
      p =>
        synchronized {
          val os: Long = m.offset.toLong
          val cur: Long = current.getOrElse(p, -1)
          if (os > cur) {
            log.debug(s"Committing offset ${m.offset} on partition ${p}")
            checkpointService(p) ! UpdateOffset(m.offset)
            current += p → os
          } else {
            log.debug(s"Ignoring offset ${m.offset} since it precedes ${cur}")
            Future successful (Done)
          }
        }
    }
  }
} 
Example 13
Source File: Mappers.scala    From common4s   with Apache License 2.0
package commons.mapper

import java.sql.ResultSet

import scala.collection.JavaConversions.{ mapAsJavaMap, mapAsScalaMap }
import scala.collection.concurrent.TrieMap


object Mappers {

	private val beanToMapMapperCache = new TrieMap[Class[_], BeanToMapMapper]
	private val mapToBeanMapperCache = new TrieMap[Class[_], MapToBeanMapper]
	private val autoConvertTypeMapToBeanMapperCache = new TrieMap[Class[_], MapToBeanMapper]
	private val resultSetMapperCache = new TrieMap[Class[_], ResultSetMapper]

	def beanToMap(any : AnyRef) : collection.Map[String, Any] = {
		val map = beanToMapMapperCache
			.getOrElseUpdate(any.getClass, BeanToMapMapper.createMapper(any.getClass))
			.map(any)

		mapAsScalaMap(map)
	}

	def mapToBean[T](map : collection.Map[String, Any])(implicit classTag : scala.reflect.ClassTag[T]) : T = {
		mapToBean(map, false)
	}

	def mapToBean[T](map : collection.Map[String, Any], autoConvert : Boolean)(implicit classTag : scala.reflect.ClassTag[T]) : T = {
		val clazz = classTag.runtimeClass

		val mapper =
			if (!autoConvert) mapToBeanMapperCache.getOrElseUpdate(clazz, MapToBeanMapper.createMapper(classTag.runtimeClass))
			else autoConvertTypeMapToBeanMapperCache.getOrElseUpdate(clazz, MapToBeanMapper.createMapper(classTag.runtimeClass, true))

		mapper.map(mapAsJavaMap(map)).asInstanceOf[T]
	}

	def resultSetToBean[T](rs : ResultSet)(implicit classTag : scala.reflect.ClassTag[T]) : T = {
		val clazz = classTag.runtimeClass
		resultSetMapperCache.getOrElseUpdate(clazz, ResultSetMapper.createMapper(clazz)).map(rs).asInstanceOf[T]
	}

	def resultSetToMap(rs : ResultSet) : collection.Map[String, Any] = {
		resultSetToBean[collection.Map[String, Any]](rs)
	}
} 
Example 14
Source File: AppRegistry.scala    From Scala-Design-Patterns-Second-Edition   with MIT License
package com.ivan.nikolov.creational.singleton

import scala.collection.concurrent.{TrieMap, Map}

object AppRegistry {
  System.out.println("Registry initialization block called.")
  private val users: Map[String, String] = TrieMap.empty
  
  def addUser(id: String, name: String): Unit = {
    users.put(id, name)
  }
  
  def removeUser(id: String): Unit = {
    users.remove(id)
  }
  
  def isUserRegistered(id: String): Boolean =
    users.contains(id)
  
  def getAllUserNames(): List[String] =
    users.map(_._2).toList
}

object AppRegistryExample {
  def main(args: Array[String]): Unit = {
    System.out.println("Sleeping for 5 seconds.")
    Thread.sleep(5000)
    System.out.println("I woke up.")
    AppRegistry.addUser("1", "Ivan")
    AppRegistry.addUser("2", "John")
    AppRegistry.addUser("3", "Martin")
    System.out.println(s"Is user with ID=1 registered? ${AppRegistry.isUserRegistered("1")}")
    System.out.println("Removing ID=2")
    AppRegistry.removeUser("2")
    System.out.println(s"Is user with ID=2 registered? ${AppRegistry.isUserRegistered("2")}")
    System.out.println(s"All users registered are: ${AppRegistry.getAllUserNames().mkString(",")}")
  }
} 
Example 15
Source File: TrieMapCache.scala    From sangria   with Apache License 2.0
package sangria.util

import scala.collection.concurrent.TrieMap

class TrieMapCache[Key, Value] extends Cache[Key, Value] {
  private val cache = TrieMap[Key, Value]()

  def size = cache.size

  def contains(key: Key) = cache.contains(key)
  def apply(key: Key) = cache(key)
  def get(key: Key) = cache.get(key)
  def getOrElse(key: Key, default: => Value) = cache.getOrElse(key, default)
  def update(key: Key, value: Value) = cache.update(key, value)
  def remove(key: Key) = cache.remove(key)
  def clear() = cache.clear()

  def getOrElseUpdate(key: Key, fn: => Value) = cache.getOrElseUpdate(key, fn)
  def find(fn: (Key, Value) => Boolean) = cache.find {case (key, value) => fn(key, value)}
  def mapToSet[R](fn: (Key, Value) => R) = cache.map {case (key, value) => fn(key, value)}.toSet
  def mapValues[R](fn: Value => R) = cache.mapValues(fn).toMap
  def keyExists(fn: Key => Boolean) = cache.keySet.exists(fn)
  def forEachValue(fn: Value => Unit) = cache.values.foreach(fn)
  def removeKeys(fn: Key => Boolean) = cache.keys.toVector.foreach(key => if (fn(key)) cache.remove(key))

  def canEqual(other: Any): Boolean = other.isInstanceOf[TrieMapCache[_, _]]

  override def equals(other: Any): Boolean = other match {
    case that: TrieMapCache[_, _] => (that canEqual this) && cache == that.cache
    case _ => false
  }

  override def hashCode(): Int =
    31 * cache.hashCode()
} 
Example 16
Source File: JVMObjectTracker.scala    From seahorse-workflow-executor   with Apache License 2.0
// scalastyle:off

// Imports required by this snippet (the original file header was trimmed by the extractor):
import java.util.concurrent.atomic.AtomicInteger

import org.slf4j.{Logger, LoggerFactory}

import scala.collection.concurrent.TrieMap

private[r] object JVMObjectTracker {
  @transient
  protected lazy val logger: Logger = LoggerFactory.getLogger(getClass.getName)
  private[this] val objMap = new TrieMap[String, Object]
  private[this] val objCounter = new AtomicInteger(0)

  def getObject(id: String): Object = {
    logger.info(s"Get object at  $id")
    objMap(id)
  }

  def get(id: String): Option[Object] = {
    logger.info(s"Get object at $id")
    objMap.get(id)
  }

  def put(obj: Object): String = {
    val objId = objCounter.getAndIncrement.toString
    val objName = obj.getClass.getName
    logger.info(s"Puts $objName at $objId ")
    objMap.put(objId, obj)
    objId
  }

  def remove(id: String): Option[Object] = {
    logger.info(s"Removed $id")
    objMap.remove(id)
  }

} 
Example 17
Source File: JobSpecServiceFixture.scala    From metronome   with Apache License 2.0
package dcos.metronome
package jobspec.impl

import java.time.Clock

import dcos.metronome.jobspec.JobSpecService
import dcos.metronome.model.{JobId, JobSpec, ScheduleSpec}

import scala.collection.concurrent.TrieMap
import scala.concurrent.Future
import scala.util.control.NonFatal

object JobSpecServiceFixture {

  def simpleJobSpecService(testClock: Clock = Clock.systemUTC()): JobSpecService =
    new JobSpecService {
      val specs = TrieMap.empty[JobId, JobSpec]
      import Future._
      override def getJobSpec(id: JobId): Future[Option[JobSpec]] = successful(specs.get(id))

      override def createJobSpec(jobSpec: JobSpec): Future[JobSpec] = {
        specs.get(jobSpec.id) match {
          case Some(_) =>
            failed(JobSpecAlreadyExists(jobSpec.id))
          case None =>
            specs += jobSpec.id -> jobSpecWithMockedTime(jobSpec)
            successful(jobSpec)
        }
      }

      private def jobSpecWithMockedTime(jobSpec: JobSpec): JobSpec =
        jobSpec.copy(schedules =
          jobSpec.schedules.map(s =>
            new ScheduleSpec(s.id, s.cron, s.timeZone, s.startingDeadline, s.concurrencyPolicy, s.enabled) {
              override def clock: Clock = testClock
            }
          )
        )

      override def updateJobSpec(id: JobId, update: JobSpec => JobSpec): Future[JobSpec] = {
        specs.get(id) match {
          case Some(spec) =>
            try {
              val changed = update(spec)
              specs.update(id, jobSpecWithMockedTime(changed))
              successful(changed)
            } catch {
              case NonFatal(ex) => failed(ex)
            }
          case None => failed(JobSpecDoesNotExist(id))
        }
      }

      override def listJobSpecs(filter: JobSpec => Boolean): Future[Iterable[JobSpec]] = {
        successful(specs.values.filter(filter))
      }

      override def deleteJobSpec(id: JobId): Future[JobSpec] = {
        specs.get(id) match {
          case Some(spec) =>
            specs -= id
            successful(spec)
          case None => failed(JobSpecDoesNotExist(id))
        }
      }
    }
} 
Example 18
Source File: JobRunServiceFixture.scala    From metronome   with Apache License 2.0
package dcos.metronome
package jobrun

import java.time.Clock

import dcos.metronome.model._
import mesosphere.marathon.core.task.Task

import scala.collection.concurrent.TrieMap
import scala.concurrent.duration.Duration
import scala.concurrent.{Future, Promise}

object JobRunServiceFixture {

  def simpleJobRunService(): JobRunService =
    new JobRunService {
      val specs = TrieMap.empty[JobRunId, StartedJobRun]

      override def getJobRun(jobRunId: JobRunId): Future[Option[StartedJobRun]] = {
        Future.successful(specs.get(jobRunId))
      }

      override def killJobRun(jobRunId: JobRunId): Future[StartedJobRun] = {
        specs.get(jobRunId) match {
          case Some(value) => Future.successful(value)
          case None => Future.failed(JobRunDoesNotExist(jobRunId))
        }
      }

      override def activeRuns(jobSpecId: JobId): Future[Iterable[StartedJobRun]] = {
        Future.successful(specs.values.filter(_.jobRun.jobSpec.id == jobSpecId))
      }
      override def listRuns(filter: JobRun => Boolean): Future[Iterable[StartedJobRun]] = {
        Future.successful(specs.values.filter(r => filter(r.jobRun)))
      }
      override def startJobRun(jobSpec: JobSpec, schedule: Option[ScheduleSpec] = None): Future[StartedJobRun] = {
        val startingDeadline: Option[Duration] = schedule.map(_.startingDeadline)
        val run = JobRun(
          JobRunId(jobSpec),
          jobSpec,
          JobRunStatus.Active,
          Clock.systemUTC().instant(),
          None,
          startingDeadline,
          Map.empty[Task.Id, JobRunTask]
        )
        val startedRun = StartedJobRun(run, Promise[JobResult].future)
        specs += run.id -> startedRun
        Future.successful(startedRun)
      }
    }
} 
Example 19
Source File: BlockchainSimulationSpecification.scala    From sigmastate-interpreter   with MIT License
package sigmastate.utxo.blockchain

import java.io.{File, FileWriter}

import org.scalacheck.Gen
import sigmastate.Values.{BooleanConstant, ErgoTree, GetVarBoolean, TrueLeaf}
import sigmastate.helpers.{ContextEnrichingTestProvingInterpreter, ErgoLikeTestProvingInterpreter}
import sigmastate.interpreter.ContextExtension
import sigmastate.utxo.blockchain.BlockchainSimulationTestingCommons._

import scala.collection.concurrent.TrieMap
import scala.util.Random


class BlockchainSimulationSpecification extends BlockchainSimulationTestingCommons {

  implicit lazy val IR = new TestingIRContext

  property("apply one valid block") {
    val state = ValidationState.initialState()
    val miner = new ErgoLikeTestProvingInterpreter()
    val block = generateBlock(state, miner, 0)
    val updStateTry = state.applyBlock(block)
    updStateTry.isSuccess shouldBe true
  }

  property("too costly block") {
    val state = ValidationState.initialState()
    val miner = new ErgoLikeTestProvingInterpreter()
    val block = generateBlock(state, miner, 0)
    val updStateTry = state.applyBlock(block, maxCost = 1)
    updStateTry.isSuccess shouldBe false
  }

  property("apply many blocks") {
    val state = ValidationState.initialState()
    val miner = new ErgoLikeTestProvingInterpreter()
    checkState(state, miner, 0, randomDeepness)
  }

  property("apply many blocks with enriched context") {
    val state = ValidationState.initialState()
    val miner = new ErgoLikeTestProvingInterpreter()
    val varId = 1.toByte
    val prop = GetVarBoolean(varId).get.toSigmaProp
    // unable to spend boxes without correct context extension
    an[RuntimeException] should be thrownBy checkState(state, miner, 0, randomDeepness, Some(prop))

    // spend boxes with context extension
    val contextExtension = ContextExtension(Map(varId -> TrueLeaf))
    checkState(state, miner, 0, randomDeepness, Some(prop), contextExtension)
  }

  ignore(s"benchmarking applying many blocks (!!! ignored)") {
    val results = new TrieMap[Int, Long]

    def bench(numberOfBlocks: Int): Unit = {

      val state = ValidationState.initialState()
      val miner = new ContextEnrichingTestProvingInterpreter()

      val (_, time) = (0 until numberOfBlocks).foldLeft(state -> 0L) { case ((s, timeAcc), h) =>
        val b = generateBlock(state, miner, h)

        val t0 = System.currentTimeMillis()
        val updStateTry = s.applyBlock(b)
        val t = System.currentTimeMillis()

        updStateTry shouldBe 'success
        updStateTry.get -> (timeAcc + (t - t0))
      }

      println(s"Total time for $numberOfBlocks blocks: $time ms")
      results.put(numberOfBlocks, time)
    }

    bench(100)
    bench(200)
    bench(300)
    bench(400)

    printResults(results.toMap)

    def printResults(results: Map[Int, Long]): Unit = {
      val file = new File("target/bench")
      file.mkdirs()
      val writer = new FileWriter(s"target/bench/result.csv", false)
      val sorted = results.toList.sortBy { case (i, _) => i }
      val header = sorted.map(_._1).mkString(",")
      writer.write(s"$header\n")
      val values = sorted.map(_._2).mkString(",")
      writer.write(s"$values\n")
      writer.flush()
      writer.close()
    }
  }
} 
Example 20
Source File: UserRepositoryInMemoryInterpreter.scala    From scala-pet-store   with Apache License 2.0
package io.github.pauljamescleary.petstore
package infrastructure.repository.inmemory

import java.util.Random

import cats.implicits._
import cats.Applicative
import cats.data.OptionT
import domain.users.{User, UserRepositoryAlgebra}
import tsec.authentication.IdentityStore

import scala.collection.concurrent.TrieMap

class UserRepositoryInMemoryInterpreter[F[_]: Applicative]
    extends UserRepositoryAlgebra[F]
    with IdentityStore[F, Long, User] {
  private val cache = new TrieMap[Long, User]

  private val random = new Random

  def create(user: User): F[User] = {
    val id = random.nextLong
    val toSave = user.copy(id = id.some)
    cache += (id -> toSave)
    toSave.pure[F]
  }

  def update(user: User): OptionT[F, User] = OptionT {
    user.id.traverse { id =>
      cache.update(id, user)
      user.pure[F]
    }
  }

  def get(id: Long): OptionT[F, User] =
    OptionT.fromOption(cache.get(id))

  def delete(id: Long): OptionT[F, User] =
    OptionT.fromOption(cache.remove(id))

  def findByUserName(userName: String): OptionT[F, User] =
    OptionT.fromOption(cache.values.find(u => u.userName == userName))

  def list(pageSize: Int, offset: Int): F[List[User]] =
    cache.values.toList.sortBy(_.lastName).slice(offset, offset + pageSize).pure[F]

  def deleteByUserName(userName: String): OptionT[F, User] =
    OptionT.fromOption(
      for {
        user <- cache.values.find(u => u.userName == userName)
        removed <- cache.remove(user.id.get)
      } yield removed,
    )
}

object UserRepositoryInMemoryInterpreter {
  def apply[F[_]: Applicative]() =
    new UserRepositoryInMemoryInterpreter[F]
} 
Example 21
Source File: OrderRepositoryInMemoryInterpreter.scala    From scala-pet-store   with Apache License 2.0
package io.github.pauljamescleary.petstore
package infrastructure.repository.inmemory

import scala.collection.concurrent.TrieMap
import scala.util.Random

import cats._
import cats.implicits._
import domain.orders.{Order, OrderRepositoryAlgebra}

class OrderRepositoryInMemoryInterpreter[F[_]: Applicative] extends OrderRepositoryAlgebra[F] {
  private val cache = new TrieMap[Long, Order]

  private val random = new Random

  def create(order: Order): F[Order] = {
    val toSave = order.copy(id = order.id.orElse(random.nextLong.some))
    toSave.id.foreach(cache.put(_, toSave))
    toSave.pure[F]
  }

  def get(orderId: Long): F[Option[Order]] =
    cache.get(orderId).pure[F]

  def delete(orderId: Long): F[Option[Order]] =
    cache.remove(orderId).pure[F]
}

object OrderRepositoryInMemoryInterpreter {
  def apply[F[_]: Applicative]() = new OrderRepositoryInMemoryInterpreter[F]()
} 
Example 22
Source File: PetRepositoryInMemoryInterpreter.scala    From scala-pet-store   with Apache License 2.0
package io.github.pauljamescleary.petstore
package infrastructure.repository.inmemory

import scala.collection.concurrent.TrieMap
import scala.util.Random

import cats._
import cats.data.NonEmptyList
import cats.implicits._
import domain.pets.{Pet, PetRepositoryAlgebra, PetStatus}

class PetRepositoryInMemoryInterpreter[F[_]: Applicative] extends PetRepositoryAlgebra[F] {
  private val cache = new TrieMap[Long, Pet]

  private val random = new Random

  def create(pet: Pet): F[Pet] = {
    val id = random.nextLong
    val toSave = pet.copy(id = id.some)
    cache += (id -> pet.copy(id = id.some))
    toSave.pure[F]
  }

  def update(pet: Pet): F[Option[Pet]] = pet.id.traverse { id =>
    cache.update(id, pet)
    pet.pure[F]
  }

  def get(id: Long): F[Option[Pet]] = cache.get(id).pure[F]

  def delete(id: Long): F[Option[Pet]] = cache.remove(id).pure[F]

  def findByNameAndCategory(name: String, category: String): F[Set[Pet]] =
    cache.values
      .filter(p => p.name == name && p.category == category)
      .toSet
      .pure[F]

  def list(pageSize: Int, offset: Int): F[List[Pet]] =
    cache.values.toList.sortBy(_.name).slice(offset, offset + pageSize).pure[F]

  def findByStatus(statuses: NonEmptyList[PetStatus]): F[List[Pet]] =
    cache.values.filter(p => statuses.exists(_ == p.status)).toList.pure[F]

  def findByTag(tags: NonEmptyList[String]): F[List[Pet]] = {
    val tagSet = tags.toNes
    cache.values.filter(_.tags.exists(tagSet.contains(_))).toList.pure[F]
  }
}

object PetRepositoryInMemoryInterpreter {
  def apply[F[_]: Applicative]() = new PetRepositoryInMemoryInterpreter[F]()
} 
Example 23
Source File: InMemoryStore.scala    From slab   with Apache License 2.0
package com.criteo.slab.lib

import java.time.format.{DateTimeFormatter, FormatStyle}
import java.time.temporal.ChronoUnit
import java.time.{Instant, ZoneId}
import java.util.concurrent.{Executors, TimeUnit}

import com.criteo.slab.core.{Codec, Context, Store}
import com.criteo.slab.lib.Values.Slo
import org.slf4j.{Logger, LoggerFactory}

import scala.collection.concurrent.TrieMap
import scala.concurrent.Future
import scala.util.Try


class InMemoryStore(
                     val expiryDays: Int = 30
                   ) extends Store[Any] {
  private val logger = LoggerFactory.getLogger(this.getClass)
  private val cache = TrieMap.empty[(String, Long), Any]
  private val scheduler = Executors.newSingleThreadScheduledExecutor()

  scheduler.scheduleAtFixedRate(InMemoryStore.createCleaner(cache, expiryDays, logger), 1, 1, TimeUnit.HOURS)
  logger.info(s"InMemoryStore started, entries expire in $expiryDays days")

  sys.addShutdownHook {
    logger.info(s"Shutting down...")
    scheduler.shutdown()
  }

  override def upload[T](id: String, context: Context, v: T)(implicit codec: Codec[T, Any]): Future[Unit] = {
    logger.debug(s"Uploading $id")
    Future.successful {
      cache.putIfAbsent((id, context.when.toEpochMilli), codec.encode(v))
      logger.info(s"Store updated, size: ${cache.size}")
    }
  }

  override def uploadSlo(id: String, context: Context, slo: Slo)(implicit codec: Codec[Slo, Any]): Future[Unit] = {
    upload[Slo](id, context, slo)
  }

  def fetchSloHistory(id: String, from: Instant, until: Instant)(implicit codec: Codec[Slo, Any]): Future[Seq[(Long, Slo)]] = {
    fetchHistory[Slo](id, from, until)(codec)
  }

  override def fetch[T](id: String, context: Context)(implicit codec: Codec[T, Any]): Future[Option[T]] = {
    logger.debug(s"Fetching $id")
    Future.successful {
      cache.get((id, context.when.toEpochMilli)) map { v =>
        codec.decode(v).get
      }
    }
  }

  override def fetchHistory[T](
                                id: String,
                                from: Instant,
                                until: Instant
                              )(implicit ev: Codec[T, Any]): Future[Seq[(Long, T)]] = {
    logger.debug(s"Fetching the history of $id from ${format(from)} until ${format(until)}, cache size: ${cache.size}")
    Future.successful {
      cache.withFilter { case ((_id, ts), _) =>
        _id == id && ts >= from.toEpochMilli && ts <= until.toEpochMilli
      }.map { case ((_, ts), repr) =>
        (ts, ev.decode(repr).get)
      }.toList
    }
  }

  private def format(i: Instant) = DateTimeFormatter.ofLocalizedDateTime(FormatStyle.FULL)
    .withZone(ZoneId.systemDefault)
    .format(i)
}

object InMemoryStore {
  implicit def codec[T] = new Codec[T, Any] {
    override def encode(v: T): Any = v

    override def decode(v: Any): Try[T] = Try(v.asInstanceOf[T])
  }

  def createCleaner(cache: TrieMap[(String, Long), Any], expiryDays: Int, logger: Logger): Runnable = {
    object C extends Runnable {
      override def run(): Unit = {
        val expired = cache.filterKeys(_._2 <= Instant.now.minus(expiryDays, ChronoUnit.DAYS).toEpochMilli).keys
        logger.debug(s"${expired.size} out of ${cache.size} entries have expired, cleaning up...")
        cache --= expired
      }
    }
    C
  }
} 
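
The hourly cleaner above filters the live map directly; that is safe because TrieMap traversal is snapshot-based, and when an explicitly frozen view is wanted, TrieMap also provides constant-time snapshot() and readOnlySnapshot(). The same expiry sweep written against a read-only snapshot (our own sketch):

import java.time.Instant
import java.time.temporal.ChronoUnit

import scala.collection.concurrent.TrieMap

object ExpirySweep {
  def sweep(cache: TrieMap[(String, Long), Any], expiryDays: Int): Unit = {
    val cutoff = Instant.now.minus(expiryDays.toLong, ChronoUnit.DAYS).toEpochMilli
    // Decide what to drop from a frozen view, then mutate the live map.
    val expired = cache.readOnlySnapshot().keysIterator.filter(_._2 <= cutoff).toVector
    cache --= expired
  }
}
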
Example 24
Source File: InMemoryStoreSpec.scala    From slab   with Apache License 2.0
package com.criteo.slab.lib

import java.time.Instant
import java.time.temporal.ChronoUnit

import org.scalatest.{FlatSpec, Matchers}
import org.slf4j.LoggerFactory

import scala.collection.concurrent.TrieMap

class InMemoryStoreSpec extends FlatSpec with Matchers {
  val logger = LoggerFactory.getLogger(this.getClass)
  "Cleaner" should "remove expired entries" in {
    val cache = TrieMap.empty[(String, Long), Any]
    val cleaner = InMemoryStore.createCleaner(cache, 1, logger)

    cache += ("a", Instant.now.minus(2, ChronoUnit.DAYS).toEpochMilli) -> 1
    cache += ("b", Instant.now.minus(1, ChronoUnit.DAYS).toEpochMilli) -> 2
    cache += ("c", Instant.now.toEpochMilli) -> 3
    cleaner.run()
    cache.size shouldBe 1
    cache.head._1._1 shouldBe "c"
  }

} 
Example 25
Source File: ConnectionPool.scala    From vamp   with Apache License 2.0
package io.vamp.persistence.sqlconnectionpool

import com.typesafe.scalalogging.LazyLogging
import org.apache.commons.dbcp2._

import scala.collection.concurrent.TrieMap

protected case class DataSourceConfiguration(url: String, user: String, password: String)

object ConnectionPool extends LazyLogging {

  protected val dataSources: TrieMap[DataSourceConfiguration, BasicDataSource] = TrieMap[DataSourceConfiguration, BasicDataSource]()
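  // NOTE (editorial): the explicit synchronization in apply() below is what guarantees at
  // most one BasicDataSource per configuration; TrieMap.getOrElseUpdate alone may evaluate
  // the factory block in two racing threads and discard one freshly created, never-closed pool.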

  def apply(url: String, user: String, password: String): BasicDataSource = this.synchronized {
    val conf = DataSourceConfiguration(url, user, password)
    dataSources.getOrElseUpdate(conf, {
      logger.info(s"create DataSource ${conf.url}")
      val datasource = new BasicDataSource()
      datasource.setUsername(user)
      datasource.setPassword(password)
      datasource.setUrl(url)
      datasource
    })
  }
} 
Example 26
Source File: HeartbeatClient.scala    From incubator-toree   with Apache License 2.0
package org.apache.toree.kernel.protocol.v5.client.socket

import akka.actor.{ActorRef, Actor}
import akka.util.{ByteString, Timeout}
import org.apache.toree.communication.ZMQMessage
import akka.pattern.ask
import org.apache.toree.kernel.protocol.v5.client.ActorLoader
import org.apache.toree.utils.LogLike
import org.apache.toree.kernel.protocol.v5.UUID
import scala.collection.concurrent.{Map, TrieMap}
import scala.concurrent.duration._

object HeartbeatMessage {}

class HeartbeatClient(
  socketFactory : SocketFactory,
  actorLoader: ActorLoader,
  signatureEnabled: Boolean
) extends Actor with LogLike {
  logger.debug("Created new Heartbeat Client actor")
  implicit val timeout = Timeout(1.minute)

  val futureMap: Map[UUID, ActorRef] = TrieMap[UUID, ActorRef]()
  val socket = socketFactory.HeartbeatClient(context.system, self)

  override def receive: Receive = {
    // from Heartbeat
    case message: ZMQMessage =>
      val id = message.frames.map((byteString: ByteString) =>
        new String(byteString.toArray)).mkString("\n")
      logger.info(s"Heartbeat client receive:$id")
      futureMap(id) ! true
      futureMap.remove(id)

    // from SparkKernelClient
    case HeartbeatMessage =>
      import scala.concurrent.ExecutionContext.Implicits.global
      val id = java.util.UUID.randomUUID().toString
      futureMap += (id -> sender)
      logger.info(s"Heartbeat client send: $id")
      val future = socket ? ZMQMessage(ByteString(id.getBytes))
      future.onComplete {
        // the future always times out, because the server "tells" its response
        // rather than replying to the ask; clean up the entry either way
        case _ => futureMap.remove(id)
      }
  }
} 
Example 27
Source File: CustomAuth.scala    From typed-schema   with Apache License 2.0
package ru.tinkoff.tschema
package examples
import akka.http.scaladsl.server.AuthenticationFailedRejection.CredentialsRejected
import akka.http.scaladsl.server.{AuthenticationFailedRejection, Route}
import akka.http.scaladsl.server.directives.Credentials
import de.heikoseeberger.akkahttpcirce.FailFastCirceSupport._
import derevo.circe.{decoder, encoder}
import derevo.derive
import ru.tinkoff.tschema.akkaHttp.{MkRoute, Serve}
import ru.tinkoff.tschema.examples.SpecialAuth.validateAuth
import ru.tinkoff.tschema.swagger.{SwaggerOps, SwaggerMapper}
import ru.tinkoff.tschema.swagger._
import syntax._
import ru.tinkoff.tschema.typeDSL.{DSLAtom, DSLAtomAuth}
import shapeless.ops.record.Selector
import shapeless.{HList, Witness => W}

import scala.collection.concurrent.TrieMap

object CustomAuth extends ExampleModule {
  override def route: Route         = MkRoute(api)(handler)
  override def swag: SwaggerBuilder = MkSwagger(api)

  implicit val auth = AuthMap(Map("kriwda" -> (true, "admin"), "oleg" -> (false, "notadmin")))

  def api =
    tagPrefix("adminka") |> queryParam[String]("userId") |>
      ((
        post |> validateAuth("userId", true) |> body[BanUser]("banUser") |> operation("ban") |> $$[Result]
      ) <> (
        get |> validateAuth("userId", false) |> operation("bans") |> $$[List[BanUser]]
      ))

  private val banned = TrieMap.empty[String, BanUser]

  object handler {
    def ban(banUser: BanUser, userId: String): Result = {
      banned.put(banUser.userToBan, banUser)
      Result(s"$userId ok")
    }
    def bans: List[BanUser] = banned.values.toList
  }
}

@derive(encoder, decoder, Swagger)
final case class BanUser(userToBan: String, description: Option[String], ttl: Option[Long])

@derive(encoder, decoder, Swagger)
final case class Result(message: String)

final case class AuthMap(values: Map[String, (Boolean, String)]) {
  def get(key: String, admin: Boolean): Option[String] =
    values.get(key).collect {
      case (adm, secret) if adm || (!admin) => secret
    }
}

class SpecialAuth[userVar, admin <: Boolean] extends DSLAtom

object SpecialAuth {
  def validateAuth[userVar, admin <: Boolean](
      userVar: W.Aux[userVar],
      admin: W.Aux[admin]
  ): SpecialAuth[userVar, admin] =
    new SpecialAuth

  import Serve.{Check, serveReadCheck}

  implicit def swagger[userVar, admin <: Boolean]: SwaggerMapper[SpecialAuth[userVar, admin]] =
    bearerAuth[String]("kriwda", "kriwda").swaggerMapper.as[SpecialAuth[userVar, admin]]


  import akka.http.scaladsl.server.Directives._

  implicit def serve[In <: HList, userVar, admin <: Boolean](
      implicit auth: AuthMap,
      admin: W.Aux[admin],
      select: Selector.Aux[In, userVar, String]
  ): Check[SpecialAuth[userVar, admin], In] =
    serveReadCheck[SpecialAuth[userVar, admin], userVar, String, In](userId =>
      authenticateOAuth2PF("kriwda", {
        case cred @ Credentials.Provided(_) if auth.get(userId, admin.value).exists(cred.verify) => userId
      }).tmap(_ => ()))
} 
Example 28
Source File: InMemorySink.scala    From scio   with Apache License 2.0
package com.spotify.scio.io

import com.spotify.scio.values.SCollection
import com.spotify.scio.coders.Coder

import scala.collection.concurrent.TrieMap

private[scio] object InMemorySink {
  private val cache: TrieMap[String, Iterable[Any]] = TrieMap.empty

  def save[T: Coder](id: String, data: SCollection[T]): Unit = {
    require(data.context.isTest, "In memory sink can only be used in tests")
    cache += id -> Nil
    data.transform {
      _.groupBy(_ => ()).values
        .map { values =>
          cache += (id -> values)
          ()
        }
    }

    ()
  }

  def get[T](id: String): Iterable[T] = cache(id).asInstanceOf[Iterable[T]]
} 
Example 29
Source File: OperationExecutionDispatcher.scala    From seahorse-workflow-executor   with Apache License 2.0
package io.deepsense.deeplang

import scala.collection.concurrent.TrieMap
import scala.concurrent.{Future, Promise}

import io.deepsense.commons.models.Id

class OperationExecutionDispatcher {

  import OperationExecutionDispatcher._

  private val operationEndPromises: TrieMap[OperationId, Promise[Result]] = TrieMap.empty

  def executionStarted(workflowId: Id, nodeId: Id): Future[Result] = {
    val promise: Promise[Result] = Promise()
    require(operationEndPromises.put((workflowId, nodeId), promise).isEmpty)
    promise.future
  }

  def executionEnded(workflowId: Id, nodeId: Id, result: Result): Unit = {
    val promise = operationEndPromises.remove((workflowId, nodeId))
    require(promise.isDefined)
    promise.get.success(result)
  }
}

object OperationExecutionDispatcher {
  type OperationId = (Id, Id)
  type Error = String
  type Result = Either[Error, Unit]
} 
Example 30
Source File: LogCollector.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.testing

import ch.qos.logback.classic.Level
import ch.qos.logback.classic.spi.ILoggingEvent
import ch.qos.logback.core.AppenderBase

import scala.beans.BeanProperty
import scala.collection.concurrent.TrieMap
import scala.collection.mutable
import scala.reflect.ClassTag

object LogCollector {

  private val log =
    TrieMap
      .empty[String, TrieMap[String, mutable.Builder[(Level, String), Vector[(Level, String)]]]]

  def read[Test, Logger](
      implicit test: ClassTag[Test],
      logger: ClassTag[Logger]): IndexedSeq[(Level, String)] =
    log
      .get(test.runtimeClass.getName)
      .flatMap(_.get(logger.runtimeClass.getName))
      .fold(IndexedSeq.empty[(Level, String)])(_.result())

  def clear[Test](implicit test: ClassTag[Test]): Unit = {
    log.remove(test.runtimeClass.getName)
    ()
  }

}

final class LogCollector extends AppenderBase[ILoggingEvent] {

  @BeanProperty
  var test: String = _

  override def append(e: ILoggingEvent): Unit = {
    if (test == null) {
      addError("Test identifier undefined, skipping logging")
    } else {
      val log = LogCollector.log
        .getOrElseUpdate(test, TrieMap.empty)
        .getOrElseUpdate(e.getLoggerName, Vector.newBuilder)
      val _ = log.synchronized { log += e.getLevel -> e.getMessage }
    }
  }
} 
Example 33
Source File: NestedSemaphore.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.common

import scala.collection.concurrent.TrieMap


  def releaseConcurrent(actionid: T, maxConcurrent: Int, memoryPermits: Int): Unit = {
    require(memoryPermits > 0, "cannot release negative or no permits")
    if (maxConcurrent == 1) {
      super.release(memoryPermits)
    } else {
      val concurrentSlots = actionConcurrentSlotsMap(actionid)
      val (memoryRelease, actionRelease) = concurrentSlots.release(1, true)
      //concurrent slots
      if (memoryRelease) {
        super.release(memoryPermits)
      }
      if (actionRelease) {
        actionConcurrentSlotsMap.remove(actionid)
      }
    }
  }
  //for testing
  def concurrentState = actionConcurrentSlotsMap.readOnlySnapshot()
} 
Example 34
Source File: ParticipantSessionManager.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testtool.infrastructure.participant

import io.grpc.netty.{NegotiationType, NettyChannelBuilder}
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.nio.NioSocketChannel
import io.netty.util.concurrent.DefaultThreadFactory
import org.slf4j.LoggerFactory

import scala.collection.concurrent.TrieMap
import scala.concurrent.{ExecutionContext, Future}

private[infrastructure] final class ParticipantSessionManager {

  private[this] val logger = LoggerFactory.getLogger(classOf[ParticipantSession])

  private[this] val channels = TrieMap.empty[ParticipantSessionConfiguration, ParticipantSession]

  @throws[RuntimeException]
  private def create(
      config: ParticipantSessionConfiguration,
  )(implicit ec: ExecutionContext): ParticipantSession = {
    logger.info(s"Connecting to participant at ${config.host}:${config.port}...")
    val threadFactoryPoolName = s"grpc-event-loop-${config.host}-${config.port}"
    val daemonThreads = false
    val threadFactory: DefaultThreadFactory =
      new DefaultThreadFactory(threadFactoryPoolName, daemonThreads)
    logger.info(
      s"gRPC thread factory instantiated with pool '$threadFactoryPoolName' (daemon threads: $daemonThreads)",
    )
    val threadCount = Runtime.getRuntime.availableProcessors
    val eventLoopGroup: NioEventLoopGroup =
      new NioEventLoopGroup(threadCount, threadFactory)
    logger.info(
      s"gRPC event loop thread group instantiated with $threadCount threads using pool '$threadFactoryPoolName'",
    )
    val managedChannelBuilder = NettyChannelBuilder
      .forAddress(config.host, config.port)
      .eventLoopGroup(eventLoopGroup)
      .channelType(classOf[NioSocketChannel])
      .directExecutor()
      .usePlaintext()
    for (ssl <- config.ssl; sslContext <- ssl.client) {
      logger.info("Setting up managed communication channel with transport security")
      managedChannelBuilder
        .useTransportSecurity()
        .sslContext(sslContext)
        .negotiationType(NegotiationType.TLS)
    }
    managedChannelBuilder.maxInboundMessageSize(10000000)
    val managedChannel = managedChannelBuilder.build()
    logger.info(s"Connection to participant at ${config.host}:${config.port}")
    new ParticipantSession(config, managedChannel, eventLoopGroup)
  }

  def getOrCreate(
      configuration: ParticipantSessionConfiguration,
  )(implicit ec: ExecutionContext): Future[ParticipantSession] =
    Future(channels.getOrElseUpdate(configuration, create(configuration)))

  def close(configuration: ParticipantSessionConfiguration): Unit =
    channels.get(configuration).foreach(_.close())

  def closeAll(): Unit =
    for ((_, session) <- channels) {
      session.close()
    }

} 
Example 35
Source File: ContextualizedLogger.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.logging

import akka.NotUsed
import akka.stream.scaladsl.Flow
import com.daml.grpc.GrpcException
import io.grpc.Status
import org.slf4j.{Logger, LoggerFactory}

import scala.collection.concurrent.TrieMap
import scala.util.{Failure, Try}
import scala.util.control.NonFatal

object ContextualizedLogger {

  // Caches loggers to prevent them from needlessly wasting memory
  // Replicates the behavior of the underlying Slf4j logger factory
  private[this] val cache = TrieMap.empty[String, ContextualizedLogger]

  // Allows to explicitly pass a logger, should be used for testing only
  private[logging] def createFor(withoutContext: Logger): ContextualizedLogger =
    new ContextualizedLogger(withoutContext)

  // Slf4j handles the caching of the underlying logger itself
  private[logging] def createFor(name: String): ContextualizedLogger =
    createFor(LoggerFactory.getLogger(name))

  
  def get(clazz: Class[_]): ContextualizedLogger = {
    val name = clazz.getName.stripSuffix("$")
    cache.getOrElseUpdate(name, createFor(name))
  }

}

final class ContextualizedLogger private (val withoutContext: Logger) {

  val trace = new LeveledLogger.Trace(withoutContext)
  val debug = new LeveledLogger.Debug(withoutContext)
  val info = new LeveledLogger.Info(withoutContext)
  val warn = new LeveledLogger.Warn(withoutContext)
  val error = new LeveledLogger.Error(withoutContext)

  private def internalOrUnknown(code: Status.Code): Boolean =
    code == Status.Code.INTERNAL || code == Status.Code.UNKNOWN

  private def logError(t: Throwable)(implicit logCtx: LoggingContext): Unit =
    error("Unhandled internal error", t)

  def logErrorsOnCall[Out](implicit logCtx: LoggingContext): PartialFunction[Try[Out], Unit] = {
    case Failure(e @ GrpcException(s, _)) =>
      if (internalOrUnknown(s.getCode)) {
        logError(e)
      }
    case Failure(NonFatal(e)) =>
      logError(e)
  }

  def logErrorsOnStream[Out](implicit logCtx: LoggingContext): Flow[Out, Out, NotUsed] =
    Flow[Out].mapError {
      case e @ GrpcException(s, _) =>
        if (internalOrUnknown(s.getCode)) {
          logError(e)
        }
        e
      case NonFatal(e) =>
        logError(e)
        e
    }

} 
Example 36
Source File: ThreadPoolMetrics.scala    From prometheus-akka   with Apache License 2.0 5 votes vote down vote up
package com.workday.prometheus.akka

import java.util.Collections
import java.util.concurrent.ThreadPoolExecutor

import scala.collection.JavaConverters.seqAsJavaListConverter
import scala.collection.concurrent.TrieMap

import io.prometheus.client.Collector
import io.prometheus.client.Collector.MetricFamilySamples
import io.prometheus.client.GaugeMetricFamily

object ThreadPoolMetrics extends Collector {
  val map = TrieMap[String, Option[ThreadPoolExecutor]]()
  this.register()
  override def collect(): java.util.List[MetricFamilySamples] = {
    val dispatcherNameList = List("dispatcherName").asJava
    val activeThreadCountGauge = new GaugeMetricFamily("akka_dispatcher_threadpoolexecutor_active_thread_count",
      "Akka ThreadPool Dispatcher Active Thread Count", dispatcherNameList)
    val corePoolSizeGauge = new GaugeMetricFamily("akka_dispatcher_threadpoolexecutor_core_pool_size",
      "Akka ThreadPool Dispatcher Core Pool Size", dispatcherNameList)
    val currentPoolSizeGauge = new GaugeMetricFamily("akka_dispatcher_threadpoolexecutor_current_pool_size",
      "Akka ThreadPool Dispatcher Current Pool Size", dispatcherNameList)
    val largestPoolSizeGauge = new GaugeMetricFamily("akka_dispatcher_threadpoolexecutor_largest_pool_size",
      "Akka ThreadPool Dispatcher Largest Pool Size", dispatcherNameList)
    val maxPoolSizeGauge = new GaugeMetricFamily("akka_dispatcher_threadpoolexecutor_max_pool_size",
      "Akka ThreadPool Dispatcher Max Pool Size", dispatcherNameList)
    val completedTaskCountGauge = new GaugeMetricFamily("akka_dispatcher_threadpoolexecutor_completed_task_count",
      "Akka ThreadPoolExecutor Dispatcher Completed Task Count", dispatcherNameList)
    val totalTaskCountGauge = new GaugeMetricFamily("akka_dispatcher_threadpoolexecutor_total_task_count",
      "Akka ThreadPoolExecutor Dispatcher Total Task Count", dispatcherNameList)
    map.foreach { case (dispatcherName, tpeOption) =>
      val dispatcherNameList = List(dispatcherName).asJava
      tpeOption match {
        case Some(tpe) => {
          activeThreadCountGauge.addMetric(dispatcherNameList, tpe.getActiveCount)
          corePoolSizeGauge.addMetric(dispatcherNameList, tpe.getCorePoolSize)
          currentPoolSizeGauge.addMetric(dispatcherNameList, tpe.getPoolSize)
          largestPoolSizeGauge.addMetric(dispatcherNameList, tpe.getLargestPoolSize)
          maxPoolSizeGauge.addMetric(dispatcherNameList, tpe.getMaximumPoolSize)
          completedTaskCountGauge.addMetric(dispatcherNameList, tpe.getCompletedTaskCount)
          totalTaskCountGauge.addMetric(dispatcherNameList, tpe.getTaskCount)
        }
        case None => {
          activeThreadCountGauge.addMetric(dispatcherNameList, 0)
          corePoolSizeGauge.addMetric(dispatcherNameList, 0)
          currentPoolSizeGauge.addMetric(dispatcherNameList, 0)
          largestPoolSizeGauge.addMetric(dispatcherNameList, 0)
          maxPoolSizeGauge.addMetric(dispatcherNameList, 0)
          completedTaskCountGauge.addMetric(dispatcherNameList, 0)
          totalTaskCountGauge.addMetric(dispatcherNameList, 0)
        }
      }
    }
    val jul = new java.util.ArrayList[MetricFamilySamples]
    jul.add(activeThreadCountGauge)
    jul.add(corePoolSizeGauge)
    jul.add(currentPoolSizeGauge)
    jul.add(largestPoolSizeGauge)
    jul.add(maxPoolSizeGauge)
    jul.add(completedTaskCountGauge)
    jul.add(totalTaskCountGauge)
    Collections.unmodifiableList(jul)
  }

  def add(dispatcherName: String, tpe: ThreadPoolExecutor): Unit = {
    map.put(dispatcherName, Some(tpe))
  }

  def remove(dispatcherName: String): Unit = {
    map.put(dispatcherName, None)
  }
} 
Example 37
Source File: ForkJoinPoolMetrics.scala    From prometheus-akka   with Apache License 2.0 5 votes vote down vote up
package com.workday.prometheus.akka

import java.util.Collections

import scala.collection.JavaConverters.seqAsJavaListConverter
import scala.collection.concurrent.TrieMap

import io.prometheus.client.Collector
import io.prometheus.client.Collector.MetricFamilySamples
import io.prometheus.client.GaugeMetricFamily

object ForkJoinPoolMetrics extends Collector {
  val map = TrieMap[String, Option[ForkJoinPoolLike]]()
  this.register()
  override def collect(): java.util.List[MetricFamilySamples] = {
    val dispatcherNameList = List("dispatcherName").asJava
    val parallelismGauge = new GaugeMetricFamily("akka_dispatcher_forkjoinpool_parellelism",
      "Akka ForkJoinPool Dispatcher Parellelism", dispatcherNameList)
    val poolSizeGauge = new GaugeMetricFamily("akka_dispatcher_forkjoinpool_pool_size",
      "Akka ForkJoinPool Dispatcher Pool Size", dispatcherNameList)
    val activeThreadCountGauge = new GaugeMetricFamily("akka_dispatcher_forkjoinpool_active_thread_count",
      "Akka ForkJoinPool Dispatcher Active Thread Count", dispatcherNameList)
    val runningThreadCountGauge = new GaugeMetricFamily("akka_dispatcher_forkjoinpool_running_thread_count",
      "Akka ForkJoinPool Dispatcher Running Thread Count", dispatcherNameList)
    val queuedTaskCountGauge = new GaugeMetricFamily("akka_dispatcher_forkjoinpool_queued_task_count",
      "Akka ForkJoinPool Dispatcher Queued Task Count", dispatcherNameList)
    val queuedSubmissionCountGauge = new GaugeMetricFamily("akka_dispatcher_forkjoinpool_queued_submission_count",
      "Akka ForkJoinPool Dispatcher Queued Submission Count", dispatcherNameList)
    val stealCountGauge = new GaugeMetricFamily("akka_dispatcher_forkjoinpool_steal_count",
      "Akka ForkJoinPool Dispatcher Steal Count", dispatcherNameList)
    map.foreach { case (dispatcherName, fjpOption) =>
      val dispatcherNameList = List(dispatcherName).asJava
      fjpOption match {
        case Some(fjp) => {
          parallelismGauge.addMetric(dispatcherNameList, fjp.getParallelism)
          poolSizeGauge.addMetric(dispatcherNameList, fjp.getPoolSize)
          activeThreadCountGauge.addMetric(dispatcherNameList, fjp.getActiveThreadCount)
          runningThreadCountGauge.addMetric(dispatcherNameList, fjp.getRunningThreadCount)
          queuedSubmissionCountGauge.addMetric(dispatcherNameList, fjp.getQueuedSubmissionCount)
          queuedTaskCountGauge.addMetric(dispatcherNameList, fjp.getQueuedTaskCount)
          stealCountGauge.addMetric(dispatcherNameList, fjp.getStealCount)
        }
        case None => {
          parallelismGauge.addMetric(dispatcherNameList, 0)
          poolSizeGauge.addMetric(dispatcherNameList, 0)
          activeThreadCountGauge.addMetric(dispatcherNameList, 0)
          runningThreadCountGauge.addMetric(dispatcherNameList, 0)
          queuedSubmissionCountGauge.addMetric(dispatcherNameList, 0)
          queuedTaskCountGauge.addMetric(dispatcherNameList, 0)
          stealCountGauge.addMetric(dispatcherNameList, 0)
        }
      }

    }
    val jul = new java.util.ArrayList[MetricFamilySamples]
    jul.add(parallelismGauge)
    jul.add(poolSizeGauge)
    jul.add(activeThreadCountGauge)
    jul.add(runningThreadCountGauge)
    jul.add(queuedSubmissionCountGauge)
    jul.add(queuedTaskCountGauge)
    jul.add(stealCountGauge)
    Collections.unmodifiableList(jul)
  }

  def add(dispatcherName: String, fjp: ForkJoinPoolLike): Unit = {
    map.put(dispatcherName, Some(fjp))
  }

  def remove(dispatcherName: String): Unit = {
    map.put(dispatcherName, None)
  }
} 
Example 38
Source File: RouterMetrics.scala    From prometheus-akka   with Apache License 2.0 5 votes vote down vote up
package com.workday.prometheus.akka

import scala.collection.concurrent.TrieMap
import scala.util.control.NonFatal

import org.slf4j.LoggerFactory

import io.prometheus.client.{Counter, Gauge}

object RouterMetrics {
  private val logger = LoggerFactory.getLogger(RouterMetrics.getClass)
  private val map = TrieMap[Entity, RouterMetrics]()
  def metricsFor(e: Entity): Option[RouterMetrics] = {
    try {
      Some(map.getOrElseUpdate(e, new RouterMetrics(e)))
    } catch {
      case NonFatal(t) => {
        logger.warn("Issue with getOrElseUpdate (failing over to simple get)", t)
        map.get(e)
      }
    }
  }
  def hasMetricsFor(e: Entity): Boolean = map.contains(e)
}

class RouterMetrics(entity: Entity) {
  val actorName = metricFriendlyActorName(entity.name)
  val routingTime = Gauge.build().name(s"akka_router_routing_time_$actorName").help("Akka Router routing time (Seconds)").register()
  val processingTime = Gauge.build().name(s"akka_router_processing_time_$actorName").help("Akka Router processing time (Seconds)").register()
  val timeInMailbox = Gauge.build().name(s"akka_router_time_in_mailbox_$actorName").help("Akka Router time in mailbox (Seconds)").register()
  val messages = Counter.build().name(s"akka_router_message_count_$actorName").help("Akka Router messages").register()
  val errors = Counter.build().name(s"akka_router_error_count_$actorName").help("Akka Router errors").register()
} 
Example 39
Source File: ActorMetrics.scala    From prometheus-akka   with Apache License 2.0 5 votes vote down vote up
package com.workday.prometheus.akka

import scala.collection.concurrent.TrieMap
import scala.util.control.NonFatal

import org.slf4j.LoggerFactory

import io.prometheus.client.{Counter, Gauge}

object ActorMetrics {
  private val logger = LoggerFactory.getLogger(ActorMetrics.getClass)
  private val map = TrieMap[Entity, ActorMetrics]()
  def metricsFor(e: Entity): Option[ActorMetrics] = {
    try {
      Some(map.getOrElseUpdate(e, new ActorMetrics(e)))
    } catch {
      case NonFatal(t) => {
        logger.warn("Issue with getOrElseUpdate (failing over to simple get)", t)
        map.get(e)
      }
    }
  }
  def hasMetricsFor(e: Entity): Boolean = map.contains(e)
}

class ActorMetrics(entity: Entity) {
  val actorName = metricFriendlyActorName(entity.name)
  val mailboxSize = Gauge.build().name(s"akka_actor_mailbox_size_$actorName").help("Akka Actor mailbox size").register()
  val processingTime = Gauge.build().name(s"akka_actor_processing_time_$actorName").help("Akka Actor processing time (Seconds)").register()
  val timeInMailbox = Gauge.build().name(s"akka_actor_time_in_mailbox_$actorName").help("Akka Actor time in mailbox (Seconds)").register()
  val messages = Counter.build().name(s"akka_actor_message_count_$actorName").help("Akka Actor messages").register()
  val errors = Counter.build().name(s"akka_actor_error_count_$actorName").help("Akka Actor errors").register()
} 
Example 40
Source File: JWTAuthenticatorRepository.scala    From crm-seed   with Apache License 2.0 5 votes vote down vote up
package com.dataengi.crm.identities.repositories

import com.dataengi.crm.identities.daos.JWTAuthenticatorDAO
import com.dataengi.crm.identities.models.JWTAuthenticatorData
import com.google.common.cache.CacheBuilder
import com.google.inject.{Inject, Singleton}
import com.mohiva.play.silhouette.api.crypto.AuthenticatorEncoder
import com.mohiva.play.silhouette.api.repositories.AuthenticatorRepository
import com.mohiva.play.silhouette.impl.authenticators.{JWTAuthenticator, JWTAuthenticatorSettings}

import scala.collection.concurrent.TrieMap
import scala.concurrent.{ExecutionContext, Future}
import scalacache.guava.GuavaCache

trait JWTAuthenticatorRepository extends AuthenticatorRepository[JWTAuthenticator]

@Singleton
class JWTAuthenticatorRepositoryInMemoryImplementation @Inject()(implicit val executionContext: ExecutionContext)
    extends JWTAuthenticatorRepository {

  protected val repository = TrieMap[String, JWTAuthenticator]()

  override def find(id: String): Future[Option[JWTAuthenticator]] = Future {
    repository.get(id)
  }

  override def add(authenticator: JWTAuthenticator): Future[JWTAuthenticator] = Future {
    repository.put(authenticator.id, authenticator)
    authenticator
  }

  override def update(authenticator: JWTAuthenticator): Future[JWTAuthenticator] = Future {
    repository.update(authenticator.id, authenticator)
    authenticator
  }

  override def remove(id: String): Future[Unit] = Future {
    repository.remove(id)
  }
}

@Singleton
class JWTAuthenticatorSerializableRepositoryImplementation @Inject()(implicit val executionContext: ExecutionContext,
                                                                     authenticatorDAO: JWTAuthenticatorDAO,
                                                                     authenticatorEncoder: AuthenticatorEncoder,
                                                                     conf: JWTAuthenticatorSettings)
    extends JWTAuthenticatorRepository {

  override def find(id: String): Future[Option[JWTAuthenticator]] =
    authenticatorDAO
      .getOption(id)
      .map(_.map(data => JWTAuthenticator.unserialize(data.authenticator, authenticatorEncoder, conf).get))

  override def add(authenticator: JWTAuthenticator): Future[JWTAuthenticator] =
    for {
      data      <- serializeData(authenticator)
      addResult <- authenticatorDAO.add(data)
    } yield authenticator

  private def serializeData(authenticator: JWTAuthenticator): Future[JWTAuthenticatorData] = {
    Future {
      val serializedData = JWTAuthenticator.serialize(authenticator, authenticatorEncoder, conf)
      JWTAuthenticatorData(serializedData, authenticator.id)
    }
  }

  override def update(authenticator: JWTAuthenticator): Future[JWTAuthenticator] =
    for {
      updatedAuthenticator <- authenticatorDAO.get(authenticator.id)
      data                 <- serializeData(authenticator)
      updateResult         <- authenticatorDAO.update(data.copy(id = updatedAuthenticator.id))
    } yield authenticator

  override def remove(id: String): Future[Unit] = authenticatorDAO.delete(id)

}

@Singleton
class JWTAuthenticatorCacheRepositoryImplementation @Inject()(implicit val executionContext: ExecutionContext)
    extends JWTAuthenticatorRepository {

  import scalacache._

  val underlyingGuavaCache = CacheBuilder.newBuilder().maximumSize(10000L).build[String, Object]
  implicit val scalaCache  = ScalaCache(GuavaCache(underlyingGuavaCache))
  val cache                = typed[JWTAuthenticator, NoSerialization]

  override def find(id: String): Future[Option[JWTAuthenticator]] = cache.get(id)

  override def add(authenticator: JWTAuthenticator): Future[JWTAuthenticator] =
    cache.put(authenticator.id)(authenticator).map(_ => authenticator)

  override def update(authenticator: JWTAuthenticator): Future[JWTAuthenticator] =
    cache.put(authenticator.id)(authenticator).map(_ => authenticator)

  override def remove(id: String): Future[Unit] = cache.remove(id)

} 
Example 41
Source File: BaseInMemoryRepository.scala    From crm-seed   with Apache License 2.0 5 votes vote down vote up
package com.dataengi.crm.common.repositories

import java.util.concurrent.atomic.AtomicInteger

import scala.collection.concurrent.TrieMap
import scala.concurrent.Future
import com.dataengi.crm.common.context.types._
import com.dataengi.crm.common.errors.ValueNotFound

abstract class BaseInMemoryRepository[T] extends AutoIncRepository[T] with KeyExtractor[T, Long] {

  protected val repository = TrieMap[Key, T]()

  private val atomicInteger = new AtomicInteger()

  override protected def getKey(value: T): Key = atomicInteger.getAndIncrement().toLong

  protected def beforeSave(key: Key, value: T): T = value

  override def getAll(): Or[List[T]] =
    Future {
      repository.values.toList
    }.toOr

  override def remove(id: Key): Or[Unit] =
    Future {
      repository.remove(id)
    }.toEmptyOr

  override def add(value: T): Or[Key] =
    Future {
      val key = getKey(value)
      repository.put(key, beforeSave(key, value))
      key
    }.toOr

  override def add(values: List[T]): Or[List[Key]] =
    values.traverseC(add)

  override def get(id: Key): Or[T] =
    repository.get(id) match {
      case Some(value) => value.toOr
      case None        => ValueNotFound(id).toErrorOr
    }

  override def update(id: Key, value: T): Or[Unit] =
    Future {
      repository.update(id, value)
    }.toOr

  override def getOption(id: Key): Or[Option[T]] =
    Future {
      repository.get(id)
    }.toOr

} 
Example 42
Source File: RateCache.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.caches

import java.util.concurrent.ConcurrentHashMap

import com.wavesplatform.dex.db.RateDB
import com.wavesplatform.dex.domain.asset.Asset
import com.wavesplatform.dex.domain.asset.Asset.{IssuedAsset, Waves}
import org.iq80.leveldb.DB

import scala.collection.JavaConverters._
import scala.collection.concurrent.TrieMap

trait RateCache {

  
  def deleteRate(asset: Asset): Option[Double]

}

object RateCache {

  private val WavesRate = Option(1d)

  def apply(db: DB): RateCache = new RateCache {

    private val rateDB  = RateDB(db)
    private val rateMap = new ConcurrentHashMap[IssuedAsset, Double](rateDB.getAllRates.asJava)

    def upsertRate(asset: Asset, value: Double): Option[Double] =
      asset.fold { WavesRate } { issuedAsset =>
        rateDB.upsertRate(issuedAsset, value)
        Option(rateMap.put(issuedAsset, value))
      }

    def getRate(asset: Asset): Option[Double] = asset.fold(WavesRate)(asset => Option(rateMap get asset))

    def getAllRates: Map[Asset, Double] = {
      rateMap.asScala.toMap.map { case (issuedAsset, value) => Asset.fromCompatId(issuedAsset.compatId) -> value } + (Waves -> 1d)
    }

    def deleteRate(asset: Asset): Option[Double] = asset.fold(WavesRate) { issuedAsset =>
      rateDB.deleteRate(issuedAsset)
      Option(rateMap.remove(issuedAsset))
    }
  }

  def inMem: RateCache = new RateCache {

    private val rates: TrieMap[Asset, Double] = TrieMap(Waves -> 1d)

    def upsertRate(asset: Asset, value: Double): Option[Double] = {
      asset.fold { WavesRate } { issuedAsset =>
        val previousValue = rates.get(issuedAsset)
        rates += (asset -> value)
        previousValue
      }
    }

    def getRate(asset: Asset): Option[Double] = rates.get(asset)
    def getAllRates: Map[Asset, Double]       = rates.toMap

    def deleteRate(asset: Asset): Option[Double] =
      asset.fold { Option(1d) } { issuedAsset =>
        val previousValue = rates.get(issuedAsset)
        rates -= issuedAsset
        previousValue
      }
  }
} 
Example 43
Source File: Publisher.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.datastream

import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}
import java.util.concurrent.{ExecutorService, LinkedBlockingQueue, TimeUnit}

import com.sksamuel.exts.Logging
import com.sksamuel.exts.collection.BlockingQueueConcurrentIterator
import com.sksamuel.exts.concurrent.ExecutorImplicits._

import scala.collection.concurrent.TrieMap

trait Publisher[T] {
  def subscribe(subscriber: Subscriber[T])
}

object Publisher extends Logging {

  
  def merge[T](publishers: Seq[Publisher[T]], sentinel: T)(implicit executor: ExecutorService): Publisher[T] = {

    new Publisher[T] {
      override def subscribe(s: Subscriber[T]): Unit = {

        // subscribers to the returned publisher will be fed from an intermediate queue
        val queue = new LinkedBlockingQueue[Either[Throwable, T]](DataStream.DefaultBufferSize)

        // to keep track of how many subscribers are yet to finish; only once all upstream
        // publishers have finished will this subscriber be completed.
        val outstanding = new AtomicInteger(publishers.size)

        // we make a collection of all the subscriptions, so if there's an error at any point in the
        // merge, we can cancel all upstream producers
        val subscriptions = TrieMap.empty[Subscription, Int]

        // this cancellable can be used to cancel all the subscriptions
        val subscription = new Subscription {
          override def cancel(): Unit = subscriptions.keys.foreach(_.cancel)
        }

        // status flag that an error occured and the subscriptions should watch for it
        val errorRef = new AtomicReference[Throwable](null)
        def terminate(t: Throwable): Unit = {
          logger.error(s"Error in merge", t)
          errorRef.set(t)
          subscription.cancel()
          queue.clear()
          queue.put(Right(sentinel))
        }

        // each subscriber will occupy its own thread, on the provided executor
        publishers.foreach { publisher =>
          executor.submit {
            try {
              publisher.subscribe(new Subscriber[T] {
                override def subscribed(sub: Subscription): Unit = if (sub != null) subscriptions.put(sub, 1)                
                override def next(t: T): Unit = {
                  var success = true
                  do {
                    success = queue.offer(Right(t), 100, TimeUnit.MILLISECONDS)
                  } while(!success && errorRef.get == null)
                }
                override def error(t: Throwable): Unit = terminate(t)
                override def completed(): Unit = {
                  if (outstanding.decrementAndGet() == 0) {
                    logger.debug("All subscribers have finished; marking queue with sentinel")
                    queue.put(Right(sentinel))
                  }
                }
              })
            } catch {
              case t: Throwable => terminate(t)
            }
          }
        }

        try {
          s.subscribed(subscription)
          BlockingQueueConcurrentIterator(queue, Right(sentinel)).takeWhile(_ => errorRef.get == null).foreach {
            case Left(t) => s.error(t)
            case Right(t) => s.next(t)
          }
          // once we've had an error that's it, we don't complete the subscriber
          if (errorRef.get == null)
            s.completed()
          else 
            s.error(errorRef.get)
        } catch {
          case t: Throwable =>
            logger.error("Error in merge subscriber", t)
            subscription.cancel()
            s.error(t)
        }

        logger.debug("Merge subscriber has completed")
      }
    }
  }
} 
Example 44
Source File: SidechainTransactionActor.scala    From Sidechains-SDK   with MIT License 5 votes vote down vote up
package com.horizen.api.http

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import com.horizen.SidechainTypes
import com.horizen.api.http.SidechainTransactionActor.ReceivableMessages.BroadcastTransaction
import scorex.core.NodeViewHolder.ReceivableMessages.LocallyGeneratedTransaction
import scorex.core.network.NodeViewSynchronizer.ReceivableMessages.{FailedTransaction, SuccessfulTransaction}
import scorex.util.{ModifierId, ScorexLogging}

import scala.collection.concurrent.TrieMap
import scala.concurrent.{ExecutionContext, Promise}

class SidechainTransactionActor[T <: SidechainTypes#SCBT](sidechainNodeViewHolderRef: ActorRef)(implicit ec: ExecutionContext)
  extends Actor with ScorexLogging {

  private var transactionMap : TrieMap[String, Promise[ModifierId]] = TrieMap()

  override def preStart(): Unit = {
    context.system.eventStream.subscribe(self, classOf[SuccessfulTransaction[T]])
    context.system.eventStream.subscribe(self, classOf[FailedTransaction])
  }

  protected def broadcastTransaction: Receive = {
    case BroadcastTransaction(transaction) =>
      val promise = Promise[ModifierId]
      val future = promise.future
      transactionMap(transaction.id) = promise
      sender() ! future

      sidechainNodeViewHolderRef ! LocallyGeneratedTransaction[SidechainTypes#SCBT](transaction)
  }

  protected def sidechainNodeViewHolderEvents: Receive = {
    case SuccessfulTransaction(transaction) =>
      transactionMap.remove(transaction.id) match {
        case Some(promise) => promise.success(transaction.id)
        case None =>
      }
    case FailedTransaction(transactionId, throwable, _) =>
      transactionMap.remove(transactionId) match {
        case Some(promise) => promise.failure(throwable)
        case None =>
      }
  }

  override def receive: Receive = {
    broadcastTransaction orElse
    sidechainNodeViewHolderEvents orElse {
      case message: Any => log.error("SidechainTransactionActor received strange message: " + message)
    }
  }
}

object SidechainTransactionActor {

  object ReceivableMessages {

    case class BroadcastTransaction[T <: SidechainTypes#SCBT](transaction: T)

  }

}

object SidechainTransactionActorRef {
  def props(sidechainNodeViewHolderRef: ActorRef)
           (implicit ec: ExecutionContext): Props =
    Props(new SidechainTransactionActor(sidechainNodeViewHolderRef))

  def apply(sidechainNodeViewHolderRef: ActorRef)
           (implicit system: ActorSystem, ec: ExecutionContext): ActorRef =
    system.actorOf(props(sidechainNodeViewHolderRef))
} 
Example 45
Source File: MemoryUserRequestQueue.scala    From rokku   with Apache License 2.0 5 votes vote down vote up
package com.ing.wbaa.rokku.proxy.queue

import java.util.concurrent.atomic.AtomicLong

import com.ing.wbaa.rokku.proxy.data.{ RequestId, User }
import com.ing.wbaa.rokku.proxy.handler.LoggerHandlerWithId
import com.ing.wbaa.rokku.proxy.metrics.MetricsFactory
import com.typesafe.config.ConfigFactory

import scala.collection.concurrent.TrieMap


  private def isAllowedToAddToRequestQueue(user: User) = {
    synchronized {
      queuePerUser.putIfAbsent(user.userName.value, new AtomicLong(0))
      val userRequests = queuePerUser(user.userName.value)
      val userOccupiedQueue = (100 * userRequests.get()) / maxQueueSize
      val maxQueueBeforeBlockInPercentPerUser = maxQueueBeforeBlockInPercent / queuePerUser.size
      val isOverflown = userOccupiedQueue >= maxQueueBeforeBlockInPercentPerUser
      currentQueueSize.get() < maxQueueSize && !isOverflown
    }
  }

  private def logDebug(user: User, queueRequestCount: Long, userRequestCount: Long, method: String)(implicit id: RequestId): Unit = {
    logger.debug("request queue = {}", queueRequestCount)
    logger.debug("user request queue = {}", method, user.userName.value, userRequestCount)
    logger.debug("active users size = {}", queuePerUser.size)
  }

  private def metricName(user: User): String = {
    MetricsFactory.REQUEST_QUEUE_OCCUPIED_BY_USER.replace(MetricsFactory.REQUEST_USER, s"${user.userName.value.head.toString}..")
  }
} 
Example 46
Source File: MahaRequestContext.scala    From maha   with Apache License 2.0 5 votes vote down vote up
package com.yahoo.maha.service

import com.yahoo.maha.core.bucketing.BucketParams
import com.yahoo.maha.core.request.ReportingRequest
import org.apache.commons.codec.digest.DigestUtils

import scala.collection.concurrent.TrieMap


case class MahaRequestContext(registryName: String
                              , bucketParams: BucketParams
                              , reportingRequest: ReportingRequest
                              , rawJson: Array[Byte]
                              , context: Map[String, Any]
                              , requestId: String
                              , userId: String
                              , requestStartTime: Long = System.currentTimeMillis()
                             ) {
  lazy val mutableState = new TrieMap[String, Any]()
  lazy val requestHashOption: Option[String] = if(rawJson!=null) {
      Some(DigestUtils.md5Hex(rawJson))
    } else {
      None
    }
} 
Example 47
Source File: Http4s.scala    From kamon-http4s   with Apache License 2.0 5 votes vote down vote up
package kamon.http4s

import com.typesafe.config.Config
import kamon.util.DynamicAccess
import kamon.Kamon
import kamon.instrumentation.http.{HttpMessage, HttpOperationNameGenerator}

object Http4s {
  @volatile var nameGenerator: HttpOperationNameGenerator = nameGeneratorFromConfig(Kamon.config())

  private def nameGeneratorFromConfig(config: Config): HttpOperationNameGenerator = {
    val dynamic = new DynamicAccess(getClass.getClassLoader)
    val nameGeneratorFQCN = config.getString("kamon.instrumentation.http4s.client.tracing.operations.name-generator")
    dynamic.createInstanceFor[HttpOperationNameGenerator](nameGeneratorFQCN, Nil)
  }

  Kamon.onReconfigure { newConfig =>
    nameGenerator = nameGeneratorFromConfig(newConfig)
  }
}


class DefaultNameGenerator extends HttpOperationNameGenerator {

  import java.util.Locale

  import scala.collection.concurrent.TrieMap

  private val localCache = TrieMap.empty[String, String]
  private val normalizePattern = """\$([^<]+)<[^>]+>""".r


  override def name(request: HttpMessage.Request): Option[String] = {
    Some(
      localCache.getOrElseUpdate(s"${request.method}${request.path}", {
        // Convert paths of form GET /foo/bar/$paramname<regexp>/blah to foo.bar.paramname.blah.get
        val p = normalizePattern.replaceAllIn(request.path, "$1").replace('/', '.').dropWhile(_ == '.')
        val normalisedPath = {
          if (p.lastOption.exists(_ != '.')) s"$p."
          else p
        }
        s"$normalisedPath${request.method.toLowerCase(Locale.ENGLISH)}"
      })
    )
  }
} 
Example 48
Source File: LeanMessagingProvider.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.connector.lean

import java.util.concurrent.BlockingQueue
import java.util.concurrent.LinkedBlockingQueue

import scala.collection.mutable.Map
import scala.collection.concurrent.TrieMap
import scala.concurrent.duration.FiniteDuration
import scala.util.Success
import scala.util.Try

import akka.actor.ActorSystem
import org.apache.openwhisk.common.Logging
import org.apache.openwhisk.core.WhiskConfig
import org.apache.openwhisk.core.connector.MessageConsumer
import org.apache.openwhisk.core.connector.MessageProducer
import org.apache.openwhisk.core.connector.MessagingProvider
import org.apache.openwhisk.core.entity.ByteSize


  val queues: Map[String, BlockingQueue[Array[Byte]]] =
    new TrieMap[String, BlockingQueue[Array[Byte]]]

  def getConsumer(config: WhiskConfig, groupId: String, topic: String, maxPeek: Int, maxPollInterval: FiniteDuration)(
    implicit logging: Logging,
    actorSystem: ActorSystem): MessageConsumer = {

    val queue = queues.getOrElseUpdate(topic, new LinkedBlockingQueue[Array[Byte]]())

    new LeanConsumer(queue, maxPeek)
  }

  def getProducer(config: WhiskConfig, maxRequestSize: Option[ByteSize] = None)(
    implicit logging: Logging,
    actorSystem: ActorSystem): MessageProducer =
    new LeanProducer(queues)

  def ensureTopic(config: WhiskConfig, topic: String, topicConfigKey: String, maxMessageBytes: Option[ByteSize] = None)(
    implicit logging: Logging): Try[Unit] = {
    if (queues.contains(topic)) {
      Success(logging.info(this, s"topic $topic already existed"))
    } else {
      queues.put(topic, new LinkedBlockingQueue[Array[Byte]]())
      Success(logging.info(this, s"topic $topic created"))
    }
  }
} 
Example 49
Source File: HeartbeatClient.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.kernel.protocol.v5.client.socket

import akka.actor.{ActorRef, Actor}
import akka.util.{ByteString, Timeout}
import org.apache.toree.communication.ZMQMessage
import akka.pattern.ask
import org.apache.toree.kernel.protocol.v5.client.ActorLoader
import org.apache.toree.utils.LogLike
import org.apache.toree.kernel.protocol.v5.UUID
import scala.collection.concurrent.{Map, TrieMap}
import scala.concurrent.duration._

object HeartbeatMessage {}

class HeartbeatClient(
  socketFactory : SocketFactory,
  actorLoader: ActorLoader,
  signatureEnabled: Boolean
) extends Actor with LogLike {
  logger.debug("Created new Heartbeat Client actor")
  implicit val timeout = Timeout(1.minute)

  val futureMap: Map[UUID, ActorRef] = TrieMap[UUID, ActorRef]()
  val socket = socketFactory.HeartbeatClient(context.system, self)

  override def receive: Receive = {
    // from Heartbeat
    case message: ZMQMessage =>
      val id = message.frames.map((byteString: ByteString) =>
        new String(byteString.toArray)).mkString("\n")
      logger.info(s"Heartbeat client receive:$id")
      futureMap(id) ! true
      futureMap.remove(id)

    // from SparkKernelClient
    case HeartbeatMessage =>
      import scala.concurrent.ExecutionContext.Implicits.global
      val id = java.util.UUID.randomUUID().toString
      futureMap += (id -> sender)
      logger.info(s"Heartbeat client send: $id")
      val future = socket ? ZMQMessage(ByteString(id.getBytes))
      future.onComplete {
        // future always times out because server "tells" response {
        case(_) => futureMap.remove(id)
      }
  }
} 
Example 50
Source File: MemoryAttachmentStore.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.core.database.memory

import akka.actor.ActorSystem
import akka.http.scaladsl.model.ContentType
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.util.{ByteString, ByteStringBuilder}
import org.apache.openwhisk.common.LoggingMarkers.{
  DATABASE_ATTS_DELETE,
  DATABASE_ATT_DELETE,
  DATABASE_ATT_GET,
  DATABASE_ATT_SAVE
}
import org.apache.openwhisk.common.{Logging, TransactionId}
import org.apache.openwhisk.core.database.StoreUtils._
import org.apache.openwhisk.core.database._
import org.apache.openwhisk.core.entity.DocId

import scala.collection.concurrent.TrieMap
import scala.concurrent.{ExecutionContext, Future}
import scala.reflect.ClassTag

object MemoryAttachmentStoreProvider extends AttachmentStoreProvider {
  override def makeStore[D <: DocumentSerializer: ClassTag]()(implicit actorSystem: ActorSystem,
                                                              logging: Logging,
                                                              materializer: ActorMaterializer): AttachmentStore =
    new MemoryAttachmentStore(implicitly[ClassTag[D]].runtimeClass.getSimpleName.toLowerCase)
}


  override protected[core] def readAttachment[T](docId: DocId, name: String, sink: Sink[ByteString, Future[T]])(
    implicit transid: TransactionId): Future[T] = {

    val start =
      transid.started(
        this,
        DATABASE_ATT_GET,
        s"[ATT_GET] '$dbName' finding attachment '$name' of document 'id: $docId'")

    val f = attachments.get(attachmentKey(docId, name)) match {
      case Some(Attachment(bytes)) =>
        val r = Source.single(bytes).toMat(sink)(Keep.right).run
        r.map(t => {
          transid.finished(this, start, s"[ATT_GET] '$dbName' completed: found attachment '$name' of document '$docId'")
          t
        })
      case None =>
        transid.finished(
          this,
          start,
          s"[ATT_GET] '$dbName', retrieving attachment '$name' of document '$docId'; not found.")
        Future.failed(NoDocumentException("Not found on 'readAttachment'."))
    }
    reportFailure(
      f,
      start,
      failure => s"[ATT_GET] '$dbName' internal error, name: '$name', doc: '$docId', failure: '${failure.getMessage}'")
  }

  override protected[core] def deleteAttachments(docId: DocId)(implicit transid: TransactionId): Future[Boolean] = {
    val start = transid.started(this, DATABASE_ATTS_DELETE, s"[ATTS_DELETE] uploading attachment of document '$docId'")

    val prefix = docId + "/"
    attachments --= attachments.keySet.filter(_.startsWith(prefix))
    transid.finished(this, start, s"[ATTS_DELETE] completed: delete attachment of document '$docId'")
    Future.successful(true)
  }

  override protected[core] def deleteAttachment(docId: DocId, name: String)(
    implicit transid: TransactionId): Future[Boolean] = {
    val start = transid.started(this, DATABASE_ATT_DELETE, s"[ATT_DELETE] uploading attachment of document '$docId'")
    attachments.remove(attachmentKey(docId, name))
    transid.finished(this, start, s"[ATT_DELETE] completed: delete attachment of document '$docId'")
    Future.successful(true)
  }

  def attachmentCount: Int = attachments.size

  def isClosed = closed

  override def shutdown(): Unit = {
    closed = true
  }

  private def attachmentKey(docId: DocId, name: String) = s"${docId.id}/$name"
} 
Example 51
Source File: RateThrottler.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.core.entitlement

import scala.collection.concurrent.TrieMap

import org.apache.openwhisk.common.Logging
import org.apache.openwhisk.common.TransactionId
import org.apache.openwhisk.core.entity.Identity
import org.apache.openwhisk.core.entity.UUID
import java.util.concurrent.atomic.AtomicInteger


  def update(maxPerMinute: Int): Int = {
    roll()
    lastMinCount.incrementAndGet()
  }

  def roll(): Unit = {
    val curMin = getCurrentMinute
    if (curMin != lastMin) {
      lastMin = curMin
      lastMinCount.set(0)
    }
  }

  private def getCurrentMinute = System.currentTimeMillis / (60 * 1000)
} 
Example 52
Source File: LocalEntitlement.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.core.entitlement

import scala.collection.concurrent.TrieMap
import scala.concurrent.Future
import akka.actor.ActorSystem
import org.apache.openwhisk.common.Logging
import org.apache.openwhisk.common.TransactionId
import org.apache.openwhisk.core.WhiskConfig
import org.apache.openwhisk.core.entity.{ControllerInstanceId, Identity, Subject}
import org.apache.openwhisk.core.loadBalancer.LoadBalancer

protected[core] class LocalEntitlementProvider(
  private val config: WhiskConfig,
  private val loadBalancer: LoadBalancer,
  private val controllerInstance: ControllerInstanceId)(implicit actorSystem: ActorSystem, logging: Logging)
    extends EntitlementProvider(config, loadBalancer, controllerInstance) {

  private implicit val executionContext = actorSystem.dispatcher

  private val matrix = LocalEntitlementProvider.matrix

  
  private val matrix = TrieMap[(Subject, String), Set[Privilege]]()
  override def instance(config: WhiskConfig, loadBalancer: LoadBalancer, instance: ControllerInstanceId)(
    implicit actorSystem: ActorSystem,
    logging: Logging) =
    new LocalEntitlementProvider(config: WhiskConfig, loadBalancer: LoadBalancer, instance)
} 
Example 53
Source File: WhiskChangeEventObserver.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.core.database.cosmosdb.cache

import akka.Done
import com.azure.data.cosmos.CosmosItemProperties
import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserverContext
import com.google.common.base.Throwables
import kamon.metric.MeasurementUnit
import org.apache.openwhisk.common.{LogMarkerToken, Logging, MetricEmitter}
import org.apache.openwhisk.core.database.CacheInvalidationMessage
import org.apache.openwhisk.core.database.cosmosdb.CosmosDBConstants
import org.apache.openwhisk.core.database.cosmosdb.CosmosDBUtil.unescapeId
import org.apache.openwhisk.core.entity.CacheKey

import scala.collection.concurrent.TrieMap
import scala.collection.immutable.Seq
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

class WhiskChangeEventObserver(config: InvalidatorConfig, eventProducer: EventProducer)(implicit ec: ExecutionContext,
                                                                                        log: Logging)
    extends ChangeFeedObserver {
  import WhiskChangeEventObserver._

  override def process(context: ChangeFeedObserverContext, docs: Seq[CosmosItemProperties]): Future[Done] = {
    //Each observer is called from a pool managed by CosmosDB ChangeFeedProcessor
    //So its fine to have a blocking wait. If this fails then batch would be reread and
    //retried thus ensuring at-least-once semantics
    val f = eventProducer.send(processDocs(docs, config))
    f.andThen {
      case Success(_) =>
        MetricEmitter.emitCounterMetric(feedCounter, docs.size)
        recordLag(context, docs.last)
      case Failure(t) =>
        log.warn(this, "Error occurred while sending cache invalidation message " + Throwables.getStackTraceAsString(t))
    }
  }
}

trait EventProducer {
  def send(msg: Seq[String]): Future[Done]
}

object WhiskChangeEventObserver {
  val instanceId = "cache-invalidator"
  private val feedCounter =
    LogMarkerToken("cosmosdb", "change_feed", "count", tags = Map("collection" -> "whisks"))(MeasurementUnit.none)
  private val lags = new TrieMap[String, LogMarkerToken]

  
  def recordLag(context: ChangeFeedObserverContext, lastDoc: CosmosItemProperties): Unit = {
    val sessionToken = context.getFeedResponse.sessionToken()
    val lsnRef = lastDoc.get("_lsn")
    require(lsnRef != null, s"Non lsn defined in document $lastDoc")

    val lsn = lsnRef.toString.toLong
    val sessionLsn = getSessionLsn(sessionToken)
    val lag = sessionLsn - lsn
    val partitionKey = context.getPartitionKeyRangeId
    val gaugeToken = lags.getOrElseUpdate(partitionKey, createLagToken(partitionKey))
    MetricEmitter.emitGaugeMetric(gaugeToken, lag)
  }

  private def createLagToken(partitionKey: String) = {
    LogMarkerToken("cosmosdb", "change_feed", "lag", tags = Map("collection" -> "whisks", "pk" -> partitionKey))(
      MeasurementUnit.none)
  }

  def getSessionLsn(token: String): Long = {
    // Session Token can be in two formats. Either {PartitionKeyRangeId}:{LSN}
    // or {PartitionKeyRangeId}:{Version}#{GlobalLSN}
    // See https://github.com/Azure/azure-documentdb-changefeedprocessor-dotnet/pull/113/files#diff-54cbd8ddcc33cab4120c8af04869f881
    val parsedSessionToken = token.substring(token.indexOf(":") + 1)
    val segments = parsedSessionToken.split("#")
    val lsn = if (segments.size < 2) segments(0) else segments(1)
    lsn.toLong
  }

  def processDocs(docs: Seq[CosmosItemProperties], config: InvalidatorConfig)(implicit log: Logging): Seq[String] = {
    docs
      .filter { doc =>
        val cid = Option(doc.getString(CosmosDBConstants.clusterId))
        val currentCid = config.clusterId

        //only if current clusterId is configured do a check
        currentCid match {
          case Some(_) => cid != currentCid
          case None    => true
        }
      }
      .map { doc =>
        val id = unescapeId(doc.id())
        log.info(this, s"Changed doc [$id]")
        val event = CacheInvalidationMessage(CacheKey(id), instanceId)
        event.serialize
      }
  }

} 
Example 54
Source File: StandaloneDockerContainerFactory.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.core.containerpool.docker

import akka.actor.ActorSystem
import org.apache.commons.lang3.SystemUtils
import org.apache.openwhisk.common.{Logging, TransactionId}
import org.apache.openwhisk.core.{ConfigKeys, WhiskConfig}
import org.apache.openwhisk.core.containerpool.{Container, ContainerFactory, ContainerFactoryProvider}
import org.apache.openwhisk.core.entity.{ByteSize, ExecManifest, InvokerInstanceId}
import pureconfig._
import pureconfig.generic.auto._

import scala.collection.concurrent.TrieMap
import scala.concurrent.{ExecutionContext, Future}

object StandaloneDockerContainerFactoryProvider extends ContainerFactoryProvider {
  override def instance(actorSystem: ActorSystem,
                        logging: Logging,
                        config: WhiskConfig,
                        instanceId: InvokerInstanceId,
                        parameters: Map[String, Set[String]]): ContainerFactory = {
    val client =
      if (SystemUtils.IS_OS_MAC) new DockerForMacClient()(actorSystem.dispatcher)(logging, actorSystem)
      else if (SystemUtils.IS_OS_WINDOWS) new DockerForWindowsClient()(actorSystem.dispatcher)(logging, actorSystem)
      else new DockerClientWithFileAccess()(actorSystem.dispatcher)(logging, actorSystem)

    new StandaloneDockerContainerFactory(instanceId, parameters)(
      actorSystem,
      actorSystem.dispatcher,
      logging,
      client,
      new RuncClient()(actorSystem.dispatcher)(logging, actorSystem))
  }
}

case class StandaloneDockerConfig(pullStandardImages: Boolean)

class StandaloneDockerContainerFactory(instance: InvokerInstanceId, parameters: Map[String, Set[String]])(
  implicit actorSystem: ActorSystem,
  ec: ExecutionContext,
  logging: Logging,
  docker: DockerApiWithFileAccess,
  runc: RuncApi)
    extends DockerContainerFactory(instance, parameters) {
  private val pulledImages = new TrieMap[String, Boolean]()
  private val factoryConfig = loadConfigOrThrow[StandaloneDockerConfig](ConfigKeys.standaloneDockerContainerFactory)

  override def createContainer(tid: TransactionId,
                               name: String,
                               actionImage: ExecManifest.ImageName,
                               userProvidedImage: Boolean,
                               memory: ByteSize,
                               cpuShares: Int)(implicit config: WhiskConfig, logging: Logging): Future[Container] = {

    //For standalone server usage we would also want to pull the OpenWhisk provided image so as to ensure if
    //local setup does not have the image then it pulls it down
    //For standard usage its expected that standard images have already been pulled in.
    val imageName = actionImage.resolveImageName(Some(runtimesRegistryConfig.url))
    val pulled =
      if (!userProvidedImage
          && factoryConfig.pullStandardImages
          && !pulledImages.contains(imageName)
          && actionImage.prefix.contains("openwhisk")) {
        docker.pull(imageName)(tid).map { _ =>
          logging.info(this, s"Pulled OpenWhisk provided image $imageName")
          pulledImages.put(imageName, true)
          true
        }
      } else Future.successful(true)

    pulled.flatMap(_ => super.createContainer(tid, name, actionImage, userProvidedImage, memory, cpuShares))
  }

  override def init(): Unit = {
    logging.info(
      this,
      s"Standalone docker container factory config pullStandardImages: ${factoryConfig.pullStandardImages}")
    super.init()
  }
}

trait WindowsDockerClient {
  self: DockerClient =>

  override protected def executableAlternatives: List[String] = {
    val executable = loadConfig[String]("whisk.docker.executable").toOption
    List("""C:\Program Files\Docker\Docker\resources\bin\docker.exe""") ++ executable
  }
}

class DockerForWindowsClient(dockerHost: Option[String] = None)(executionContext: ExecutionContext)(
  implicit log: Logging,
  as: ActorSystem)
    extends DockerForMacClient(dockerHost)(executionContext)
    with WindowsDockerClient {
  //Due to some Docker + Windows + Go parsing quirks need to add double quotes around whole command
  //See https://github.com/moby/moby/issues/27592#issuecomment-255227097
  override def inspectCommand: String = """"{{(index (index .NetworkSettings.Ports \"8080/tcp\") 0).HostPort}}""""
} 
Example 55
Source File: OpenTracing.scala    From sangria-slowlog   with Apache License 2.0 5 votes vote down vote up
package sangria.slowlog

import io.opentracing.{Scope, Span, Tracer}
import sangria.execution._
import sangria.schema.Context

import scala.collection.concurrent.TrieMap

class OpenTracing(parentSpan: Option[Span] = None, defaultOperationName: String = "UNNAMED")(implicit tracer: Tracer)
  extends Middleware[Any] with MiddlewareAfterField[Any] with MiddlewareErrorField[Any] {

  type QueryVal = TrieMap[Vector[Any], (Span, Scope)]
  type FieldVal = Unit

  def beforeQuery(context: MiddlewareQueryContext[Any, _, _]) = {
    val builder = tracer
      .buildSpan(context.operationName.getOrElse(defaultOperationName))
      .withTag("type", "graphql-query")

    val spanBuilder =
      parentSpan match {
        case Some(parent) => builder.asChildOf(parent)
        case None => builder
      }

    val span = spanBuilder.start()
    val scope = tracer.activateSpan(span)

    TrieMap(Vector.empty -> (span, scope))
  }

  def afterQuery(queryVal: QueryVal, context: MiddlewareQueryContext[Any, _, _]) =
    queryVal.get(Vector.empty).foreach { case (span, scope) =>
      span.finish()
      scope.close()
    }

  def beforeField(queryVal: QueryVal, mctx: MiddlewareQueryContext[Any, _, _], ctx: Context[Any, _]) = {
    val path = ctx.path.path
    val parentPath = path
      .dropRight(1)
      .reverse
      .dropWhile {
        case _: String => false
        case _: Int => true
      }
      .reverse

    val spanBuilder =
      queryVal
        .get(parentPath)
        .map { case (parentSpan, _) =>
          tracer
            .buildSpan(ctx.field.name)
            .withTag("type", "graphql-field")
            .asChildOf(parentSpan)
        }
        .getOrElse {
          tracer
            .buildSpan(ctx.field.name)
            .withTag("type", "graphql-field")
        }


    val span = spanBuilder.start()
    val scope = tracer.activateSpan(span)

    BeforeFieldResult(queryVal.update(ctx.path.path, (span, scope)), attachment = Some(ScopeAttachment(span, scope)))
  }

  def afterField(
      queryVal: QueryVal,
      fieldVal: FieldVal,
      value: Any,
      mctx: MiddlewareQueryContext[Any, _, _],
      ctx: Context[Any, _]) = {
    queryVal.get(ctx.path.path).foreach { case (span, scope) =>
      span.finish()
      scope.close()
    }
    None
  }

  def fieldError(
      queryVal: QueryVal,
      fieldVal: FieldVal,
      error: Throwable,
      mctx: MiddlewareQueryContext[Any, _, _],
      ctx: Context[Any, _]) =
    queryVal.get(ctx.path.path).foreach { case (span, scope) =>
      span.finish()
      scope.close()
    }
}

final case class ScopeAttachment(span: Span, scope: Scope) extends MiddlewareAttachment 
Example 56
Source File: MediaServer.scala    From wix-http-testkit   with MIT License 5 votes vote down vote up
package com.wix.e2e.http.examples

import akka.http.scaladsl.model.HttpMethods.{GET, PUT}
import akka.http.scaladsl.model.MediaTypes.`image/png`
import akka.http.scaladsl.model.StatusCodes.NotFound
import akka.http.scaladsl.model.Uri.Path
import akka.http.scaladsl.model._
import com.wix.e2e.http.RequestHandler
import com.wix.e2e.http.client.extractors._
import com.wix.e2e.http.server.WebServerFactory.aMockWebServerWith

import scala.collection.concurrent.TrieMap

class MediaServer(port: Int, uploadPath: String, downloadPath: String) {

  // Backing store for uploaded files; declared before the server is built and
  // started so no incoming request can observe it uninitialized.
  private val files = TrieMap.empty[String, Array[Byte]]

  private val mockWebServer = aMockWebServerWith( {
    case HttpRequest(PUT, u, headers, entity, _) if u.path.tail == Path(uploadPath) =>
      handleMediaPost(u, headers.toList, entity)

    case HttpRequest(GET, u, headers, _, _) if u.path.tail.toString().startsWith(downloadPath) =>
      handleMediaGet(u, headers.toList)

  } : RequestHandler).onPort(port)
                     .build.start()

  def stop() = mockWebServer.stop()

  private def handleMediaPost(uri: Uri, headers: List[HttpHeader], entity: HttpEntity): HttpResponse = {
    val fileName = headers.find( _.name == "filename").map( _.value ).orElse( uri.query().toMap.get("f") ).get
    val media = entity.extractAsBytes
    files.put(fileName, media)
    HttpResponse()
  }

  private def handleMediaGet(uri: Uri, headers: List[HttpHeader]): HttpResponse = {
    val fileName = uri.path.reverse
                      .head.toString
                      .stripPrefix("/")
    files.get(fileName)
      .map( i => HttpResponse(entity = HttpEntity(`image/png`, i)) )
      .getOrElse( HttpResponse(status = NotFound) )
  }
}
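
The stub declares its routes up front and keeps all state in the TrieMap, so a test can drive it end to end. A hypothetical driver (port and path names are illustrative):

val media = new MediaServer(port = 9944, uploadPath = "upload", downloadPath = "media")
try {
  // clients PUT binary payloads to /upload?f=<name> and GET them back from /media/<name>
} finally media.stop()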
Example 57
Source File: MarshallerTestSupport.scala    From wix-http-testkit   with MIT License 5 votes vote down vote up
package com.wix.e2e.http.matchers.drivers

import com.wix.e2e.http.api.Marshaller

import scala.collection.concurrent.TrieMap
import scala.language.reflectiveCalls

trait MarshallerTestSupport {
  val marshaller = new Marshaller {
    val unmarshallResult = TrieMap.empty[String, AnyRef]
    val unmarshallError = TrieMap.empty[String, Throwable]

    def unmarshall[T: Manifest](jsonStr: String) = {
      unmarshallError.get(jsonStr).foreach( throw _ )
      unmarshallResult.getOrElse(jsonStr, throw new UnsupportedOperationException)
                      .asInstanceOf[T]
    }

    def marshall[T](t: T) = ???
  }

  def givenUnmarshallerWith[T <: AnyRef](someEntity: T, forContent: String)(implicit mn: Manifest[T]): Unit =
    marshaller.unmarshallResult.put(forContent, someEntity)

  def givenBadlyBehavingUnmarshallerFor[T : Manifest](withContent: String): Unit =
    marshaller.unmarshallError.put(withContent, new RuntimeException)
}

trait CustomMarshallerProvider {
  def marshaller: Marshaller
  implicit def customMarshaller: Marshaller = marshaller
} 
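
A sketch of how a test would stub an unmarshalling result (SomeEntity is a hypothetical test type, not part of the testkit):

object MarshallerDemo extends MarshallerTestSupport {
  case class SomeEntity(v: String)
  givenUnmarshallerWith(SomeEntity("x"), forContent = """{"v":"x"}""")
  val parsed: SomeEntity = marshaller.unmarshall[SomeEntity]("""{"v":"x"}""")  // SomeEntity("x")
}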
Example 58
Source File: MarshallerTestSupport.scala    From wix-http-testkit   with MIT License 5 votes vote down vote up
package com.wix.e2e.http.drivers

import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import com.wix.e2e.http.api.Marshaller
import com.wix.e2e.http.drivers.MarshallingTestObjects.SomeCaseClass
import com.wix.test.random.{randomInt, randomStr}

import scala.collection.concurrent.TrieMap

trait MarshallerTestSupport {
  val someObject = SomeCaseClass(randomStr, randomInt)
  val content = randomStr

  def givenMarshallerThatUnmarshalWith(unmarshal: SomeCaseClass, forContent: String): Unit =
    MarshallingTestObjects.unmarshallResult.put(forContent, unmarshal)

  def givenMarshallerThatMarshal(content: String, to: SomeCaseClass): Unit =
    MarshallingTestObjects.marshallResult.put(to, content)

  def aResponseWith(body: String) = HttpResponse(entity = body)
  def aRequestWith(body: String) = HttpRequest(entity = body)
  val request = HttpRequest()
}

object MarshallingTestObjects {
  case class SomeCaseClass(s: String, i: Int)

  val marshallResult = TrieMap.empty[SomeCaseClass, String]
  val unmarshallResult = TrieMap.empty[String, SomeCaseClass]

  class MarshallerForTest extends Marshaller {

    def unmarshall[T: Manifest](jsonStr: String) =
      MarshallingTestObjects.unmarshallResult
                            .getOrElse(jsonStr, throw new UnsupportedOperationException)
                            .asInstanceOf[T]

    def marshall[T](t: T) =
      MarshallingTestObjects.marshallResult
                            .getOrElse(t.asInstanceOf[SomeCaseClass], throw new UnsupportedOperationException)
  }
} 
Example 59
Source File: CoreSpan.scala    From money   with Apache License 2.0 5 votes vote down vote up
package com.comcast.money.core

import java.lang.Long

import com.comcast.money.api._

import scala.collection.JavaConverters._
import scala.collection.concurrent.TrieMap

case class CoreSpan(
  id: SpanId,
  name: String,
  handler: SpanHandler) extends Span {

  private var startTimeMillis: Long = 0L
  private var startTimeMicros: Long = 0L
  private var endTimeMillis: Long = 0L
  private var endTimeMicros: Long = 0L
  private var success: java.lang.Boolean = true

  // use concurrent maps
  private val timers = new TrieMap[String, Long]()
  private val noted = new TrieMap[String, Note[_]]()

  def start(): Unit = {
    startTimeMillis = System.currentTimeMillis
    startTimeMicros = System.nanoTime / 1000
  }

  def stop(): Unit = stop(true)

  def stop(result: java.lang.Boolean): Unit = {
    endTimeMillis = System.currentTimeMillis
    endTimeMicros = System.nanoTime / 1000
    this.success = result

    // process any hanging timers
    val openTimers = timers.keys
    openTimers.foreach(stopTimer)

    handler.handle(info)
  }

  def stopTimer(timerKey: String): Unit =
    timers.remove(timerKey) foreach {
      timerStartInstant =>
        record(Note.of(timerKey, System.nanoTime - timerStartInstant))
    }

  def record(note: Note[_]): Unit = noted += note.name -> note

  def startTimer(timerKey: String): Unit = timers += timerKey -> System.nanoTime

  def info(): SpanInfo =
    CoreSpanInfo(
      id,
      name,
      startTimeMillis,
      startTimeMicros,
      endTimeMillis,
      endTimeMicros,
      calculateDuration,
      success,
      noted.toMap[String, Note[_]].asJava)

  private def calculateDuration: Long =
    if (endTimeMicros <= 0L && startTimeMicros <= 0L)
      0L
    else if (endTimeMicros <= 0L)
      (System.nanoTime() / 1000) - startTimeMicros
    else
      endTimeMicros - startTimeMicros
} 
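
The hanging-timer bookkeeping above reduces to a small TrieMap idiom: the start instant is stored when the timer starts, and remove hands it back to exactly one caller, so a timer can be stopped safely from any thread. A standalone sketch of just that pattern (not Money API code):

object TimerPatternDemo extends App {
  import scala.collection.concurrent.TrieMap

  val timers = TrieMap.empty[String, Long]
  timers += "db-call" -> System.nanoTime

  // remove returns the start instant at most once, even under races
  timers.remove("db-call").foreach { start =>
    println(s"db-call took ${(System.nanoTime - start) / 1000} micros")
  }
}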
Example 60
Source File: LanguageClient.scala    From lsp4s   with Apache License 2.0 5 votes vote down vote up
package scala.meta.jsonrpc

import cats.syntax.either._
import io.circe.Decoder
import io.circe.Encoder
import io.circe.syntax._
import java.io.OutputStream
import java.nio.ByteBuffer
import monix.eval.Callback
import monix.eval.Task
import monix.execution.Ack
import monix.execution.Cancelable
import monix.execution.atomic.Atomic
import monix.execution.atomic.AtomicInt
import monix.reactive.Observer
import scala.collection.concurrent.TrieMap
import scala.concurrent.Future
import scala.concurrent.duration.Duration
import MonixEnrichments._
import scribe.LoggerSupport

class LanguageClient(out: Observer[ByteBuffer], logger: LoggerSupport)
    extends JsonRpcClient {
  def this(out: OutputStream, logger: LoggerSupport) =
    this(Observer.fromOutputStream(out, logger), logger)
  private val writer = new MessageWriter(out, logger)
  private val counter: AtomicInt = Atomic(1)
  private val activeServerRequests =
    TrieMap.empty[RequestId, Callback[Response]]
  def notify[A: Encoder](method: String, notification: A): Future[Ack] =
    writer.write(Notification(method, Some(notification.asJson)))
  def serverRespond(response: Response): Future[Ack] = response match {
    case Response.Empty => Ack.Continue
    case x: Response.Success => writer.write(x)
    case x: Response.Error =>
      logger.error(s"Response error: $x")
      writer.write(x)
  }
  def clientRespond(response: Response): Unit =
    for {
      id <- response match {
        case Response.Empty => None
        case Response.Success(_, requestId) => Some(requestId)
        case Response.Error(_, requestId) => Some(requestId)
      }
      callback <- activeServerRequests.get(id).orElse {
        logger.error(s"Response to unknown request: $response")
        None
      }
    } {
      activeServerRequests.remove(id)
      callback.onSuccess(response)
    }

  def request[A: Encoder, B: Decoder](
      method: String,
      request: A
  ): Task[Either[Response.Error, B]] = {
    val nextId = RequestId(counter.incrementAndGet())
    val response = Task.create[Response] { (scheduler, cb) =>
      val scheduled = scheduler.scheduleOnce(Duration(0, "s")) {
        val json = Request(method, Some(request.asJson), nextId)
        activeServerRequests.put(nextId, cb)
        writer.write(json)
      }
      Cancelable { () =>
        scheduled.cancel()
        this.notify("$/cancelRequest", CancelParams(nextId.value))
      }
    }
    response.map {
      case Response.Empty =>
        Left(
          Response.invalidParams(
            s"Got empty response for request $request",
            nextId
          )
        )
      case err: Response.Error =>
        Left(err)
      case Response.Success(result, _) =>
        result.as[B].leftMap { err =>
          Response.invalidParams(err.toString, nextId)
        }
    }
  }
}

object LanguageClient {
  def fromOutputStream(out: OutputStream, logger: LoggerSupport) =
    new LanguageClient(Observer.fromOutputStream(out, logger), logger)
} 
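
A sketch of a raw JSON round trip with the client; outputStream and logger stand for a connected stream and a scribe LoggerSupport (both assumed), and circe's Json carries its own Encoder and Decoder, so no custom codecs are needed:

import io.circe.Json
import monix.eval.Task

val client = LanguageClient.fromOutputStream(outputStream, logger)
val symbols: Task[Either[Response.Error, Json]] =
  client.request[Json, Json]("workspace/symbol", Json.obj("query" -> Json.fromString("Main")))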
Example 61
Source File: DataApi.scala    From scala-for-beginners   with Apache License 2.0 5 votes vote down vote up
package com.allaboutscala.donutstore.data

import com.allaboutscala.donutstore.common.{Donut, Donuts}
import com.typesafe.scalalogging.LazyLogging

import scala.collection.concurrent.TrieMap
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global

// The listing drops the enclosing class header here; a plausible reconstruction
// (class name assumed) implementing the project's DataApi trait:
class DataApiImpl extends DataApi with LazyLogging {

  private val donutDatabase = TrieMap.empty[String, Donut]

  override def createDonut(donut: Donut): Future[String] = Future {
    logger.info(s"Create donut = $donut")
    val donutExists = donutDatabase.putIfAbsent(donut.name, donut)
    donutExists match {
      case Some(d) => s"${d.name} already exists in database."
      case None => s"${donut.name} has been added to the database."
    }
  }

  override def fetchDonuts(): Future[Donuts] = Future {
    logger.info("Fetching all donuts")
    Donuts(donutDatabase.values.toSeq)
  }

  override def updateDonutIngredients(donut: Donut): Future[String] = Future {
    logger.info(s"Updating ingredients = ${donut.ingredients} for donutName = ${donut.name}")
    val someDonut = donutDatabase.get(donut.name)
    someDonut match {
      case Some(d) =>
        donutDatabase.replace(d.name, donut)
        s"Updated donut ingredients for donutName = ${donut.name}"

      case None =>
        s"Donut ${donut.name} does not exist in database. The update operation was not run."
    }
  }

  override def deleteDonut(donutName: String): Future[String] = Future {
    logger.info(s"Deleting donut = $donutName")
    val someDonut = donutDatabase.get(donutName)
    someDonut match {
      case Some(d) =>
        donutDatabase.remove(d.name)
        s"Deleted $d from database."

      case None =>
        s"$donutName does not exist in database. The delete operation was not run."
    }
  }
} 
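
A hypothetical driver for the reconstruction above (the DataApiImpl name and the Donut(name, ingredients) shape are assumptions):

import scala.concurrent.Await
import scala.concurrent.duration._

val api = new DataApiImpl
println(Await.result(api.createDonut(Donut("plain", Seq("flour", "sugar"))), 5.seconds))
println(Await.result(api.fetchDonuts(), 5.seconds))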
Example 62
Source File: DataApi.scala    From scala-for-beginners   with Apache License 2.0 5 votes vote down vote up
package com.allaboutscala.donutstore.data

import com.allaboutscala.donutstore.common.{Donut, Donuts}
import com.typesafe.scalalogging.LazyLogging

import scala.collection.concurrent.TrieMap
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

// As in Example 61, the enclosing class header is missing from the listing;
// the same assumed reconstruction is used:
class DataApiImpl extends DataApi with LazyLogging {

  private val donutDatabase = TrieMap.empty[String, Donut]

  override def createDonut(donut: Donut): Future[String] = Future {
    logger.info(s"Create donut = $donut")
    val donutExists = donutDatabase.putIfAbsent(donut.name, donut)
    donutExists match {
      case Some(d) => s"${d.name} already exists in database."
      case None => s"${donut.name} has been added to the database."
    }
  }

  override def fetchDonuts(): Future[Donuts] = Future {
    logger.info("Fetching all donuts")
    Donuts(donutDatabase.values.toSeq)
  }

  override def updateDonutIngredients(donut: Donut): Future[String] = Future {
    logger.info(s"Updating ingredients = ${donut.ingredients} for donutName = ${donut.name}")
    val someDonut = donutDatabase.get(donut.name)
    someDonut match {
      case Some(d) =>
        donutDatabase.replace(d.name, donut)
        s"Updated donut ingredients for donutName = ${donut.name}"

      case None =>
        s"Donut ${donut.name} does not exist in database. The update operation was not run."
    }
  }

  override def deleteDonut(donutName: String): Future[String] = Future {
    logger.info(s"Deleting donut = $donutName")
    val someDonut = donutDatabase.get(donutName)
    someDonut match {
      case Some(d) =>
        donutDatabase.remove(d.name)
        s"Deleted ${d.name} from database."

      case None =>
        s"$donutName does not exist in database. The delete operation was not run."
    }
  }
} 
Example 63
Source File: Telemetry.scala    From finagle-prometheus   with MIT License 5 votes vote down vote up
package com.samstarling.prometheusfinagle.metrics

import io.prometheus.client._

import scala.collection.concurrent.TrieMap

// TODO: Make namespace optional
class Telemetry(registry: CollectorRegistry, namespace: String) {

  private val counters = TrieMap.empty[String, Counter]
  private val histograms = TrieMap.empty[String, Histogram]
  private val gauges = TrieMap.empty[String, Gauge]

  private def cacheKeyFor(name: String): String = s"${namespace}_$name"

  // TODO: Support injecting default labels

  def counter(name: String,
              help: String = "No help provided",
              labelNames: Seq[String] = Seq.empty): Counter = {
    counters.getOrElseUpdate(cacheKeyFor(name), {
      Counter
        .build()
        .namespace(namespace)
        .name(name)
        .help(help)
        .labelNames(labelNames: _*)
        .register(registry)
    })
  }

  def histogram(name: String,
                help: String = "No help provided",
                labelNames: Seq[String] = Seq.empty,
                buckets: Seq[Double] = Seq(0.1, 0.5, 1.0, 5.0)): Histogram = {
    histograms.getOrElseUpdate(cacheKeyFor(name), {
      Histogram
        .build()
        .namespace(namespace)
        .name(name)
        .help(help)
        .buckets(buckets: _*)
        .labelNames(labelNames: _*)
        .register(registry)
    })
  }

  def gauge(name: String,
            help: String = "No help provided",
            labelNames: Seq[String] = Seq.empty): Gauge = {
    gauges.getOrElseUpdate(cacheKeyFor(name), {
      Gauge
        .build()
        .namespace(namespace)
        .name(name)
        .help(help)
        .labelNames(labelNames: _*)
        .register(registry)
    })
  }
}
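
A minimal usage sketch (registry, namespace, and metric names are illustrative). One caveat worth knowing: TrieMap.getOrElseUpdate may evaluate its default more than once when two threads race on the same key, and the losing Counter.build()...register(registry) call would then throw, so pre-registering metrics at startup is the safer pattern:

val registry = new CollectorRegistry()
val telemetry = new Telemetry(registry, "myapp")

telemetry.counter("requests_total", help = "Total requests served").inc()
telemetry.histogram("latency_seconds").observe(0.42)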