java.util.concurrent.ThreadLocalRandom Scala Examples
The following examples show how to use java.util.concurrent.ThreadLocalRandom.
Each example notes its source file, the project it was taken from, and that project's license.
Example 1
Source File: HasDex.scala From matcher with MIT License
package com.wavesplatform.dex.it.dex

import java.util.Properties
import java.util.concurrent.ThreadLocalRandom

import cats.Functor
import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.dex.it.api.BaseContainersKit
import com.wavesplatform.dex.it.docker.DexContainer
import com.wavesplatform.dex.it.fp.CanExtract
import mouse.any._
import org.apache.kafka.clients.admin.{AdminClient, NewTopic}

import scala.collection.JavaConverters._

trait HasDex { self: BaseContainersKit =>
  private val defaultTag = Option(System.getenv("DEX_TAG")).getOrElse("latest")

  protected implicit def toDexExplicitGetOps[F[_]: CanExtract: Functor](self: DexApi[F]): DexApiOps.ExplicitGetDexApiOps[F] = {
    new DexApiOps.ExplicitGetDexApiOps[F](self)
  }

  protected def dexInitialSuiteConfig: Config = ConfigFactory.empty()

  protected lazy val dexRunConfig: Config = dexQueueConfig(ThreadLocalRandom.current.nextInt(0, Int.MaxValue))

  protected def kafkaServer: Option[String] = Option { System.getenv("KAFKA_SERVER") }

  protected def dexQueueConfig(queueId: Int): Config = {
    kafkaServer.fold { ConfigFactory.empty() } { kafkaServer =>
      ConfigFactory.parseString(s"""waves.dex.events-queue {
                                   |  type = kafka
                                   |  kafka {
                                   |    servers = "$kafkaServer"
                                   |    topic = "dex-$queueId"
                                   |  }
                                   |}""".stripMargin)
    }
  }

  protected def createDex(name: String,
                          runConfig: Config = dexRunConfig,
                          suiteInitialConfig: Config = dexInitialSuiteConfig,
                          tag: String = defaultTag): DexContainer =
    DexContainer(name, networkName, network, getIp(name), runConfig, suiteInitialConfig, localLogsDir, tag) unsafeTap addKnownContainer

  lazy val dex1: DexContainer = createDex("dex-1")

  protected def createKafkaTopic(name: String): Unit = kafkaServer.foreach { server =>
    val properties = new Properties()
    properties.putAll(
      Map(
        "bootstrap.servers"  -> server,
        "group.id"           -> s"create-$name",
        "key.deserializer"   -> "org.apache.kafka.common.serialization.StringDeserializer",
        "value.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer"
      ).asJava
    )

    val adminClient = AdminClient.create(properties)
    try {
      val newTopic = new NewTopic(name, 1, 1.toShort)
      adminClient.createTopics(java.util.Collections.singletonList(newTopic))
    } finally {
      adminClient.close()
    }
  }
}
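This example isolates parallel test suites by deriving the Kafka topic name from a random non-negative int in dexRunConfig. A minimal sketch of that naming trick in isolation (the object and method names here are illustrative, not part of the project):

import java.util.concurrent.ThreadLocalRandom

object RandomTopicName {
  // nextInt(origin, bound) is uniform over [origin, bound), so this
  // always yields a non-negative id, matching dexRunConfig above.
  def mk(prefix: String = "dex"): String =
    s"$prefix-${ThreadLocalRandom.current.nextInt(0, Int.MaxValue)}"
}

// e.g. RandomTopicName.mk() might return "dex-1683021377"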
Example 2
Source File: AkkaDiscoveryHelper.scala From lagom with Apache License 2.0
package com.lightbend.lagom.internal.client

import java.net.URI
import java.net.URISyntaxException
import java.util.concurrent.ThreadLocalRandom
import java.util.concurrent.TimeUnit

import akka.discovery.ServiceDiscovery
import akka.discovery.ServiceDiscovery.ResolvedTarget
import com.typesafe.config.Config
import org.slf4j.LoggerFactory

import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.duration._

private[lagom] class AkkaDiscoveryHelper(config: Config, serviceDiscovery: ServiceDiscovery)(
    implicit ec: ExecutionContext
) {
  private val logger = LoggerFactory.getLogger(this.getClass)

  private val serviceNameMapper = new ServiceNameMapper(config)
  private val lookupTimeout     = config.getDuration("lookup-timeout", TimeUnit.MILLISECONDS).millis

  def locateAll(name: String): Future[Seq[URI]] = {
    val serviceLookup = serviceNameMapper.mapLookupQuery(name)
    serviceDiscovery
      .lookup(serviceLookup.lookup, lookupTimeout)
      .map { resolved =>
        logger.debug("Retrieved addresses: {}", resolved.addresses)
        resolved.addresses.map(target => toURI(target, serviceLookup))
      }
  }

  def locate(name: String): Future[Option[URI]] = locateAll(name).map(selectRandomURI)

  private def toURI(resolvedTarget: ResolvedTarget, lookup: ServiceLookup): URI = {
    val port   = resolvedTarget.port.getOrElse(-1)
    val scheme = lookup.scheme.orNull
    try {
      new URI(
        scheme,              // scheme
        null,                // userInfo
        resolvedTarget.host, // host
        port,                // port
        null,                // path
        null,                // query
        null                 // fragment
      )
    } catch {
      case e: URISyntaxException => throw new RuntimeException(e)
    }
  }

  private def selectRandomURI(uris: Seq[URI]) = uris match {
    case Nil      => None
    case Seq(one) => Some(one)
    case many     => Some(many(ThreadLocalRandom.current().nextInt(many.size)))
  }
}
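The selectRandomURI helper is a generic client-side load-balancing pattern: pick one element uniformly, skipping the RNG entirely for the common zero- and one-element cases. Extracted as a standalone sketch (the wrapper object is only there to make the snippet compile on its own):

import java.util.concurrent.ThreadLocalRandom

object Pick {
  def pickRandom[A](xs: Seq[A]): Option[A] = xs match {
    case Nil      => None      // nothing to pick
    case Seq(one) => Some(one) // avoid an RNG call for a single candidate
    case many     => Some(many(ThreadLocalRandom.current().nextInt(many.size)))
  }
}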
Example 3
Source File: Implicits.scala From Waves with MIT License
package com.wavesplatform.generator.utils

import java.util.concurrent.ThreadLocalRandom

import com.wavesplatform.settings.Constants

object Implicits {
  final implicit class IteratorUtilsOps(val self: Iterator.type) extends AnyVal {
    private def random = ThreadLocalRandom.current

    def randomContinually[A](orig: Seq[A]): Iterator[A] = new Iterator[A] {
      private val origSize = orig.size

      override val hasNext: Boolean = true
      override def next(): A        = orig(random.nextInt(origSize))
    }
  }

  final implicit class DoubleExt(val d: Double) extends AnyVal {
    def waves: Long = (BigDecimal(d) * Constants.UnitsInWave).toLong
  }
}
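Assuming the implicit class above is in scope, randomContinually turns any non-empty Seq into an endless stream of uniform picks; since hasNext is always true, the caller must bound it. A hypothetical usage sketch:

import com.wavesplatform.generator.utils.Implicits.IteratorUtilsOps

object RandomWords extends App {
  val words = Seq("alpha", "beta", "gamma")
  // The iterator never ends, so bound it with take(_)
  Iterator.randomContinually(words).take(5).foreach(println)
}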
Example 4
Source File: SmartGenerator.scala From Waves with MIT License
package com.wavesplatform.generator

import java.util.concurrent.ThreadLocalRandom

import cats.Show
import com.wavesplatform.account.KeyPair
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.generator.utils.Gen
import com.wavesplatform.generator.utils.Implicits.DoubleExt
import com.wavesplatform.lang.script.Script
import com.wavesplatform.lang.v1.estimator.ScriptEstimator
import com.wavesplatform.transaction.Asset.Waves
import com.wavesplatform.transaction.assets.exchange.{AssetPair, ExchangeTransaction, Order}
import com.wavesplatform.transaction.smart.SetScriptTransaction
import com.wavesplatform.transaction.transfer.TransferTransaction
import com.wavesplatform.transaction.{Asset, Transaction, TxVersion}

import scala.concurrent.duration._

class SmartGenerator(settings: SmartGenerator.Settings, val accounts: Seq[KeyPair], estimator: ScriptEstimator) extends TransactionGenerator {
  private def r = ThreadLocalRandom.current

  private def randomFrom[T](c: Seq[T]): Option[T] = if (c.nonEmpty) Some(c(r.nextInt(c.size))) else None

  override def next(): Iterator[Transaction] = generate(settings).iterator

  private def generate(settings: SmartGenerator.Settings): Seq[Transaction] = {
    val bank = randomFrom(accounts).get
    val fee  = 0.005.waves

    val script: Script = Gen.script(settings.complexity, estimator)

    val setScripts = Range(0, settings.scripts) flatMap (_ =>
      accounts.map { i =>
        SetScriptTransaction.selfSigned(1.toByte, i, Some(script), 1.waves, System.currentTimeMillis()).explicitGet()
      })

    val now = System.currentTimeMillis()

    val txs = Range(0, settings.transfers).map { i =>
      TransferTransaction
        .selfSigned(2.toByte, bank, bank.toAddress, Waves, 1.waves - 2 * fee, Waves, fee, ByteStr.empty, now + i)
        .explicitGet()
    }

    val extxs = Range(0, settings.exchange).map { i =>
      val ts = now + i

      val matcher = randomFrom(accounts).get
      val seller  = randomFrom(accounts).get
      val buyer   = randomFrom(accounts).get

      val asset           = randomFrom(settings.assets.toSeq)
      val tradeAssetIssue = ByteStr.decodeBase58(asset.get).toOption

      val pair = AssetPair(Waves, Asset.fromCompatId(tradeAssetIssue))

      val sellOrder = Order.sell(TxVersion.V2, seller, matcher.publicKey, pair, 100000000L, 1, ts, ts + 30.days.toMillis, 0.003.waves)
      val buyOrder  = Order.buy(TxVersion.V2, buyer, matcher.publicKey, pair, 100000000L, 1, ts, ts + 1.day.toMillis, 0.003.waves)

      ExchangeTransaction
        .signed(TxVersion.V2, matcher.privateKey, buyOrder, sellOrder, 100000000, 1, 0.003.waves, 0.003.waves, 0.011.waves, ts)
        .explicitGet()
    }

    setScripts ++ txs ++ extxs
  }
}

object SmartGenerator {
  final case class Settings(scripts: Int, transfers: Int, complexity: Boolean, exchange: Int, assets: Set[String]) {
    require(scripts >= 0)
    require(transfers >= 0)
    require(exchange >= 0)
  }

  object Settings {
    implicit val toPrintable: Show[Settings] = { x =>
      import x._
      s"""
         | set-scripts = $scripts
         | transfers = $transfers
         | complexity = $complexity
         | exchange = $exchange
         | assets = $assets
      """.stripMargin
    }
  }
}
Example 5
Source File: Base58Benchmark.scala From Waves with MIT License
package com.wavesplatform.common

import java.util.concurrent.{ThreadLocalRandom, TimeUnit}

import com.wavesplatform.common.Base58Benchmark.{Base58St, BytesSt}
import com.wavesplatform.common.utils.{Base58, FastBase58, StdBase58}
import org.openjdk.jmh.annotations._
import org.openjdk.jmh.infra.Blackhole

@OutputTimeUnit(TimeUnit.SECONDS)
@BenchmarkMode(Array(Mode.Throughput))
@Threads(4)
@Fork(1)
@Warmup(iterations = 10)
@Measurement(iterations = 10)
class Base58Benchmark {
  @Benchmark
  def base58_fastEncode_test(st: BytesSt, bh: Blackhole): Unit = bh.consume(FastBase58.encode(st.bytes))

  @Benchmark
  def base58_encode_test(st: BytesSt, bh: Blackhole): Unit = bh.consume(StdBase58.encode(st.bytes))

  @Benchmark
  def base58_decode_test(st: Base58St, bh: Blackhole): Unit = bh.consume(StdBase58.decode(st.base58))

  @Benchmark
  def base58_fastDecode_test(st: Base58St, bh: Blackhole): Unit = bh.consume(FastBase58.decode(st.base58))
}

object Base58Benchmark {
  def randomBytes(length: Int): Array[Byte] = {
    val bytes = new Array[Byte](length)
    ThreadLocalRandom.current().nextBytes(bytes)
    bytes
  }

  @State(Scope.Benchmark)
  class BytesSt {
    val bytes = randomBytes(10000)
  }

  @State(Scope.Benchmark)
  class Base58St extends BytesSt {
    val base58 = Base58.encode(bytes)
  }
}
Example 6
Source File: StatsSample.scala From fusion-data with Apache License 2.0
package sample.cluster.stats

import java.util.concurrent.ThreadLocalRandom

import akka.actor.{ Actor, ActorSystem, Address, Props, RelativeActorPath, RootActorPath }
import akka.cluster.ClusterEvent._
import akka.cluster.{ Cluster, MemberStatus }
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration._

object StatsSample {
  def main(args: Array[String]): Unit =
    if (args.isEmpty) {
      startup(Seq("2551", "2552", "0"))
      StatsSampleClient.main(Array.empty)
    } else {
      startup(args)
    }

  def startup(ports: Seq[String]): Unit =
    ports foreach { port =>
      // Override the configuration of the port when specified as program argument
      val config = ConfigFactory
        .parseString(s"""
          akka.remote.netty.tcp.port=$port
          akka.remote.artery.canonical.port=$port
          """)
        .withFallback(ConfigFactory.parseString("akka.cluster.roles = [compute]"))
        .withFallback(ConfigFactory.load("stats1"))

      val system = ActorSystem("ClusterSystem", config)

      system.actorOf(Props[StatsWorker], name = "statsWorker")
      system.actorOf(Props[StatsService], name = "statsService")
    }
}

object StatsSampleClient {
  def main(args: Array[String]): Unit = {
    // note that client is not a compute node, role not defined
    val system = ActorSystem("ClusterSystem")
    system.actorOf(Props(classOf[StatsSampleClient], "/user/statsService"), "client")
  }
}

class StatsSampleClient(servicePath: String) extends Actor {
  val cluster = Cluster(context.system)

  val servicePathElements = servicePath match {
    case RelativeActorPath(elements) => elements
    case _ =>
      throw new IllegalArgumentException("servicePath [%s] is not a valid relative actor path" format servicePath)
  }

  import context.dispatcher
  val tickTask = context.system.scheduler.schedule(2.seconds, 2.seconds, self, "tick")

  var nodes = Set.empty[Address]

  override def preStart(): Unit =
    cluster.subscribe(self, classOf[MemberEvent], classOf[ReachabilityEvent])

  override def postStop(): Unit = {
    cluster.unsubscribe(self)
    tickTask.cancel()
  }

  def receive = {
    case "tick" if nodes.nonEmpty =>
      // just pick any one
      val address = nodes.toIndexedSeq(ThreadLocalRandom.current.nextInt(nodes.size))
      val service = context.actorSelection(RootActorPath(address) / servicePathElements)
      service ! StatsJob("this is the text that will be analyzed")
    case result: StatsResult =>
      println(result)
    case failed: JobFailed =>
      println(failed)
    case state: CurrentClusterState =>
      nodes = state.members.collect {
        case m if m.hasRole("compute") && m.status == MemberStatus.Up => m.address
      }
    case MemberUp(m) if m.hasRole("compute")        => nodes += m.address
    case other: MemberEvent                         => nodes -= other.member.address
    case UnreachableMember(m)                       => nodes -= m.address
    case ReachableMember(m) if m.hasRole("compute") => nodes += m.address
  }
}
Example 7
Source File: TSQR.scala From SparkAndMPIFactorizations with MIT License
package edu.berkeley.cs.amplab.mlmatrix

import java.util.concurrent.ThreadLocalRandom

import scala.collection.mutable.ArrayBuffer

import breeze.linalg._

import edu.berkeley.cs.amplab.mlmatrix.util.QRUtils
import edu.berkeley.cs.amplab.mlmatrix.util.Utils

import org.apache.spark.rdd.RDD
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.Accumulator
import org.apache.spark.SparkContext._

import java.util.Calendar
import java.text.SimpleDateFormat

class modifiedTSQR extends Serializable {

  def report(message: String, verbose: Boolean = true) = {
    val now       = Calendar.getInstance().getTime()
    val formatter = new SimpleDateFormat("H:m:s")
    if (verbose) {
      println("STATUS REPORT (" + formatter.format(now) + "): " + message)
    }
  }

  private def reduceQR(
      acc: Accumulator[Double],
      a: Tuple2[DenseVector[Double], DenseMatrix[Double]],
      b: Tuple2[DenseVector[Double], DenseMatrix[Double]]): Tuple2[DenseVector[Double], DenseMatrix[Double]] = {
    val begin       = System.nanoTime
    val outmat      = QRUtils.qrR(DenseMatrix.vertcat(a._2, b._2), false)
    val outcolnorms = a._1 + b._1
    acc += ((System.nanoTime - begin) / 1e6)
    (outcolnorms, outmat)
  }
}
Example 8
Source File: Retry.scala From futiles with Apache License 2.0
package markatta.futiles

import java.util.concurrent.{ThreadLocalRandom, TimeUnit}

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Random

object Retry {

  private val alwaysRetry: Throwable => Boolean = _ => true

  def retryWithBackOff[A](
    times: Int,
    backOffUnit: FiniteDuration,
    shouldRetry: Throwable => Boolean = alwaysRetry
  )(fBlock: => Future[A])(implicit ec: ExecutionContext): Future[A] =
    try {
      if (times <= 1) fBlock
      else retryWithBackOffLoop(times, 1, backOffUnit, shouldRetry)(fBlock)
    } catch {
      // failure to actually create the future
      case x: Throwable => Future.failed(x)
    }

  private def retryWithBackOffLoop[A](
    totalTimes: Int,
    timesTried: Int,
    backOffUnit: FiniteDuration,
    shouldRetry: Throwable => Boolean
  )(fBlock: => Future[A])(implicit ec: ExecutionContext): Future[A] =
    if (totalTimes <= timesTried) fBlock
    else
      fBlock.recoverWith {
        case ex: Throwable if shouldRetry(ex) =>
          val timesTriedNow = timesTried + 1
          val backOff       = nextBackOff(timesTriedNow, backOffUnit)
          Timeouts
            .timeout(backOff)(())
            .flatMap(
              _ =>
                retryWithBackOffLoop(
                  totalTimes,
                  timesTriedNow,
                  backOffUnit,
                  shouldRetry
                )(fBlock)
            )
      }

  private[futiles] def nextBackOff(
    tries: Int,
    backOffUnit: FiniteDuration
  ): FiniteDuration = {
    require(tries > 0, "tries should start from 1")
    val rng = new Random(ThreadLocalRandom.current())
    // jitter between 0.5 and 1.5
    val jitter = 0.5 + rng.nextDouble()
    val factor = math.pow(2, tries) * jitter
    FiniteDuration(
      (backOffUnit.toMillis * factor).toLong,
      TimeUnit.MILLISECONDS
    )
  }
}
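Since the jitter factor is drawn from [0.5, 1.5), the n-th retry waits roughly 2^n x backOffUnit, give or take 50%. A small illustrative check of the deterministic part of that curve, with the jitter held at 1.0 (the helper name is hypothetical, not part of futiles):

import scala.concurrent.duration._

object BackOffCurve extends App {
  // The same formula as nextBackOff above, minus the random jitter.
  def base(tries: Int, unit: FiniteDuration): FiniteDuration =
    (unit.toMillis * math.pow(2, tries)).toLong.millis

  (1 to 4).foreach(n => println(s"try $n -> ${base(n, 100.millis)}"))
  // try 1 -> 200 milliseconds ... try 4 -> 1600 milliseconds
}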
Example 9
Source File: IotManager.scala From akka-iot-mqtt-v2 with GNU Lesser General Public License v3.0
package akkaiot

import java.util.concurrent.ThreadLocalRandom

import scala.concurrent.duration._

import akka.actor.{ Props, ActorRef, Actor, ActorLogging, Terminated }
import akka.pattern._
import akka.util.Timeout
import akka.cluster.client.ClusterClient.SendToAll

import com.sandinh.paho.akka._
import com.sandinh.paho.akka.MqttPubSub._

object IotManager {
  def props(clusterClient: ActorRef, numOfDevices: Int, mqttPubSub: ActorRef): Props = Props(
    new IotManager(clusterClient, numOfDevices, mqttPubSub)
  )

  case class Ok(work: Work)
  case class NotOk(work: Work)
}

class IotManager(clusterClient: ActorRef, numOfDevices: Int, mqttPubSub: ActorRef) extends Actor with ActorLogging {
  import IotManager._
  import context.dispatcher

  private var idToActorMap = Map.empty[String, ActorRef]
  private var actorToIdMap = Map.empty[ActorRef, String]

  val deviceTypes = List("thermostat", "lamp", "security-alarm")

  def random = ThreadLocalRandom.current

  mqttPubSub ! Subscribe(MqttConfig.topic, self)

  override def preStart(): Unit = {
    log.info("IoT Manager -> Creating devices ...")

    (1 to numOfDevices).foreach { n =>
      val deviceType = deviceTypes(random.nextInt(0, deviceTypes.size))
      val deviceId   = (1000 + n).toString

      val deviceActor = context.actorOf(Device.props(deviceType, deviceId, mqttPubSub), s"$deviceType-$deviceId")
      context.watch(deviceActor)

      actorToIdMap += deviceActor -> deviceId
      idToActorMap += deviceId -> deviceActor
    }

    log.info("IoT Manager -> Created {} devices!", numOfDevices)
  }

  override def postStop(): Unit = log.info("IoT Manager -> Stopped")

  override def receive: Receive = {
    case SubscribeAck(Subscribe(MqttConfig.topic, `self`, _)) => {
      log.info("IoT Manager -> MQTT subscription to {} acknowledged", MqttConfig.topic)
      context.become(ready)
    }
    case x => log.info("IoT Manager -> ALERT: Problem receiving message ... {}", x)
  }

  def ready: Receive = {
    case msg: Message => {
      val work = MqttConfig.readFromByteArray[Work](msg.payload)
      log.info("IoT Agent -> Received MQTT message: {}-{} | State {} | Setting {}",
        work.deviceType, work.deviceId, work.currState, work.currSetting)

      log.info("IoT Manager -> Sending work to cluster master")
      implicit val timeout = Timeout(5.seconds)
      (clusterClient ? SendToAll("/user/master/singleton", work)) map {
        case Master.Ack(_) => Ok(work)
      } recover {
        case _ => NotOk(work)
      } pipeTo {
        idToActorMap.getOrElse(work.deviceId, `self`)
      }
    }

    case result @ WorkResult(workId, deviceType, deviceId, nextState, nextSetting) =>
      idToActorMap.get(deviceId) match {
        case Some(deviceActor) =>
          deviceActor forward result
          log.info("IoT Manager -> Work result forwarded to {}-{} ", deviceType, deviceId)
        case None =>
          log.info("IoT Manager -> ALERT: {}-{} NOT in registry!", deviceType, deviceId)
      }

    case Terminated(deviceActor) =>
      val deviceId = actorToIdMap(deviceActor)
      log.info("IoT Manager -> ALERT: Device actor terminated! Device Id {} will be removed.", deviceId)
      actorToIdMap -= deviceActor
      idToActorMap -= deviceId

    case Ok(work) =>
      log.info("IoT Manager -> ALERT: Receive ack from Master but Device Id of {}-{} NOT in registry!", work.deviceType, work.deviceId)

    case NotOk(work) =>
      log.info("IoT Manager -> ALERT: Did not receive ack from Master and Device Id of {}-{} NOT in registry!", work.deviceType, work.deviceId)

    case x =>
      log.info("IoT Manager -> ALERT: Problem with received message ... {}", x)
  }
}
Example 10
Source File: Device.scala From akka-iot-mqtt-v2 with GNU Lesser General Public License v3.0
package akkaiot

import java.util.UUID
import java.util.concurrent.ThreadLocalRandom

import scala.concurrent.duration._

import akka.actor.{ Props, ActorRef, Actor, ActorLogging }

import com.sandinh.paho.akka._
import com.sandinh.paho.akka.MqttPubSub._

object Device {
  def props(deviceType: String, deviceId: String, mqttPubSub: ActorRef): Props =
    Props(new Device(deviceType, deviceId, mqttPubSub))

  case object Tick
}

class Device(deviceType: String, deviceId: String, mqttPubSub: ActorRef) extends Actor with ActorLogging {
  import Device._
  import context.dispatcher

  // deviceTypes = List("thermostat", "lamp", "security-alarm")
  private var opState: Int = 0 // 0|1|2 (OFF|HEAT|COOL) for thermostat, 0|1 (OFF|ON) for lamp|security-alarm
  private var setting: Int = 0 // 60-75 for thermostat, 1-3 for lamp, 1-5 for security-alarm

  def scheduler = context.system.scheduler
  def random    = ThreadLocalRandom.current

  def nextWorkId(): String = UUID.randomUUID().toString

  override def preStart(): Unit = {
    opState = deviceType match {
      case "thermostat"     => random.nextInt(0, 2 + 1)
      case "lamp"           => random.nextInt(0, 1 + 1)
      case "security-alarm" => random.nextInt(0, 1 + 1)
      case _                => 0
    }

    setting = deviceType match {
      case "thermostat"     => random.nextInt(60, 75 + 1)
      case "lamp"           => random.nextInt(1, 3 + 1)
      case "security-alarm" => random.nextInt(1, 5 + 1)
      case _                => 0
    }

    scheduler.scheduleOnce(5.seconds, self, Tick)
    log.info("Device -> {}-{} started", deviceType, deviceId)
  }

  override def postRestart(reason: Throwable): Unit = ()

  override def postStop(): Unit = log.info("Device -> {}-{} stopped.", deviceType, deviceId)

  def receive = {
    case Tick => {
      val workId = nextWorkId()
      val work   = Work(workId, deviceType, deviceId, opState, setting)
      log.info("Device -> {}-{} with state {} created work (Id: {}) ", deviceType, deviceId, opState, workId)

      val payload = MqttConfig.writeToByteArray(work)

      log.info("Device -> Publishing MQTT Topic {}: Device {}-{}", MqttConfig.topic, deviceType, deviceId)

      mqttPubSub ! new Publish(MqttConfig.topic, payload)

      context.become(waitAccepted(work, payload), discardOld = false)
    }

    case WorkResult(workId, deviceType, deviceId, nextState, nextSetting) => {
      log.info("Device -> {}-{} received work result with work Id {}.", deviceType, deviceId, workId)

      opState = nextState
      setting = nextSetting

      log.info("Device -> Updated {}-{} with state {} and setting {}.", deviceType, deviceId, opState, setting)
    }
  }

  def waitAccepted(work: Work, payload: Array[Byte]): Receive = {
    case IotManager.Ok(_) =>
      log.info("Device -> Work for {}-{} accepted | Work Id {}", work.deviceType, work.deviceId, work.workId)
      context.unbecome()
      scheduler.scheduleOnce(random.nextInt(3, 10).seconds, self, Tick)

    case IotManager.NotOk(_) =>
      log.info("Device -> ALERT: Work from {}-{} NOT ACCEPTED | Work Id {} | Retrying ... ",
        work.deviceType, work.deviceId, work.workId)
      scheduler.scheduleOnce(3.seconds, mqttPubSub, new Publish(MqttConfig.topic, payload))
  }
}
Example 11
Source File: WorkProcessor.scala From akka-iot-mqtt-v2 with GNU Lesser General Public License v3.0
package akkaiot

import akka.actor.{ Props, Actor, ActorLogging }
import java.util.concurrent.ThreadLocalRandom

object WorkProcessor {
  def props(): Props = Props(new WorkProcessor)

  case class DeviceStateSetting(deviceType: String, state: Int, setting: Int)
}

class WorkProcessor extends Actor with ActorLogging {
  import WorkProcessor._

  def random = ThreadLocalRandom.current

  def receive = {
    case work @ Work(workId, deviceType, deviceId, state, setting) => {
      val newStateSetting: DeviceStateSetting = deviceType match {
        case "thermostat" =>
          nextDeviceStateSetting(work, Map(0 -> "OFF", 1 -> "HEAT", 2 -> "COOL"), "temperature", (60, 75), (-2, 2))
        case "lamp" =>
          nextDeviceStateSetting(work, Map(0 -> "OFF", 1 -> "ON"), "brightness", (1, 3), (-1, 1))
        case "security-alarm" =>
          nextDeviceStateSetting(work, Map(0 -> "OFF", 1 -> "ON"), "level", (1, 5), (-2, 2))
        case _ =>
          // Shouldn't happen (keep state/setting as is)
          log.info("Work Processor -> ALERT: Device type undefined! {}-{}", work.deviceType, work.deviceId)
          DeviceStateSetting(deviceType, state, setting)
      }

      val result = WorkResult(workId, deviceType, deviceId, newStateSetting.state, newStateSetting.setting)
      sender() ! Worker.WorkProcessed(result)
    }
    case _ => log.info("Work Processor -> ALERT: Received unknown message!")
  }

  def nextDeviceStateSetting(
      work: Work,
      stateMap: Map[Int, String],
      settingType: String,
      settingLimit: (Int, Int),
      changeLimit: (Int, Int)): DeviceStateSetting = {

    val nextState = random.nextInt(0, stateMap.size)
    val nextStateText =
      if (nextState == work.currState) "Keep state " + stateMap(work.currState)
      else "Switch to " + stateMap(nextState)

    val randomChange  = random.nextInt(changeLimit._1, changeLimit._2 + 1)
    val randomSetting = work.currSetting + randomChange
    val nextSettingChange =
      if (randomChange == 0) 0
      else {
        if (randomSetting < settingLimit._1 || randomSetting > settingLimit._2) 0 else randomChange
      }
    val nextSetting = work.currSetting + nextSettingChange
    val nextSettingText =
      if (nextSettingChange == 0) s"NO $settingType change"
      else {
        if (nextSettingChange > 0) s"RAISE $settingType by $nextSettingChange"
        else s"LOWER $settingType by $nextSettingChange"
      }

    log.info("Work Processor -> {}-{}: {} | {}", work.deviceType, work.deviceId, nextStateText, nextSettingText)
    DeviceStateSetting(work.deviceType, nextState, nextSetting)
  }
}
Example 12
Source File: Randomizer.scala From tepkin with Apache License 2.0
package net.fehmicansaglam.tepkin.util

import java.util.concurrent.ThreadLocalRandom

trait Randomizer {

  def random: ThreadLocalRandom = ThreadLocalRandom.current()

  def randomString(alphabet: String)(n: Int): String = {
    Stream.continually(random.nextInt(alphabet.size)).map(alphabet).take(n).mkString
  }

  def randomString(n: Int): String = {
    randomString {
      """!"#$%&'()*+-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~"""
    }(n)
  }
}
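Mixing the trait in gives both overloads: one draws from the full printable alphabet, the curried one from any alphabet you pass. A hypothetical usage sketch (the demo object is illustrative):

import net.fehmicansaglam.tepkin.util.Randomizer

object RandomizerDemo extends App with Randomizer {
  println(randomString(8))          // 8 chars from the full printable alphabet
  println(randomString("ACGT")(12)) // 12 chars drawn only from "ACGT"
}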
Example 13
Source File: Distribution.scala From reactive-async with BSD 2-Clause "Simplified" License
package com.phaller.rasync.npv

import java.util.concurrent.ThreadLocalRandom

trait Distribution {
  def sample(): Double
  def getMax(): Double
  def getMin(): Double
}

class SingleValueDistribution(value: Double) extends Distribution {
  override def sample(): Double = value
  override def getMax(): Double = value
  override def getMin(): Double = value
}

class TriangleDistribution(min: Double, likely: Double, max: Double) extends Distribution {
  assert(max >= likely)
  assert(likely >= min)

  val fc: Double = (likely - min) / (max - min)

  override def sample(): Double = {
    val u = ThreadLocalRandom.current().nextDouble()
    if (u < fc) {
      min + Math.sqrt(u * (max - min) * (likely - min))
    } else {
      max - Math.sqrt((1 - u) * (max - min) * (max - likely))
    }
  }

  override def getMin(): Double = min

  def getLikely(): Double = likely

  override def getMax(): Double = max
}
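TriangleDistribution.sample inverts the triangular CDF: a uniform draw below fc lands on the rising edge of the triangle, anything else on the falling edge. A quick hypothetical sanity check, using the fact that a triangular distribution's mean is (min + likely + max) / 3:

object TriangleCheck extends App {
  val d = new TriangleDistribution(1.0, 2.0, 4.0)
  val n = 100000
  val mean = Iterator.continually(d.sample()).take(n).sum / n
  // Expected mean: (1.0 + 2.0 + 4.0) / 3 ≈ 2.333
  println(f"empirical mean = $mean%.3f")
}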
Example 14
Source File: SessionUtil.scala From akka-http-session with Apache License 2.0
package com.softwaremill.session

import java.math.BigInteger
import java.util.Base64
import java.util.concurrent.ThreadLocalRandom

object SessionUtil {
  def randomString(length: Int): String = {
    // http://stackoverflow.com/questions/41107/how-to-generate-a-random-alpha-numeric-string
    val random = ThreadLocalRandom.current()
    new BigInteger(length * 5, random).toString(32) // because 2^5 = 32
  }

  def randomServerSecret(): String = randomString(128)

  // Do not change this unless you understand the security issues behind timing attacks.
  // This method intentionally runs in constant time if the two strings have the same length.
  // If it didn't, it would be vulnerable to a timing attack.
  def constantTimeEquals(a: String, b: String): Boolean = {
    if (a.length != b.length) {
      false
    } else {
      var equal = 0
      for (i <- Array.range(0, a.length)) {
        equal |= a(i) ^ b(i)
      }
      equal == 0
    }
  }

  private val HexArray = "0123456789ABCDEF".toCharArray

  def toHexString(bytes: Array[Byte]): String = {
    // from https://stackoverflow.com/questions/9655181/how-to-convert-a-byte-array-to-a-hex-string-in-java
    val hexChars = new Array[Char](bytes.length * 2)
    var j = 0
    while (j < bytes.length) {
      val v = bytes(j) & 0xFF
      hexChars(j * 2) = HexArray(v >>> 4)
      hexChars(j * 2 + 1) = HexArray(v & 0x0F)
      j += 1
    }
    new String(hexChars)
  }

  def hexStringToByte(hexString: String): Array[Byte] = {
    // https://stackoverflow.com/questions/140131/convert-a-string-representation-of-a-hex-dump-to-a-byte-array-using-java
    val len  = hexString.length
    val data = new Array[Byte](len / 2)
    var i = 0
    while (i < len) {
      data(i / 2) = ((Character.digit(hexString.charAt(i), 16) << 4) + Character.digit(hexString.charAt(i + 1), 16)).toByte
      i += 2
    }
    data
  }

  def toBase64_v0_5_2(bytes: Array[Byte]): String = {
    Base64.getUrlEncoder.encodeToString(bytes)
  }

  def parseBase64_v0_5_2(s: String): Array[Byte] = {
    Base64.getUrlDecoder.decode(s)
  }
}
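The randomString trick above leans on new BigInteger(numBits, rng) returning a uniform value in [0, 2^numBits); since each base-32 digit encodes 5 bits, length * 5 bits gives roughly length characters. A standalone sketch (the 16 is an arbitrary example length):

import java.math.BigInteger
import java.util.concurrent.ThreadLocalRandom

object TokenDemo extends App {
  val length = 16
  // May come out shorter than `length` if the leading bits are zero.
  val token = new BigInteger(length * 5, ThreadLocalRandom.current()).toString(32)
  println(token)
}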
Example 15
Source File: Client.scala From zio-metrics with Apache License 2.0
package zio.metrics

import zio.{ Fiber, Queue, RIO, Task, UIO, URIO, ZManaged, ZQueue }
import zio.clock.Clock
import zio.stream.ZStream
import zio.duration.Duration.Finite
import zio.metrics.encoders._

import java.util.concurrent.ThreadLocalRandom

class Client(val bufferSize: Long, val timeout: Long, val queueCapacity: Int, host: Option[String], port: Option[Int]) {

  type UDPQueue = ZQueue[Nothing, Any, Encoder, Throwable, Nothing, Metric]

  val queue: UIO[Queue[Metric]] = ZQueue.bounded[Metric](queueCapacity)

  private val duration: Finite = Finite(timeout)

  val udpClient: ZManaged[Any, Throwable, UDPClient] = (host, port) match {
    case (None, None)       => UDPClient()
    case (Some(h), Some(p)) => UDPClient(h, p)
    case (Some(h), None)    => UDPClient(h, 8125)
    case (None, Some(p))    => UDPClient("localhost", p)
  }

  val sample: List[Metric] => Task[List[Metric]] = metrics =>
    Task(
      metrics.filter(
        m =>
          m match {
            case sm: SampledMetric =>
              if (sm.sampleRate >= 1.0 || ThreadLocalRandom.current.nextDouble <= sm.sampleRate) true else false
            case _ => true
          }
      )
    )

  val udp: List[Metric] => RIO[Encoder, List[Int]] = metrics =>
    for {
      sde  <- RIO.environment[Encoder]
      flt  <- sample(metrics)
      msgs <- RIO.foreach(flt)(sde.get.encode(_))
      ints <- RIO.foreach(msgs.flatten)(s => udpClient.use(_.send(s)))
    } yield ints

  def listen(implicit queue: UDPQueue): URIO[Client.ClientEnv, Fiber[Throwable, Unit]] =
    listen[List, Int](udp)

  def listen[F[_], A](
    f: List[Metric] => RIO[Encoder, F[A]]
  )(implicit queue: UDPQueue): URIO[Client.ClientEnv, Fiber[Throwable, Unit]] =
    ZStream
      .fromQueue[Encoder, Throwable, Metric](queue)
      .groupedWithin(bufferSize, duration)
      .mapM(l => f(l))
      .runDrain
      .fork

  val send: Queue[Metric] => Metric => Task[Unit] = q =>
    metric =>
      for {
        _ <- q.offer(metric)
      } yield ()

  val sendAsync: Queue[Metric] => Metric => Task[Unit] = q =>
    metric =>
      for {
        _ <- q.offer(metric).fork
      } yield ()
}

object Client {

  type ClientEnv = Encoder with Clock //with Console

  def apply(): Client = apply(5, 5000, 100, None, None)

  def apply(bufferSize: Long, timeout: Long): Client =
    apply(bufferSize, timeout, 100, None, None)

  def apply(bufferSize: Long, timeout: Long, queueCapacity: Int): Client =
    apply(bufferSize, timeout, queueCapacity, None, None)

  def apply(bufferSize: Long, timeout: Long, queueCapacity: Int, host: Option[String], port: Option[Int]): Client =
    new Client(bufferSize, timeout, queueCapacity, host, port)
}
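The sample step keeps every non-sampled metric, and keeps a SampledMetric only when a fresh uniform draw falls at or below its rate. Stripped of ZIO, the decision itself is one line; a hypothetical sketch:

import java.util.concurrent.ThreadLocalRandom

object SampleRate extends App {
  // Keep roughly `rate` of all events; rates >= 1.0 keep everything.
  def keep(rate: Double): Boolean =
    rate >= 1.0 || ThreadLocalRandom.current.nextDouble <= rate

  val kept = (1 to 10000).count(_ => keep(0.1))
  println(s"kept $kept of 10000 events (~10%)")
}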
Example 16
Source File: AkkaDiscoveryHelper.scala From lagom-akka-discovery-service-locator with Apache License 2.0
package com.lightbend.lagom.internal.client

import java.net.URI
import java.net.URISyntaxException
import java.util.concurrent.ThreadLocalRandom
import java.util.concurrent.TimeUnit

import akka.discovery.ServiceDiscovery
import akka.discovery.ServiceDiscovery.ResolvedTarget
import com.typesafe.config.Config
import org.slf4j.LoggerFactory

import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.duration._

private[lagom] class AkkaDiscoveryHelper(config: Config, serviceDiscovery: ServiceDiscovery)(
    implicit ec: ExecutionContext) {

  private val logger = LoggerFactory.getLogger(this.getClass)

  private val serviceNameMapper = new ServiceNameMapper(config)
  private val lookupTimeout     = config.getDuration("lookup-timeout", TimeUnit.MILLISECONDS).millis

  def locateAll(name: String): Future[Seq[URI]] = {
    val serviceLookup = serviceNameMapper.mapLookupQuery(name)
    serviceDiscovery
      .lookup(serviceLookup.lookup, lookupTimeout)
      .map { resolved =>
        logger.debug("Retrieved addresses: {}", resolved.addresses)
        resolved.addresses.map(target => toURI(target, serviceLookup))
      }
  }

  def locate(name: String): Future[Option[URI]] = locateAll(name).map(selectRandomURI)

  private def toURI(resolvedTarget: ResolvedTarget, lookup: ServiceLookup): URI = {
    val port   = resolvedTarget.port.getOrElse(-1)
    val scheme = lookup.scheme.orNull
    try {
      new URI(scheme,              // scheme
              null,                // userInfo
              resolvedTarget.host, // host
              port,                // port
              null,                // path
              null,                // query
              null                 // fragment
      )
    } catch {
      case e: URISyntaxException => throw new RuntimeException(e)
    }
  }

  private def selectRandomURI(uris: Seq[URI]) = uris match {
    case Nil      => None
    case Seq(one) => Some(one)
    case many     => Some(many(ThreadLocalRandom.current().nextInt(many.size)))
  }
}
Example 17
Source File: WindowedWordCount.scala From beam-scala-examples with Apache License 2.0
package org.apache.beam.examples

import java.util.concurrent.ThreadLocalRandom

import org.apache.beam.sdk.Pipeline
import org.apache.beam.sdk.io.TextIO
import org.apache.beam.sdk.options._
import org.apache.beam.sdk.transforms.DoFn.ProcessElement
import org.apache.beam.sdk.transforms.windowing.{FixedWindows, Window}
import org.apache.beam.sdk.transforms.{Count, DoFn, MapElements, ParDo}
import org.joda.time.{Duration, Instant}

object WindowedWordCount {

  def main(args: Array[String]): Unit = {
    val options = PipelineOptionsFactory
      .fromArgs(args: _*)
      .withValidation()
      .as(classOf[WindowedWordCountOptions])

    val minTimestamp = new Instant(options.getMinTimestampMillis)
    val maxTimestamp = new Instant(options.getMaxTimestampMillis)

    val pipeline = Pipeline.create(options)

    pipeline.apply("ReadFiles", TextIO.read().from(options.getInputFile))
      .apply(ParDo.of(new AddTimestampFn(minTimestamp, maxTimestamp)))
      .apply(Window.into[String](FixedWindows.of(Duration.standardMinutes(options.getWindowSize))))
      .apply(ParDo.of(new ExtractWords))
      .apply(Count.perElement())
      .apply(MapElements.via(new FormatResult))
      .apply("WriteWords", TextIO.write()
        .to(options.getOutput)
        .withWindowedWrites()
        .withNumShards(options.getNumShards))

    pipeline.run().waitUntilFinish()
  }
}

// ======================================= Options =============================================

trait WindowedWordCountOptions extends WordCountOptions {
  @Description("Fixed window duration, in minutes")
  @Default.Long(1)
  def getWindowSize: Long
  def setWindowSize(value: Long): Unit

  @Description("Minimum randomly assigned timestamp, in milliseconds-since-epoch")
  @Default.InstanceFactory(classOf[DefaultToCurrentSystemTime])
  def getMinTimestampMillis: Long
  def setMinTimestampMillis(value: Long): Unit

  @Description("Maximum randomly assigned timestamp, in milliseconds-since-epoch")
  @Default.InstanceFactory(classOf[DefaultToMinTimestampPlusOneHour])
  def getMaxTimestampMillis: Long
  def setMaxTimestampMillis(value: Long): Unit

  @Description("Fixed number of shards to produce per window, or null for runner-chosen sharding")
  @Default.Integer(1)
  def getNumShards: Integer
  def setNumShards(numShards: Integer): Unit
}

// ======================================== UDFs ================================================

class AddTimestampFn(minTimestamp: Instant, maxTimestamp: Instant) extends DoFn[String, String] {
  @ProcessElement
  def processElement(c: ProcessContext): Unit = {
    val randomTS = new Instant(ThreadLocalRandom.current.nextLong(minTimestamp.getMillis, maxTimestamp.getMillis))
    c.outputWithTimestamp(c.element(), new Instant(randomTS))
  }
}

// ====================================== Defaults ==============================================

class DefaultToCurrentSystemTime extends DefaultValueFactory[Long] {
  override def create(options: PipelineOptions) = {
    System.currentTimeMillis()
  }
}

class DefaultToMinTimestampPlusOneHour extends DefaultValueFactory[Long] {
  override def create(options: PipelineOptions): Long = {
    options.as(classOf[WindowedWordCountOptions])
      .getMinTimestampMillis + Duration.standardHours(1).getMillis
  }
}
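AddTimestampFn relies on nextLong(origin, bound) being uniform over [origin, bound) to scatter elements across the configured time range, so windowing downstream sees a realistic spread. The call in isolation (the epoch-millis values are arbitrary examples):

import java.util.concurrent.ThreadLocalRandom

object RandomTimestamp extends App {
  val minMillis = 1600000000000L // example lower bound
  val maxMillis = 1600003600000L // example upper bound, one hour later
  val ts = ThreadLocalRandom.current.nextLong(minMillis, maxMillis)
  println(ts) // uniform in [minMillis, maxMillis)
}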
Example 18
Source File: package.scala From amadou with Apache License 2.0
package com.mediative.amadou

import com.google.api.services.bigquery.model._
import com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystem
import com.google.cloud.hadoop.io.bigquery._
import org.apache.hadoop.fs.{FileSystem, Path}
import net.ceedubs.ficus.readers.ValueReader
import net.ceedubs.ficus.FicusInstances
import org.apache.spark.sql.{Dataset, SparkSession, Encoder}
import java.util.concurrent.ThreadLocalRandom
import scala.collection.JavaConversions._

package object bigquery extends FicusInstances {

  object CreateDisposition extends Enumeration {
    val CREATE_IF_NEEDED, CREATE_NEVER = Value
  }

  object WriteDisposition extends Enumeration {
    val WRITE_TRUNCATE, WRITE_APPEND, WRITE_EMPTY = Value
  }

  val BQ_CSV_DATE_FORMAT = "yyyy-MM-dd HH:mm:ss zzz"

  object TableNotFound {
    import com.google.api.client.googleapis.json.GoogleJsonResponseException
    import com.google.api.client.googleapis.json.GoogleJsonError
    import scala.collection.JavaConverters._

    def unapply(error: Throwable): Option[GoogleJsonError.ErrorInfo] = error match {
      case error: GoogleJsonResponseException =>
        Some(error.getDetails)
          .filter(_.getCode == 404)
          .flatMap(_.getErrors.asScala.find(_.getReason == "notFound"))
      case _ => None
    }
  }

  def tableHasDataForDate(
      spark: SparkSession,
      table: TableReference,
      date: java.sql.Date,
      column: String): Boolean = {
    val bq = BigQueryClient.getInstance(spark.sparkContext.hadoopConfiguration)
    bq.hasDataForDate(table, date, column)
  }

  // NOTE: this listing elides the header of an enclosing implicit class
  // that supplies the `self` (a Dataset), `conf` and `bq` references below.
    def saveAsBigQueryTable(
        tableRef: TableReference,
        writeDisposition: WriteDisposition.Value,
        createDisposition: CreateDisposition.Value): Unit = {
      val bucket = conf.get(BigQueryConfiguration.GCS_BUCKET_KEY)
      val temp =
        s"spark-bigquery-${System.currentTimeMillis()}=${ThreadLocalRandom.current.nextInt(Int.MaxValue)}"
      val gcsPath = s"gs://$bucket/spark-bigquery-tmp/$temp"
      self.write.json(gcsPath)

      val schemaFields = self.schema.fields.map { field =>
        import org.apache.spark.sql.types._

        val fieldType = field.dataType match {
          case BooleanType    => "BOOLEAN"
          case LongType       => "INTEGER"
          case IntegerType    => "INTEGER"
          case StringType     => "STRING"
          case DoubleType     => "FLOAT"
          case TimestampType  => "TIMESTAMP"
          case _: DecimalType => "INTEGER"
        }
        new TableFieldSchema().setName(field.name).setType(fieldType)
      }.toList

      val tableSchema = new TableSchema().setFields(schemaFields)
      bq.load(gcsPath, tableRef, tableSchema, writeDisposition, createDisposition)
      delete(new Path(gcsPath))
    }

    private def delete(path: Path): Unit = {
      val fs = FileSystem.get(path.toUri, conf)
      fs.delete(path, true)
      ()
    }
  } // closes the elided implicit class

  implicit val valueReader: ValueReader[BigQueryTable.PartitionStrategy] =
    ValueReader[String].map {
      _ match {
        case "month" => BigQueryTable.PartitionByMonth
        case "day"   => BigQueryTable.PartitionByDay
        case other   => sys.error(s"Unknown partition strategy")
      }
    }
}
Example 19
Source File: TextMessageGenerator.scala From akka_streams_tutorial with MIT License
package alpakka.kafka

import java.util.concurrent.ThreadLocalRandom

import scala.collection.mutable.ListBuffer

object TextMessageGenerator {
  val alphabetSet: Set[Char] = ('a' to 'z').toSet
  val alphabets = alphabetSet.toList

  val vowelSet: Set[Char] = Set('a', 'e', 'i', 'o', 'u')
  val vowels = vowelSet.toList

  val consonantSet: Set[Char] = alphabetSet -- vowelSet
  val consonants = consonantSet.toList

  // Subset of Punct character class """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
  val puncts: String = """.,;?!"""

  def random = ThreadLocalRandom.current

  def randomChar: Char = alphabets(random.nextInt(0, alphabets.length))

  def mostlyVowelChar: Char = {
    // 4/5th chance of vowel
    val isVowel: Boolean = if (random.nextInt(0, 5) > 0) true else false
    if (isVowel) vowels(random.nextInt(0, vowels.length)) else consonants(random.nextInt(0, consonants.length))
  }

  def maybeUpperChar: Char = {
    // 1/5th chance of uppercase
    val isUppercase: Boolean = if (random.nextInt(0, 5) == 0) true else false
    if (isUppercase) Character.toUpperCase(randomChar) else randomChar
  }

  // Generate a word within a range of lengths
  def genRandWord(minLen: Int, maxLen: Int): String = {
    var word = new ListBuffer[Char]()

    val wordLen: Int = random.nextInt(minLen, maxLen + 1)

    for (i <- 1 to wordLen) {
      val char = if (i == 1) maybeUpperChar else if (i % 2 == 0) mostlyVowelChar else randomChar
      word += char
    }

    word.mkString
  }

  def genRandTextWithKeyword(minWordsInText: Int, maxWordsInText: Int,
                             minWordLen: Int = 2, maxWordLen: Int = 8,
                             minWordsInClause: Int = 1, maxWordsInClause: Int = 10,
                             keyword: String): String = {

    val randomLevel: Double = 0.05
    var text = new ListBuffer[String]()

    val numWordsInText: Int = random.nextInt(minWordsInText, maxWordsInText + 1)

    var wordCount: Int = 0
    var textLen: Int = 0

    while (wordCount < numWordsInText) {
      val numWords = random.nextInt(minWordsInClause, maxWordsInClause + 1)
      val numWordsInClause = if (numWordsInText - wordCount < numWords) numWordsInText - wordCount else numWords

      var clauseLen: Int = 0

      // Generate a clause
      for (i <- 1 to numWordsInClause) {
        val word: String = genRandWord(minWordLen, maxWordLen)
        text += word
        if (math.random < randomLevel) text += " " + keyword

        clauseLen += word.length
        wordCount += 1

        if (i < numWordsInClause) {
          text += " "
          clauseLen += 1
        }
      }

      // Add a punctuation
      text += puncts.charAt(random.nextInt(0, puncts.length)).toString
      clauseLen += 1

      if (wordCount < numWordsInText) {
        text += " "
        clauseLen += 1
      }

      textLen += clauseLen
    }

    // println(s"textLen (in chars): is $textLen")
    text.mkString
  }
}
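Hypothetical usage: request a 40-to-60-word text and let the generator splice in the keyword after roughly 5% of words (the randomLevel above); the demo object is illustrative:

object TextDemo extends App {
  val text = alpakka.kafka.TextMessageGenerator.genRandTextWithKeyword(
    minWordsInText = 40,
    maxWordsInText = 60,
    keyword = "fakeNews"
  )
  println(text)
}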
Example 20
Source File: WordCountProducer.scala From akka_streams_tutorial with MIT License
package alpakka.kafka

import java.util
import java.util.concurrent.ThreadLocalRandom

import akka.actor.ActorSystem
import akka.kafka.ProducerMessage.Message
import akka.kafka.ProducerSettings
import akka.kafka.scaladsl.Producer
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.{Done, NotUsed}
import org.apache.kafka.clients.producer.{Partitioner, ProducerRecord}
import org.apache.kafka.common.errors.{NetworkException, UnknownTopicOrPartitionException}
import org.apache.kafka.common.serialization.StringSerializer
import org.apache.kafka.common.{Cluster, PartitionInfo}

import scala.concurrent.Future
import scala.concurrent.duration._

// NOTE: this listing elides the WordCountProducer object that defines
// `fakeNewsKeyword` referenced below.
class CustomPartitioner extends Partitioner {
  override def partition(topic: String, key: Any, keyBytes: Array[Byte], value: Any, valueBytes: Array[Byte], cluster: Cluster): Int = {
    val partitionInfoList: util.List[PartitionInfo] = cluster.availablePartitionsForTopic(topic)
    val partitionCount = partitionInfoList.size
    val fakeNewsPartition = 0

    //println("CustomPartitioner received key: " + key + " and value: " + value)

    if (value.toString.contains(WordCountProducer.fakeNewsKeyword)) {
      //println("CustomPartitioner send message: " + value + " to fakeNewsPartition")
      fakeNewsPartition
    }
    else ThreadLocalRandom.current.nextInt(1, partitionCount) //round robin
  }

  override def close(): Unit = {
    println("CustomPartitioner: " + Thread.currentThread + " received close")
  }

  override def configure(configs: util.Map[String, _]): Unit = {
    println("CustomPartitioner received configure with configuration: " + configs)
  }
}

object CustomPartitioner {
  private def deserialize[V](objectData: Array[Byte]): V =
    org.apache.commons.lang3.SerializationUtils.deserialize(objectData).asInstanceOf[V]
}
Example 21
Source File: WritePrimes.scala From akka_streams_tutorial with MIT License
package sample.graphdsl

import java.nio.file.Paths
import java.util.concurrent.ThreadLocalRandom

import akka.actor.ActorSystem
import akka.stream._
import akka.stream.scaladsl._
import akka.util.ByteString

import scala.concurrent.duration._
import scala.util.{Failure, Success}

object WritePrimes extends App {
  implicit val system = ActorSystem("WritePrimes")
  implicit val ec = system.dispatcher

  val maxRandomNumberSize = 100
  val primeSource: Source[Int, akka.NotUsed] =
    Source.fromIterator(() => Iterator.continually(ThreadLocalRandom.current().nextInt(maxRandomNumberSize)))
      .take(100)
      .filter(rnd => isPrime(rnd))
      // neighbor +2 is also prime?
      .filter(prime => isPrime(prime + 2))

  val fileSink = FileIO.toPath(Paths.get("target/primes.txt"))
  val slowSink = Flow[Int]
    .throttle(1, 1.seconds, 1, ThrottleMode.shaping)
    .map(i => ByteString(i.toString))
    .toMat(fileSink)((_, bytesWritten) => bytesWritten)
  val consoleSink = Sink.foreach[Int](each => println(s"Reached console sink: $each"))

  // Additional processing flow, to show the nature of the composition
  val sharedDoubler = Flow[Int].map(_ * 2)

  // send primes to both sinks using graph API
  val graph = GraphDSL.create(slowSink, consoleSink)((x, _) => x) { implicit builder => (slow, console) =>
    import GraphDSL.Implicits._
    val broadcastSplitter = builder.add(Broadcast[Int](2)) // the splitter - like a Unix tee

    // connect source to splitter, one side to slow sink, the other to console sink (both via sharedDoubler)
    primeSource ~> broadcastSplitter ~> sharedDoubler ~> slow
                   broadcastSplitter ~> sharedDoubler ~> console
    ClosedShape
  }
  val materialized = RunnableGraph.fromGraph(graph).run()

  materialized.onComplete {
    case Success(_) =>
      system.terminate()
    case Failure(e) =>
      println(s"Failure: ${e.getMessage}")
      system.terminate()
  }

  def isPrime(n: Int): Boolean = {
    if (n <= 1) false
    else if (n == 2) true
    else !(2 until n).exists(x => n % x == 0)
  }
}
Example 22
Source File: CalculateMedian.scala From akka_streams_tutorial with MIT License
package sample.stream

import java.util.concurrent.ThreadLocalRandom

import akka.actor.ActorSystem
import akka.stream.ThrottleMode
import akka.stream.scaladsl.Source

import scala.annotation.tailrec
import scala.concurrent.duration._

//noinspection LanguageFeature
object CalculateMedian {
  implicit val system = ActorSystem("CalculateMedian")
  implicit val ec = system.dispatcher

  def main(args: Array[String]) = {
    val maxRandomNumber = 100
    val source = Source.fromIterator(() => Iterator.continually(ThreadLocalRandom.current().nextDouble(maxRandomNumber)))

    source
      .throttle(1, 10.millis, 1, ThrottleMode.shaping)
      .groupedWithin(100, 1.second)
      //.map{each => println(each); each}
      .map(each => medianOfMedians(each.toArray))
      .runForeach(result => println(s"Median of Median (grouped by 5) over the last 100 elements: $result"))
      .onComplete(_ => system.terminate())
  }

  @tailrec
  def findKMedian(arr: Array[Double], k: Int)(implicit choosePivot: Array[Double] => Double): Double = {
    val a = choosePivot(arr)
    val (s, b) = arr partition (a >)
    if (s.length == k) a
    // The following test is used to avoid infinite repetition
    else if (s.isEmpty) {
      val (s, b) = arr partition (a ==)
      if (s.length > k) a
      else findKMedian(b, k - s.length)
    } else if (s.length < k) findKMedian(b, k - s.length)
    else findKMedian(s, k)
  }

  def medianUpTo5(five: Array[Double]): Double = {
    def order2(a: Array[Double], i: Int, j: Int) = {
      if (a(i) > a(j)) { val t = a(i); a(i) = a(j); a(j) = t }
    }

    def pairs(a: Array[Double], i: Int, j: Int, k: Int, l: Int) = {
      if (a(i) < a(k)) { order2(a, j, k); a(j) }
      else { order2(a, i, l); a(i) }
    }

    if (five.length < 2) { return five(0) }
    order2(five, 0, 1)
    if (five.length < 4)
      return if (five.length == 2 || five(2) < five(0)) five(0)
             else if (five(2) > five(1)) five(1)
             else five(2)
    order2(five, 2, 3)
    if (five.length < 5) pairs(five, 0, 1, 2, 3)
    else if (five(0) < five(2)) { order2(five, 1, 4); pairs(five, 1, 4, 2, 3) }
    else { order2(five, 3, 4); pairs(five, 0, 1, 3, 4) }
  }

  def medianOfMedians(arr: Array[Double]): Double = {
    val medians = arr grouped 5 map medianUpTo5 toArray;
    if (medians.length <= 5) medianUpTo5(medians)
    else medianOfMedians(medians)
  }
}
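medianOfMedians gives an approximate median (it recurses on group-of-5 medians rather than fully selecting), which is fine for a streaming dashboard. A hypothetical comparison against the exact sorted median; note that referencing the object above also starts its ActorSystem, so this is illustrative only:

import java.util.concurrent.ThreadLocalRandom

object MedianCheck extends App {
  val xs = Array.fill(101)(ThreadLocalRandom.current().nextDouble(100))
  val approx = sample.stream.CalculateMedian.medianOfMedians(xs)
  val exact = xs.sorted.apply(xs.length / 2)
  println(f"median-of-medians = $approx%.2f, exact = $exact%.2f")
}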
Example 23
Source File: OrderBookCancelBenchmark.scala From matcher with MIT License
package com.wavesplatform.dex.model.orderbook

import java.util.concurrent.{ThreadLocalRandom, TimeUnit}

import com.wavesplatform.dex.domain.order.Order
import com.wavesplatform.dex.model.OrderBook
import com.wavesplatform.dex.model.orderbook.OrderBookCancelBenchmark._
import com.wavesplatform.dex.model.state.OrderBookBenchmarkState
import org.openjdk.jmh.annotations._
import org.openjdk.jmh.infra.Blackhole
import org.scalacheck.Gen

import scala.util.Random

@OutputTimeUnit(TimeUnit.NANOSECONDS)
@BenchmarkMode(Array(Mode.AverageTime))
@Threads(4)
@Fork(1)
@Warmup(iterations = 10)
@Measurement(iterations = 10)
class OrderBookCancelBenchmark {
  // @Benchmark
  def cancel_2500_to_1250_test(st: Cancel_2500_To_1250_State, bh: Blackhole): Unit = bh.consume { st.run() }

  @Benchmark
  def cancel_1250_to_0_test(st: Cancel_1250_To_0_State, bh: Blackhole): Unit = bh.consume { st.run() }
}

object OrderBookCancelBenchmark {

  @State(Scope.Thread) class Cancel_2500_To_1250_State extends CancelState(initOrderNumber = 2500, orderNumberAfterCancel = 1250)
  @State(Scope.Thread) class Cancel_1250_To_0_State    extends CancelState(initOrderNumber = 1250, orderNumberAfterCancel = 0)

  sealed abstract class CancelState(initOrderNumber: Int, orderNumberAfterCancel: Int) extends OrderBookBenchmarkState {

    private val askPricesMin = 1000L * Order.PriceConstant
    private val askPricesMax = 2000L * Order.PriceConstant

    private val bidPricesMin = 1L * Order.PriceConstant
    private val bidPricesMax = 999L * Order.PriceConstant

    val orderBookGen: Gen[OrderBook] = fixedSidesOrdersGen(
      levelNumber = initOrderNumber / 2,
      orderNumberInLevel = 2,
      askPricesGen = Gen.choose(askPricesMin, askPricesMax),
      bidPricesGen = Gen.choose(bidPricesMin, bidPricesMax)
    ).map(Function.tupled(mkOrderBook))

    val orderBook: OrderBook = orderBookGen.sample.get

    val orders: Seq[Order.Id] = {
      val xs = orderBook.allOrders.map(_.order.id()).toVector
      new Random(ThreadLocalRandom.current()).shuffle(xs).take(initOrderNumber - orderNumberAfterCancel)
    }

    def run(): OrderBook = orders.foldLeft(orderBook) { case (r, id) => r.cancel(id, ts)._1 }
  }
}
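Wrapping ThreadLocalRandom in scala.util.Random, as the state class does, gives access to collection helpers like shuffle without sharing one contended java.util.Random across the four benchmark threads. The same wrapping in isolation:

import java.util.concurrent.ThreadLocalRandom
import scala.util.Random

object ShuffleDemo extends App {
  // ThreadLocalRandom extends java.util.Random, so it can seed scala.util.Random.
  val rng = new Random(ThreadLocalRandom.current())
  println(rng.shuffle(Vector(1, 2, 3, 4, 5)))
}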
Example 24
Source File: HasKafka.scala From matcher with MIT License
package com.wavesplatform.dex.it.api

import java.util.concurrent.ThreadLocalRandom

import com.dimafeng.testcontainers.KafkaContainer
import com.github.dockerjava.api.model.{ContainerNetwork, NetworkSettings}
import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.dex.it.test.InformativeTestStart

import scala.collection.JavaConverters._

trait HasKafka { self: BaseContainersKit with InformativeTestStart =>

  protected val kafkaContainerName = s"$networkName-kafka"

  protected val kafkaIp = getIp(12)

  protected def dexKafkaConfig(topic: String = ThreadLocalRandom.current.nextInt(0, Int.MaxValue).toString): Config =
    ConfigFactory.parseString(s"""waves.dex.events-queue {
                                 |  type = kafka
                                 |  kafka {
                                 |    servers = "$kafkaIp:9092"
                                 |    topic = "$topic"
                                 |  }
                                 |}""".stripMargin)

  protected val kafka: KafkaContainer =
    KafkaContainer().configure { k =>
      k.withNetwork(network)
      k.withNetworkAliases(kafkaContainerName)
      k.withCreateContainerCmdModifier { cmd =>
        cmd withName kafkaContainerName
        cmd withIpv4Address kafkaIp
      }
    }

  protected def disconnectKafkaFromNetwork(): Unit = {
    writeGlobalLog("--- Disconnecting Kafka from the network ---")

    kafka.dockerClient
      .disconnectFromNetworkCmd()
      .withContainerId(kafka.containerId)
      .withNetworkId(network.getId)
      .exec()

    waitForNetworkSettings(!_.getNetworks.containsKey(network.getId))
  }

  protected def connectKafkaToNetwork(): Unit = {
    writeGlobalLog("--- Connecting Kafka to the network ---")

    kafka.dockerClient
      .connectToNetworkCmd()
      .withContainerId(kafka.containerId)
      .withNetworkId(network.getId)
      .withContainerNetwork(
        new ContainerNetwork()
          .withIpamConfig(new ContainerNetwork.Ipam().withIpv4Address(kafkaIp))
          .withAliases(kafka.networkAliases.asJava))
      .exec()

    waitForNetworkSettings(_.getNetworks.containsKey(network.getId))
  }

  private def waitForNetworkSettings(pred: NetworkSettings => Boolean): Unit =
    Iterator
      .continually {
        Thread.sleep(1000)
        kafka.dockerClient.inspectContainerCmd(kafka.containerId).exec().getNetworkSettings
      }
      .zipWithIndex
      .find { case (ns, attempt) => pred(ns) || attempt == 10 }
      .fold(log.warn(s"Can't wait on ${kafka.containerId}"))(_ => ())
}
Example 25
Source File: MatcherSuiteBase.scala From matcher with MIT License
package com.wavesplatform.it

import java.nio.charset.StandardCharsets
import java.util.concurrent.ThreadLocalRandom

import cats.instances.FutureInstances
import com.wavesplatform.dex.asset.DoubleOps
import com.wavesplatform.dex.domain.account.KeyPair
import com.wavesplatform.dex.domain.asset.Asset
import com.wavesplatform.dex.domain.bytes.ByteStr
import com.wavesplatform.dex.domain.utils.ScorexLogging
import com.wavesplatform.dex.it.api.BaseContainersKit
import com.wavesplatform.dex.it.api.node.HasWavesNode
import com.wavesplatform.dex.it.config.{GenesisConfig, PredefinedAccounts, PredefinedAssets}
import com.wavesplatform.dex.it.dex.HasDex
import com.wavesplatform.dex.it.matchers.ItMatchers
import com.wavesplatform.dex.it.test.InformativeTestStart
import com.wavesplatform.dex.it.waves.{MkWavesEntities, ToWavesJConversions}
import com.wavesplatform.dex.test.matchers.DiffMatcherWithImplicits
import com.wavesplatform.dex.waves.WavesFeeConstants
import com.wavesplatform.it.api.ApiExtensions
import org.scalatest.concurrent.Eventually
import org.scalatest.freespec.AnyFreeSpec
import org.scalatest.matchers.should.Matchers
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, CancelAfterFailure}

import scala.concurrent.duration.DurationInt

trait MatcherSuiteBase
    extends AnyFreeSpec
    with Matchers
    with CancelAfterFailure
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with Eventually
    with BaseContainersKit
    with HasDex
    with HasWavesNode
    with MkWavesEntities
    with ApiExtensions
    with ItMatchers
    with DoubleOps
    with WavesFeeConstants
    with PredefinedAssets
    with PredefinedAccounts
    with DiffMatcherWithImplicits
    with InformativeTestStart
    with FutureInstances
    with ToWavesJConversions
    with ScorexLogging {

  GenesisConfig.setupAddressScheme()

  override protected val moduleName: String = "dex-it"

  override implicit def patienceConfig: PatienceConfig = super.patienceConfig.copy(timeout = 30.seconds, interval = 1.second)

  override protected def beforeAll(): Unit = {
    log.debug(s"Perform beforeAll")
    kafkaServer.foreach { _ =>
      createKafkaTopic(dexRunConfig.getString("waves.dex.events-queue.kafka.topic"))
    }
    wavesNode1.start()
    dex1.start()
  }

  override protected def afterAll(): Unit = {
    log.debug(s"Perform afterAll")
    stopBaseContainers()
    super.afterAll()
  }

  def createAccountWithBalance(balances: (Long, Asset)*): KeyPair = {
    val account = KeyPair(ByteStr(s"account-test-${ThreadLocalRandom.current().nextInt()}".getBytes(StandardCharsets.UTF_8)))

    balances.foreach {
      case (balance, asset) =>
        assert(
          wavesNode1.api.balance(alice, asset) >= balance,
          s"Alice doesn't have enough balance in ${asset.toString} to make a transfer"
        )
        broadcastAndAwait(mkTransfer(alice, account.toAddress, balance, asset))
    }
    account
  }
}
Example 26
Source File: AuthServiceRestConnector.scala From matcher with MIT License
package com.wavesplatform.dex.tool.connectors

import java.nio.charset.StandardCharsets
import java.util.concurrent.ThreadLocalRandom

import com.wavesplatform.dex.auth.JwtUtils
import com.wavesplatform.dex.cli.ErrorOr
import com.wavesplatform.dex.domain.account.KeyPair
import com.wavesplatform.dex.domain.crypto
import com.wavesplatform.dex.tool.connectors.AuthServiceRestConnector.AuthCredentials
import sttp.model.Uri.QuerySegment

case class AuthServiceRestConnector(target: String, chainId: Byte) extends RestConnector with JwtUtils {

  private def mkAuthTokenRequestParams(keyPair: KeyPair): List[QuerySegment] = {
    val jwtPayload = mkJwtSignedPayload(keyPair, networkByte = chainId)
    List(
      "grant_type" -> "password",
      "username"   -> jwtPayload.publicKey.base58,
      "password"   -> s"${jwtPayload.firstTokenExpirationInSeconds}:${jwtPayload.signature}",
      "scope"      -> jwtPayload.scope.head,
      "client_id"  -> jwtPayload.clientId
    ).map { case (k, v) => QuerySegment.KeyValue(k, v) }
  }

  def getAuthCredentials(maybeSeed: Option[String]): ErrorOr[AuthCredentials] = {
    val seed          = maybeSeed getOrElse s"minion${ThreadLocalRandom.current.nextInt}"
    val keyPair       = KeyPair(crypto secureHash (seed getBytes StandardCharsets.UTF_8))
    val requestParams = mkAuthTokenRequestParams(keyPair)
    val uri           = targetUri.copy(querySegments = requestParams)

    mkResponse { _.post(uri) }.map { j =>
      AuthCredentials(
        keyPair = keyPair,
        token = (j \ "access_token").as[String],
        seed = seed
      )
    }
  }
}

object AuthServiceRestConnector {
  final case class AuthCredentials(keyPair: KeyPair, token: String, seed: String)
}