com.typesafe.scalalogging.Logger Scala Examples
The following examples show how to use com.typesafe.scalalogging.Logger.
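Before the examples, here is a minimal sketch of the creation patterns that recur below (the class and logger names are illustrative): wrapping the enclosing class, a singleton object, or an already-configured SLF4J logger.

import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

class MyService {
  // Common pattern: name the logger after the enclosing class
  private val log = Logger(classOf[MyService])

  def doWork(): Unit =
    log.info("processing {} items", 42) // SLF4J-style {} placeholders, as in the examples below
}

object MyService {
  // For singleton objects, pass the object's class
  private val log = Logger(MyService.getClass)

  // Or wrap an SLF4J logger obtained by name
  private val named = Logger(LoggerFactory.getLogger("my.service"))
}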
Example 1
Source File: SinkDataGroups.scala From ohara with Apache License 2.0 | 6 votes |
package oharastream.ohara.shabondi.sink

import java.time.{Duration => JDuration}
import java.util.concurrent._

import com.google.common.util.concurrent.ThreadFactoryBuilder
import oharastream.ohara.common.util.Releasable
import com.typesafe.scalalogging.Logger
import oharastream.ohara.common.setting.{ObjectKey, TopicKey}

import scala.jdk.CollectionConverters._

private[sink] object SinkDataGroups {
  def apply(config: SinkConfig) = new SinkDataGroups(config)
}

private class SinkDataGroups(
  objectKey: ObjectKey,
  brokerProps: String,
  topicKeys: Set[TopicKey],
  pollTimeout: JDuration
) extends Releasable {
  def this(config: SinkConfig) = {
    this(config.objectKey, config.brokers, config.sinkFromTopics, config.sinkPollTimeout)
  }

  private val threadPool: ExecutorService =
    Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("SinkDataGroups-%d").build())

  private val log        = Logger(classOf[SinkDataGroups])
  private val dataGroups = new ConcurrentHashMap[String, DataGroup]()

  def removeGroup(name: String): Boolean = {
    val group = dataGroups.remove(name)
    if (group != null) {
      group.close()
      true
    } else
      false
  }

  def groupExist(name: String): Boolean = dataGroups.containsKey(name)

  def createIfAbsent(name: String): DataGroup =
    dataGroups.computeIfAbsent(
      name, { n =>
        log.info("create data group: {}", n)
        val dataGroup = new DataGroup(n, objectKey, brokerProps, topicKeys, pollTimeout)
        threadPool.submit(dataGroup.queueProducer)
        dataGroup
      }
    )

  def size: Int = dataGroups.size()

  def freeIdleGroup(idleTime: JDuration): Unit = {
    val groups = dataGroups.elements().asScala.toSeq
    groups.foreach { group =>
      if (group.isIdle(idleTime)) {
        removeGroup(group.name)
      }
    }
  }

  override def close(): Unit = {
    dataGroups.asScala.foreach {
      case (_, dataGroup) => dataGroup.close()
    }
    threadPool.shutdown()
  }
}
Example 2
Source File: ShabondiSource.scala From ohara with Apache License 2.0 | 5 votes |
package oharastream.ohara.shabondi

import com.typesafe.scalalogging.Logger
import oharastream.ohara.common.setting.WithDefinitions
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import oharastream.ohara.shabondi.common.ShabondiUtils
import oharastream.ohara.shabondi.source.WebServer

class ShabondiSource extends WithDefinitions with Releasable {
  private[this] var webServer: WebServer = _

  def start(args: Map[String, String]): Unit = {
    val config = new source.SourceConfig(args)
    webServer = new source.WebServer(config)
    webServer.start(CommonUtils.anyLocalAddress(), config.port)
  }

  override def close(): Unit = Releasable.close(webServer)
}

object ShabondiSource {
  private val log = Logger(ShabondiSource.getClass)

  def main(args: Array[String]): Unit = {
    val newArgs = ShabondiUtils.parseArgs(args)

    log.info("Shabondi arguments({}):", newArgs.size)
    newArgs.foreach { case (k, v) => log.info(s" $k=$v") }

    val source = new ShabondiSource()
    try source.start(newArgs)
    finally source.close()
  }
}
Example 3
Source File: ShabondiSink.scala From ohara with Apache License 2.0 | 5 votes |
package oharastream.ohara.shabondi

import com.typesafe.scalalogging.Logger
import oharastream.ohara.common.setting.WithDefinitions
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import oharastream.ohara.shabondi.common.ShabondiUtils
import oharastream.ohara.shabondi.sink.WebServer

class ShabondiSink extends WithDefinitions with Releasable {
  private[this] var webServer: WebServer = _

  def start(args: Map[String, String]): Unit = {
    val config = new sink.SinkConfig(args)
    webServer = new sink.WebServer(config)
    webServer.start(CommonUtils.anyLocalAddress(), config.port)
  }

  override def close(): Unit = Releasable.close(webServer)
}

object ShabondiSink {
  private val log = Logger(ShabondiSink.getClass)

  def main(args: Array[String]): Unit = {
    val newArgs = ShabondiUtils.parseArgs(args)

    log.info("Arguments:")
    newArgs.foreach { case (k, v) => log.info(s" $k=$v") }

    val sink = new ShabondiSink()
    try sink.start(newArgs)
    finally sink.close()
  }
}
Example 4
Source File: DataGroup.scala From ohara with Apache License 2.0 | 5 votes |
package oharastream.ohara.shabondi.sink

import java.time.{Duration => JDuration}
import java.util.concurrent.atomic.AtomicBoolean
import java.util.function.Consumer

import oharastream.ohara.common.util.Releasable
import com.typesafe.scalalogging.Logger
import oharastream.ohara.common.setting.{ObjectKey, TopicKey}
import oharastream.ohara.metrics.basic.Counter

private[sink] class DataGroup(
  val name: String,
  objectKey: ObjectKey,
  brokerProps: String,
  topicKeys: Set[TopicKey],
  pollTimeout: JDuration
) extends Releasable {
  private val log = Logger(classOf[RowQueue])

  private val rowCounter: Counter =
    Counter.builder
      .key(objectKey)
      .item(s"rows-$name")
      .unit("row")
      .document(s"The number of received rows of group $name")
      .value(0)
      .register()

  val queue         = new RowQueue
  val queueProducer = new QueueProducer(name, queue, brokerProps, topicKeys, pollTimeout, rowCounter)

  private[this] val closed = new AtomicBoolean(false)

  def resume(): Unit =
    if (!closed.get) {
      queueProducer.resume()
    }

  def pause(): Unit =
    if (!closed.get) {
      queueProducer.pause()
    }

  def isIdle(idleTime: JDuration): Boolean = queue.isIdle(idleTime)

  override def close(): Unit = {
    if (closed.compareAndSet(false, true)) {
      var exception: Throwable = null
      val addSuppressedException: Consumer[Throwable] = (ex: Throwable) => {
        if (exception == null) exception = ex
        else exception.addSuppressed(ex)
      }
      Releasable.close(queueProducer, addSuppressedException)
      Releasable.close(rowCounter, addSuppressedException)
      if (exception != null) throw exception
      log.info("Group {} closed.", name)
    }
  }
}
Example 5
Source File: SinkRouteHandler.scala From ohara with Apache License 2.0 | 5 votes |
package oharastream.ohara.shabondi.sink

import java.time.{Duration => JDuration}
import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import akka.http.scaladsl.model.{ContentTypes, HttpEntity, StatusCodes}
import akka.http.scaladsl.server.{ExceptionHandler, Route}
import com.typesafe.scalalogging.Logger
import oharastream.ohara.common.data.Row
import oharastream.ohara.common.util.Releasable
import oharastream.ohara.shabondi.common.{JsonSupport, RouteHandler, ShabondiUtils}
import org.apache.commons.lang3.StringUtils

import scala.collection.mutable.ArrayBuffer
import scala.compat.java8.DurationConverters._
import scala.concurrent.ExecutionContextExecutor
import scala.concurrent.duration.Duration
import spray.json.DefaultJsonProtocol._
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._

private[shabondi] object SinkRouteHandler {
  def apply(config: SinkConfig)(implicit actorSystem: ActorSystem) = new SinkRouteHandler(config)
}

private[shabondi] class SinkRouteHandler(config: SinkConfig)(implicit actorSystem: ActorSystem) extends RouteHandler {
  implicit private val contextExecutor: ExecutionContextExecutor = actorSystem.dispatcher

  private val log = Logger(classOf[SinkRouteHandler])

  private[sink] val dataGroups = SinkDataGroups(config)

  def scheduleFreeIdleGroups(interval: JDuration, idleTime: JDuration): Unit =
    actorSystem.scheduler.scheduleWithFixedDelay(Duration(1, TimeUnit.SECONDS), interval.toScala) { () =>
      {
        log.trace("scheduled free group, total group: {} ", dataGroups.size)
        dataGroups.freeIdleGroup(idleTime)
      }
    }

  private val exceptionHandler = ExceptionHandler {
    case ex: Throwable =>
      log.error(ex.getMessage, ex)
      complete((StatusCodes.InternalServerError, ex.getMessage))
  }

  private def fullyPollQueue(queue: RowQueue): Seq[Row] = {
    val buffer    = ArrayBuffer.empty[Row]
    var item: Row = queue.poll()
    while (item != null) {
      buffer += item
      item = queue.poll()
    }
    buffer.toSeq
  }

  private def apiUrl = ShabondiUtils.apiUrl

  def route(): Route = handleExceptions(exceptionHandler) {
    path("groups" / Segment) { groupId =>
      get {
        if (StringUtils.isAlphanumeric(groupId)) {
          val group  = dataGroups.createIfAbsent(groupId)
          val result = fullyPollQueue(group.queue).map(row => JsonSupport.toRowData(row))
          complete(result)
        } else {
          val entity =
            HttpEntity(ContentTypes.`text/plain(UTF-8)`, "Illegal group name, only accept alpha and numeric.")
          complete(StatusCodes.NotAcceptable -> entity)
        }
      } ~ {
        complete(StatusCodes.MethodNotAllowed -> s"Unsupported method, please reference: $apiUrl")
      }
    } ~ {
      complete(StatusCodes.NotFound -> s"Please reference: $apiUrl")
    }
  }

  override def close(): Unit = {
    Releasable.close(dataGroups)
  }
}
Example 6
Source File: QueueProducer.scala From ohara with Apache License 2.0 | 5 votes |
package oharastream.ohara.shabondi.sink

import java.time.{Duration => JDuration}
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicBoolean
import java.util.{Queue => JQueue}

import oharastream.ohara.common.data.{Row, Serializer}
import oharastream.ohara.common.util.Releasable
import oharastream.ohara.kafka.Consumer
import com.typesafe.scalalogging.Logger
import oharastream.ohara.common.setting.TopicKey
import oharastream.ohara.metrics.basic.Counter

import scala.jdk.CollectionConverters._

private[sink] class QueueProducer(
  val groupName: String,
  val queue: JQueue[Row],
  val brokerProps: String,
  val topicKeys: Set[TopicKey],
  val pollTimeout: JDuration,
  val rowCounter: Counter
) extends Runnable
    with Releasable {
  private[this] val log                    = Logger(classOf[QueueProducer])
  private[this] val paused: AtomicBoolean  = new AtomicBoolean(false)
  private[this] val stopped: AtomicBoolean = new AtomicBoolean(false)

  private[this] val consumer: Consumer[Row, Array[Byte]] = Consumer
    .builder()
    .keySerializer(Serializer.ROW)
    .valueSerializer(Serializer.BYTES)
    .offsetFromBegin()
    .topicKeys(topicKeys.asJava)
    .connectionProps(brokerProps)
    .build()

  override def run(): Unit = {
    log.info(
      "{} group `{}` start.(topics={}, brokerProps={})",
      this.getClass.getSimpleName,
      groupName,
      topicKeys.mkString(","),
      brokerProps
    )
    try {
      while (!stopped.get) {
        if (!paused.get && queue.isEmpty) {
          val rows = consumer.poll(pollTimeout).asScala.map(_.key.get)
          rows.foreach { r =>
            queue.add(r)
            rowCounter.incrementAndGet()
          }
          log.trace(" group[{}], queue: {}, rows: {}", groupName, queue.size, rows.size)
        } else {
          TimeUnit.MILLISECONDS.sleep(10)
        }
      } // while
    } finally {
      consumer.close()
      log.info("stopped.")
    }
  }

  override def close(): Unit = {
    stop()
  }

  def stop(): Unit = {
    stopped.set(true)
  }

  def pause(): Unit = {
    if (paused.compareAndSet(false, true)) {
      log.info("{} paused.", this.getClass.getSimpleName)
    }
  }

  def resume(): Unit = {
    if (paused.compareAndSet(true, false)) {
      log.info("{} resumed.", this.getClass.getSimpleName)
    }
  }
}
Example 7
Source File: ShabondiUtils.scala From ohara with Apache License 2.0 | 5 votes |
package oharastream.ohara.shabondi.common

import com.typesafe.scalalogging.Logger
import oharastream.ohara.common.util.{CommonUtils, VersionUtils}

import scala.jdk.CollectionConverters._

object ShabondiUtils {
  private val log            = Logger(ShabondiUtils.getClass)
  private val ESCAPE_STRING1 = "_____"
  private val ESCAPE_STRING2 = "~~~~~"

  def escape(value: String): String = {
    if (value.contains(ESCAPE_STRING1))
      throw new IllegalArgumentException(s"Cannot escape the value `$value` by escape string $ESCAPE_STRING1")
    if (value.contains(ESCAPE_STRING2))
      throw new IllegalArgumentException(s"Cannot escape the value `$value` by escape string $ESCAPE_STRING2")
    value
      .replaceAll("\"", ESCAPE_STRING1)
      .replaceAll(" ", ESCAPE_STRING2)
  }

  def unescape(value: String): String = {
    value
      .replaceAll(ESCAPE_STRING1, "\"")
      .replaceAll(ESCAPE_STRING2, " ")
  }

  def parseArgs(args: Array[String]): Map[String, String] =
    CommonUtils
      .parse(args.toSeq.asJava)
      .asScala
      .toMap
      .map {
        case (k, v) => (k, unescape(v))
      }

  def logArgs(args: Map[String, String]): Unit = {
    log.info("Arguments:")
    args.foreach { case (k, v) => log.info(s" $k=$v") }
  }

  def apiUrl: String = s"https://oharastream.github.io/en/docs/${VersionUtils.BRANCH}/shabondi/"
}
Example 8
Source File: BasicShabondiTest.scala From ohara with Apache License 2.0 | 5 votes |
package oharastream.ohara.shabondi

import java.util
import java.util.concurrent.{ExecutorService, Executors}

import com.google.common.util.concurrent.ThreadFactoryBuilder
import com.typesafe.scalalogging.Logger
import oharastream.ohara.common.data.Row
import oharastream.ohara.common.setting.TopicKey
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import oharastream.ohara.kafka.TopicAdmin
import oharastream.ohara.shabondi.common.ShabondiUtils
import oharastream.ohara.shabondi.sink.SinkConfig
import oharastream.ohara.shabondi.source.SourceConfig
import oharastream.ohara.testing.WithBroker
import org.junit.After

import scala.collection.{immutable, mutable}
import scala.concurrent.{ExecutionContext, Future}
import scala.jdk.CollectionConverters._

private[shabondi] abstract class BasicShabondiTest extends WithBroker {
  protected val log = Logger(this.getClass())

  protected val brokerProps            = testUtil.brokersConnProps
  protected val topicAdmin: TopicAdmin = TopicAdmin.of(brokerProps)

  protected val newThreadPool: () => ExecutorService = () =>
    Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat(this.getClass.getSimpleName + "-").build())

  protected val countRows: (util.Queue[Row], Long, ExecutionContext) => Future[Long] =
    (queue, executionTime, ec) =>
      Future {
        log.debug("countRows begin...")
        val baseTime = System.currentTimeMillis()
        var count    = 0L
        var running  = true
        while (running) {
          val row = queue.poll()
          if (row != null) count += 1 else Thread.sleep(100)
          running = (System.currentTimeMillis() - baseTime) < executionTime
        }
        log.debug("countRows done")
        count
      }(ec)

  protected def createTopicKey = TopicKey.of("default", CommonUtils.randomString(5))

  protected def createTestTopic(topicKey: TopicKey): Unit =
    topicAdmin.topicCreator
      .numberOfPartitions(1)
      .numberOfReplications(1.toShort)
      .topicKey(topicKey)
      .create

  protected def defaultSourceConfig(
    sourceToTopics: Seq[TopicKey] = Seq.empty[TopicKey]
  ): SourceConfig = {
    import ShabondiDefinitions._
    val args = mutable.ArrayBuffer(
      GROUP_DEFINITION.key + "=" + CommonUtils.randomString(5),
      NAME_DEFINITION.key + "=" + CommonUtils.randomString(3),
      SHABONDI_CLASS_DEFINITION.key + "=" + classOf[ShabondiSource].getName,
      CLIENT_PORT_DEFINITION.key + "=8080",
      BROKERS_DEFINITION.key + "=" + testUtil.brokersConnProps
    )
    if (sourceToTopics.nonEmpty)
      args += s"${SOURCE_TO_TOPICS_DEFINITION.key}=${TopicKey.toJsonString(sourceToTopics.asJava)}"

    val rawConfig = ShabondiUtils.parseArgs(args.toArray)
    new SourceConfig(rawConfig)
  }

  protected def defaultSinkConfig(
    sinkFromTopics: Seq[TopicKey] = Seq.empty[TopicKey]
  ): SinkConfig = {
    import ShabondiDefinitions._
    val args = mutable.ArrayBuffer(
      GROUP_DEFINITION.key + "=" + CommonUtils.randomString(5),
      NAME_DEFINITION.key + "=" + CommonUtils.randomString(3),
      SHABONDI_CLASS_DEFINITION.key + "=" + classOf[ShabondiSink].getName,
      CLIENT_PORT_DEFINITION.key + "=8080",
      BROKERS_DEFINITION.key + "=" + testUtil.brokersConnProps
    )
    if (sinkFromTopics.nonEmpty)
      args += s"${SINK_FROM_TOPICS_DEFINITION.key}=${TopicKey.toJsonString(sinkFromTopics.asJava)}"

    val rawConfig = ShabondiUtils.parseArgs(args.toArray)
    new SinkConfig(rawConfig)
  }

  protected def singleRow(columnSize: Int, rowId: Int = 0): Row =
    KafkaSupport.singleRow(columnSize, rowId)

  protected def multipleRows(rowSize: Int): immutable.Iterable[Row] =
    KafkaSupport.multipleRows(rowSize)

  @After
  def tearDown(): Unit = {
    Releasable.close(topicAdmin)
  }
}
Example 9
Source File: ServiceKeyHolder.scala From ohara with Apache License 2.0 | 5 votes |
package oharastream.ohara.it

import java.util.concurrent.TimeUnit

import oharastream.ohara.agent.container.ContainerClient
import oharastream.ohara.common.setting.ObjectKey
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import com.typesafe.scalalogging.Logger

import scala.collection.mutable
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

// (the enclosing class, the method signature, and the definitions of result(), client,
//  clusterKey, excludedNodes, finalClose, KEEP_CONTAINERS and LOG are elided in this excerpt)
    if (!finalClose || !KEEP_CONTAINERS)
      result(client.containers())
        .filter(
          container =>
            clusterKey.exists(key => container.name.contains(key.group()) && container.name.contains(key.name()))
        )
        .filterNot(container => excludedNodes.contains(container.nodeName))
        .foreach { container =>
          try {
            println(s"[-----------------------------------${container.name}-----------------------------------]")
            // Before 10 minutes container log. Avoid the OutOfMemory of Java heap
            val containerLogs =
              try result(client.log(container.name, Option(600)))
              catch {
                case e: Throwable =>
                  s"failed to fetch the logs for container:${container.name}. caused by:${e.getMessage}"
              }
            println(containerLogs)
            println("[------------------------------------------------------------------------------------]")
            result(client.forceRemove(container.name))
          } catch {
            case e: Throwable =>
              LOG.error(s"failed to remove container ${container.name}", e)
          }
        }
    finally Releasable.close(client)
  }
Example 10
Source File: ReflectionUtils.scala From ohara with Apache License 2.0 | 5 votes |
package oharastream.ohara.configurator

import java.lang.reflect.Modifier

import com.typesafe.scalalogging.Logger
import oharastream.ohara.client.configurator.FileInfoApi.ClassInfo
import oharastream.ohara.common.setting.WithDefinitions
import oharastream.ohara.kafka.connector.{RowSinkConnector, RowSourceConnector}
import org.reflections.Reflections
import org.reflections.util.{ClasspathHelper, ConfigurationBuilder}

import scala.jdk.CollectionConverters._

object ReflectionUtils {
  private[this] val LOG = Logger(ReflectionUtils.getClass)

  lazy val localConnectorDefinitions: Seq[ClassInfo] =
    new Reflections(
      new ConfigurationBuilder()
        // we ought to define urls manually since Reflections does not work on java 11
        // It can't find correct urls without pre-defined urls.
        .setUrls(ClasspathHelper.forJavaClassPath)
    ).getSubTypesOf(classOf[WithDefinitions])
      .asScala
      .toSeq
      .filter(
        clz => classOf[RowSourceConnector].isAssignableFrom(clz) || classOf[RowSinkConnector].isAssignableFrom(clz)
      )
      // the abstract class is not instantiable.
      .filterNot(clz => Modifier.isAbstract(clz.getModifiers))
      .flatMap { clz =>
        try Some((clz.getName, clz.getDeclaredConstructor().newInstance().settingDefinitions().values().asScala.toSeq))
        catch {
          case e: Throwable =>
            LOG.error(s"failed to instantiate ${clz.getName}", e)
            None
        }
      }
      .map {
        case (className, definitions) =>
          ClassInfo(
            className = className,
            settingDefinitions = definitions
          )
      }
}
Example 11
Source File: ConsoleSinkTask.scala From ohara with Apache License 2.0 | 5 votes |
package oharastream.ohara.connector.console

import java.util
import java.util.concurrent.TimeUnit

import oharastream.ohara.common.annotations.VisibleForTesting
import oharastream.ohara.common.util.CommonUtils
import oharastream.ohara.kafka.connector.{RowSinkRecord, RowSinkTask, TaskSetting}
import com.typesafe.scalalogging.Logger

import scala.concurrent.duration.Duration
import scala.jdk.CollectionConverters._

class ConsoleSinkTask extends RowSinkTask {
  private[this] val LOG = Logger(classOf[ConsoleSinkTask])

  @VisibleForTesting
  private[console] var freq: Duration = CONSOLE_FREQUENCE_DEFAULT

  @VisibleForTesting
  private[console] var divider: String = CONSOLE_ROW_DIVIDER_DEFAULT

  @VisibleForTesting
  private[console] var lastLog: Long = -1

  override protected def run(config: TaskSetting): Unit = {
    divider = config.stringOption(CONSOLE_ROW_DIVIDER).orElse(CONSOLE_ROW_DIVIDER_DEFAULT)
    freq = Duration(
      config
        .durationOption(CONSOLE_FREQUENCE)
        .orElse(java.time.Duration.ofMillis(CONSOLE_FREQUENCE_DEFAULT.toMillis))
        .toMillis,
      TimeUnit.MILLISECONDS
    )
  }

  override protected def terminate(): Unit = {
    // do nothing
  }

  override protected def putRecords(records: util.List[RowSinkRecord]): Unit =
    if (!records.isEmpty && (lastLog == -1 || CommonUtils.current() - lastLog >= freq.toMillis)) {
      try {
        LOG.info(records.asScala.map(_.row()).mkString(divider))
      } finally lastLog = CommonUtils.current()
    }
}
Example 12
Source File: DamlContractTemplateGen.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.codegen.lf

import java.io.File

import com.daml.codegen.Util
import com.daml.lf.data.ImmArray.ImmArraySeq
import com.daml.lf.data.Ref.{Identifier, QualifiedName}
import com.typesafe.scalalogging.Logger

import scala.reflect.runtime.universe._

object DamlContractTemplateGen {
  import LFUtil.rpcValueAlias

  private val logger: Logger = Logger(getClass)

  def generate(
      util: LFUtil,
      templateId: Identifier,
      templateInterface: DefTemplateWithRecord.FWT,
      companionMembers: Iterable[Tree]
  ): (File, Set[Tree], Iterable[Tree]) = {

    val templateName = util.mkDamlScalaName(Util.Template, templateId)
    val contractName = util.mkDamlScalaName(Util.Contract, templateId)

    val syntaxIdDecl = LFUtil.toCovariantTypeDef(" ExOn")
    val syntaxIdType = TypeName(" ExOn")

    logger.debug(s"generate templateDecl: ${templateName.toString}, ${templateInterface.toString}")

    val templateChoiceMethods = templateInterface.template.choices.flatMap {
      case (id, interface) =>
        util.genTemplateChoiceMethods(
          templateType = tq"${TypeName(contractName.name)}",
          idType = syntaxIdType,
          id,
          interface)
    }

    def toNamedArgumentsMethod =
      q"""
        override def toNamedArguments(` self`: ${TypeName(contractName.name)}) =
          ${util.toNamedArgumentsMap(templateInterface.`type`.fields.toList, Some(q"` self`"))}
      """

    def fromNamedArgumentsMethod = {
      import templateInterface.`type`.fields
      val typeObjectCase =
        if (fields.isEmpty)
          q"_root_.scala.Some(${TermName(templateName.name)}())"
        else {
          val args = LFUtil.generateIds(fields.size, "z")
          util.genForComprehensionBodyOfReaderMethod(fields, args, " r", q"""${TermName(
            templateName.name)}(..$args)""")
        }
      q"""
        override def fromNamedArguments(` r`: $rpcValueAlias.Record) = $typeObjectCase
      """
    }

    def consumingChoicesMethod = LFUtil.genConsumingChoicesMethod(templateInterface.template)

    val Identifier(_, QualifiedName(moduleName, baseName)) = templateId
    val packageIdRef = PackageIDsGen.reference(util)(moduleName)

    def templateObjectMembers = Seq(
      q"override val id = ` templateId`(packageId=$packageIdRef, moduleName=${moduleName.dottedName}, entityName=${baseName.dottedName})",
      q"""implicit final class ${TypeName(s"${contractName.name} syntax")}[$syntaxIdDecl](private val id: $syntaxIdType) extends _root_.scala.AnyVal {
            ..$templateChoiceMethods
          }""",
      consumingChoicesMethod,
      toNamedArgumentsMethod,
      fromNamedArgumentsMethod
    )

    def templateClassMembers = Seq(
      q"protected[this] override def templateCompanion(implicit ` d`: _root_.scala.Predef.DummyImplicit) = ${TermName(templateName.name)}"
    )

    DamlDataTypeGen.generate(
      util,
      ScopedDataType(templateId, ImmArraySeq.empty, templateInterface.`type`),
      isTemplate = true,
      rootClassChildren = templateClassMembers,
      companionChildren = templateObjectMembers ++ companionMembers
    )
  }
}
Example 13
Source File: Registry.scala From kanadi with MIT License | 5 votes |
package org.zalando.kanadi.api

import java.net.URI

import defaults._
import akka.http.scaladsl.HttpExt
import akka.http.scaladsl.model.headers.RawHeader
import akka.http.scaladsl.model.{ContentTypes, HttpMethods, HttpRequest, Uri}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.Materializer
import com.typesafe.scalalogging.{Logger, LoggerTakingImplicit}
import de.heikoseeberger.akkahttpcirce.ErrorAccumulatingCirceSupport._
import org.mdedetrich.webmodels.{FlowId, OAuth2TokenProvider}
import org.mdedetrich.webmodels.RequestHeaders.`X-Flow-ID`
import org.zalando.kanadi.models._

import scala.concurrent.{ExecutionContext, Future}

case class Registry(baseUri: URI, oAuth2TokenProvider: Option[OAuth2TokenProvider] = None)(
    implicit kanadiHttpConfig: HttpConfig,
    http: HttpExt,
    materializer: Materializer)
    extends RegistryInterface {
  protected val logger: LoggerTakingImplicit[FlowId] = Logger.takingImplicit[FlowId](classOf[Registry])
  private val baseUri_                               = Uri(baseUri.toString)

  def partitionStrategies(implicit flowId: FlowId = randomFlowId(),
                          executionContext: ExecutionContext): Future[List[PartitionStrategy]] = {
    val uri = baseUri_.withPath(baseUri_.path / "registry" / "partition-strategies")

    val baseHeaders = List(RawHeader(`X-Flow-ID`, flowId.value))

    for {
      headers <- oAuth2TokenProvider match {
                  case None => Future.successful(baseHeaders)
                  case Some(futureProvider) =>
                    futureProvider.value().map { oAuth2Token =>
                      toHeader(oAuth2Token) +: baseHeaders
                    }
                }
      request  = HttpRequest(HttpMethods.GET, uri, headers)
      _        = logger.debug(request.toString)
      response <- http.singleRequest(request)
      result <- {
        if (response.status.isSuccess()) {
          Unmarshal(response.entity.httpEntity.withContentType(ContentTypes.`application/json`))
            .to[List[PartitionStrategy]]
        } else
          processNotSuccessful(request, response)
      }
    } yield result
  }
}
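Example 13 uses Logger.takingImplicit, which decorates every log line with a value pulled from implicit scope (here, a FlowId). A minimal sketch of the same pattern with scala-logging's CanLog type class; CorrelationId and the bracketed prefix are illustrative assumptions, not part of kanadi:

import com.typesafe.scalalogging.{CanLog, Logger, LoggerTakingImplicit}

final case class CorrelationId(value: String)

object CorrelationId {
  // CanLog tells the logger how to weave the implicit value into each message
  implicit val canLogCorrelationId: CanLog[CorrelationId] = new CanLog[CorrelationId] {
    override def logMessage(originalMsg: String, cid: CorrelationId): String =
      s"[${cid.value}] $originalMsg"
  }
}

final class RequestHandler {
  private val logger: LoggerTakingImplicit[CorrelationId] =
    Logger.takingImplicit[CorrelationId](classOf[RequestHandler])

  def handle()(implicit cid: CorrelationId): Unit =
    logger.info("handling request") // rendered as "[abc-123] handling request"
}

object TakingImplicitDemo extends App {
  implicit val cid: CorrelationId = CorrelationId("abc-123")
  new RequestHandler().handle()
}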
Example 14
Source File: FrameHandler.scala From lila-ws with GNU Affero General Public License v3.0 | 5 votes |
package lila.ws
package netty

import com.typesafe.scalalogging.Logger
import io.netty.channel.ChannelHandlerContext
import io.netty.channel.SimpleChannelInboundHandler
import io.netty.handler.codec.http.websocketx.TextWebSocketFrame
import io.netty.handler.codec.http.websocketx.WebSocketFrame
import scala.concurrent.ExecutionContext

import ipc.ClientOut

final private class FrameHandler(implicit ec: ExecutionContext)
    extends SimpleChannelInboundHandler[WebSocketFrame] {

  import FrameHandler._
  import ProtocolHandler.key

  override protected def channelRead0(
      ctx: ChannelHandlerContext,
      anyFrame: WebSocketFrame
  ) =
    anyFrame match {
      case frame: TextWebSocketFrame =>
        val txt = frame.text
        if (txt.nonEmpty) {
          val limiter = ctx.channel.attr(key.limit).get
          if (limiter == null || limiter(txt)) ClientOut parse txt foreach {

            case ClientOut.Unexpected(msg) =>
              Monitor.clientOutUnexpected.increment()
              logger.info(s"Unexpected $msg")

            case ClientOut.WrongHole =>
              Monitor.clientOutWrongHole.increment()

            case out =>
              Option(ctx.channel.attr(key.client).get) match {
                case Some(clientFu) =>
                  clientFu.value match {
                    case Some(client) => client foreach (_ ! out)
                    case None         => clientFu foreach (_ ! out)
                  }
                case None => logger.warn(s"No client actor to receive $out")
              }
          }
        }
      case frame =>
        logger.info("unsupported frame type: " + frame.getClass().getName())
    }
}

private object FrameHandler {

  private val logger = Logger(getClass)
}
Example 15
Source File: NettyServer.scala From lila-ws with GNU Affero General Public License v3.0 | 5 votes |
package lila.ws
package netty

import com.typesafe.config.Config
import com.typesafe.scalalogging.Logger
import io.netty.bootstrap.ServerBootstrap
import io.netty.channel.{ Channel, ChannelInitializer }
import io.netty.channel.epoll.{ EpollEventLoopGroup, EpollServerSocketChannel }
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.nio.NioServerSocketChannel
import io.netty.handler.codec.http._
import scala.concurrent.ExecutionContext

final class NettyServer(
    clients: ClientSystem,
    router: Router,
    config: Config
)(implicit ec: ExecutionContext) {

  private val logger = Logger(getClass)

  def start(): Unit = {

    logger.info("Start")

    val port     = config.getInt("http.port")
    val useEpoll = config.getBoolean("netty.useEpoll")

    val bossGroup   = if (useEpoll) new EpollEventLoopGroup(1) else new NioEventLoopGroup(1)
    val workerGroup = if (useEpoll) new EpollEventLoopGroup else new NioEventLoopGroup
    val channelClz  = if (useEpoll) classOf[EpollServerSocketChannel] else classOf[NioServerSocketChannel]

    try {
      val boot = new ServerBootstrap
      boot
        .group(bossGroup, workerGroup)
        .channel(channelClz)
        .childHandler(new ChannelInitializer[Channel] {
          override def initChannel(ch: Channel): Unit = {
            val pipeline = ch.pipeline()
            pipeline.addLast(new HttpServerCodec)
            pipeline.addLast(new HttpObjectAggregator(4096))
            pipeline.addLast(new ProtocolHandler(clients, router))
            pipeline.addLast(new FrameHandler)
          }
        })

      val server = boot.bind(port).sync().channel()

      logger.info(s"Listening to $port")

      server.closeFuture().sync()

      logger.info(s"Closed $port")
    } finally {
      bossGroup.shutdownGracefully()
      workerGroup.shutdownGracefully()
    }
  }
}
Example 16
Source File: RateLimit.scala From lila-ws with GNU Affero General Public License v3.0 | 5 votes |
package lila.ws

import com.typesafe.scalalogging.Logger

final class RateLimit(
    maxCredits: Int,
    intervalMillis: Int,
    name: String
) {
  import RateLimit._

  private def makeClearAt: Long = nowMillis + intervalMillis

  private var credits: Int    = maxCredits
  private var clearAt: Long   = makeClearAt
  private var logged: Boolean = false

  def apply(msg: => String = ""): Boolean =
    if (credits > 0) {
      credits -= 1
      true
    } else if (clearAt < nowMillis) {
      credits = maxCredits
      clearAt = makeClearAt
      true
    } else {
      if (!logged) {
        logged = true
        logger.info(s"$name MSG: $msg")
      }
      Monitor rateLimit name
      false
    }
}

object RateLimit {

  type Charge = Cost => Unit
  type Cost   = Int

  private def nowMillis = System.currentTimeMillis()

  private val logger = Logger(getClass)
}
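Note that the Logger lives in the companion object, so every RateLimit instance reports under the same logger name. A quick usage sketch (values illustrative; assumes the surrounding lila-ws project so Monitor resolves):

object RateLimitDemo {
  def main(args: Array[String]): Unit = {
    val limiter = new RateLimit(maxCredits = 2, intervalMillis = 1000, name = "chat")
    limiter("hello") // true: 1 credit left
    limiter("again") // true: 0 credits left
    limiter("spam!") // false: over budget; the first rejected message is logged
  }
}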
Example 17
Source File: AbstractAlertTrigger.scala From pulse with Apache License 2.0 | 5 votes |
package io.phdata.pulse.alertengine.trigger

import com.typesafe.scalalogging.Logger
import io.phdata.pulse.alertengine.{ AlertRule, AlertsDb, TriggeredAlert }
import org.slf4j.LoggerFactory

// (the abstract class declaration and its logger field are elided in this excerpt)

  def query(applicationName: String, alertRule: AlertRule): Seq[Map[String, Any]]

  override final def check(applicationName: String, alertRule: AlertRule): Option[TriggeredAlert] =
    if (AlertsDb.shouldCheck(applicationName, alertRule)) {
      try {
        val results = query(applicationName, alertRule)
        processResults(applicationName, alertRule, results)
      } catch {
        case e: Exception =>
          e.printStackTrace()
          logger.error(s"Error running query for $applicationName with alert $alertRule", e)
          None
      }
    } else {
      None
    }

  private def processResults(applicationName: String,
                             alertRule: AlertRule,
                             results: Seq[Map[String, Any]]): Option[TriggeredAlert] = {
    val numFound  = results.size
    val threshold = alertRule.resultThreshold.getOrElse(0)
    if (threshold == -1 && results.isEmpty) {
      logger.info(
        s"Alert triggered for $applicationName on alert $alertRule at no results found condition")
      AlertsDb.markTriggered(applicationName, alertRule)
      Some(TriggeredAlert(alertRule, applicationName, results, 0))
    } else if (results.lengthCompare(threshold) > 0) {
      logger.info(s"Alert triggered for $applicationName on alert $alertRule")
      AlertsDb.markTriggered(applicationName, alertRule)
      Some(TriggeredAlert(alertRule, applicationName, results, numFound))
    } else {
      logger.info(s"No alert needed for $applicationName with alert $alertRule")
      None
    }
  }
}
Example 18
Source File: LoggerHandlerWithId.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy.handler

import akka.http.scaladsl.model.{ StatusCode, StatusCodes }
import com.ing.wbaa.rokku.proxy.data.RequestId
import com.ing.wbaa.rokku.proxy.metrics.MetricsFactory
import com.ing.wbaa.rokku.proxy.metrics.MetricsFactory._
import com.typesafe.scalalogging.Logger
import org.slf4j.{ LoggerFactory, MDC }

import scala.collection.mutable

class LoggerHandlerWithId {

  @transient
  private lazy val log: Logger = Logger(LoggerFactory.getLogger(getClass.getName))

  private val requestIdKey  = "request.id"
  private val statusCodeKey = "request.statusCode"

  def debug(message: String, args: Any*)(implicit id: RequestId): Unit = {
    MDC.put(requestIdKey, id.value)
    MDC.put(statusCodeKey, "-")
    log.debug(message, args.asInstanceOf[mutable.WrappedArray[AnyRef]]: _*)
    MDC.remove(requestIdKey)
    MDC.remove(statusCodeKey)
  }

  def info(message: String, args: Any*)(implicit id: RequestId): Unit = {
    MDC.put(requestIdKey, id.value)
    MDC.put(statusCodeKey, "-")
    log.info(message, args.asInstanceOf[mutable.WrappedArray[AnyRef]]: _*)
    MDC.remove(requestIdKey)
    MDC.remove(statusCodeKey)
  }

  def warn(message: String, args: Any*)(implicit id: RequestId, statusCode: StatusCode = StatusCodes.Continue): Unit = {
    MDC.put(requestIdKey, id.value)
    MDC.put(statusCodeKey, statusCode.value)
    if (args.isInstanceOf[mutable.WrappedArray[_]])
      log.warn(message, args.asInstanceOf[mutable.WrappedArray[AnyRef]]: _*)
    else
      log.warn(message, args.asInstanceOf[scala.collection.immutable.$colon$colon[AnyRef]]: _*)
    MDC.remove(requestIdKey)
    MDC.remove(statusCodeKey)
  }

  def error(message: String, args: Any*)(implicit id: RequestId, statusCode: StatusCode = StatusCodes.Continue): Unit = {
    MDC.put(requestIdKey, id.value)
    MDC.put(statusCodeKey, statusCode.value)
    countLogErrors(MetricsFactory.ERROR_REPORTED_TOTAL)
    if (args.isInstanceOf[mutable.WrappedArray[_]])
      log.error(message, args.asInstanceOf[mutable.WrappedArray[AnyRef]]: _*)
    else
      log.error(message, args.asInstanceOf[scala.collection.immutable.$colon$colon[AnyRef]]: _*)
    MDC.remove(requestIdKey)
    MDC.remove(statusCodeKey)
  }
}
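The handler stamps the implicit request id into the SLF4J MDC around each underlying log call, so a backend pattern referencing %X{request.id} and %X{request.statusCode} can print them. A short usage sketch; the RequestId constructor is an assumption based only on the id.value access above:

import com.ing.wbaa.rokku.proxy.data.RequestId

object LoggingDemo {
  private val log = new LoggerHandlerWithId

  def main(args: Array[String]): Unit = {
    // hypothetical constructor: the class above only tells us RequestId exposes .value
    implicit val id: RequestId = RequestId("req-42")
    log.info("handling {} request for {}", "GET", "/bucket/key")
  }
}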
Example 19
Source File: ImapSessionExecutor.scala From gatling-imap with GNU Affero General Public License v3.0 | 5 votes |
package com.linagora.gatling.imap.protocol.command

import akka.actor.ActorRef
import com.linagora.gatling.imap.protocol.{ImapResponses, Response, UserId}
import com.typesafe.scalalogging.Logger
import com.yahoo.imapnio.async.client.ImapFuture
import com.yahoo.imapnio.async.response.ImapAsyncResponse

import scala.collection.immutable.Seq
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global

private[command] object ImapSessionExecutor {
  def listen(self: ActorRef, userId: UserId, getResponse: ImapResponses => Response)(logger: Logger)(response: ImapFuture[ImapAsyncResponse]): Unit = {
    listenWithHandler(self, userId, getResponse, _ => ())(logger)(response)
  }

  def listenWithHandler[T](self: ActorRef, userId: UserId, getResponse: ImapResponses => Response, callback: Future[ImapAsyncResponse] => T)(logger: Logger)(response: ImapFuture[ImapAsyncResponse]): T = {
    import collection.JavaConverters._

    callback(Future {
      val responses = response.get()
      val responsesList = ImapResponses(responses.getResponseLines.asScala.to[Seq])
      logger.trace(s"On response for $userId :\n ${responsesList.mkString("\n")}")
      self ! getResponse(responsesList)
      responses
    })
  }
}
Example 20
Source File: NiFiPacketToObject.scala From trucking-iot with Apache License 2.0 | 5 votes |
package com.orendainx.trucking.storm.bolts

import java.nio.charset.StandardCharsets
import java.util

import com.orendainx.trucking.commons.models.{EnrichedTruckData, TrafficData}
import com.typesafe.scalalogging.Logger
import org.apache.nifi.storm.NiFiDataPacket
import org.apache.storm.task.{OutputCollector, TopologyContext}
import org.apache.storm.topology.OutputFieldsDeclarer
import org.apache.storm.topology.base.BaseRichBolt
import org.apache.storm.tuple.{Fields, Tuple, Values}

class NiFiPacketToObject extends BaseRichBolt {

  private lazy val log = Logger(this.getClass)
  private var outputCollector: OutputCollector = _

  override def prepare(stormConf: util.Map[_, _], context: TopologyContext, collector: OutputCollector): Unit = {
    outputCollector = collector
  }

  override def execute(tuple: Tuple): Unit = {
    val dp = tuple.getValueByField("nifiDataPacket").asInstanceOf[NiFiDataPacket]

    // Convert each tuple, really a NiFiDataPacket, into its proper case class instance (e.g. EnrichedTruckData or TrafficData)
    val (dataType, data) = dp.getAttributes.get("dataType") match {
      case typ @ "EnrichedTruckData" => (typ, EnrichedTruckData.fromCSV(new String(dp.getContent, StandardCharsets.UTF_8)))
      case typ @ "TrafficData"       => (typ, TrafficData.fromCSV(new String(dp.getContent, StandardCharsets.UTF_8)))
    }

    outputCollector.emit(new Values(dataType, data))
    outputCollector.ack(tuple)
  }

  override def declareOutputFields(declarer: OutputFieldsDeclarer): Unit =
    declarer.declare(new Fields("dataType", "data"))
}
Example 21
Source File: SerializedWithSchemaToObject.scala From trucking-iot with Apache License 2.0 | 5 votes |
package com.orendainx.trucking.storm.bolts

import java.io.ByteArrayInputStream
import java.nio.charset.StandardCharsets
import java.util

import com.hortonworks.registries.schemaregistry.SchemaMetadata
import com.hortonworks.registries.schemaregistry.avro.AvroSchemaProvider
import com.hortonworks.registries.schemaregistry.client.SchemaRegistryClient
import com.hortonworks.registries.schemaregistry.serdes.avro.AvroSnapshotDeserializer
import com.orendainx.trucking.commons.models.{EnrichedTruckData, TrafficData}
import com.typesafe.scalalogging.Logger
import org.apache.avro.generic.{GenericData, GenericRecord}
import org.apache.storm.task.{OutputCollector, TopologyContext}
import org.apache.storm.topology.OutputFieldsDeclarer
import org.apache.storm.topology.base.BaseRichBolt
import org.apache.storm.tuple.{Fields, Tuple, Values}

import scala.collection.JavaConversions._

class SerializedWithSchemaToObject extends BaseRichBolt {

  private lazy val log = Logger(this.getClass)
  private var outputCollector: OutputCollector = _

  // Declare schema-related fields to be initialized when this component's prepare() method is called
  private var schemaRegistryClient: SchemaRegistryClient = _
  private var deserializer: AvroSnapshotDeserializer = _
  private var truckDataSchemaMetadata: SchemaMetadata = _
  private var trafficDataSchemaMetadata: SchemaMetadata = _

  override def prepare(stormConf: util.Map[_, _], context: TopologyContext, collector: OutputCollector): Unit = {
    outputCollector = collector

    val schemaRegistryUrl = stormConf.get(SchemaRegistryClient.Configuration.SCHEMA_REGISTRY_URL.name()).toString
    val clientConfig = Map(SchemaRegistryClient.Configuration.SCHEMA_REGISTRY_URL.name() -> schemaRegistryUrl)

    schemaRegistryClient = new SchemaRegistryClient(clientConfig)
    truckDataSchemaMetadata = schemaRegistryClient.getSchemaMetadataInfo("EnrichedTruckData").getSchemaMetadata
    trafficDataSchemaMetadata = schemaRegistryClient.getSchemaMetadataInfo("TrafficData").getSchemaMetadata
    deserializer = schemaRegistryClient.getDefaultDeserializer(AvroSchemaProvider.TYPE).asInstanceOf[AvroSnapshotDeserializer]
    deserializer.init(clientConfig)
  }

  override def execute(tuple: Tuple): Unit = {
    // Deserialize each tuple and convert it into its proper case class (e.g. EnrichedTruckData or TrafficData)
    val str = tuple.getStringByField("data").getBytes(StandardCharsets.UTF_8)
    log.info(s"str2: ${tuple.getStringByField("data")}")
    val bytes = new ByteArrayInputStream(str)
    log.info(s"bytes: $bytes")

    val (dataType, data) = tuple.getStringByField("dataType") match {
      case typ @ "EnrichedTruckData" =>
        log.info(s"des: ${deserializer.deserialize(bytes, null)}")
        (typ, recordToEnrichedTruckData(deserializer.deserialize(bytes, null).asInstanceOf[GenericData.Record]))
      case typ @ "TrafficData" =>
        log.info(s"des: ${deserializer.deserialize(bytes, null)}")
        (typ, recordToTrafficData(deserializer.deserialize(bytes, null).asInstanceOf[GenericData.Record]))
    }

    outputCollector.emit(new Values(data, dataType))
    outputCollector.ack(tuple)
  }

  override def declareOutputFields(declarer: OutputFieldsDeclarer): Unit =
    declarer.declare(new Fields("data", "dataType"))

  // Helper function to convert GenericRecord (result of deserializing via Schema Registry) into JVM object
  private def recordToEnrichedTruckData(r: GenericRecord): EnrichedTruckData =
    EnrichedTruckData(
      r.get("eventTime").toString.toLong, r.get("truckId").toString.toInt,
      r.get("driverId").toString.toInt, r.get("driverName").toString,
      r.get("routeId").toString.toInt, r.get("routeName").toString,
      r.get("latitude").toString.toDouble, r.get("longitude").toString.toDouble,
      r.get("speed").toString.toInt, r.get("eventType").toString,
      r.get("foggy").toString.toInt, r.get("rainy").toString.toInt, r.get("windy").toString.toInt)

  // Helper function to convert GenericRecord (result of deserializing via Schema Registry) into JVM object
  private def recordToTrafficData(r: GenericRecord): TrafficData =
    TrafficData(r.get("eventTime").toString.toLong, r.get("routeId").toString.toInt, r.get("congestionLevel").toString.toInt)
}
Example 22
Source File: TruckAndTrafficJoinBolt.scala From trucking-iot with Apache License 2.0 | 5 votes |
package com.orendainx.trucking.storm.bolts

import java.util

import com.orendainx.trucking.commons.models.{EnrichedTruckAndTrafficData, EnrichedTruckData, TrafficData}
import com.typesafe.scalalogging.Logger
import org.apache.storm.task.{OutputCollector, TopologyContext}
import org.apache.storm.topology.OutputFieldsDeclarer
import org.apache.storm.topology.base.BaseWindowedBolt
import org.apache.storm.tuple.{Fields, Values}
import org.apache.storm.windowing.TupleWindow

import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer
import scala.collection.{Map, mutable}
import scala.language.implicitConversions

// (the class declaration and the windowing logic that builds truckDataPerRoute and
//  trafficDataPerRoute are elided in this excerpt)

  private def processAndEmitData(truckDataPerRoute: Map[Int, ListBuffer[EnrichedTruckData]],
                                 trafficDataPerRoute: Map[Int, ListBuffer[TrafficData]]) {

    // For each EnrichedTruckData object, find the TrafficData object with the closest timestamp
    truckDataPerRoute.foreach {
      case (routeId, truckDataList) =>
        trafficDataPerRoute.get(routeId) match {
          case None => // No traffic data for this routeId, so drop/ignore truck data
          case Some(trafficDataList) =>
            truckDataList foreach { truckData =>
              trafficDataList.sortBy(data => math.abs(data.eventTime - truckData.eventTime)).headOption match {
                case None => // Window didn't capture any traffic data for this truck's route
                case Some(trafficData) =>
                  val joinedData = EnrichedTruckAndTrafficData(truckData.eventTime, truckData.truckId,
                    truckData.driverId, truckData.driverName, truckData.routeId, truckData.routeName,
                    truckData.latitude, truckData.longitude, truckData.speed, truckData.eventType,
                    truckData.foggy, truckData.rainy, truckData.windy, trafficData.congestionLevel)
                  outputCollector.emit(new Values("EnrichedTruckAndTrafficData", joinedData))
              }
            }
        }
    }
  }

  override def declareOutputFields(declarer: OutputFieldsDeclarer): Unit =
    declarer.declare(new Fields("dataType", "data"))
}
Example 23
Source File: ObjectToBytesWithSchema.scala From trucking-iot with Apache License 2.0 | 5 votes |
package com.orendainx.trucking.storm.bolts

import java.util

import com.hortonworks.registries.schemaregistry.avro.AvroSchemaProvider
import com.hortonworks.registries.schemaregistry.client.SchemaRegistryClient
import com.hortonworks.registries.schemaregistry.serdes.avro.AvroSnapshotSerializer
import com.hortonworks.registries.schemaregistry.{SchemaMetadata, SchemaVersionInfo}
import com.orendainx.trucking.commons.models.{EnrichedTruckAndTrafficData, WindowedDriverStats}
import com.typesafe.scalalogging.Logger
import org.apache.avro.Schema
import org.apache.avro.generic.GenericData
import org.apache.storm.task.{OutputCollector, TopologyContext}
import org.apache.storm.topology.OutputFieldsDeclarer
import org.apache.storm.topology.base.BaseRichBolt
import org.apache.storm.tuple.{Fields, Tuple, Values}

import scala.collection.JavaConverters._

class ObjectToBytesWithSchema extends BaseRichBolt {

  private lazy val log = Logger(this.getClass)
  private var outputCollector: OutputCollector = _

  // Declare schema-related fields to be initialized when this component's prepare() method is called
  private var schemaRegistryClient: SchemaRegistryClient = _
  private var serializer: AvroSnapshotSerializer = _
  private var joinedSchemaMetadata: SchemaMetadata = _
  private var joinedSchemaInfo: SchemaVersionInfo = _
  private var driverStatsSchemaMetadata: SchemaMetadata = _
  private var driverStatsJoinedSchemaInfo: SchemaVersionInfo = _

  override def prepare(stormConf: util.Map[_, _], context: TopologyContext, collector: OutputCollector): Unit = {
    outputCollector = collector

    val schemaRegistryUrl = stormConf.get(SchemaRegistryClient.Configuration.SCHEMA_REGISTRY_URL.name()).toString
    val clientConfig = Map(SchemaRegistryClient.Configuration.SCHEMA_REGISTRY_URL.name() -> schemaRegistryUrl).asJava

    schemaRegistryClient = new SchemaRegistryClient(clientConfig)
    joinedSchemaMetadata = schemaRegistryClient.getSchemaMetadataInfo("EnrichedTruckAndTrafficData").getSchemaMetadata
    joinedSchemaInfo = schemaRegistryClient.getLatestSchemaVersionInfo("EnrichedTruckAndTrafficData")
    driverStatsSchemaMetadata = schemaRegistryClient.getSchemaMetadataInfo("WindowedDriverStats").getSchemaMetadata
    driverStatsJoinedSchemaInfo = schemaRegistryClient.getLatestSchemaVersionInfo("WindowedDriverStats")
    serializer = schemaRegistryClient.getDefaultSerializer(AvroSchemaProvider.TYPE).asInstanceOf[AvroSnapshotSerializer]
    serializer.init(clientConfig)
  }

  override def execute(tuple: Tuple): Unit = {
    val serializedBytes = tuple.getStringByField("dataType") match {
      case "EnrichedTruckAndTrafficData" =>
        val record = enrichedTruckAndTrafficToGenericRecord(tuple.getValueByField("data").asInstanceOf[EnrichedTruckAndTrafficData])
        serializer.serialize(record, joinedSchemaMetadata)
      case "WindowedDriverStats" =>
        val record = enrichedTruckAndTrafficToGenericRecord(tuple.getValueByField("data").asInstanceOf[WindowedDriverStats])
        serializer.serialize(record, driverStatsSchemaMetadata)
    }

    outputCollector.emit(new Values(serializedBytes))
    outputCollector.ack(tuple)
  }

  override def declareOutputFields(declarer: OutputFieldsDeclarer): Unit = declarer.declare(new Fields("data"))

  private def enrichedTruckAndTrafficToGenericRecord(data: EnrichedTruckAndTrafficData) = {
    val record = new GenericData.Record(new Schema.Parser().parse(joinedSchemaInfo.getSchemaText))
    record.put("eventTime", data.eventTime)
    record.put("truckId", data.truckId)
    record.put("driverId", data.driverId)
    record.put("driverName", data.driverName)
    record.put("routeId", data.routeId)
    record.put("routeName", data.routeName)
    record.put("latitude", data.latitude)
    record.put("longitude", data.longitude)
    record.put("speed", data.speed)
    record.put("eventType", data.eventType)
    record.put("foggy", data.foggy)
    record.put("rainy", data.rainy)
    record.put("windy", data.windy)
    record.put("congestionLevel", data.congestionLevel)
    record
  }

  private def enrichedTruckAndTrafficToGenericRecord(data: WindowedDriverStats) = {
    val record = new GenericData.Record(new Schema.Parser().parse(driverStatsJoinedSchemaInfo.getSchemaText))
    record.put("driverId", data.driverId)
    record.put("averageSpeed", data.averageSpeed)
    record.put("totalFog", data.totalFog)
    record.put("totalRain", data.totalRain)
    record.put("totalWind", data.totalWind)
    record.put("totalViolations", data.totalViolations)
    record
  }
}
Example 24
Source File: ObjectToCSVString.scala From trucking-iot with Apache License 2.0 | 5 votes |
package com.orendainx.trucking.storm.bolts

import java.nio.charset.StandardCharsets
import java.util

import com.orendainx.trucking.commons.models.{EnrichedTruckAndTrafficData, WindowedDriverStats}
import com.typesafe.scalalogging.Logger
import org.apache.storm.task.{OutputCollector, TopologyContext}
import org.apache.storm.topology.OutputFieldsDeclarer
import org.apache.storm.topology.base.BaseRichBolt
import org.apache.storm.tuple.{Fields, Tuple, Values}

class ObjectToCSVString extends BaseRichBolt {

  private lazy val log = Logger(this.getClass)
  private var outputCollector: OutputCollector = _

  override def prepare(stormConf: util.Map[_, _], context: TopologyContext, collector: OutputCollector): Unit = {
    outputCollector = collector
  }

  override def execute(tuple: Tuple): Unit = {
    val str = tuple.getStringByField("dataType") match {
      case "EnrichedTruckAndTrafficData" => tuple.getValueByField("data").asInstanceOf[EnrichedTruckAndTrafficData].toCSV
      case "WindowedDriverStats"         => tuple.getValueByField("data").asInstanceOf[WindowedDriverStats].toCSV
    }

    outputCollector.emit(new Values(str))
    outputCollector.ack(tuple)
  }

  override def declareOutputFields(declarer: OutputFieldsDeclarer): Unit =
    declarer.declare(new Fields("data"))
}
Example 25
Source File: CSVStringToObject.scala From trucking-iot with Apache License 2.0 | 5 votes |
package com.orendainx.trucking.storm.bolts

import java.util

import com.orendainx.trucking.commons.models.{EnrichedTruckData, TrafficData}
import com.typesafe.scalalogging.Logger
import org.apache.storm.task.{OutputCollector, TopologyContext}
import org.apache.storm.topology.OutputFieldsDeclarer
import org.apache.storm.topology.base.BaseRichBolt
import org.apache.storm.tuple.{Fields, Tuple, Values}

class CSVStringToObject extends BaseRichBolt {

  private lazy val log = Logger(this.getClass)
  private var outputCollector: OutputCollector = _

  override def prepare(stormConf: util.Map[_, _], context: TopologyContext, collector: OutputCollector): Unit = {
    outputCollector = collector
  }

  override def execute(tuple: Tuple): Unit = {
    // Convert each string into its proper case class instance (e.g. EnrichedTruckData or TrafficData)
    val (dataType, data) = tuple.getStringByField("dataType") match {
      case typ @ "EnrichedTruckData" => (typ, EnrichedTruckData.fromCSV(tuple.getStringByField("data")))
      case typ @ "TrafficData"       => (typ, TrafficData.fromCSV(tuple.getStringByField("data")))
    }

    outputCollector.emit(new Values(dataType, data))
    outputCollector.ack(tuple)
  }

  override def declareOutputFields(declarer: OutputFieldsDeclarer): Unit =
    declarer.declare(new Fields("dataType", "data"))
}
Example 26
Source File: NiFiPacketWithSchemaToObject.scala From trucking-iot with Apache License 2.0 | 5 votes |
package com.orendainx.trucking.storm.bolts

import java.io.ByteArrayInputStream
import java.util

import com.hortonworks.registries.schemaregistry.SchemaMetadata
import com.hortonworks.registries.schemaregistry.avro.AvroSchemaProvider
import com.hortonworks.registries.schemaregistry.client.SchemaRegistryClient
import com.hortonworks.registries.schemaregistry.serdes.avro.AvroSnapshotDeserializer
import com.orendainx.trucking.commons.models.{EnrichedTruckData, TrafficData}
import com.typesafe.scalalogging.Logger
import org.apache.avro.generic.{GenericData, GenericRecord}
import org.apache.nifi.storm.NiFiDataPacket
import org.apache.storm.task.{OutputCollector, TopologyContext}
import org.apache.storm.topology.OutputFieldsDeclarer
import org.apache.storm.topology.base.BaseRichBolt
import org.apache.storm.tuple.{Fields, Tuple, Values}

import scala.collection.JavaConversions._

class NiFiPacketWithSchemaToObject extends BaseRichBolt {

  private lazy val log = Logger(this.getClass)
  private var outputCollector: OutputCollector = _

  // Declare schema-related fields to be initialized when this component's prepare() method is called
  private var schemaRegistryClient: SchemaRegistryClient = _
  private var deserializer: AvroSnapshotDeserializer = _
  private var truckDataSchemaMetadata: SchemaMetadata = _
  private var trafficDataSchemaMetadata: SchemaMetadata = _

  override def prepare(stormConf: util.Map[_, _], context: TopologyContext, collector: OutputCollector): Unit = {
    outputCollector = collector

    val schemaRegistryUrl = stormConf.get(SchemaRegistryClient.Configuration.SCHEMA_REGISTRY_URL.name()).toString
    val clientConfig = Map(SchemaRegistryClient.Configuration.SCHEMA_REGISTRY_URL.name() -> schemaRegistryUrl)

    schemaRegistryClient = new SchemaRegistryClient(clientConfig)
    truckDataSchemaMetadata = schemaRegistryClient.getSchemaMetadataInfo("EnrichedTruckData").getSchemaMetadata
    trafficDataSchemaMetadata = schemaRegistryClient.getSchemaMetadataInfo("TrafficData").getSchemaMetadata
    deserializer = schemaRegistryClient.getDefaultDeserializer(AvroSchemaProvider.TYPE).asInstanceOf[AvroSnapshotDeserializer]
    deserializer.init(clientConfig)
  }

  override def execute(tuple: Tuple): Unit = {
    val dp = tuple.getValueByField("nifiDataPacket").asInstanceOf[NiFiDataPacket]

    // Deserialize each tuple and convert it into its proper case class (e.g. EnrichedTruckData or TrafficData)
    val (dataType, data) = dp.getAttributes.get("dataType") match {
      case typ @ "EnrichedTruckData" =>
        (typ, recordToEnrichedTruckData(deserializer.deserialize(new ByteArrayInputStream(dp.getContent), null).asInstanceOf[GenericData.Record]))
      case typ @ "TrafficData" =>
        (typ, recordToTrafficData(deserializer.deserialize(new ByteArrayInputStream(dp.getContent), null).asInstanceOf[GenericData.Record]))
    }

    outputCollector.emit(new Values(data, dataType))
    outputCollector.ack(tuple)
  }

  override def declareOutputFields(declarer: OutputFieldsDeclarer): Unit =
    declarer.declare(new Fields("data", "dataType"))

  // Helper function to convert GenericRecord (result of deserializing via Schema Registry) into JVM object
  private def recordToEnrichedTruckData(r: GenericRecord): EnrichedTruckData =
    EnrichedTruckData(
      r.get("eventTime").toString.toLong, r.get("truckId").toString.toInt,
      r.get("driverId").toString.toInt, r.get("driverName").toString,
      r.get("routeId").toString.toInt, r.get("routeName").toString,
      r.get("latitude").toString.toDouble, r.get("longitude").toString.toDouble,
      r.get("speed").toString.toInt, r.get("eventType").toString,
      r.get("foggy").toString.toInt, r.get("rainy").toString.toInt, r.get("windy").toString.toInt)

  // Helper function to convert GenericRecord (result of deserializing via Schema Registry) into JVM object
  private def recordToTrafficData(r: GenericRecord): TrafficData =
    TrafficData(r.get("eventTime").toString.toLong, r.get("routeId").toString.toInt, r.get("congestionLevel").toString.toInt)
}
Example 27
Source File: BytesWithSchemaToObject.scala From trucking-iot with Apache License 2.0 | 5 votes |
package com.orendainx.trucking.storm.bolts

import java.io.ByteArrayInputStream
import java.nio.charset.StandardCharsets
import java.util

import com.hortonworks.registries.schemaregistry.SchemaMetadata
import com.hortonworks.registries.schemaregistry.avro.AvroSchemaProvider
import com.hortonworks.registries.schemaregistry.client.SchemaRegistryClient
import com.hortonworks.registries.schemaregistry.serdes.avro.AvroSnapshotDeserializer
import com.orendainx.trucking.commons.models.{EnrichedTruckData, TrafficData}
import com.typesafe.scalalogging.Logger
import org.apache.avro.generic.{GenericData, GenericRecord}
import org.apache.storm.task.{OutputCollector, TopologyContext}
import org.apache.storm.topology.OutputFieldsDeclarer
import org.apache.storm.topology.base.BaseRichBolt
import org.apache.storm.tuple.{Fields, Tuple, Values}

import scala.collection.JavaConversions._

// (the class declaration, prepare() and execute() of BytesWithSchemaToObject are elided in this
//  excerpt; only its two record-conversion helpers survive)

  // Helper function to convert GenericRecord (result of deserializing via Schema Registry) into JVM object
  private def recordToEnrichedTruckData(r: GenericRecord): EnrichedTruckData =
    EnrichedTruckData(
      r.get("eventTime").toString.toLong, r.get("truckId").toString.toInt,
      r.get("driverId").toString.toInt, r.get("driverName").toString,
      r.get("routeId").toString.toInt, r.get("routeName").toString,
      r.get("latitude").toString.toDouble, r.get("longitude").toString.toDouble,
      r.get("speed").toString.toInt, r.get("eventType").toString,
      r.get("foggy").toString.toInt, r.get("rainy").toString.toInt, r.get("windy").toString.toInt)

  // Helper function to convert GenericRecord (result of deserializing via Schema Registry) into JVM object
  private def recordToTrafficData(r: GenericRecord): TrafficData =
    TrafficData(r.get("eventTime").toString.toLong, r.get("routeId").toString.toInt, r.get("congestionLevel").toString.toInt)
}
Example 28
Source File: DefaultAtmosphereFramework.scala From udash-core with Apache License 2.0 | 5 votes |
package io.udash.rpc.utils

import com.typesafe.scalalogging.Logger
import io.udash.rpc.serialization.{DefaultExceptionCodecRegistry, ExceptionCodecRegistry}
import io.udash.rpc.{AtmosphereService, AtmosphereServiceConfig}
import javax.servlet.ServletConfig
import org.atmosphere.cpr.{ApplicationConfig, AtmosphereFramework}

class DefaultAtmosphereFramework(
  config: AtmosphereServiceConfig[_],
  exceptionsRegistry: ExceptionCodecRegistry = new DefaultExceptionCodecRegistry,
  onRequestHandlingFailure: (Throwable, Logger) => Unit = (ex, logger) => logger.error("RPC request handling failed", ex)
) extends AtmosphereFramework {
  addInitParameter(ApplicationConfig.WEBSOCKET_SUPPORT, "true")
  addInitParameter(ApplicationConfig.PROPERTY_SESSION_SUPPORT, "true")
  addInitParameter(ApplicationConfig.PROPERTY_NATIVE_COMETSUPPORT, "true")
  addInitParameter(ApplicationConfig.DEFAULT_CONTENT_TYPE, "application/json")
  addInitParameter(ApplicationConfig.HEARTBEAT_INTERVAL_IN_SECONDS, "30")
  addInitParameter(ApplicationConfig.CLIENT_HEARTBEAT_INTERVAL_IN_SECONDS, "30")
  addInitParameter(ApplicationConfig.BROADCASTER_MESSAGE_PROCESSING_THREADPOOL_MAXSIZE, "4")
  addInitParameter(ApplicationConfig.BROADCASTER_ASYNC_WRITE_THREADPOOL_MAXSIZE, "4")
  addInitParameter(ApplicationConfig.BROADCASTER_SHARABLE_THREAD_POOLS, "true")
  addInitParameter(ApplicationConfig.BROADCASTER_LIFECYCLE_POLICY, "EMPTY_DESTROY")
  addInitParameter(ApplicationConfig.ANALYTICS, "false")

  override def init(sc: ServletConfig): AtmosphereFramework = {
    super.init(sc)
    addAtmosphereHandler("/*", new AtmosphereService(config, exceptionsRegistry, onRequestHandlingFailure = onRequestHandlingFailure))
  }
}
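Because the failure callback receives the Logger as an argument, callers can swap in their own error handling without subclassing. A hedged sketch; myServiceConfig is assumed to be provided elsewhere by the application:

val framework = new DefaultAtmosphereFramework(
  config = myServiceConfig, // an AtmosphereServiceConfig[_] assumed to exist in the application
  onRequestHandlingFailure = (ex, logger) => logger.warn(s"RPC request handling failed: ${ex.getMessage}", ex)
)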
Example 29
Source File: RunServer.scala From mleap with Apache License 2.0 | 5 votes |
package ml.combust.mleap.grpc.server

import java.util.concurrent.{Executors, TimeUnit}

import akka.Done
import akka.actor.{ActorSystem, CoordinatedShutdown}
import akka.stream.{ActorMaterializer, Materializer}
import com.typesafe.config.Config
import com.typesafe.scalalogging.Logger
import io.grpc.ServerBuilder
import ml.combust.mleap.executor.MleapExecutor
import ml.combust.mleap.pb.MleapGrpc

import scala.concurrent.{ExecutionContext, Future}
import scala.language.existentials
import scala.util.{Failure, Success, Try}

class RunServer(config: Config)
               (implicit system: ActorSystem) {
  private val logger = Logger(classOf[RunServer])

  private var coordinator: Option[CoordinatedShutdown] = None

  def run(): Unit = {
    Try {
      logger.info("Starting MLeap gRPC Server")

      val coordinator = CoordinatedShutdown(system)
      this.coordinator = Some(coordinator)

      implicit val materializer: Materializer = ActorMaterializer()

      val grpcServerConfig = new GrpcServerConfig(config.getConfig("default"))
      val mleapExecutor = MleapExecutor(system)
      val port: Int = config.getInt("port")
      val threads: Option[Int] = if (config.hasPath("threads")) Some(config.getInt("threads")) else None
      val threadCount = threads.getOrElse {
        Math.min(Math.max(Runtime.getRuntime.availableProcessors() * 4, 32), 64)
      }

      logger.info(s"Creating thread pool for server with size $threadCount")
      val grpcThreadPool = Executors.newFixedThreadPool(threadCount)
      implicit val ec: ExecutionContext = ExecutionContext.fromExecutor(grpcThreadPool)

      coordinator.addTask(CoordinatedShutdown.PhaseServiceRequestsDone, "threadPoolShutdownNow") {
        () =>
          Future {
            logger.info("Shutting down gRPC thread pool")
            grpcThreadPool.shutdown()
            grpcThreadPool.awaitTermination(5, TimeUnit.SECONDS)
            Done
          }
      }

      logger.info(s"Creating executor service")
      val grpcService: GrpcServer = new GrpcServer(mleapExecutor, grpcServerConfig)
      val builder = ServerBuilder.forPort(port)
      builder.intercept(new ErrorInterceptor)
      builder.addService(MleapGrpc.bindService(grpcService, ec))
      val grpcServer = builder.build()

      logger.info(s"Starting server on port $port")
      grpcServer.start()

      coordinator.addTask(CoordinatedShutdown.PhaseServiceUnbind, "grpcServiceShutdown") {
        () =>
          Future {
            logger.info("Shutting down gRPC")
            grpcServer.shutdown()
            grpcServer.awaitTermination(10, TimeUnit.SECONDS)
            Done
          }(ExecutionContext.global)
      }

      coordinator.addTask(CoordinatedShutdown.PhaseServiceStop, "grpcServiceShutdownNow") {
        () =>
          Future {
            if (!grpcServer.isShutdown) {
              logger.info("Shutting down gRPC NOW!")
              grpcServer.shutdownNow()
              grpcServer.awaitTermination(5, TimeUnit.SECONDS)
            }
            Done
          }(ExecutionContext.global)
      }
    } match {
      case Success(_) =>
      case Failure(err) =>
        logger.error("Error encountered starting server", err)
        for (c <- this.coordinator) {
          c.run(CoordinatedShutdown.UnknownReason)
        }
        throw err
    }
  }
}
Example 30
Source File: SlowLogMiddleware.scala From naptime with Apache License 2.0 | 5 votes |
package org.coursera.naptime.ari.graphql.controllers.middleware

import com.typesafe.scalalogging.Logger
import org.coursera.naptime.ari.graphql.SangriaGraphQlContext
import sangria.execution.BeforeFieldResult
import sangria.execution.Extension
import sangria.execution.Middleware
import sangria.execution.MiddlewareAfterField
import sangria.execution.MiddlewareExtension
import sangria.execution.MiddlewareQueryContext
import sangria.schema.Context
import sangria.execution.MiddlewareErrorField
import sangria.slowlog.SlowLog
import sangria.slowlog.QueryMetrics

import scala.concurrent.duration._
import scala.util.Failure
import scala.util.Success
import scala.util.Try

class SlowLogMiddleware(logger: Logger, isDebugMode: Boolean)
    extends Middleware[SangriaGraphQlContext]
    with MiddlewareExtension[SangriaGraphQlContext]
    with MiddlewareAfterField[SangriaGraphQlContext]
    with MiddlewareErrorField[SangriaGraphQlContext] {

  type QueryVal = QueryMetrics
  type FieldVal = Long

  private[this] val underlying = {
    if (isDebugMode) {
      SlowLog.extension
    } else {
      SlowLog(logger.underlying, threshold = 6 seconds)
    }
  }

  override def beforeQuery(
      context: MiddlewareQueryContext[SangriaGraphQlContext, _, _]): QueryMetrics =
    underlying.beforeQuery(context)

  override def afterQuery(
      queryVal: QueryMetrics,
      context: MiddlewareQueryContext[SangriaGraphQlContext, _, _]): Unit =
    underlying.afterQuery(queryVal, context)

  override def afterQueryExtensions(
      queryVal: QueryMetrics,
      context: MiddlewareQueryContext[SangriaGraphQlContext, _, _]): Vector[Extension[_]] =
    underlying.afterQueryExtensions(queryVal, context)

  // The next 2 functions are parametrized on `SangriaGraphQlContext`, which makes them unusable
  // when passed directly to the delegate. Instead, we have to do a (safe) typecast. It is
  // wrapped in a `Try` to avoid future runtime errors.
  override def afterField(
      queryVal: QueryMetrics,
      fieldVal: Long,
      value: Any,
      mctx: MiddlewareQueryContext[SangriaGraphQlContext, _, _],
      ctx: Context[SangriaGraphQlContext, _]): Option[Any] = {
    val safeContext = Try(ctx.asInstanceOf[Context[Any, _]])
    safeContext match {
      case Success(c) => underlying.afterField(queryVal, fieldVal, value, mctx, c)
      case Failure(_) => None
    }
  }

  override def fieldError(
      queryVal: QueryMetrics,
      fieldVal: Long,
      error: Throwable,
      mctx: MiddlewareQueryContext[SangriaGraphQlContext, _, _],
      ctx: Context[SangriaGraphQlContext, _]): Unit = {
    val safeContext = Try(ctx.asInstanceOf[Context[Any, _]])
    safeContext match {
      case Success(c) => underlying.fieldError(queryVal, fieldVal, error, mctx, c)
      case Failure(_) => ()
    }
  }

  // We cannot use the same type-casting approach as in the 2 functions above because the return type
  // is parametrized, so we just copy the underlying impl. It is very straightforward (just
  // recording the timestamp), so we don't think it will change.
  override def beforeField(
      queryVal: QueryMetrics,
      mctx: MiddlewareQueryContext[SangriaGraphQlContext, _, _],
      ctx: Context[SangriaGraphQlContext, _]): BeforeFieldResult[SangriaGraphQlContext, Long] =
    continue(System.nanoTime())
}
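Note how the middleware hands `logger.underlying` to Sangria's `SlowLog`, which expects a plain SLF4J logger. Every scala-logging `Logger` exposes its wrapped SLF4J instance the same way, which is the standard bridge to SLF4J-based APIs; a minimal sketch (the `Slf4jBridge` object and its `configure` callee are illustrative):

import com.typesafe.scalalogging.Logger
import org.slf4j.{Logger => Slf4jLogger}

object Slf4jBridge {
  // A stand-in for a third-party API that only accepts SLF4J loggers.
  private def configure(slf4j: Slf4jLogger): Unit = slf4j.info("configured")

  def main(args: Array[String]): Unit = {
    val logger = Logger("slf4j-bridge-example")
    configure(logger.underlying) // unwrap the scala-logging facade to reach SLF4J
  }
}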
Example 31
Source File: package.scala From milan with Apache License 2.0 | 5 votes |
package com.amazon.milan

import java.io.{File, FileOutputStream}
import java.net.URL
import java.nio.file.{Files, Path}

import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

package object tools {
  private lazy val logger = Logger(LoggerFactory.getLogger("milan"))

  def addToSbtClasspath(paths: Seq[Path]): Unit = {
    val urls = paths.map(_.toUri.toURL).toList

    urls.foreach(url => logger.info(s"Adding {$url} to classpath."))

    val classLoader = this.getClass.getClassLoader
    val addMethod = classLoader.getClass.getDeclaredMethod("add", classOf[Seq[URL]])
    addMethod.invoke(classLoader, urls)
  }

  def compileApplicationInstance(providerClassName: String,
                                 providerParameters: List[(String, String)],
                                 compilerClassName: String,
                                 compilerParameters: List[(String, String)],
                                 outputFile: Path): File = {
    val providerClass = ClassHelper.loadClass(providerClassName)

    val provider = providerClass.getConstructors.find(_.getParameterCount == 0) match {
      case None =>
        throw new Exception(s"Provider class $providerClassName does not have a default constructor.")
      case Some(constructor) =>
        constructor.newInstance().asInstanceOf[ApplicationInstanceProvider]
    }

    val instance = provider.getApplicationInstance(providerParameters)

    val actualCompilerClassName = KnownCompilers.convertFromKnownCompiler(compilerClassName)
    val compilerClass = ClassHelper.loadClass(actualCompilerClassName)

    val compiler = compilerClass.getConstructors.find(_.getParameterCount == 0) match {
      case None =>
        throw new Exception(s"Compiler class $actualCompilerClassName does not have a default constructor.")
      case Some(constructor) =>
        constructor.newInstance().asInstanceOf[ApplicationInstanceCompiler]
    }

    println(s"Writing generated code to output file '$outputFile'.")
    Files.createDirectories(outputFile.getParent)
    val outputStream = new FileOutputStream(outputFile.toFile)

    try {
      compiler.compile(instance, compilerParameters, outputStream)
      outputFile.toFile
    }
    finally {
      outputStream.close()
    }
  }
}
Example 32
Source File: GenericTypedJsonSerializer.scala From milan with Apache License 2.0 | 5 votes |
package com.amazon.milan.serialization

import com.fasterxml.jackson.core.JsonGenerator
import com.fasterxml.jackson.databind.{JsonSerializer, SerializerProvider}
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

class GenericTypedJsonSerializer[T <: GenericTypeInfoProvider] extends JsonSerializer[T] {
  private val logger = Logger(LoggerFactory.getLogger(getClass))

  override def serialize(value: T, jsonGenerator: JsonGenerator, serializerProvider: SerializerProvider): Unit = {
    val typeName = value.getTypeName
    val genericArgs = value.getGenericArguments
    logger.info(s"Serializing type '$typeName[${genericArgs.map(_.fullName).mkString(", ")}]'.")

    jsonGenerator.writeStartObject()
    jsonGenerator.writeStringField("_type", value.getTypeName)
    jsonGenerator.writeObjectField("_genericArgs", value.getGenericArguments)
    jsonGenerator.writeObject(value)
    jsonGenerator.writeEndObject()
  }
}
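Wrapping a logger obtained from `org.slf4j.LoggerFactory`, as this serializer does, is the most common way to construct a scala-logging `Logger`; the class-based overload produces an equivalently named logger with less ceremony. A minimal sketch (the `PaymentSerializer` class and its method are illustrative):

import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

class PaymentSerializer {
  // Both lines construct a logger named after this class; the commented one is shorthand for the first.
  private val logger = Logger(LoggerFactory.getLogger(getClass))
  // private val logger = Logger(getClass)

  def serialize(payload: String): Unit =
    logger.info(s"Serializing ${payload.length} characters") // message built only if INFO is enabled
}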
Example 33
Source File: ReflectionTypeProvider.scala From milan with Apache License 2.0 | 5 votes |
package com.amazon.milan.typeutil

import java.time.{Duration, Instant}

import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

class ReflectionTypeProvider(classLoader: ClassLoader) extends TypeProvider {
  private lazy val logger = Logger(LoggerFactory.getLogger(getClass))

  private val knownTypesByClass = Map[Class[_], TypeDescriptor[_]](
    classOf[Int] -> types.Int,
    classOf[Long] -> types.Long,
    classOf[Double] -> types.Double,
    classOf[Float] -> types.Float,
    classOf[Boolean] -> types.Boolean,
    classOf[Instant] -> types.Instant,
    classOf[Duration] -> types.Duration,
    classOf[String] -> types.String,
    classOf[Nothing] -> types.Nothing
  )

  private val knownTypesByName =
    Map[String, TypeDescriptor[_]](this.knownTypesByClass.values.map(td => td.typeName -> td).toList: _*)

  override def getTypeDescriptor[T](typeName: String, genericArguments: List[TypeDescriptor[_]]): TypeDescriptor[T] = {
    this.knownTypesByName.get(typeName) match {
      case Some(typeDesc) =>
        typeDesc.asInstanceOf[TypeDescriptor[T]]

      case None =>
        val alternatives = Seq(
          typeName,
          this.replaceLastDotWithDollar(typeName),
          s"scala.$typeName")

        // Return the first Class we find in the sequence of alternative class names.
        alternatives
          .map(this.tryFindClass)
          .filter(_.nonEmpty)
          .map(_.get)
          .headOption match {
          case Some(cls) =>
            this.generateTypeDescriptor[T](typeName, cls, genericArguments)

          case None =>
            this.logger.error(s"Couldn't generate TypeDescriptor for type '$typeName'.")
            null
        }
    }
  }

  private def tryFindClass(className: String): Option[Class[_]] = {
    try {
      Some(this.classLoader.loadClass(className))
    }
    catch {
      case _: ClassNotFoundException =>
        None
    }
  }

  private def replaceLastDotWithDollar(className: String): String = {
    className.lastIndexOf('.') match {
      case i if (i < 0) || (i == className.length - 1) =>
        className

      case i =>
        className.substring(0, i) + "$" + className.substring(i + 1)
    }
  }

  private def generateTypeDescriptor[T](typeName: String, cls: Class[_], genericArguments: List[TypeDescriptor[_]]): TypeDescriptor[T] = {
    this.logger.debug(s"Generating type descriptor for '$typeName'.")

    if (TypeDescriptor.isTupleTypeName(typeName)) {
      new TupleTypeDescriptor[T](typeName, genericArguments, List())
    }
    else {
      val fieldFields = cls.getDeclaredFields
        .map(field => FieldDescriptor(field.getName, this.getTypeDescriptor(field.getType, List())))
        .toList

      new ObjectTypeDescriptor[T](typeName, genericArguments, fieldFields)
    }
  }

  private def getTypeDescriptor(cls: Class[_], genericArguments: List[TypeDescriptor[_]]): TypeDescriptor[_] = {
    this.knownTypesByClass.get(cls) match {
      case Some(typeDesc) =>
        typeDesc

      case None =>
        this.generateTypeDescriptor[Any](cls.getCanonicalName, cls, genericArguments)
    }
  }
}
Example 34
Source File: TypeFactory.scala From milan with Apache License 2.0 | 5 votes |
package com.amazon.milan.typeutil

import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

class TypeFactory(typeProviders: List[TypeProvider]) {
  private lazy val logger = Logger(LoggerFactory.getLogger(getClass))

  def this() {
    this(TypeFactory.listTypeProviders().toList)
  }

  def getTypeDescriptor[T](typeName: String): TypeDescriptor[T] = {
    val genericArgs =
      if (TypeDescriptor.isGenericTypeName(typeName)) {
        val genericArgTypeNames = getGenericArgumentTypeNames(typeName)
        genericArgTypeNames.map(this.getTypeDescriptor[Any])
      }
      else {
        List()
      }

    val typeBaseName = getTypeNameWithoutGenericArguments(typeName)
    val providers = this.typeProviders.filter(_.canProvideType(typeBaseName))

    if (providers.isEmpty) {
      throw new TypeNotPresentException(typeName, null)
    }
    else if (providers.length > 1) {
      this.logger.warn(s"Type '$typeName' has more than one provider. An arbitrary provider will be used.")
    }

    providers.map(_.getTypeDescriptor[T](typeBaseName, genericArgs)).find(_ != null) match {
      case Some(typeDescriptor) => typeDescriptor
      case None => throw new TypeNotPresentException(typeName, null)
    }
  }
}

object TypeFactory {
  private var typeProviders: List[TypeProvider] = List(new ReflectionTypeProvider(getClass.getClassLoader))

  def overrideTypeProviders(provider: TypeProvider): Unit = {
    this.typeProviders = List(provider)
  }

  def registerTypeProvider(provider: TypeProvider): Unit = {
    this.typeProviders = this.typeProviders :+ provider
  }

  def listTypeProviders(): Iterable[TypeProvider] = {
    this.typeProviders
  }
}
Example 35
Source File: FlinkGenerator.scala From milan with Apache License 2.0 | 5 votes |
package com.amazon.milan.compiler.flink.generator

import java.io.{ByteArrayOutputStream, OutputStream}
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.nio.file.{Files, Path, StandardOpenOption}

import com.amazon.milan.application.{Application, ApplicationConfiguration, ApplicationInstance}
import com.amazon.milan.compiler.flink.internal.FlinkTypeEmitter
import com.amazon.milan.lang.StreamGraph
import com.amazon.milan.program.{Cycle, StreamExpression}
import com.amazon.milan.{Id, SemanticVersion}
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

case class GeneratorConfig(preventGenericTypeInformation: Boolean = false)

object FlinkGenerator {
  val default = new FlinkGenerator(GeneratorConfig())
}

class FlinkGenerator(classLoader: ClassLoader, generatorConfig: GeneratorConfig) {
  private val generatorTypeLifter = new FlinkTypeLifter(new FlinkTypeEmitter, this.generatorConfig.preventGenericTypeInformation)

  private val logger = Logger(LoggerFactory.getLogger(getClass))

  def this(generatorConfig: GeneratorConfig) {
    this(getClass.getClassLoader, generatorConfig)
  }

  def generateScala(graph: StreamGraph, appConfig: ApplicationConfiguration, packageName: String, className: String): String = {
    val application = new Application(Id.newId(), graph, SemanticVersion.ZERO)
    val instance = new ApplicationInstance(Id.newId(), application, appConfig)
    this.generateScala(instance, packageName, className)
  }

  def generateScala(instance: ApplicationInstance, outputPath: Path, packageName: String, className: String): Unit = {
    val scalaCode = this.generateScala(instance, packageName, className)
    val contents = scalaCode.getBytes(StandardCharsets.UTF_8)
    Files.write(outputPath, contents, StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING)
  }

  def generateScala(instance: ApplicationInstance, packageName: String, className: String): String = {
    val output = new ByteArrayOutputStream()
    this.generateScala(instance, output, packageName, className)

    output.flush()
    StandardCharsets.UTF_8.decode(ByteBuffer.wrap(output.toByteArray)).toString
  }

  def generateScala(instance: ApplicationInstance, output: OutputStream, packageName: String, className: String): Unit = {
    val finalGraph = instance.application.graph.getDereferencedGraph
    finalGraph.typeCheckGraph()

    val outputs = new GeneratorOutputs(this.generatorTypeLifter)
    val context = GeneratorContext.createEmpty(instance.instanceDefinitionId, finalGraph, instance.config, outputs, this.generatorTypeLifter)

    // Ensure that every data stream is generated.
    finalGraph
      .getStreams
      .foreach(stream => this.ensureStreamIsGenerated(context, stream))

    // Close any cycles.
    finalGraph
      .getStreams
      .filter(_.isInstanceOf[Cycle])
      .map(_.asInstanceOf[Cycle])
      .foreach(context.closeCycle)

    // Add all sinks at the end.
    instance.config.dataSinks.foreach(sink => context.generateSink(sink))

    val generated = context.output.generateScala(packageName, className)
    output.write(generated.getBytes(StandardCharsets.UTF_8))
  }

  private def ensureStreamIsGenerated(context: GeneratorContext, stream: StreamExpression): Unit = {
    context.getOrGenerateDataStream(stream)
  }
}
Example 36
Source File: DataSourceUtil.scala From milan with Apache License 2.0 | 5 votes |
package com.amazon.milan.compiler.flink.runtime

import com.amazon.milan.application.sources.FileDataSource
import com.amazon.milan.dataformats.DataInputFormat
import com.amazon.milan.compiler.flink.types.{ByteArrayDataFormatFlatMapFunction, ByteArrayInputFormat, ByteArrayRecordTypeInformation}
import com.typesafe.scalalogging.Logger
import org.apache.flink.api.common.io.FilePathFilter
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.streaming.api.datastream.{DataStreamSource, SingleOutputStreamOperator}
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment
import org.apache.flink.streaming.api.functions.source.FileProcessingMode
import org.slf4j.LoggerFactory

import scala.collection.JavaConverters._

object DataSourceUtil {
  private lazy val logger = Logger(LoggerFactory.getLogger(getClass))

  def addFileDataSource[T](env: StreamExecutionEnvironment,
                           path: String,
                           dataFormat: DataInputFormat[T],
                           configuration: FileDataSource.Configuration,
                           recordTypeInformation: TypeInformation[T]): SingleOutputStreamOperator[T] = {
    this.logger.info(s"Adding file '$path' as an input to the streaming environment.")

    val inputFormat = new ByteArrayInputFormat
    inputFormat.setFilesFilter(FilePathFilter.createDefaultFilter())

    val processingMode = configuration.readMode match {
      case FileDataSource.ReadMode.Continuous => FileProcessingMode.PROCESS_CONTINUOUSLY
      case FileDataSource.ReadMode.Once => FileProcessingMode.PROCESS_ONCE
    }

    val changeCheckIntervalMs = processingMode match {
      case FileProcessingMode.PROCESS_CONTINUOUSLY => 5000L
      case _ => -1L
    }

    val inputLines = env.readFile(
      inputFormat,
      path,
      processingMode,
      changeCheckIntervalMs,
      new ByteArrayRecordTypeInformation)

    val mapper = new ByteArrayDataFormatFlatMapFunction[T](dataFormat, recordTypeInformation)
    inputLines.flatMap(mapper)
  }

  def addListDataSource[T](env: StreamExecutionEnvironment,
                           values: List[T],
                           runForever: Boolean,
                           recordTypeInformation: TypeInformation[T]): DataStreamSource[T] = {
    if (runForever) {
      // If we don't want the source to terminate after the elements run out then we need to use a custom source
      // function rather than env.fromCollection. In order to not cause duplicate records to be sent from multiple
      // copies of the source function we set the parallelism to 1.
      val source = new ListSourceFunction[T](values, runForever)
      env.addSource(source, recordTypeInformation).setParallelism(1)
    }
    else {
      env.fromCollection(values.asJavaCollection, recordTypeInformation)
    }
  }
}
Example 37
Source File: ListSourceFunction.scala From milan with Apache License 2.0 | 5 votes |
package com.amazon.milan.compiler.flink.runtime

import com.typesafe.scalalogging.Logger
import org.apache.flink.streaming.api.functions.source.SourceFunction
import org.slf4j.LoggerFactory

class ListSourceFunction[T](values: List[T], runForever: Boolean) extends SourceFunction[T] {
  @transient private lazy val logger = Logger(LoggerFactory.getLogger(getClass))

  private var running = false

  override def run(sourceContext: SourceFunction.SourceContext[T]): Unit = {
    this.running = true

    values.foreach(sourceContext.collect)

    if (this.runForever) {
      this.logger.info(s"ListSourceFunction items exhausted, awaiting cancellation.")
      while (this.running) {
        Thread.sleep(100)
      }
    }
  }

  override def cancel(): Unit = {
    this.logger.info(s"ListSourceFunction cancelled.")
    this.running = false
  }
}
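`ListSourceFunction` declares its logger `@transient private lazy val`. Flink serializes source functions and ships them to task managers, and SLF4J loggers are not serializable, so the field is excluded from serialization and re-created lazily on the worker that deserializes the function. The same precaution applies to any Java-serialized closure that logs; a minimal sketch (the `SerializableTask` class is illustrative):

import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

class SerializableTask extends Serializable {
  // Skipped during serialization; re-initialised on first use after deserialization.
  @transient private lazy val logger = Logger(LoggerFactory.getLogger(getClass))

  def run(): Unit = logger.info("running on the deserialized instance")
}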
Example 38
Source File: MilanApplicationBase.scala From milan with Apache License 2.0 | 5 votes |
package com.amazon.milan.compiler.flink.runtime

import java.net.URLClassLoader

import com.amazon.milan.cmd.{ArgumentsBase, NamedArgument}
import com.typesafe.scalalogging.Logger
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend
import org.apache.flink.runtime.state.StateBackend
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment
import org.apache.flink.streaming.api.{CheckpointingMode, TimeCharacteristic}
import org.slf4j.LoggerFactory

class MilanApplicationCmdArgs extends ArgumentsBase {
  @NamedArgument(Name = "max-parallelism", ShortName = "mp", Required = false, DefaultValue = "0")
  var maxParallelism: Int = 0

  @NamedArgument(Name = "list-classpath", ShortName = "listcp", Required = false, DefaultValue = "false")
  var listClassPath: Boolean = false

  @NamedArgument(Name = "state-backend", ShortName = "sbe", Required = false, DefaultValue = "default")
  var stateBackend: String = _

  @NamedArgument(Name = "checkpoint-directory", ShortName = "cd", Required = false, DefaultValue = "file:///tmp/checkpoints")
  var checkpointDirectory: String = _

  @NamedArgument(Name = "checkpoint-interval", ShortName = "ci", Required = false, DefaultValue = "30")
  var checkpointIntervalSeconds: Int = 30
}

abstract class MilanApplicationBase {
  private val logger = Logger(LoggerFactory.getLogger(getClass))

  def buildFlinkApplication(env: StreamExecutionEnvironment): Unit

  def hasCycles: Boolean

  def execute(args: Array[String]): Unit = {
    val cmdArgs = new MilanApplicationCmdArgs
    cmdArgs.parse(args, allowUnknownArguments = true)

    if (cmdArgs.listClassPath) {
      this.listClassPathUrls()
    }

    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

    if (cmdArgs.maxParallelism > 0) {
      env.setMaxParallelism(cmdArgs.maxParallelism)
    }

    cmdArgs.stateBackend match {
      case "rocksdb" =>
        this.logger.info("Using RocksDB state back-end.")
        env.setStateBackend(new RocksDBStateBackend(cmdArgs.checkpointDirectory, true).asInstanceOf[StateBackend])
      case "default" =>
        ()
      case _ =>
        throw new IllegalArgumentException("state-backend must be one of: rocksdb, default")
    }

    if (!this.hasCycles) {
      env.enableCheckpointing(cmdArgs.checkpointIntervalSeconds * 1000, CheckpointingMode.AT_LEAST_ONCE)
    }

    this.buildFlinkApplication(env)
    env.execute()
  }

  private def listClassPathUrls(): Unit = {
    ClassLoader.getSystemClassLoader match {
      case urlClassLoader: URLClassLoader =>
        urlClassLoader.getURLs.foreach(url => logger.info(s"ClassPath: $url"))
      case _ =>
        this.logger.error(s"Can't list ClassPath URLs for ClassLoader of type '${ClassLoader.getSystemClassLoader.getClass.getName}'.")
    }
  }
}
Example 39
Source File: UnpackOptionProcessFunction.scala From milan with Apache License 2.0 | 5 votes |
package com.amazon.milan.compiler.flink.runtime

import com.amazon.milan.compiler.flink.types.{RecordWrapper, RecordWrapperTypeInformation}
import com.typesafe.scalalogging.Logger
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.typeutils.ResultTypeQueryable
import org.apache.flink.streaming.api.functions.ProcessFunction
import org.apache.flink.util.Collector
import org.slf4j.LoggerFactory

class UnpackOptionProcessFunction[T >: Null, TKey >: Null <: Product](recordType: TypeInformation[T],
                                                                      keyType: TypeInformation[TKey])
  extends ProcessFunction[RecordWrapper[Option[T], TKey], RecordWrapper[T, TKey]]
    with ResultTypeQueryable[RecordWrapper[T, TKey]] {

  @transient private lazy val logger = Logger(LoggerFactory.getLogger(getClass))

  override def processElement(record: RecordWrapper[Option[T], TKey],
                              context: ProcessFunction[RecordWrapper[Option[T], TKey], RecordWrapper[T, TKey]]#Context,
                              collector: Collector[RecordWrapper[T, TKey]]): Unit = {
    if (record.value.isDefined) {
      collector.collect(RecordWrapper.wrap(record.value.get, record.key, record.sequenceNumber))
    }
  }

  override def getProducedType: TypeInformation[RecordWrapper[T, TKey]] =
    RecordWrapperTypeInformation.wrap(this.recordType, this.keyType)
}
Example 40
Source File: KinesisDataSource.scala From milan with Apache License 2.0 | 5 votes |
package com.amazon.milan.compiler.flink.runtime

import java.util.Properties

import com.amazon.milan.dataformats.DataInputFormat
import com.amazon.milan.serialization.MilanObjectMapper
import com.typesafe.scalalogging.Logger
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.streaming.api.datastream.DataStreamSource
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment
import org.apache.flink.streaming.connectors.kinesis.FlinkKinesisConsumer
import org.apache.flink.streaming.connectors.kinesis.config.{AWSConfigConstants, ConsumerConfigConstants}
import org.apache.flink.streaming.connectors.kinesis.serialization.KinesisDeserializationSchema
import org.slf4j.LoggerFactory

object KinesisDataSource {
  private lazy val logger = Logger(LoggerFactory.getLogger(getClass))

  def addDataSource[T](env: StreamExecutionEnvironment,
                       streamName: String,
                       region: String,
                       dataFormat: DataInputFormat[T],
                       recordTypeInformation: TypeInformation[T]): DataStreamSource[T] = {
    this.logger.info(s"Creating Kinesis consumer for stream '$streamName', region '$region'.")

    val config = this.getConsumerProperties(region)
    val schema = new JsonDeserializationSchema[T](recordTypeInformation)
    val source = new FlinkKinesisConsumer[T](streamName, schema, config)
    env.addSource(source)
  }

  private def getConsumerProperties(region: String): Properties = {
    val config = new Properties()
    config.setProperty(AWSConfigConstants.AWS_REGION, region)
    config.setProperty(AWSConfigConstants.AWS_CREDENTIALS_PROVIDER, AWSConfigConstants.CredentialProvider.AUTO.toString)
    config.setProperty(ConsumerConfigConstants.STREAM_INITIAL_POSITION, ConsumerConfigConstants.InitialPosition.LATEST.toString)
    config
  }
}

class JsonDeserializationSchema[T](recordTypeInformation: TypeInformation[T])
  extends KinesisDeserializationSchema[T] {

  override def deserialize(bytes: Array[Byte],
                           partitionKey: String,
                           seqNum: String,
                           approxArrivalTimestamp: Long,
                           stream: String,
                           shardId: String): T = {
    MilanObjectMapper.readValue[T](bytes, this.recordTypeInformation.getTypeClass)
  }

  override def getProducedType: TypeInformation[T] = this.recordTypeInformation
}
Example 41
Source File: FlinkSingletonMemorySink.scala From milan with Apache License 2.0 | 5 votes |
package com.amazon.milan.compiler.flink.testing

import com.amazon.milan.compiler.scala.RuntimeEvaluator
import com.amazon.milan.compiler.flink.types.ArrayRecord
import com.amazon.milan.typeutil.TypeDescriptor
import com.amazon.milan.{Id, application}
import com.typesafe.scalalogging.Logger
import org.apache.flink.streaming.api.functions.sink.SinkFunction
import org.slf4j.LoggerFactory

class TupleSingletonMemorySinkFunction[T](id: String,
                                          typeDescriptor: TypeDescriptor[T])
  extends SinkFunction[ArrayRecord] {

  @transient private lazy val createInstanceFunc = this.compileCreateInstanceFunc()
  @transient private lazy val logger = Logger(LoggerFactory.getLogger(getClass))

  override def invoke(value: ArrayRecord): Unit = {
    // Convert the value to the expected Tuple type before adding it to the sink.
    val tupleValue = this.createInstanceFunc(value.values)
    application.sinks.SingletonMemorySink.add[T](this.id, tupleValue)
  }

  private def compileCreateInstanceFunc(): Array[Any] => T = {
    val eval = RuntimeEvaluator.instance

    val fieldTypeNames = this.typeDescriptor.genericArguments.map(_.fullName)

    // Create statements that get the tuple values from the list and cast them to the
    // expected type for the corresponding tuple element.
    val fieldValueGetters = fieldTypeNames.zipWithIndex.map {
      case (f, i) => s"values($i).asInstanceOf[$f]"
    }

    val fieldValuesStatement = fieldValueGetters.mkString(", ")
    val tupleCreationStatement = s"${this.typeDescriptor.typeName}($fieldValuesStatement)"

    this.logger.info(s"Compiling tuple creation function: $tupleCreationStatement")

    eval.createFunction[Array[Any], T](
      "values",
      "Array[Any]",
      tupleCreationStatement
    )
  }
}
Example 42
Source File: WordVectorActor.scala From tap with Apache License 2.0 | 5 votes |
package io.heta.tap.analysis.wordvector

import java.io.File

import akka.actor.Actor
import com.typesafe.scalalogging.Logger
import io.heta.tap.analysis.wordvector.WordVectorActor._
import org.deeplearning4j.models.embeddings.loader.WordVectorSerializer
import org.deeplearning4j.models.word2vec.Word2Vec

import scala.collection.JavaConverters._
import scala.util.Try

object WordVectorActor {
  object INIT
  case class getNearestWords(word: String, numberOfNearestWords: Int)
}

class WordVectorActor extends Actor {
  val logger: Logger = Logger(this.getClass)

  val gModel = new File("models/googleNews/GoogleNews-vectors-negative300.bin.gz")
  val vec = Try(Some(WordVectorSerializer.readWord2VecModel(gModel))).getOrElse(None)

  def receive: PartialFunction[Any, Unit] = {
    case INIT => sender ! init
    case gNearestWords: getNearestWords =>
      sender ! getNearestWords(gNearestWords.word, gNearestWords.numberOfNearestWords)
    case msg: Any => logger.error(s"WordVectorActor received unknown msg: $msg")
  }

  def init: Boolean = {
    vec != None
  }

  def getNearestWords(word: String, numberOfNearestWords: Int): Option[Array[String]] = {
    if (vec != None) {
      val wordCollection = vec.get.wordsNearest(word, numberOfNearestWords)
      Some(wordCollection.asScala.toArray)
    } else None
  }
}
Example 43
Source File: CluAnnotatorActor.scala From tap with Apache License 2.0 | 5 votes |
package io.heta.tap.analysis.clu

import akka.actor.Actor
import com.typesafe.scalalogging.Logger
import io.heta.tap.analysis.clu.CluAnnotatorActor.{AnnotateRequest, INIT}
import org.clulab.processors.Document
import org.clulab.processors.clu.CluProcessor

object CluAnnotatorActor {
  object INIT
  sealed trait Request
  case class AnnotateRequest(text: String) extends Request
}

class CluAnnotatorActor extends Actor {
  val logger: Logger = Logger(this.getClass)

  val processor = new CluProcessor()

  def receive: PartialFunction[Any, Unit] = {
    case INIT => sender ! init
    case annotate: AnnotateRequest => sender ! createAnnotatedDoc(annotate)
    case msg: Any => logger.error(s"CluAnnotator received unknown msg: ${msg.toString}") // scalastyle:ignore
  }

  def init: Boolean = {
    logger.warn("Initialising CluProcessor")
    val text = """CluProcessor is starting up!"""
    val aDoc = processor.mkDocument(text)
    processor.annotate(aDoc)
    val result: Boolean = aDoc.sentences.length == 1
    if (result) {
      logger.info(s"Successfully initialised CluProcessor")
    } else {
      logger.error("Unable to initialise CluProcessor:")
    }
    result
  }

  def createAnnotatedDoc(annotate: AnnotateRequest): Document = {
    logger.warn("In the annotator, creating the document")
    val doc = processor.mkDocument(annotate.text)
    processor.annotate(doc)
  }
}
Example 44
Source File: AsyncAnalysisActorInitialiser.scala From tap with Apache License 2.0 | 5 votes |
package modules

import com.google.inject.AbstractModule
import com.typesafe.scalalogging.Logger
import io.heta.tap.analysis.batch.BatchActor
import io.heta.tap.analysis.clu.CluAnnotatorActor
import play.api.libs.concurrent.AkkaGuiceSupport

class AsyncAnalysisActorInitialiser extends AbstractModule with AkkaGuiceSupport {
  val logger: Logger = Logger(this.getClass)

  override def configure(): Unit = {
    logger.info("Binding BatchActor")
    bindActor[BatchActor]("batch")
    logger.info("Binding CluAnnotatorActor")
    bindActor[CluAnnotatorActor]("cluAnnotator")
    //bindActor[AffectLexiconActor]("affectlexicon")
    //bindActor[WordVectorActor]("wordvector")
  }
}
Example 45
Source File: package.scala From ionroller with MIT License | 5 votes |
package ionroller

import com.typesafe.scalalogging.Logger
import play.api.libs.json.Json

import scalaz.Scalaz._
import scalaz._
import scalaz.concurrent.Task
import scalaz.stream.Cause.EarlyCause
import scalaz.stream.Process.{Await, Emit, Halt, Step}
import scalaz.stream._

trait Stepper[F[_], A] {
  def next: OptionT[F, Seq[A]]
  def close: F[Unit]
}

package object stream {

  import JsonUtil.Implicits._

  implicit class RatelimitedProcessSyntax[O](p: Process[Task, O]) {
    def logDebug(logger: Logger, f: O => String) = {
      val logStringProcess: Sink[Task, String] =
        Process.constant({ i: String => Task.delay(logger.debug(i)) })
      p.observe(logStringProcess.contramap(f))
    }

    def logInfo(logger: Logger, f: O => String) = {
      val logStringProcess: Sink[Task, String] =
        Process.constant({ i: String => Task.delay(logger.info(i)) })
      p.observe(logStringProcess.contramap(f))
    }

    def logDebugJson(logger: Logger) =
      logDebug(logger, i => Json.prettyPrint(i.toJsonValue))

    def logThrowable[T](logger: Logger, msg: String)(implicit ev: O <:< \/[Throwable, T]): Process[Task, T] = {
      val logThrowableProcess = Process.constant({ i: \/[Throwable, T] =>
        i match {
          case -\/(t) => Task.delay(logger.error(msg, t))
          case _ => Task.now(())
        }
      })
      p.map(ev.apply).observe(logThrowableProcess).collect { case \/-(v) => v }
    }
  }

  def emitChanges[A, B](differ: (A, A) => Seq[B])(implicit equal: Equal[A]): Process1[A, B] = {
    def emitChangesProcess(items: Vector[A]) = {
      if (items.size <= 1)
        Process.empty
      else
        Process.emitAll(differ(items(0), items(1)))
    }

    process1.distinctConsecutive[A].sliding(2).flatMap(emitChangesProcess)
  }

  def step[A](p: Process[Task, A]): Stepper[Task, A] = new Stepper[Task, A] {
    var state = p

    def next: OptionT[Task, Seq[A]] = state.step match {
      case Halt(_) => OptionT.none
      case Step(Emit(as: Seq[A]), cont) =>
        state = cont.continue
        OptionT(as.point[Task] map some)
      case Step(Await(req: Task[_] @unchecked, rcv), cont) =>
        for {
          tail <- (req.attempt map { r => rcv(EarlyCause fromTaskResult r).run +: cont }).liftM[OptionT]
          _ = state = tail
          back <- next
        } yield back
    }

    def close = Task.suspend {
      Task.delay(state = state.kill) >> state.run
    }
  }
}
Example 46
Source File: ServerMain.scala From scastie with Apache License 2.0 | 5 votes |
package com.olegych.scastie.web

import com.olegych.scastie.web.routes._
import com.olegych.scastie.web.oauth2._
import com.olegych.scastie.balancer._
import com.olegych.scastie.util.ScastieFileUtil

import akka.http.scaladsl._
import server.Directives._
import ch.megard.akka.http.cors.scaladsl.CorsDirectives._
import com.typesafe.config.ConfigFactory
import com.typesafe.scalalogging.Logger
import akka.actor.{ActorSystem, Props}
import akka.stream.ActorMaterializer

import scala.concurrent.duration._
import scala.concurrent.Await

object ServerMain {
  def main(args: Array[String]): Unit = {

    val logger = Logger("ServerMain")

    val port =
      if (args.isEmpty) 9000
      else args.head.toInt

    val config2 = ConfigFactory.load().getConfig("akka.remote.netty.tcp")
    println("akka tcp config")
    println(config2.getString("hostname"))
    println(config2.getInt("port"))

    val config = ConfigFactory.load().getConfig("com.olegych.scastie.web")
    val production = config.getBoolean("production")

    if (production) {
      ScastieFileUtil.writeRunningPid()
    }

    implicit val system: ActorSystem = ActorSystem("Web")
    import system.dispatcher
    implicit val materializer: ActorMaterializer = ActorMaterializer()

    val github = new Github
    val session = new GithubUserSession(system)
    val userDirectives = new UserDirectives(session)

    val progressActor = system.actorOf(
      Props[ProgressActor],
      name = "ProgressActor"
    )

    val statusActor = system.actorOf(
      StatusActor.props,
      name = "StatusActor"
    )

    val dispatchActor = system.actorOf(
      Props(new DispatchActor(progressActor, statusActor)),
      name = "DispatchActor"
    )

    val routes = concat(
      cors()(
        pathPrefix("api")(
          concat(
            new ApiRoutes(dispatchActor, userDirectives).routes,
            new ProgressRoutes(progressActor).routes,
            new DownloadRoutes(dispatchActor).routes,
            new StatusRoutes(statusActor, userDirectives).routes,
            new ScalaJsRoutes(dispatchActor).routes
          )
        )
      ),
      new OAuth2Routes(github, session).routes,
      cors()(
        concat(
          new ScalaLangRoutes(dispatchActor, userDirectives).routes,
          new FrontPageRoutes(production).routes
        )
      )
    )

    Await.result(Http().bindAndHandle(routes, "0.0.0.0", port), 1.seconds)

    logger.info(s"Scastie started (port: $port)")

    // scala.io.StdIn.readLine("press enter to stop server")
    // system.terminate()

    Await.result(system.whenTerminated, Duration.Inf)

    ()
  }
}
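`ServerMain` builds its logger from a plain string rather than a class, which keeps the logger name stable inside a `main` method where `getClass` would report the companion object's mangled name. A minimal sketch of the name-based factory (the `Boot` object is illustrative):

import com.typesafe.scalalogging.Logger

object Boot {
  def main(args: Array[String]): Unit = {
    val logger = Logger("Boot") // explicit logger name, independent of the enclosing class
    logger.info(s"Started with ${args.length} argument(s)")
  }
}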
Example 47
Source File: StringUnmarshaller.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.admin.directives

import akka.http.scaladsl.unmarshalling.{FromStringUnmarshaller, Unmarshaller}
import ch.epfl.bluebrain.nexus.admin.exceptions.AdminError.InvalidFormat
import com.typesafe.scalalogging.Logger
import io.circe.parser._
import io.circe.{Decoder, Json}

object StringUnmarshaller {
  private val logger = Logger[this.type]

  def unmarshallJson[A: Decoder]: FromStringUnmarshaller[A] =
    unmarshaller { value =>
      parse(value).left.map { err =>
        logger.warn(s"Failed to convert string '$value' to Json", err)
        InvalidFormat
      }
    }

  private def unmarshaller[A](
      f: String => Either[Throwable, Json]
  )(implicit dec: Decoder[A]): FromStringUnmarshaller[A] =
    Unmarshaller.strict[String, A] {
      case "" => throw Unmarshaller.NoContentException
      case string =>
        f(string).flatMap(_.as[A]) match {
          case Right(value) => value
          case Left(err)    => throw new IllegalArgumentException(err)
        }
    }
}
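`StringUnmarshaller` obtains its logger with the class-tag overload `Logger[this.type]`, which names the logger after the enclosing type without spelling the name out. Because scala-logging's log methods are macros, the interpolated message costs nothing when the level is disabled; a minimal sketch (the `ConfigParser` object is illustrative):

import com.typesafe.scalalogging.Logger

import scala.util.Try

object ConfigParser {
  private val logger = Logger[ConfigParser.type] // named after this object via its ClassTag

  def parsePort(raw: String): Option[Int] =
    Try(raw.toInt).toOption match {
      case some @ Some(_) => some
      case None =>
        logger.warn(s"Failed to parse '$raw' as a port number") // interpolation skipped when WARN is off
        None
    }
}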
Example 48
Source File: RepairFromMessages.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.admin

import java.util.UUID

import akka.actor.ActorSystem
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.PersistenceQuery
import ch.epfl.bluebrain.nexus.admin.organizations.Organizations
import ch.epfl.bluebrain.nexus.admin.projects.Projects
import com.typesafe.scalalogging.Logger
import monix.eval.Task
import monix.execution.Scheduler

import scala.concurrent.Future
import scala.util.Try

object RepairFromMessages {

  private val log = Logger[RepairFromMessages.type]

  def repair(
      o: Organizations[Task],
      p: Projects[Task]
  )(implicit as: ActorSystem, sc: Scheduler): Future[Unit] = {
    val pq = PersistenceQuery(as).readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)
    pq.currentPersistenceIds()
      .mapAsync(1) {
        case OrgId(uuid)  => (o.fetch(uuid) >> Task.unit).runToFuture
        case ProjId(uuid) => (p.fetch(uuid) >> Task.unit).runToFuture
        case other =>
          log.warn(s"Unknown persistence id '$other'")
          Future.successful(())
      }
      .runFold(0) {
        case (acc, _) =>
          if (acc % 100 == 0) log.info(s"Processed '$acc' persistence ids.")
          acc + 1
      }
      .map(_ => ())
  }

  sealed abstract class PersistenceId(prefix: String) {
    private val len = prefix.length
    def unapply(arg: String): Option[UUID] =
      if (arg.startsWith(prefix)) Try(UUID.fromString(arg.drop(len))).toOption
      else None
  }
  object OrgId  extends PersistenceId("organizations-")
  object ProjId extends PersistenceId("projects-")
}
Example 49
Source File: ElasticSearchBaseClient.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.commons.es.client

import akka.http.scaladsl.model.StatusCodes.GatewayTimeout
import akka.http.scaladsl.model.{HttpRequest, StatusCode, StatusCodes}
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.commons.es.client.ElasticSearchBaseClient._
import ch.epfl.bluebrain.nexus.commons.es.client.ElasticSearchFailure.{ElasticServerError, ElasticUnexpectedError}
import ch.epfl.bluebrain.nexus.commons.http.HttpClient.UntypedHttpClient
import ch.epfl.bluebrain.nexus.sourcing.RetryStrategyConfig
import com.typesafe.scalalogging.Logger
import retry.CatsEffect._
import retry.syntax.all._
import retry.{RetryDetails, RetryPolicy}

import scala.util.control.NonFatal

// (The ElasticSearchBaseClient class body is elided in this listing; only the
// `sanitize` helper below and the companion object survive.)

  private[client] def sanitize(index: String, allowWildCard: Boolean): String = {
    val regex = if (allowWildCard) """[\s|"|\\|<|>|\||,|/|?]""" else """[\s|"|*|\\|<|>|\||,|/|?]"""
    index.replaceAll(regex, "_").dropWhile(_ == '_')
  }
}

object ElasticSearchBaseClient {
  private[client] val docType           = "_doc"
  private[client] val source            = "_source"
  private[client] val anyIndexPath      = "_all"
  private[client] val ignoreUnavailable = "ignore_unavailable"
  private[client] val allowNoIndices    = "allow_no_indices"
  private[client] val trackTotalHits    = "track_total_hits"
}
Example 50
Source File: ElasticSearchIndexer.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.indexing

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.stream.scaladsl.Source
import akka.util.Timeout
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.types.Project
import ch.epfl.bluebrain.nexus.commons.es.client.ElasticSearchClient
import ch.epfl.bluebrain.nexus.commons.es.client.ElasticSearchClient.BulkOp
import ch.epfl.bluebrain.nexus.kg.indexing.View.ElasticSearchView
import ch.epfl.bluebrain.nexus.kg.resources._
import ch.epfl.bluebrain.nexus.kg.routes.Clients
import ch.epfl.bluebrain.nexus.service.config.ServiceConfig
import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.ProgressFlowElem
import ch.epfl.bluebrain.nexus.sourcing.projections.ProjectionProgress.NoProgress
import ch.epfl.bluebrain.nexus.sourcing.projections._
import com.typesafe.scalalogging.Logger

import scala.concurrent.ExecutionContext

// $COVERAGE-OFF$
@SuppressWarnings(Array("MaxParameters"))
object ElasticSearchIndexer {

  implicit private val log: Logger = Logger[ElasticSearchIndexer.type]

  final def start[F[_]: Timer](
      view: ElasticSearchView,
      resources: Resources[F],
      project: Project,
      restartOffset: Boolean
  )(implicit
      as: ActorSystem,
      actorInitializer: (Props, String) => ActorRef,
      projections: Projections[F, String],
      F: Effect[F],
      clients: Clients[F],
      config: ServiceConfig
  ): StreamSupervisor[F, ProjectionProgress] = {

    implicit val ec: ExecutionContext          = as.dispatcher
    implicit val p: Project                    = project
    implicit val indexing: IndexingConfig      = config.kg.elasticSearch.indexing
    implicit val metadataOpts: MetadataOptions = MetadataOptions(linksAsIri = true, expandedLinks = true)
    implicit val tm: Timeout                   = Timeout(config.kg.elasticSearch.askTimeout)

    val client: ElasticSearchClient[F] =
      clients.elasticSearch.withRetryPolicy(config.kg.elasticSearch.indexing.retry)

    def deleteOrIndex(res: ResourceV): Option[BulkOp] =
      if (res.deprecated && !view.filter.includeDeprecated) Some(delete(res))
      else view.toDocument(res).map(doc => BulkOp.Index(view.index, res.id.value.asString, doc))

    def delete(res: ResourceV): BulkOp =
      BulkOp.Delete(view.index, res.id.value.asString)

    val initFetchProgressF: F[ProjectionProgress] =
      if (restartOffset)
        projections.recordProgress(view.progressId, NoProgress) >> view.createIndex >> F.pure(NoProgress)
      else view.createIndex >> projections.progress(view.progressId)

    val sourceF: F[Source[ProjectionProgress, _]] = initFetchProgressF.map { initial =>
      val flow = ProgressFlowElem[F, Any]
        .collectCast[Event]
        .groupedWithin(indexing.batch, indexing.batchTimeout)
        .distinct()
        .mapAsync(view.toResource(resources, _))
        .collectSome[ResourceV]
        .collect {
          case res if view.allowedSchemas(res) && view.allowedTypes(res) => deleteOrIndex(res)
          case res if view.allowedSchemas(res)                           => Some(delete(res))
        }
        .collectSome[BulkOp]
        .runAsyncBatch(client.bulk(_))()
        .mergeEmit()
        .toPersistedProgress(view.progressId, initial)

      cassandraSource(s"project=${view.ref.id}", view.progressId, initial.minProgress.offset)
        .via(flow)
        .via(kamonViewMetricsFlow(view, project))
    }
    StreamSupervisor.start(sourceF, view.progressId, actorInitializer)
  }
}
// $COVERAGE-ON$
Example 51
Source File: StorageIndexer.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.indexing

import java.time.Instant

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Source}
import akka.util.Timeout
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.AdminClient
import ch.epfl.bluebrain.nexus.iam.auth.AccessToken
import ch.epfl.bluebrain.nexus.kg.cache.{ProjectCache, StorageCache}
import ch.epfl.bluebrain.nexus.kg.config.KgConfig.StorageConfig
import ch.epfl.bluebrain.nexus.kg.resources._
import ch.epfl.bluebrain.nexus.kg.storage.Storage
import ch.epfl.bluebrain.nexus.service.config.ServiceConfig
import ch.epfl.bluebrain.nexus.service.config.Vocabulary.nxv
import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.{PairMsg, ProgressFlowElem}
import ch.epfl.bluebrain.nexus.sourcing.projections._
import com.typesafe.scalalogging.Logger

import scala.concurrent.ExecutionContext

// $COVERAGE-OFF$
object StorageIndexer {

  implicit private val log = Logger[StorageIndexer.type]

  def start[F[_]: Timer](storages: Storages[F], storageCache: StorageCache[F])(implicit
      projectCache: ProjectCache[F],
      F: Effect[F],
      as: ActorSystem,
      projectInitializer: ProjectInitializer[F],
      adminClient: AdminClient[F],
      config: ServiceConfig
  ): StreamSupervisor[F, Unit] = {

    implicit val authToken: Option[AccessToken] = config.serviceAccount.credentials
    implicit val indexing: IndexingConfig       = config.kg.keyValueStore.indexing
    implicit val ec: ExecutionContext           = as.dispatcher
    implicit val tm: Timeout                    = Timeout(config.kg.keyValueStore.askTimeout)
    implicit val storageConfig: StorageConfig   = config.kg.storage

    val name = "storage-indexer"

    def toStorage(event: Event): F[Option[(Storage, Instant)]] =
      fetchProject(event.organization, event.id.parent, event.subject).flatMap { implicit project =>
        storages.fetchStorage(event.id).value.map {
          case Left(err) =>
            log.error(s"Error on event '${event.id.show} (rev = ${event.rev})', cause: '${err.msg}'")
            None
          case Right(timedStorage) => Some(timedStorage)
        }
      }

    val source: Source[PairMsg[Any], _] = cassandraSource(s"type=${nxv.Storage.value.show}", name)
    val flow: Flow[PairMsg[Any], Unit, _] = ProgressFlowElem[F, Any]
      .collectCast[Event]
      .groupedWithin(indexing.batch, indexing.batchTimeout)
      .distinct()
      .mergeEmit()
      .mapAsync(toStorage)
      .collectSome[(Storage, Instant)]
      .runAsync { case (storage, instant) => storageCache.put(storage)(instant) }()
      .flow
      .map(_ => ())

    StreamSupervisor.startSingleton(F.delay(source.via(flow)), name)
  }
}
// $COVERAGE-ON$
Example 52
Source File: ResolverIndexer.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.indexing

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Source}
import akka.util.Timeout
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.AdminClient
import ch.epfl.bluebrain.nexus.iam.auth.AccessToken
import ch.epfl.bluebrain.nexus.kg.cache.{ProjectCache, ResolverCache}
import ch.epfl.bluebrain.nexus.kg.resolve.Resolver
import ch.epfl.bluebrain.nexus.kg.resources._
import ch.epfl.bluebrain.nexus.service.config.ServiceConfig
import ch.epfl.bluebrain.nexus.service.config.Vocabulary.nxv
import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.{PairMsg, ProgressFlowElem}
import ch.epfl.bluebrain.nexus.sourcing.projections._
import com.typesafe.scalalogging.Logger

import scala.concurrent.ExecutionContext

// $COVERAGE-OFF$
object ResolverIndexer {

  implicit private val log = Logger[ResolverIndexer.type]

  final def start[F[_]: Timer](resolvers: Resolvers[F], resolverCache: ResolverCache[F])(implicit
      projectCache: ProjectCache[F],
      as: ActorSystem,
      F: Effect[F],
      projectInitializer: ProjectInitializer[F],
      adminClient: AdminClient[F],
      config: ServiceConfig
  ): StreamSupervisor[F, Unit] = {

    implicit val authToken: Option[AccessToken] = config.serviceAccount.credentials
    implicit val indexing: IndexingConfig       = config.kg.keyValueStore.indexing
    implicit val ec: ExecutionContext           = as.dispatcher
    implicit val tm: Timeout                    = Timeout(config.kg.keyValueStore.askTimeout)

    val name = "resolver-indexer"

    def toResolver(event: Event): F[Option[Resolver]] =
      fetchProject(event.organization, event.id.parent, event.subject).flatMap { implicit project =>
        resolvers.fetchResolver(event.id).value.map {
          case Left(err) =>
            log.error(s"Error on event '${event.id.show} (rev = ${event.rev})', cause: '${err.msg}'")
            None
          case Right(resolver) => Some(resolver)
        }
      }

    val source: Source[PairMsg[Any], _] = cassandraSource(s"type=${nxv.Resolver.value.show}", name)
    val flow: Flow[PairMsg[Any], Unit, _] = ProgressFlowElem[F, Any]
      .collectCast[Event]
      .groupedWithin(indexing.batch, indexing.batchTimeout)
      .distinct()
      .mergeEmit()
      .mapAsync(toResolver)
      .collectSome[Resolver]
      .runAsync(resolverCache.put)()
      .flow
      .map(_ => ())

    StreamSupervisor.startSingleton(F.delay(source.via(flow)), name)
  }
}
// $COVERAGE-ON$
Example 53
Source File: ViewIndexer.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.indexing

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Source}
import akka.util.Timeout
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.AdminClient
import ch.epfl.bluebrain.nexus.iam.auth.AccessToken
import ch.epfl.bluebrain.nexus.kg.cache.{ProjectCache, ViewCache}
import ch.epfl.bluebrain.nexus.kg.resources._
import ch.epfl.bluebrain.nexus.service.config.ServiceConfig
import ch.epfl.bluebrain.nexus.service.config.Vocabulary.nxv
import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.{PairMsg, ProgressFlowElem}
import ch.epfl.bluebrain.nexus.sourcing.projections._
import com.typesafe.scalalogging.Logger

import scala.concurrent.ExecutionContext

// $COVERAGE-OFF$
object ViewIndexer {

  implicit private val log = Logger[ViewIndexer.type]

  def start[F[_]: Timer](views: Views[F], viewCache: ViewCache[F])(implicit
      projectCache: ProjectCache[F],
      F: Effect[F],
      as: ActorSystem,
      projectInitializer: ProjectInitializer[F],
      adminClient: AdminClient[F],
      config: ServiceConfig
  ): StreamSupervisor[F, Unit] = {

    implicit val authToken: Option[AccessToken] = config.serviceAccount.credentials
    implicit val indexing: IndexingConfig       = config.kg.keyValueStore.indexing
    implicit val ec: ExecutionContext           = as.dispatcher
    implicit val tm: Timeout                    = Timeout(config.kg.keyValueStore.askTimeout)

    val name = "view-indexer"

    def toView(event: Event): F[Option[View]] =
      fetchProject(event.organization, event.id.parent, event.subject).flatMap { implicit project =>
        views.fetchView(event.id).value.map {
          case Left(err) =>
            log.error(s"Error on event '${event.id.show} (rev = ${event.rev})', cause: '${err.msg}'")
            None
          case Right(view) => Some(view)
        }
      }

    val source: Source[PairMsg[Any], _] = cassandraSource(s"type=${nxv.View.value.show}", name)
    val flow: Flow[PairMsg[Any], Unit, _] = ProgressFlowElem[F, Any]
      .collectCast[Event]
      .groupedWithin(indexing.batch, indexing.batchTimeout)
      .distinct()
      .mergeEmit()
      .mapAsync(toView)
      .collectSome[View]
      .runAsync(viewCache.put)()
      .flow
      .map(_ => ())

    StreamSupervisor.startSingleton(F.delay(source.via(flow)), name)
  }
}
// $COVERAGE-ON$
Example 54
Source File: EventSource.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.client

import java.util.UUID

import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.headers.OAuth2BearerToken
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.persistence.query.{NoOffset, Offset, Sequence, TimeBasedUUID}
import akka.stream.Materializer
import akka.stream.alpakka.sse.scaladsl.{EventSource => SSESource}
import akka.stream.scaladsl.Source
import ch.epfl.bluebrain.nexus.iam.auth.AccessToken
import ch.epfl.bluebrain.nexus.rdf.implicits._
import ch.epfl.bluebrain.nexus.rdf.Iri.AbsoluteIri
import com.typesafe.scalalogging.Logger
import io.circe.Decoder
import io.circe.parser.decode

import scala.concurrent.{ExecutionContext, Future}
import scala.util.Try

trait EventSource[A] {
  // (The trait's abstract member is elided in this listing; its signature is recoverable from the override below.)
  def apply(iri: AbsoluteIri, offset: Option[String])(implicit cred: Option[AccessToken]): Source[(Offset, A), NotUsed]
}

object EventSource {
  // (The companion declaration is elided in this listing; reconstructed here so the factory below compiles.)
  def apply[A: Decoder](
      config: KgClientConfig
  )(implicit as: ActorSystem, mt: Materializer, ec: ExecutionContext): EventSource[A] =
    new EventSource[A] {
      private val logger = Logger[this.type]

      private val http = Http()

      private def addCredentials(request: HttpRequest)(implicit cred: Option[AccessToken]): HttpRequest =
        cred.map(token => request.addCredentials(OAuth2BearerToken(token.value))).getOrElse(request)

      private def send(request: HttpRequest)(implicit cred: Option[AccessToken]): Future[HttpResponse] =
        http.singleRequest(addCredentials(request)).map { resp =>
          if (!resp.status.isSuccess())
            logger.warn(s"HTTP response when performing SSE request: status = '${resp.status}'")
          resp
        }

      private def toOffset(id: String): Offset =
        Try(TimeBasedUUID(UUID.fromString(id))).orElse(Try(Sequence(id.toLong))).getOrElse(NoOffset)

      override def apply(iri: AbsoluteIri, offset: Option[String])(implicit
          cred: Option[AccessToken]
      ): Source[(Offset, A), NotUsed] =
        SSESource(iri.asAkka, send, offset, config.sseRetryDelay).flatMapConcat { sse =>
          val offset = sse.id.map(toOffset).getOrElse(NoOffset)
          decode[A](sse.data) match {
            case Right(ev) => Source.single(offset -> ev)
            case Left(err) =>
              logger.error(s"Failed to decode admin event '$sse'", err)
              Source.empty
          }
        }
    }
}
Example 55
Source File: package.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg

import cats.Monad
import cats.implicits._
import ch.epfl.bluebrain.nexus.kg.resources.syntax._
import ch.epfl.bluebrain.nexus.admin.client.types.Project
import ch.epfl.bluebrain.nexus.iam.acls.{AccessControlList, AccessControlLists}
import ch.epfl.bluebrain.nexus.kg.cache.ProjectCache
import ch.epfl.bluebrain.nexus.rdf.Iri.Path
import ch.epfl.bluebrain.nexus.rdf.Iri.Path._
import com.typesafe.scalalogging.Logger

package object async {

  val anyProject: Path = "*" / "*"

  def resolveProjects[F[_]](
      acls: AccessControlLists
  )(implicit projectCache: ProjectCache[F], log: Logger, F: Monad[F]): F[Map[Project, AccessControlList]] =
    acls.value.foldLeft(F.pure(Map.empty[Project, AccessControlList])) {
      case (fProjectsMap, (path, resourceAcl)) =>
        val acl = resourceAcl.value
        for {
          projectMap <- fProjectsMap
          projects   <- path.resolveProjects
        } yield projects.foldLeft(projectMap)((acc, project) =>
          acc + (project -> acc.get(project).map(_ ++ acl).getOrElse(acl))
        )
    }
}
Example 56
Source File: RepairFromMessages.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg

import java.net.URLDecoder
import java.util.UUID

import akka.actor.ActorSystem
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.PersistenceQuery
import ch.epfl.bluebrain.nexus.kg.resources.{Id, Repo, ResId}
import ch.epfl.bluebrain.nexus.kg.resources.ProjectIdentifier.ProjectRef
import ch.epfl.bluebrain.nexus.rdf.Iri
import com.typesafe.scalalogging.Logger
import monix.eval.Task
import monix.execution.Scheduler
import monix.execution.schedulers.CanBlock

import scala.concurrent.Future
import scala.util.Try

object RepairFromMessages {
  // $COVERAGE-OFF$
  private val log = Logger[RepairFromMessages.type]

  def repair(repo: Repo[Task])(implicit as: ActorSystem, sc: Scheduler, pm: CanBlock): Unit = {
    log.info("Repairing dependent tables from messages.")
    val pq = PersistenceQuery(as).readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)
    Task
      .fromFuture {
        pq.currentPersistenceIds()
          .mapAsync(1) {
            case ResourceId(id) => (repo.get(id, None).value >> Task.unit).runToFuture
            case other =>
              log.warn(s"Unknown persistence id '$other'")
              Future.successful(())
          }
          .runFold(0) {
            case (acc, _) =>
              if (acc % 1000 == 0) log.info(s"Processed '$acc' persistence ids.")
              acc + 1
          }
          .map(_ => ())
      }
      .runSyncUnsafe()
    log.info("Finished repairing dependent tables from messages.")
  }

  object ResourceId {
    private val regex =
      "^resources\\-([0-9a-fA-F]{8}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{12})\\-(.+)$".r

    def unapply(arg: String): Option[ResId] =
      arg match {
        case regex(stringUuid, stringId) =>
          for {
            uuid <- Try(UUID.fromString(stringUuid)).toOption
            iri  <- Iri.absolute(URLDecoder.decode(stringId, "UTF-8")).toOption
          } yield Id(ProjectRef(uuid), iri)
        case _ => None
      }
  }
  // $COVERAGE-ON$
}
Example 57
Source File: RepairFromMessages.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.iam

import java.net.URLDecoder

import akka.actor.ActorSystem
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.PersistenceQuery
import ch.epfl.bluebrain.nexus.iam.acls.Acls
import ch.epfl.bluebrain.nexus.iam.permissions.Permissions
import ch.epfl.bluebrain.nexus.iam.realms.Realms
import ch.epfl.bluebrain.nexus.iam.types.Label
import ch.epfl.bluebrain.nexus.rdf.Iri.Path
import com.typesafe.scalalogging.Logger
import monix.eval.Task
import monix.execution.Scheduler
import monix.execution.schedulers.CanBlock

import scala.concurrent.Future

object RepairFromMessages {
  // $COVERAGE-OFF$
  private val log = Logger[RepairFromMessages.type]

  def repair(
      p: Permissions[Task],
      r: Realms[Task],
      a: Acls[Task]
  )(implicit as: ActorSystem, sc: Scheduler, pm: CanBlock): Unit = {
    val pq = PersistenceQuery(as).readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)
    pq.currentPersistenceIds()
      .mapAsync(1) {
        case PermissionsId() => p.agg.currentState(p.persistenceId).runToFuture
        case RealmId(label)  => r.agg.currentState(label.value).runToFuture
        case AclId(path)     => a.agg.currentState(path.asString).runToFuture
        case other =>
          log.warn(s"Unknown persistence id '$other'")
          Future.successful(())
      }
      .runFold(0) {
        case (acc, _) =>
          if (acc % 100 == 0) log.info(s"Processed '$acc' persistence ids.")
          acc + 1
      }
      .runSyncDiscard()

    log.info("Repair from messages table completed.")
  }

  sealed abstract class PersistenceId(prefix: String) {
    private val len = prefix.length
    protected def dropPrefix(arg: String): Option[String] =
      if (arg.startsWith(prefix)) Some(arg.drop(len))
      else None
  }

  object RealmId extends PersistenceId("realms-") {
    def unapply(arg: String): Option[Label] =
      dropPrefix(arg).map(Label.unsafe)
  }

  object AclId extends PersistenceId("acls-") {
    def unapply(arg: String): Option[Path] =
      dropPrefix(arg).flatMap(str => Path(URLDecoder.decode(str, "UTF-8")).toOption)
  }

  object PermissionsId {
    def unapply(arg: String): Boolean = arg == "permissions-permissions"
  }

  implicit class RichFuture[A](val future: Future[A]) extends AnyVal {
    def runSyncDiscard()(implicit s: Scheduler, permit: CanBlock): Unit =
      Task.fromFuture(future).map(_ => ()).runSyncUnsafe()
  }
  // $COVERAGE-ON$
}
Example 58
Source File: AuthDirectives.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.service.directives

import akka.http.scaladsl.model.headers.OAuth2BearerToken
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.directives.Credentials
import akka.http.scaladsl.server.directives.FutureDirectives.onComplete
import akka.http.scaladsl.server.{Directive0, Directive1}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.exceptions.AdminError.AuthorizationFailed
import ch.epfl.bluebrain.nexus.iam.acls.{AccessControlLists, Acls}
import ch.epfl.bluebrain.nexus.iam.auth.AccessToken
import ch.epfl.bluebrain.nexus.iam.realms.Realms
import ch.epfl.bluebrain.nexus.iam.types.IamError.InvalidAccessToken
import ch.epfl.bluebrain.nexus.iam.types.{Caller, Permission}
import ch.epfl.bluebrain.nexus.kg.KgError.AuthenticationFailed
import ch.epfl.bluebrain.nexus.rdf.Iri.{AbsoluteIri, Path}
import ch.epfl.bluebrain.nexus.rdf.Iri.Path._
import ch.epfl.bluebrain.nexus.rdf.implicits._
import ch.epfl.bluebrain.nexus.service.config.ServiceConfig.HttpConfig
import ch.epfl.bluebrain.nexus.service.exceptions.ServiceError.InternalError
import com.typesafe.scalalogging.Logger
import monix.eval.Task
import monix.execution.Scheduler

import scala.concurrent.Future
import scala.util.{Failure, Success}

// Note: the original page shows only this directive, not its enclosing declaration.
// A minimal wrapper is reconstructed below so the extract compiles; the exact
// constructor and the remaining members of the original class are assumptions.
abstract class AuthDirectives(acls: Acls[Task])(implicit s: Scheduler) {

  private val logger = Logger[this.type]

  def extractCallerAcls(path: Path)(implicit c: Caller): Directive1[AccessControlLists] =
    onComplete(acls.list(path, ancestors = true, self = true).runToFuture).flatMap {
      case Success(AccessControlLists.empty) => failWith(AuthorizationFailed)
      case Success(result)                   => provide(result)
      case Failure(err) =>
        val message = "Error when trying to check for permissions"
        logger.error(message, err)
        failWith(InternalError(message))
    }
}
Example 59
Source File: AuthDirectives.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.storage.routes

import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.model.headers.OAuth2BearerToken
import akka.http.scaladsl.server.Directive1
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.directives.FutureDirectives.onComplete
import ch.epfl.bluebrain.nexus.storage.IamIdentitiesClient
import ch.epfl.bluebrain.nexus.storage.IamIdentitiesClient.{AccessToken, Caller}
import ch.epfl.bluebrain.nexus.storage.IamIdentitiesClientError.IdentitiesClientStatusError
import ch.epfl.bluebrain.nexus.storage.StorageError._
import com.typesafe.scalalogging.Logger
import monix.eval.Task
import monix.execution.Scheduler.Implicits.global

import scala.util.{Failure, Success}

object AuthDirectives {

  private val logger = Logger[this.type]

  def extractCaller(implicit identities: IamIdentitiesClient[Task], token: Option[AccessToken]): Directive1[Caller] =
    onComplete(identities().runToFuture).flatMap {
      case Success(caller)                                                   => provide(caller)
      case Failure(IdentitiesClientStatusError(StatusCodes.Unauthorized, _)) => failWith(AuthenticationFailed)
      case Failure(IdentitiesClientStatusError(StatusCodes.Forbidden, _))    => failWith(AuthorizationFailed)
      case Failure(err) =>
        val message = "Error when trying to extract the subject"
        logger.error(message, err)
        failWith(InternalError(message))
    }
}
Example 60
Source File: Routes.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.storage.routes

import akka.http.scaladsl.model.headers.{`WWW-Authenticate`, HttpChallenges}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.{ExceptionHandler, RejectionHandler, Route}
import ch.epfl.bluebrain.nexus.storage.IamIdentitiesClient.Caller
import ch.epfl.bluebrain.nexus.storage.StorageError._
import ch.epfl.bluebrain.nexus.storage.config.AppConfig
import ch.epfl.bluebrain.nexus.storage.config.AppConfig._
import ch.epfl.bluebrain.nexus.storage.routes.AuthDirectives._
import ch.epfl.bluebrain.nexus.storage.routes.PrefixDirectives._
import ch.epfl.bluebrain.nexus.storage.routes.instances._
import ch.epfl.bluebrain.nexus.storage.{AkkaSource, IamIdentitiesClient, Rejection, StorageError, Storages}
import com.typesafe.scalalogging.Logger
import monix.eval.Task

import scala.util.control.NonFatal

object Routes {

  private[this] val logger = Logger[this.type]

  def apply(
      storages: Storages[Task, AkkaSource]
  )(implicit config: AppConfig, identities: IamIdentitiesClient[Task]): Route =
    // TODO: Fetch Bearer token and verify identity
    // `wrap` installs the exception and rejection handlers; its definition is part of
    // the original file but omitted from this extract.
    wrap {
      concat(
        AppInfoRoutes(config.description).routes,
        (pathPrefix(config.http.prefix) & extractToken) { implicit token =>
          extractCaller.apply {
            case Caller(config.subject.subjectValue, _) => StorageRoutes(storages).routes
            case _                                      => failWith(AuthenticationFailed)
          }
        }
      )
    }
}
Example 61
Source File: AttributesCache.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.storage.attributes

import java.nio.file.Path
import java.time.Clock

import akka.actor.{ActorRef, ActorSystem}
import akka.pattern.{ask, AskTimeoutException}
import akka.util.Timeout
import cats.effect.{ContextShift, Effect, IO}
import cats.implicits._
import ch.epfl.bluebrain.nexus.storage.File.FileAttributes
import ch.epfl.bluebrain.nexus.storage.StorageError.{InternalError, OperationTimedOut}
import ch.epfl.bluebrain.nexus.storage.attributes.AttributesCacheActor.Protocol._
import ch.epfl.bluebrain.nexus.storage.config.AppConfig.DigestConfig
import com.typesafe.scalalogging.Logger

import scala.util.control.NonFatal

trait AttributesCache[F[_]] {
  // The extract omitted this declaration even though the implementation below
  // overrides it; it is restored here so the snippet compiles.
  def get(filePath: Path): F[FileAttributes]
  def asyncComputePut(filePath: Path, algorithm: String): Unit
}

object AttributesCache {

  private[this] val logger = Logger[this.type]

  def apply[F[_], Source](implicit
      system: ActorSystem,
      clock: Clock,
      tm: Timeout,
      F: Effect[F],
      computation: AttributesComputation[F, Source],
      config: DigestConfig
  ): AttributesCache[F] =
    apply(system.actorOf(AttributesCacheActor.props(computation)))

  private[attributes] def apply[F[_]](
      underlying: ActorRef
  )(implicit system: ActorSystem, tm: Timeout, F: Effect[F]): AttributesCache[F] =
    new AttributesCache[F] {
      implicit private val contextShift: ContextShift[IO] = IO.contextShift(system.dispatcher)

      override def get(filePath: Path): F[FileAttributes] =
        IO.fromFuture(IO.shift(system.dispatcher) >> IO(underlying ? Get(filePath)))
          .to[F]
          .flatMap[FileAttributes] {
            case attributes: FileAttributes => F.pure(attributes)
            case other =>
              logger.error(s"Received unexpected reply from the file attributes cache: '$other'")
              F.raiseError(InternalError("Unexpected reply from the file attributes cache"))
          }
          .recoverWith {
            case _: AskTimeoutException =>
              F.raiseError(OperationTimedOut("reply from the file attributes cache timed out"))
            case NonFatal(th) =>
              logger.error("Exception caught while exchanging messages with the file attributes cache", th)
              F.raiseError(InternalError("Exception caught while exchanging messages with the file attributes cache"))
          }

      override def asyncComputePut(filePath: Path, algorithm: String): Unit =
        underlying ! Compute(filePath)
    }
}
Example 62
Source File: LorreInfoLogging.scala From Conseil with Apache License 2.0 | 5 votes |
package tech.cryptonomic.conseil.indexer.logging

import com.typesafe.scalalogging.Logger
import tech.cryptonomic.conseil.BuildInfo
import tech.cryptonomic.conseil.common.config.Platforms.{BlockchainPlatform, PlatformConfiguration}
import tech.cryptonomic.conseil.common.io.MainOutputs.{showDatabaseConfiguration, showPlatformConfiguration}
import tech.cryptonomic.conseil.indexer.config.LorreConfiguration

trait LorreInfoLogging {
  protected def logger: Logger

  def displayConfiguration[C <: PlatformConfiguration](
      platform: BlockchainPlatform,
      platformConf: C,
      lorreConf: LorreConfiguration,
      ignoreFailures: (String, Option[String])
  ): Unit =
    logger.info(
      """
        | ==================================***==================================
        |  Configuration details
        |
        |  Connecting to {} {}
        |  on {}
        |
        |  Reference hash for synchronization with the chain: {}
        |  Requested depth of synchronization: {}
        |  Environment set to skip failed download of chain data: {} [\u2020]
        |
        |  {}
        |
        | [\u2020] To let the process crash on error,
        |     set an environment variable named {} to "off" or "no"
        | ==================================***==================================
        |""".stripMargin,
      platform.name,
      platformConf.network,
      showPlatformConfiguration(platformConf),
      lorreConf.headHash.fold("head")(_.value),
      lorreConf.depth,
      ignoreFailures._2.getOrElse("yes"),
      showDatabaseConfiguration("lorre"),
      ignoreFailures._1
    )
}
Example 63
Source File: JsonToMichelson.scala From Conseil with Apache License 2.0 | 5 votes |
package tech.cryptonomic.conseil.indexer.tezos.michelson

import tech.cryptonomic.conseil.indexer.tezos.michelson.dto.MichelsonElement
import tech.cryptonomic.conseil.indexer.tezos.michelson.parser.JsonParser
import tech.cryptonomic.conseil.indexer.tezos.michelson.parser.JsonParser.Parser
import tech.cryptonomic.conseil.indexer.tezos.michelson.renderer.MichelsonRenderer._

import scala.reflect.ClassTag
import scala.util.Try
import com.typesafe.scalalogging.Logger

object JsonToMichelson {

  type Result[T] = Either[Throwable, T]

  def convert[T <: MichelsonElement: Parser](json: String): Result[String] =
    JsonParser.parse[T](json).map(_.render())

  def toMichelsonScript[T <: MichelsonElement: Parser](
      json: String
  )(implicit tag: ClassTag[T], logger: Logger): String = {

    def unparsableResult(json: Any, exception: Option[Throwable] = None): String = {
      exception match {
        case Some(t) => logger.error(s"${tag.runtimeClass}: Error during conversion of $json", t)
        case None    => logger.error(s"${tag.runtimeClass}: Error during conversion of $json")
      }
      s"Unparsable code: $json"
    }

    def parse(json: String): String = convert[T](json) match {
      case Right(convertedResult) => convertedResult
      case Left(exception)        => unparsableResult(json, Some(exception))
    }

    Try(parse(json)).getOrElse(unparsableResult(json))
  }
}
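A hedged usage sketch: toMichelsonScript needs an implicit Logger plus a Parser instance for the element type. MichelsonSchema stands in for any parseable element from the dto package, and the JSON literal is a placeholder, not real Tezos RPC output.

import com.typesafe.scalalogging.Logger
import tech.cryptonomic.conseil.indexer.tezos.michelson.JsonToMichelson
import tech.cryptonomic.conseil.indexer.tezos.michelson.dto.MichelsonSchema

object MichelsonExample extends App {
  implicit val logger: Logger = Logger("michelson-conversion")
  val json = """{"prim":"unit"}""" // placeholder input
  // returns rendered Michelson, or "Unparsable code: ..." when parsing fails
  println(JsonToMichelson.toMichelsonScript[MichelsonSchema](json))
}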
Example 64
Source File: WIPRegulator.scala From CM-Well with Apache License 2.0 | 5 votes |
package cmwell.util.concurrent

import java.util.concurrent.{ArrayBlockingQueue, TimeUnit}

import scala.concurrent.Future
import scala.util.{Failure, Success, Try}
import com.typesafe.scalalogging.Logger
import scala.concurrent.ExecutionContext.Implicits.global
import cmwell.util.jmx._

case class WIPRegulator(private var numOfWorkers: Int, noWorkerAlertInterval: Long = 30000) extends WIPRegulatorMBean {

  jmxRegister(this, "cmwell.indexer:type=WIPRegulator")

  private val wipQueue = new ArrayBlockingQueue[String](50)

  // Set initial number of concurrent requests
  for (i <- 1 to numOfWorkers) wipQueue.add("WIP Worker " + i)

  def doWithWorkerAsync[T](f: => Future[T])(implicit logger: Logger): Future[T] = {
    var notFinished = true
    var reply: Future[T] = Future.failed(FailedToExecuteException())

    while (notFinished) {
      Try { wipQueue.poll(noWorkerAlertInterval, TimeUnit.MILLISECONDS) } match {
        case Success(null) =>
          logger.error(s"waited for $noWorkerAlertInterval milliseconds and did not get a worker, something is wrong")
        case Success(worker) =>
          reply = f
          reply.onComplete(_ => wipQueue.add(worker))
          notFinished = false
        case Failure(exception) =>
          logger.error("InterruptedException while trying to poll a worker from the queue")
          reply = Future.failed(exception)
          notFinished = false
      }
    }
    reply
  }

  def getNumOfWorkers(): Int = wipQueue.size()

  def addWorker(): Unit = this.synchronized {
    numOfWorkers += 1
    wipQueue.add(s"WIP Worker $numOfWorkers")
  }

  def removeWorker(): Unit = this.synchronized {
    wipQueue.remove(s"WIP Worker $numOfWorkers")
    numOfWorkers -= 1
  }
}

trait WIPRegulatorMBean {
  def getNumOfWorkers(): Int
  def addWorker()
  def removeWorker()
}

case class FailedToExecuteException(msg: String = "Undefined reason") extends Exception(msg)
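A minimal usage sketch (the logger name and workload are hypothetical): the regulator caps in-flight futures at the worker count, and doWithWorkerAsync blocks the caller until a worker slot frees up.

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import cmwell.util.concurrent.WIPRegulator
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

object WIPRegulatorExample extends App {
  implicit val logger: Logger = Logger(LoggerFactory.getLogger("wip-example"))
  val regulator = WIPRegulator(4)
  // at most 4 of these futures are in flight at any given moment
  val results = (1 to 10).map(i => regulator.doWithWorkerAsync(Future(i * 2)))
}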
Example 65
Source File: FTSServiceOps.scala From CM-Well with Apache License 2.0 | 5 votes |
package cmwell.fts

import akka.NotUsed
import akka.stream.scaladsl.Source
import cmwell.domain.{AggregationFilter, AggregationsResponse, Infoton, InfotonSerializer}
import com.typesafe.scalalogging.Logger
import org.elasticsearch.action.ActionRequest
import org.elasticsearch.action.bulk.BulkResponse
import org.elasticsearch.action.get.GetResponse
import org.slf4j.LoggerFactory

import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.Duration

trait EsSourceExtractor[T] {
  def extract(hit: GetResponse): T
}

object EsSourceExtractor {
  implicit val esStringSourceExtractor = new EsSourceExtractor[String] {
    override def extract(hit: GetResponse): String = hit.getSourceAsString
  }

  object Implicits {
    implicit val esMapSourceExtractor = new EsSourceExtractor[java.util.Map[String, AnyRef]] {
      override def extract(hit: GetResponse): java.util.Map[String, AnyRef] = hit.getSourceAsMap
    }
  }
}
Example 66
Source File: DataToolsLogging.scala From CM-Well with Apache License 2.0 | 5 votes |
package cmwell.tools.data.utils.logging

import com.typesafe.scalalogging.{LazyLogging, Logger}
import org.slf4j.LoggerFactory

case class LabelId(id: String)

trait DataToolsLogging {
  private[data] lazy val redLogger = Logger(LoggerFactory.getLogger("tools-red-logger"))
  private[data] lazy val badDataLogger = Logger(LoggerFactory.getLogger("tools-bad-data"))

  val label: Option[String] = None

  protected lazy val logger: Logger = {
    val loggerName = if (label.isEmpty) getClass.getName else s"${getClass.getName} [${label.get}]"
    Logger(LoggerFactory.getLogger(loggerName))
  }
}
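A short usage sketch (the worker class and label are hypothetical; it is placed under cmwell.tools.data so the private[data] loggers stay visible): mixing in the trait yields a logger whose name carries the optional label.

package cmwell.tools.data.example

import cmwell.tools.data.utils.logging.DataToolsLogging

class IngestWorker extends DataToolsLogging {
  override val label: Option[String] = Some("ingest-1")

  def run(): Unit = {
    logger.info("progress message")        // logger name ends with "[ingest-1]"
    badDataLogger.debug("malformed input") // routed to the dedicated bad-data logger
  }
}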
Example 67
Source File: CrawlerRatePrinter.scala From CM-Well with Apache License 2.0 | 5 votes |
package cmwell.crawler

import akka.stream.{Attributes, FlowShape, Inlet, Outlet}
import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
import com.typesafe.scalalogging.Logger

object CrawlerRatePrinter {
  def apply(crawlerId: String, printFrequency: Int, maxPrintRateMillis: Int)(logger: Logger): CrawlerRatePrinter =
    new CrawlerRatePrinter(crawlerId, printFrequency, maxPrintRateMillis)(logger)
}

class CrawlerRatePrinter(crawlerId: String, printFrequency: Int, maxPrintRateMillis: Int)(logger: Logger)
    extends GraphStage[FlowShape[Long, Long]] {
  val in = Inlet[Long]("CrawlerRatePrinter.in")
  val out = Outlet[Long]("CrawlerRatePrinter.out")
  override val shape = FlowShape.of(in, out)

  override def createLogic(attr: Attributes): GraphStageLogic =
    new GraphStageLogic(shape) {
      private val startTime: Double = System.currentTimeMillis
      private var totalElementsGot: Long = 0L
      private var printedAtElementNo: Long = 0L
      private var printedAtTime: Double = startTime
      private var localStartTime: Double = startTime
      private var localTotalElementsGot: Long = 0L
      private var localRate: Double = 0

      setHandler(
        in,
        new InHandler {
          override def onPush(): Unit = {
            val elem = grab(in)
            totalElementsGot += 1
            localTotalElementsGot += 1
            if (totalElementsGot - printedAtElementNo >= printFrequency) {
              val currentTime = System.currentTimeMillis
              if (currentTime - printedAtTime > maxPrintRateMillis) {
                val rate = totalElementsGot / (currentTime - startTime) * 1000
                if (currentTime - localStartTime > 15000) {
                  localRate = localTotalElementsGot / (currentTime - localStartTime) * 1000
                  localTotalElementsGot = 0
                  localStartTime = currentTime
                }
                logger.info(
                  s"$crawlerId Current offset is $elem. Total $totalElementsGot offsets already processed. " +
                    s"Read rate: avg: ${rate.formatted("%.2f")} current: ${localRate.formatted("%.2f")} offsets/second")
                printedAtElementNo = totalElementsGot
                printedAtTime = currentTime
              }
            }
            push(out, elem)
          }
        }
      )

      setHandler(out, new OutHandler {
        override def onPull(): Unit = pull(in)
      })
    }
}
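A wiring sketch (stream contents and names are hypothetical): the stage passes elements through untouched while periodically logging average and recent throughput, so it can be dropped into any Source[Long, _] with .via.

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import cmwell.crawler.CrawlerRatePrinter
import com.typesafe.scalalogging.Logger

object RatePrinterExample extends App {
  implicit val system = ActorSystem("rate-printer-example")
  implicit val materializer = ActorMaterializer()
  val logger = Logger("crawler-progress")

  Source(1L to 100000L)
    .via(CrawlerRatePrinter("crawler-1", printFrequency = 1000, maxPrintRateMillis = 500)(logger))
    .runWith(Sink.ignore)
}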
Example 68
Source File: FailingFTSServiceMockup.scala From CM-Well with Apache License 2.0 | 5 votes |
package cmwell.bg.test

import java.util.concurrent.TimeoutException

import cmwell.fts._
import cmwell.util.concurrent.SimpleScheduler
import com.typesafe.config.{Config, ConfigFactory}
import com.typesafe.scalalogging.Logger

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

// Note: the original page truncates this file. The enclosing class declaration and the
// mutable counters below are reconstructed so the override compiles; the parent class,
// its constructor arguments and the inherited `loger` member are assumptions.
class FailingFTSServiceMockup(config: Config, errorModuloDivisor: Int) extends FTSServiceNew(config, "ftsService.yml") {

  var errorModuloDividend = 0
  var errorCount = 0

  override def executeBulkIndexRequests(indexRequests: Iterable[ESIndexRequest],
                                        numOfRetries: Int,
                                        waitBetweenRetries: Long)
                                       (implicit executionContext: ExecutionContext, logger: Logger = loger) = {
    errorModuloDividend += 1
    logger info s"executeBulkIndexRequests: errorModuloDividend=$errorModuloDividend"
    if (errorModuloDividend % errorModuloDivisor == 2 && errorCount <= 2) {
      errorCount += 1
      logger info s"delaying response"
      throw new TimeoutException("fake")
    } else {
      logger info "forwarding to real ftsservice"
      super.executeBulkIndexRequests(indexRequests, numOfRetries, waitBetweenRetries)
    }
  }
}
Example 69
Source File: CodegenTestCaseRunner.scala From quill with Apache License 2.0 | 5 votes |
package io.getquill.codegen.integration

import com.typesafe.scalalogging.Logger
import io.getquill.codegen.util.ConfigPrefix
import io.getquill.codegen.util.SchemaConfig._
import io.getquill.codegen.util.TryOps._
import org.slf4j.LoggerFactory

import scala.concurrent.duration.Duration
import scala.concurrent.Await

object SchemaNames {
  val simpleSnake = `schema_snakecase`
  val simpleLiteral = `schema_casesensitive`
  val twoSchema = `schema_snakecase_twoschema_differentcolumns_differenttypes`
}

object CodegenTestCaseRunner {

  private val logger = Logger(LoggerFactory.getLogger(this.getClass))

  def main(args: Array[String]): Unit = {
    val path = args(0)
    val prefixes =
      if (args.drop(1).contains("all")) ConfigPrefix.all
      else args.drop(1).map(ConfigPrefix.fromValue(_).orThrow).toList

    prefixes.foreach { prefix =>
      val generatedFiles = apply(prefix, path)
      generatedFiles.foreach(f => logger.info(s"${prefix} | ${f}"))
    }
  }

  def apply(dbPrefix: ConfigPrefix, path: String) =
    CodegenTestCases(dbPrefix).map { gen =>
      logger.info(s"Generating files for: ${dbPrefix.value} (${dbPrefix.packagePath}) with ${gen}")
      // Since auto-commit is enabled, we need to wait for each test-case individually;
      // otherwise the tests will step on each other's toes.
      Await.result(gen.generateWithSchema(dbPrefix, path), Duration.Inf).toSeq
    }.flatten
}
Example 70
Source File: SchemaConfig.scala From quill with Apache License 2.0 | 5 votes |
package io.getquill.codegen.util

import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

import scala.io.Source

sealed trait SchemaConfig {
  private val logger = Logger(LoggerFactory.getLogger(this.getClass))

  def fileName: String = {
    import scala.reflect.runtime.{universe => u}
    val m = u.runtimeMirror(this.getClass.getClassLoader)
    val sym = m.reflect(this).symbol
    sym.name.decodedName.toString + ".sql"
  }

  lazy val content: String = {
    val content = Source.fromURL(this.getClass.getClassLoader.getResource(fileName)).mkString
    logger.info("Loaded content: " + content)
    content
  }
}

object SchemaConfig {
  case object `schema_casesensitive` extends SchemaConfig
  case object `schema_simple` extends SchemaConfig
  case object `schema_snakecase` extends SchemaConfig
  case object `schema_snakecase_twoschema_differentcolumns_differenttypes` extends SchemaConfig
  case object `schema_snakecase_twotable` extends SchemaConfig
  case object `schema_snakecase_twotable_differentcolumns` extends SchemaConfig
  case object `schema_snakecase_twotable_differentcolumns_differenttypes` extends SchemaConfig
  case object `schema_twotable` extends SchemaConfig
}
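A small usage sketch: each case object derives its .sql resource name from its own decoded identifier, so `schema_snakecase` resolves to schema_snakecase.sql. This assumes the resource is actually on the classpath at runtime.

import io.getquill.codegen.util.SchemaConfig._

object SchemaConfigExample extends App {
  println(`schema_snakecase`.fileName)         // prints "schema_snakecase.sql"
  val ddl: String = `schema_snakecase`.content // loads and logs the resource's text
}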
Example 71
Source File: Ancestry.scala From quill with Apache License 2.0 | 5 votes |
package io.getquill.codegen.dag

import com.typesafe.scalalogging.Logger
import io.getquill.codegen.dag.dag.ClassAncestry
import io.getquill.codegen.util.MapExtensions._
import org.slf4j.LoggerFactory

import scala.language.implicitConversions
import scala.reflect.{ClassTag, classTag}

class DagNode(val cls: ClassTag[_], val parent: Option[DagNode])

trait NodeCatalog {
  def lookup(cls: ClassTag[_]): DagNode
}

object DefaultNodeCatalog extends NodeCatalog {

  private val logger = Logger(LoggerFactory.getLogger(this.getClass))

  implicit def nodeToOpt(dagNode: DagNode) = Some(dagNode)

  object StringNode extends DagNode(classTag[String], None)
  object BigDecimalNode extends DagNode(classTag[BigDecimal], StringNode)
  object DoubleNode extends DagNode(classTag[Double], BigDecimalNode)
  object FloatNode extends DagNode(classTag[Float], DoubleNode)
  object LongNode extends DagNode(classTag[Long], BigDecimalNode)
  object IntNode extends DagNode(classTag[Int], LongNode)
  object ShortNode extends DagNode(classTag[Short], IntNode)
  object ByteNode extends DagNode(classTag[Byte], ShortNode)
  object BooleanNode extends DagNode(classTag[Boolean], ByteNode)
  object TimestampNode extends DagNode(classTag[java.time.LocalDateTime], StringNode)
  object DateNode extends DagNode(classTag[java.time.LocalDate], TimestampNode)

  protected[codegen] val nodeCatalogNodes: Seq[DagNode] = Seq(
    StringNode,
    BigDecimalNode,
    DoubleNode,
    FloatNode,
    LongNode,
    IntNode,
    ByteNode,
    ShortNode,
    BooleanNode,
    TimestampNode,
    DateNode
  )

  override def lookup(cls: ClassTag[_]): DagNode =
    nodeCatalogNodes.find(_.cls == cls).getOrElse {
      logger.warn(s"Could not find type hierarchy node for: ${cls}. Must assume it's a string")
      StringNode
    }
}

package object dag {
  type ClassAncestry = (ClassTag[_], ClassTag[_]) => ClassTag[_]
}

class CatalogBasedAncestry(ancestryCatalog: NodeCatalog = DefaultNodeCatalog) extends ClassAncestry {

  def apply(one: ClassTag[_], two: ClassTag[_]): ClassTag[_] = {
    def getAncestry(node: DagNode): List[DagNode] = node.parent match {
      case Some(parent) => node :: getAncestry(parent)
      case None         => node :: Nil
    }

    def commonAncestry = {
      val oneAncestry = getAncestry(ancestryCatalog.lookup(one))
      val twoAncestry = getAncestry(ancestryCatalog.lookup(two))

      val (node, _) = oneAncestry.zipWithIndex.toMap
        .zipOnKeys(twoAncestry.zipWithIndex.toMap)
        .collect { case (key, (Some(i), Some(j))) => (key, i + j) }
        .toList
        .sortBy { case (node, order) => order }
        .head

      node.cls
    }

    // If the two nodes are exactly the same thing, just return the type. Otherwise look up the DAG.
    if (one == two) one else commonAncestry
  }
}
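A brief usage sketch: the ancestry function walks both types' parent chains through the catalog and picks the shared node with the smallest combined distance, so Int and Long meet at Long, while an uncataloged type falls back to String with the warning above.

import io.getquill.codegen.dag.CatalogBasedAncestry
import scala.reflect.classTag

object AncestryExample extends App {
  val ancestry = new CatalogBasedAncestry()
  println(ancestry(classTag[Int], classTag[Long]))           // ClassTag[Long]
  println(ancestry(classTag[Float], classTag[Long]))         // ClassTag[BigDecimal]
  println(ancestry(classTag[java.util.UUID], classTag[Int])) // falls back to ClassTag[String]
}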
Example 72
Source File: ContextLogger.scala From quill with Apache License 2.0 | 5 votes |
package io.getquill.util

import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

import scala.annotation.tailrec

class ContextLogger(name: String) {
  val underlying = Logger(LoggerFactory.getLogger(name))

  private val bindsEnabled = sys.props.get("quill.binds.log").contains("true")
  private val nullToken = "null"

  def logQuery(query: String, params: Seq[Any]): Unit = {
    if (!bindsEnabled || params.isEmpty) underlying.debug(query)
    else {
      underlying.debug("{} - binds: {}", query, prepareParams(params))
    }
  }

  def logBatchItem(query: String, params: Seq[Any]): Unit = {
    if (bindsEnabled) {
      underlying.debug("{} - batch item: {}", query, prepareParams(params))
    }
  }

  private def prepareParams(params: Seq[Any]): String =
    params
      .reverse
      .map(prepareParam)
      .mkString("[", ", ", "]")

  @tailrec
  private def prepareParam(param: Any): String = param match {
    case None | null => nullToken
    case Some(x)     => prepareParam(x)
    case str: String => s"'$str'"
    case _           => param.toString
  }
}

object ContextLogger {
  def apply(ctxClass: Class[_]): ContextLogger = new ContextLogger(ctxClass.getName)
}
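A usage sketch (the logger name is hypothetical): bind logging is opt-in via the quill.binds.log system property, and prepareParams reverses the sequence because contexts accumulate bound values in reverse order.

import io.getquill.util.ContextLogger

object ContextLoggerExample extends App {
  sys.props("quill.binds.log") = "true" // enable bind logging before constructing the logger
  val log = new ContextLogger("io.getquill.example")
  // with DEBUG enabled this logs: SELECT ... - binds: [42, 'Joe']
  log.logQuery("SELECT * FROM person WHERE age > ? AND name = ?", Seq("Joe", 42))
}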
Example 73
Source File: LinearRegression.scala From tensorflow_scala with Apache License 2.0 | 5 votes |
package org.platanios.tensorflow.examples

import org.platanios.tensorflow.api._
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

import scala.collection.mutable.ArrayBuffer
import scala.util.Random

object LinearRegression {
  private val logger = Logger(LoggerFactory.getLogger("Examples / Linear Regression"))
  private val random = new Random()

  private val weight = random.nextFloat()

  def main(args: Array[String]): Unit = {
    logger.info("Building linear regression model.")
    val inputs = tf.placeholder[Float](Shape(-1, 1))
    val outputs = tf.placeholder[Float](Shape(-1, 1))
    val weights = tf.variable[Float]("weights", Shape(1, 1), tf.ZerosInitializer)
    val predictions = tf.matmul(inputs, weights)
    val loss = tf.sum(tf.square(tf.subtract(predictions, outputs)))
    val trainOp = tf.train.AdaGrad(1.0f).minimize(loss)

    logger.info("Training the linear regression model.")
    val session = Session()
    session.run(targets = tf.globalVariablesInitializer())
    for (i <- 0 to 50) {
      val trainBatch = batch(10000)
      val feeds = Map(inputs -> trainBatch._1, outputs -> trainBatch._2)
      val trainLoss = session.run(feeds = feeds, fetches = loss, targets = trainOp)
      if (i % 1 == 0)
        logger.info(s"Train loss at iteration $i = ${trainLoss.scalar} " +
          s"(weight = ${session.run(fetches = weights.value).scalar})")
    }

    logger.info(s"Trained weight value: ${session.run(fetches = weights.value).scalar}")
    logger.info(s"True weight value: $weight")
  }

  def batch(batchSize: Int): (Tensor[Float], Tensor[Float]) = {
    val inputs = ArrayBuffer.empty[Float]
    val outputs = ArrayBuffer.empty[Float]
    var i = 0
    while (i < batchSize) {
      val input = random.nextFloat()
      inputs += input
      outputs += weight * input
      i += 1
    }
    (Tensor[Float](inputs.toSeq).reshape(Shape(-1, 1)), Tensor[Float](outputs.toSeq).reshape(Shape(-1, 1)))
  }
}
Example 74
Source File: NaNChecker.scala From tensorflow_scala with Apache License 2.0 | 5 votes |
package org.platanios.tensorflow.api.learn.hooks

import org.platanios.tensorflow.api.implicits.helpers.{OutputStructure, OutputToTensor}
import org.platanios.tensorflow.api.ops.{Op, Output}
import org.platanios.tensorflow.api.tensors.{Tensor, ops}
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

class NaNChecker protected (
    val tensorNames: Set[String],
    val failOnNaN: Boolean = true
) extends Hook {
  private var outputs: Seq[Output[Any]] = _

  override protected def begin(): Unit = {
    // Convert tensor names to op outputs.
    outputs = tensorNames.map(Op.currentGraph.getOutputByName).toSeq
  }

  override protected def beforeSessionRun[C: OutputStructure, CV](
      runContext: Hook.SessionRunContext[C, CV]
  )(implicit
      evOutputToTensorC: OutputToTensor.Aux[C, CV]
  ): Option[Hook.SessionRunArgs[Seq[Output[Any]], Seq[Tensor[Any]]]] = {
    Some(Hook.SessionRunArgs(fetches = outputs))
  }

  @throws[IllegalStateException]
  override protected def afterSessionRun[C: OutputStructure, CV](
      runContext: Hook.SessionRunContext[C, CV],
      runResult: Hook.SessionRunResult[Seq[Tensor[Any]]]
  )(implicit evOutputToTensorC: OutputToTensor.Aux[C, CV]): Unit = {
    // TODO: [TYPES] !!! Remove the cast once we start using static types everywhere.
    runResult.result.zip(tensorNames).filter(r => ops.Math.any(ops.Math.isNaN(r._1.toFloat)).scalar).foreach(value => {
      val message = s"Encountered NaN values in tensor: ${value._2}."
      if (failOnNaN) {
        NaNChecker.logger.error(message)
        throw new IllegalStateException(message)
      } else {
        NaNChecker.logger.warn(message)
        // We do not raise an error but we request to stop iterating without throwing an exception.
        runContext.requestStop()
      }
    })
  }
}

object NaNChecker {
  private[NaNChecker] val logger = Logger(LoggerFactory.getLogger("Learn / Hooks / Tensor NaN"))

  def apply(tensorNames: Set[String], failOnNaN: Boolean = true): NaNChecker = {
    new NaNChecker(tensorNames, failOnNaN)
  }
}
Example 75
Source File: LossLogger.scala From tensorflow_scala with Apache License 2.0 | 5 votes |
package org.platanios.tensorflow.api.learn.hooks

import org.platanios.tensorflow.api.core.client.Session
import org.platanios.tensorflow.api.implicits.Implicits._
import org.platanios.tensorflow.api.ops.{Output, UntypedOp}
import org.platanios.tensorflow.api.tensors.Tensor
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

import java.nio.file.Path

class LossLogger protected (
    val log: Boolean = true,
    val summaryDir: Path = null,
    val trigger: HookTrigger = StepHookTrigger(1),
    val triggerAtEnd: Boolean = true,
    val formatter: (Option[Double], Long, Float) => String = null
) extends TriggeredHook(trigger, triggerAtEnd)
    with ModelDependentHook[Any, Any, Any, Any, Any, Any]
    with SummaryWriterHookAddOn {
  require(log || summaryDir != null, "At least one of 'log' and 'summaryDir' needs to be provided.")

  protected var loss: Output[Float] = _

  override protected def begin(): Unit = {
    loss = modelInstance.loss.map(_.castTo[Float]).orNull
  }

  override protected def fetches: Seq[Output[Any]] = Seq(loss)
  override protected def targets: Set[UntypedOp] = Set.empty

  override protected def onTrigger(
      step: Long,
      elapsed: Option[(Double, Int)],
      runResult: Hook.SessionRunResult[Seq[Tensor[Any]]],
      session: Session
  ): Unit = {
    val loss = runResult.result.head.scalar.asInstanceOf[Float]
    val log = {
      if (formatter != null) {
        formatter(elapsed.map(_._1), step, loss)
      } else {
        elapsed.map(_._1) match {
          case Some(s) => f"($s%9.3f s) Step: $step%6d, Loss: $loss%.4f"
          case None    => f"( N/A ) Step: $step%6d, Loss: $loss%.4f"
        }
      }
    }
    LossLogger.logger.info(log)
    writeSummary(step, "Loss", loss)
  }
}

object LossLogger {
  private[LossLogger] val logger = Logger(LoggerFactory.getLogger("Learn / Hooks / Loss Logger"))

  def apply(
      log: Boolean = true,
      summaryDir: Path = null,
      trigger: HookTrigger = StepHookTrigger(1),
      triggerAtEnd: Boolean = true,
      formatter: (Option[Double], Long, Float) => String = null
  ): LossLogger = {
    new LossLogger(log, summaryDir, trigger, triggerAtEnd, formatter)
  }
}
Example 76
Source File: TensorLogger.scala From tensorflow_scala with Apache License 2.0 | 5 votes |
package org.platanios.tensorflow.api.learn.hooks

import org.platanios.tensorflow.api.core.client.Session
import org.platanios.tensorflow.api.ops.{Op, Output, UntypedOp}
import org.platanios.tensorflow.api.tensors.Tensor
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

class TensorLogger protected (
    val tensors: Map[String, String],
    val trigger: HookTrigger = StepHookTrigger(1),
    val triggerAtEnd: Boolean = true,
    val formatter: Map[String, Tensor[_]] => String = null
) extends TriggeredHook(trigger, triggerAtEnd) {
  protected val tensorTags: Seq[String] = tensors.keys.toSeq
  protected val tensorNames: Seq[String] = tensors.values.toSeq
  protected var outputs: Seq[Output[Any]] = _

  override protected def begin(): Unit = {
    // Convert tensor names to op outputs.
    outputs = tensorNames.map(t => Op.currentGraph.getOutputByName(t))
  }

  override protected def fetches: Seq[Output[Any]] = outputs
  override protected def targets: Set[UntypedOp] = Set.empty

  override protected def onTrigger(
      step: Long,
      elapsed: Option[(Double, Int)],
      runResult: Hook.SessionRunResult[Seq[Tensor[Any]]],
      session: Session
  ): Unit = {
    val tensors = tensorTags.zip(runResult.result.tail)
    if (formatter != null) {
      TensorLogger.logger.info(formatter(tensors.toMap))
    } else {
      val valuesLog = tensors.map(t => {
        s"${t._1} = ${t._2.summarize(flattened = true, includeInfo = false)}"
      }).mkString(", ")
      val log = elapsed.map(_._1) match {
        case Some(s) => f"($s%9.3f s) $valuesLog"
        case None    => s"( N/A ) $valuesLog"
      }
      TensorLogger.logger.info(log)
    }
  }
}

object TensorLogger {
  private[TensorLogger] val logger = Logger(LoggerFactory.getLogger("Learn / Hooks / Tensor Logging"))

  def apply(
      tensors: Map[String, String],
      trigger: HookTrigger = StepHookTrigger(1),
      triggerAtEnd: Boolean = true,
      formatter: Map[String, Tensor[_]] => String = null
  ): TensorLogger = {
    new TensorLogger(tensors, trigger, triggerAtEnd, formatter)
  }
}
Example 77
Source File: TensorBoardHook.scala From tensorflow_scala with Apache License 2.0 | 5 votes |
package org.platanios.tensorflow.api.learn.hooks

import org.platanios.tensorflow.api.config.TensorBoardConfig
import org.platanios.tensorflow.api.core.client.Session
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

import scala.util.Try

private[learn] class TensorBoardHook protected (val tensorBoardConfig: TensorBoardConfig) extends Hook {
  private var tensorBoardProcess: Option[Process] = None

  override protected def begin(): Unit = tensorBoardProcess = {
    Option(tensorBoardConfig).flatMap(config => {
      TensorBoardHook.logger.info(
        s"Launching TensorBoard in '${config.host}:${config.port}' " +
          s"for log directory '${config.logDir.toAbsolutePath}'.")
      val processOrError = Try(config.processBuilder.start())
      processOrError.failed.foreach(e => {
        TensorBoardHook.logger.warn(e.getMessage)
        TensorBoardHook.logger.warn(
          "Could not launch TensorBoard. Please make sure it is installed correctly and in your PATH.")
      })
      processOrError.toOption
    })
  }

  override protected def end(session: Session): Unit = {
    tensorBoardProcess.foreach(process => {
      TensorBoardHook.logger.info("Killing the TensorBoard service.")
      process.destroy()
    })
  }
}

private[learn] object TensorBoardHook {
  private[TensorBoardHook] val logger = Logger(LoggerFactory.getLogger("Learn / Hooks / TensorBoard"))

  def apply(tensorBoardConfig: TensorBoardConfig): TensorBoardHook = new TensorBoardHook(tensorBoardConfig)
}
Example 78
Source File: StepRateLogger.scala From tensorflow_scala with Apache License 2.0 | 5 votes |
package org.platanios.tensorflow.api.learn.hooks

import org.platanios.tensorflow.api.core.client.Session
import org.platanios.tensorflow.api.ops.{Output, UntypedOp}
import org.platanios.tensorflow.api.tensors.Tensor
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

import java.nio.file.Path

class StepRateLogger protected (
    val log: Boolean = true,
    val summaryDir: Path = null,
    val trigger: HookTrigger = StepHookTrigger(10),
    val triggerAtEnd: Boolean = true,
    val tag: String = "Steps/Sec"
) extends TriggeredHook(trigger, triggerAtEnd)
    with SummaryWriterHookAddOn {
  require(log || summaryDir != null, "At least one of 'log' and 'summaryDir' needs to be provided.")

  override protected def fetches: Seq[Output[Any]] = Seq.empty
  override protected def targets: Set[UntypedOp] = Set.empty

  override protected def onTrigger(
      step: Long,
      elapsed: Option[(Double, Int)],
      runResult: Hook.SessionRunResult[Seq[Tensor[Any]]],
      session: Session
  ): Unit = {
    elapsed.foreach(elapsed => {
      val stepRate = elapsed._2.toDouble / elapsed._1
      if (log) StepRateLogger.logger.info(f"$tag: $stepRate%.2f")
      writeSummary(step, tag, stepRate.toFloat)
    })
  }
}

object StepRateLogger {
  private[StepRateLogger] val logger = Logger(LoggerFactory.getLogger("Learn / Hooks / Step Rate"))

  def apply(
      log: Boolean = true,
      summaryDir: Path = null,
      trigger: HookTrigger = StepHookTrigger(10),
      triggerAtEnd: Boolean = true,
      tag: String = "Steps/Sec"
  ): StepRateLogger = {
    new StepRateLogger(log, summaryDir, trigger, triggerAtEnd, tag)
  }
}
Example 79
Source File: TimelineHook.scala From tensorflow_scala with Apache License 2.0 | 5 votes |
package org.platanios.tensorflow.api.learn.hooks

import org.platanios.tensorflow.api.core.client.{Session, Timeline}
import org.platanios.tensorflow.api.ops.{Output, UntypedOp}
import org.platanios.tensorflow.api.tensors.Tensor
import org.platanios.tensorflow.proto.RunOptions
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

import java.nio.file.{Files, Path, StandardOpenOption}

class TimelineHook protected (
    val workingDir: Path,
    val showDataFlow: Boolean = false,
    val showMemory: Boolean = false,
    val prettyJson: Boolean = false,
    val trigger: HookTrigger = StepHookTrigger(1000),
    val triggerAtEnd: Boolean = true
) extends TriggeredHook(trigger, triggerAtEnd) {
  override protected def fetches: Seq[Output[Any]] = Seq.empty
  override protected def targets: Set[UntypedOp] = Set.empty

  override protected def runOptions: Option[RunOptions] = {
    Some(RunOptions.newBuilder().setTraceLevel(RunOptions.TraceLevel.FULL_TRACE).build())
  }

  override protected def wantMetadata: Boolean = true

  override protected def onTrigger(
      step: Long,
      elapsed: Option[(Double, Int)],
      runResult: Hook.SessionRunResult[Seq[Tensor[Any]]],
      session: Session
  ): Unit = {
    TimelineHook.logger.info("Saving timeline.")
    val file = workingDir.resolve(s"trace$step.json")
    val stepStatistics = runResult.runMetadata.get.getStepStats
    val chromeTraceJSON = Timeline.generateChromeTrace(stepStatistics, showDataFlow, showMemory, prettyJson)
    val fileWriter = Files.newBufferedWriter(file, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE)
    fileWriter.write(chromeTraceJSON)
    fileWriter.flush()
    fileWriter.close()
    TimelineHook.logger.info(s"Saved timeline to '$file'.")
  }
}

object TimelineHook {
  private[TimelineHook] val logger = Logger(LoggerFactory.getLogger("Learn / Hooks / Timeline"))

  def apply(
      workingDir: Path,
      showDataFlow: Boolean = false,
      showMemory: Boolean = false,
      prettyJson: Boolean = false,
      trigger: HookTrigger = StepHookTrigger(1000),
      triggerAtEnd: Boolean = true
  ): TimelineHook = {
    new TimelineHook(workingDir, showDataFlow, showMemory, prettyJson, trigger, triggerAtEnd)
  }
}
Example 80
Source File: TFRecordReader.scala From tensorflow_scala with Apache License 2.0 | 5 votes |
package org.platanios.tensorflow.api.io

import org.platanios.tensorflow.api.core.exception.{DataLossException, OutOfRangeException}
import org.platanios.tensorflow.api.utilities.{CRC32C, Coding}
import org.platanios.tensorflow.proto.Example
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

import java.io.BufferedInputStream
import java.nio.file.{Files, Path, StandardOpenOption}

// Note: the original page truncates this file; the enclosing reader class and its
// `load()` method are reconstructed here (their exact shape is assumed) so the
// iterator below compiles.
case class TFRecordReader(filePath: Path) {

  def load(): Iterator[Example] = {
    val fileStream = new BufferedInputStream(Files.newInputStream(filePath, StandardOpenOption.READ))

    new Iterator[Example] {
      private[this] var nextExample: Example = _

      private[this] def readNext(): Example = {
        try {
          // Read the header data.
          val encLength = new Array[Byte](12)
          fileStream.read(encLength)
          val recordLength = Coding.decodeFixedInt64(encLength).toInt
          val encLengthMaskedCrc = CRC32C.mask(CRC32C.value(encLength.take(8)))
          if (Coding.decodeFixedInt32(encLength, offset = 8) != encLengthMaskedCrc) {
            throw DataLossException("Encountered corrupted TensorFlow record.")
          }

          // Read the data.
          val encData = new Array[Byte](recordLength + 4)
          fileStream.read(encData)
          val recordData = encData.take(recordLength)
          val encDataMaskedCrc = CRC32C.mask(CRC32C.value(encData.take(recordLength)))
          if (Coding.decodeFixedInt32(encData, offset = recordLength) != encDataMaskedCrc) {
            throw DataLossException("Encountered corrupted TensorFlow record.")
          }

          Example.parseFrom(recordData)
        } catch {
          case _: OutOfRangeException | _: DataLossException =>
            // We ignore partial read exceptions, because a record may be truncated. The record reader
            // holds the offset prior to the failed read, and so retrying will succeed.
            TFRecordReader.logger.info(s"No more TF records stored at '${filePath.toAbsolutePath}'.")
            null
        }
      }

      override def hasNext: Boolean = {
        if (nextExample == null) nextExample = readNext()
        nextExample != null
      }

      override def next(): Example = {
        val example = {
          if (nextExample == null) readNext()
          else nextExample
        }
        if (example != null) nextExample = readNext()
        example
      }
    }
  }
}

private[io] object TFRecordReader {
  private[TFRecordReader] val logger: Logger = Logger(LoggerFactory.getLogger("TF Record Reader"))
}
Example 81
Source File: EventFileReader.scala From tensorflow_scala with Apache License 2.0 | 5 votes |
package org.platanios.tensorflow.api.io.events

import org.platanios.tensorflow.api.core.exception.{DataLossException, OutOfRangeException}
import org.platanios.tensorflow.api.io.{CompressionType, Loader, NoCompression}
import org.platanios.tensorflow.api.utilities.{CRC32C, Coding}
import org.platanios.tensorflow.proto.Event
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

import java.io.BufferedInputStream
import java.nio.file.{Files, Path, StandardOpenOption}

// Note: as with TFRecordReader above, the enclosing reader class and its `load()`
// method are truncated on the original page and reconstructed here (shape assumed).
case class EventFileReader(filePath: Path, compressionType: CompressionType = NoCompression) extends Loader[Event] {

  def load(): Iterator[Event] = {
    val fileStream = new BufferedInputStream(Files.newInputStream(filePath, StandardOpenOption.READ))

    new Iterator[Event] {
      private[this] var nextEvent: Event = _

      private[this] def readNext(): Event = {
        try {
          // Read the header data.
          val encLength = new Array[Byte](12)
          fileStream.read(encLength)
          val recordLength = Coding.decodeFixedInt64(encLength).toInt
          val encLengthMaskedCrc = CRC32C.mask(CRC32C.value(encLength.take(8)))
          if (Coding.decodeFixedInt32(encLength, offset = 8) != encLengthMaskedCrc) {
            throw DataLossException("Encountered corrupted TensorFlow record.")
          }

          // Read the data.
          val encData = new Array[Byte](recordLength + 4)
          fileStream.read(encData)
          val recordData = encData.take(recordLength)
          val encDataMaskedCrc = CRC32C.mask(CRC32C.value(encData.take(recordLength)))
          if (Coding.decodeFixedInt32(encData, offset = recordLength) != encDataMaskedCrc) {
            throw DataLossException("Encountered corrupted TensorFlow record.")
          }

          Event.parseFrom(recordData)
        } catch {
          case _: OutOfRangeException | _: DataLossException =>
            // We ignore partial read exceptions, because a record may be truncated. The record reader
            // holds the offset prior to the failed read, and so retrying will succeed.
            EventFileReader.logger.info(s"No more TF records stored at '${filePath.toAbsolutePath}'.")
            null
        }
      }

      override def hasNext: Boolean = {
        if (nextEvent == null) nextEvent = readNext()
        nextEvent != null
      }

      override def next(): Event = {
        val event = {
          if (nextEvent == null) readNext()
          else nextEvent
        }
        if (event != null) nextEvent = readNext()
        event
      }
    }
  }
}

private[io] object EventFileReader {
  private[EventFileReader] val logger: Logger = Logger(LoggerFactory.getLogger("Event File Reader"))
}
Example 82
Source File: Loader.scala From tensorflow_scala with Apache License 2.0 | 5 votes |
package org.platanios.tensorflow.data

import com.typesafe.scalalogging.Logger

import java.io.IOException
import java.net.URL
import java.nio.file.{Files, Path}

import scala.collection.compat.immutable.LazyList
import scala.io.Source
import scala.util.matching.Regex

trait Loader {
  protected val logger: Logger

  protected val googleDriveConfirmTokenRegex: Regex = {
    """<a id="uc-download-link".*href="/uc\?export=download&(confirm=.*)&id=.*">Download anyway</a>""".r
  }

  def maybeDownload(path: Path, url: String, bufferSize: Int = 8192): Boolean = {
    if (Files.exists(path)) {
      false
    } else {
      try {
        logger.info(s"Downloading file '$url'.")
        Files.createDirectories(path.getParent)
        download(path, url, bufferSize)

        // Small hack to deal with downloading large Google Drive files.
        if (Files.size(path) < 1024 * 1024 && url.contains("drive.google.com")) {
          val content = Source.fromFile(path.toFile).getLines().mkString("\n")
          googleDriveConfirmTokenRegex.findFirstMatchIn(content) match {
            case Some(confirmToken) => download(path, s"$url&${confirmToken.group(1)}", bufferSize)
            case None               => ()
          }
        }

        logger.info(s"Downloaded file '$url'.")
        true
      } catch {
        case e: IOException =>
          logger.error(s"Could not download file '$url'", e)
          throw e
      }
    }
  }

  protected def download(path: Path, url: String, bufferSize: Int = 8192): Unit = {
    val connection = new URL(url).openConnection()
    val contentLength = connection.getContentLengthLong
    val inputStream = connection.getInputStream
    val outputStream = Files.newOutputStream(path)
    val buffer = new Array[Byte](bufferSize)
    var progress = 0L
    var progressLogTime = System.currentTimeMillis
    LazyList.continually(inputStream.read(buffer)).takeWhile(_ != -1).foreach(numBytes => {
      outputStream.write(buffer, 0, numBytes)
      progress += numBytes
      val time = System.currentTimeMillis
      if (time - progressLogTime >= 1e4) {
        if (contentLength > 0) {
          val numBars = Math.floorDiv(10 * progress, contentLength).toInt
          logger.info(s"[${"=" * numBars}${" " * (10 - numBars)}] $progress / $contentLength bytes downloaded.")
          progressLogTime = time
        } else {
          logger.info(s"$progress bytes downloaded.")
          progressLogTime = time
        }
      }
    })
    outputStream.close()
  }
}
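A short usage sketch (the object name, URL and path are placeholders): a concrete loader only has to supply the logger, after which maybeDownload skips files that are already present.

import com.typesafe.scalalogging.Logger
import org.platanios.tensorflow.data.Loader
import org.slf4j.LoggerFactory
import java.nio.file.Paths

object DatasetDownloader extends Loader {
  override protected val logger: Logger = Logger(LoggerFactory.getLogger("Dataset Downloader"))
}

object LoaderExample extends App {
  // downloads only if the target file does not already exist
  DatasetDownloader.maybeDownload(
    Paths.get("datasets/mnist/train-images.gz"),
    "https://example.org/train-images.gz")
}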
Example 83
Source File: EventSource.scala From nexus-iam with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.iam.client

import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.headers.OAuth2BearerToken
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.stream.Materializer
import akka.stream.alpakka.sse.scaladsl.{EventSource => SSESource}
import akka.stream.scaladsl.Source
import ch.epfl.bluebrain.nexus.iam.client.config.IamClientConfig
import ch.epfl.bluebrain.nexus.iam.client.types.AuthToken
import ch.epfl.bluebrain.nexus.rdf.Iri.AbsoluteIri
import ch.epfl.bluebrain.nexus.rdf.implicits._
import com.typesafe.scalalogging.Logger
import io.circe.Decoder
import io.circe.parser.decode

import scala.concurrent.{ExecutionContext, Future}

// Note: the extract collapsed the trait and its companion into one block; the abstract
// method below is reconstructed from the anonymous implementation so the snippet compiles.
trait EventSource[A] {
  def apply(iri: AbsoluteIri, offset: Option[String])(implicit cred: Option[AuthToken]): Source[A, NotUsed]
}

object EventSource {

  def apply[A: Decoder](
      config: IamClientConfig
  )(implicit as: ActorSystem, mt: Materializer, ec: ExecutionContext): EventSource[A] =
    new EventSource[A] {
      private val logger = Logger[this.type]
      private val http = Http()

      private def addCredentials(request: HttpRequest)(implicit cred: Option[AuthToken]): HttpRequest =
        cred.map(token => request.addCredentials(OAuth2BearerToken(token.value))).getOrElse(request)

      private def send(request: HttpRequest)(implicit cred: Option[AuthToken]): Future[HttpResponse] =
        http.singleRequest(addCredentials(request)).map { resp =>
          if (!resp.status.isSuccess())
            logger.warn(s"HTTP response when performing SSE request: status = '${resp.status}'")
          resp
        }

      override def apply(iri: AbsoluteIri, offset: Option[String])(
          implicit cred: Option[AuthToken]
      ): Source[A, NotUsed] =
        SSESource(iri.asAkka, send, offset, config.sseRetryDelay).flatMapConcat { sse =>
          decode[A](sse.data) match {
            case Right(ev) => Source.single(ev)
            case Left(err) =>
              logger.error(s"Failed to decode admin event '$sse'", err)
              Source.empty
          }
        }
    }
}
Example 84
Source File: TelnetClientApp.scala From asura with MIT License | 5 votes |
package asura.dubbo.telnet

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.stream.ActorMaterializer
import akka.util.ByteString
import asura.common.actor.BaseActor
import asura.dubbo.actor.TelnetClientActor
import com.typesafe.scalalogging.Logger

object TelnetClientApp {

  val logger = Logger("TelnetClientApp")
  implicit val system = ActorSystem("telnet")
  implicit val ec = system.dispatcher
  implicit val materializer = ActorMaterializer()
  var clientActor: ActorRef = null

  def main(args: Array[String]): Unit = {
    val echoActor = system.actorOf(Props(new Echo()))
    clientActor = system.actorOf(TelnetClientActor.props("127.0.0.1", 20880, echoActor))
  }

  class Echo() extends BaseActor {
    var isLs = false

    override def receive: Receive = {
      case msg: ByteString =>
        log.info(s"from server:${msg.utf8String}")
        if (!isLs) {
          clientActor ! ByteString("ls\r\n")
          isLs = true
        }
    }
  }
}
Example 85
Source File: TelnetEchoApp.scala From asura with MIT License | 5 votes |
package asura.dubbo.telnet

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Framing, Tcp}
import akka.util.ByteString
import com.typesafe.scalalogging.Logger

object TelnetEchoApp {

  val logger = Logger("TelnetEchoApp")
  implicit val system = ActorSystem("telnet")
  implicit val ec = system.dispatcher
  implicit val materializer = ActorMaterializer()

  val echo = Flow[ByteString]
    .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = 256, allowTruncation = false))
    .map(_.utf8String)
    .map(txt => {
      logger.info(s"got(${txt.length}):${txt}")
      txt + "\n"
    })
    .map(ByteString(_))

  def main(args: Array[String]): Unit = {
    val connections = Tcp().bind("127.0.0.1", 8888)
    connections runForeach { connection =>
      logger.info(s"New connection from: ${connection.remoteAddress}")
      connection.handleWith(echo)
    }
  }
}
Example 86
Source File: JavaScriptEngine.scala From asura with MIT License | 5 votes |
package asura.core.script

import asura.common.util.LogUtils
import asura.core.script.builtin.{Functions, StringGenerator}
import com.typesafe.scalalogging.Logger
import javax.script.{CompiledScript, ScriptContext, ScriptEngineManager, SimpleScriptContext}
import jdk.nashorn.api.scripting.NashornScriptEngine

object JavaScriptEngine {

  private val logger = Logger("JavaScriptEngine")
  // use separate execution context
  // private val engineExecutionContext = ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(4))
  private val sem = new ScriptEngineManager(getClass.getClassLoader)

  val engine: NashornScriptEngine = sem.getEngineByName("javascript").asInstanceOf[NashornScriptEngine]

  private val baseLibs: CompiledScript = {
    logger.info("initialize base javascript libraries")
    engine.compile(StringGenerator.exports + Functions.exports)
  }

  val localContext: ThreadLocal[ScriptContext] = ThreadLocal.withInitial(() => initScriptContext())

  def eval(script: String, bindingsData: java.util.Map[String, Any] = null): Any = {
    if (null != bindingsData) {
      val context = localContext.get()
      val bindings = context.getBindings(ScriptContext.ENGINE_SCOPE)
      bindings.putAll(bindingsData)
      engine.eval(script, context)
    } else {
      engine.eval(script, localContext.get())
    }
  }

  def eval(script: CompiledScript, bindingsData: java.util.Map[String, Any]): Any = {
    val context = localContext.get()
    if (null != bindingsData) {
      val bindings = context.getBindings(ScriptContext.ENGINE_SCOPE)
      bindings.putAll(bindingsData)
      script.eval(context)
    } else {
      script.eval(context)
    }
  }

  // initialize a ScriptContext with base libraries
  private def initScriptContext(): ScriptContext = {
    val context = new SimpleScriptContext()
    try {
      context.setWriter(new CustomWriter())
      baseLibs.eval(context)
    } catch {
      case t: Throwable => logger.warn(LogUtils.stackTraceToString(t))
    }
    context
  }
}
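A usage sketch: eval accepts optional bindings that are written into the calling thread's ScriptContext, so values injected by one thread are not visible to another. The example names are hypothetical.

import asura.core.script.JavaScriptEngine

object JsEngineExample extends App {
  val bindings = new java.util.HashMap[String, Any]()
  bindings.put("a", 21)
  // evaluated against this thread's context, which already carries the base libraries
  println(JavaScriptEngine.eval("a * 2", bindings)) // 42
}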
Example 87
Source File: JobStatusActor.scala From asura with MIT License | 5 votes |
package asura.core.job.actor

import akka.actor.Status.Failure
import akka.actor.{ActorRef, Props}
import asura.common.actor._
import asura.common.model.Pagination
import asura.core.model.QueryJob
import asura.core.es.service.JobService
import asura.core.job.actor.JobStatusMonitorActor.JobStatusOperationMessage
import asura.core.job.eventbus.JobStatusBus.JobStatusNotificationMessage
import asura.core.job.{JobListItem, JobStates}
import asura.core.redis.RedisJobState
import asura.core.util.JacksonSupport
import com.typesafe.scalalogging.Logger

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

class JobStatusActor() extends BaseActor {

  var query: QueryJob = null
  val watchIds = mutable.HashSet[String]()

  override def receive: Receive = {
    case SenderMessage(sender) =>
      context.become(query(sender))
  }

  def query(outSender: ActorRef): Receive = {
    case query: QueryJob =>
      this.query = query
      JobService.queryJob(query).map(esResponse =>
        if (esResponse.isSuccess) {
          val items = ArrayBuffer[JobListItem]()
          val jobsTable = mutable.HashMap[String, JobListItem]()
          val hits = esResponse.result.hits
          watchIds.clear()
          hits.hits.foreach(hit => {
            val jobId = hit.id
            watchIds.add(jobId)
            jobsTable += (jobId -> {
              val item = JacksonSupport.parse(hit.sourceAsString, classOf[JobListItem])
              item.state = JobStates.UNKNOWN
              items += item
              item._id = jobId
              item
            })
          })
          if (watchIds.nonEmpty) {
            RedisJobState.getJobState(watchIds.toSet).onComplete {
              case util.Success(statesMap) =>
                statesMap.forEach((jobKey, state) => jobsTable(jobKey).state = state)
                outSender ! ListActorEvent(Map("total" -> hits.total, "list" -> items))
              case util.Failure(_) =>
                outSender ! ListActorEvent(Map("total" -> hits.total, "list" -> items))
            }(context.system.dispatcher)
          } else {
            outSender ! ListActorEvent(Map("total" -> 0, "list" -> Nil))
          }
        } else {
          outSender ! ErrorActorEvent(esResponse.error.reason)
        })(context.system.dispatcher)
    case JobStatusNotificationMessage(_, operator, scheduler, group, name, data) =>
      if (watchIds.contains(name)) {
        outSender ! ItemActorEvent(JobStatusOperationMessage(operator, scheduler, group, name, data))
      }
    case eventMessage: ActorEvent =>
      outSender ! eventMessage
    case Failure(t) =>
      outSender ! ErrorActorEvent(t.getMessage)
  }

  override def postStop(): Unit = {
    import JobStatusActor.logger
    logger.debug(s"JobStatus for ${query} stopped")
  }
}

object JobStatusActor {
  val logger = Logger(classOf[JobStatusActor])

  def props() = Props(new JobStatusActor())

  case class JobQueryMessage(scheduler: String = null, group: String = null, text: String = null) extends Pagination
}
Example 88
Source File: ClearJobReportDataIndicesJob.scala From asura with MIT License | 5 votes |
package asura.core.job.impl

import asura.common.util.FutureUtils.RichFuture
import asura.core.es.service.{IndexService, JobReportDataItemService}
import com.typesafe.scalalogging.Logger
import org.quartz.{Job, JobExecutionContext}

import scala.concurrent.duration._

class ClearJobReportDataIndicesJob extends Job {

  import ClearJobReportDataIndicesJob._

  val logger = Logger(classOf[ClearJobReportDataIndicesJob])

  override def execute(context: JobExecutionContext): Unit = {
    val detail = context.getJobDetail
    val day = detail.getJobDataMap.getInt(KEY_DAY)
    if (day > 0) {
      val response = JobReportDataItemService.getIndices().await(30 seconds)
      if (response.isSuccess) {
        val indices = response.result.slice(day, response.result.size).map(_.index)
        if (indices.nonEmpty) {
          logger.info(s"delete indices: ${indices.mkString(",")}")
          IndexService.delIndex(indices).await(30 seconds)
        }
      } else {
        logger.error(response.error.reason)
      }
    }
  }
}

object ClearJobReportDataIndicesJob {
  val NAME = "ClearJobReportIndicesJob"
  val KEY_CRON = "cron"
  val KEY_DAY = "day"
  val DEFAULT_DAY = 20
}
Example 89
Source File: RedisClient.scala From asura with MIT License | 5 votes |
package asura.core.redis

import com.fasterxml.jackson.databind.{DeserializationFeature, ObjectMapper}
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.fasterxml.jackson.module.scala.experimental.ScalaObjectMapper
import com.typesafe.scalalogging.Logger
import org.redisson.Redisson
import org.redisson.api.{RFuture, RedissonClient}
import org.redisson.codec.JsonJacksonCodec
import org.redisson.config.Config

import scala.compat.java8.FutureConverters.{toScala => javaFutureToScalaFuture}
import scala.concurrent.Future

object RedisClient {

  val logger = Logger("RedisClient")
  var redisson: RedissonClient = null

  private val mapper: ObjectMapper with ScalaObjectMapper = new ObjectMapper() with ScalaObjectMapper
  mapper.registerModule(DefaultScalaModule)
  mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
  mapper.configure(DeserializationFeature.FAIL_ON_IGNORED_PROPERTIES, false)

  def init(servers: Seq[String] = Nil): Unit = {
    val config = new Config()
    config.setCodec(new JsonJacksonCodec(mapper))
    if (null == servers || servers.isEmpty) {
      config.useSingleServer().setAddress("redis://127.0.0.1:6379")
    } else if (servers.length == 1) {
      config.useSingleServer().setAddress(servers(0))
    } else {
      config.useClusterServers().setScanInterval(3000).addNodeAddress(servers: _*)
    }
    logger.info(s"init redis client with config: ${config.toJSON}")
    redisson = Redisson.create(config)
  }

  def shutdown(): Unit = {
    if (null != redisson) {
      logger.info("shutdown redis client")
      redisson.shutdown()
    }
  }

  implicit def toScala[T](rf: RFuture[T]): Future[T] = {
    javaFutureToScalaFuture(rf)
  }
}
Example 90
Source File: RedisJobState.scala From asura with MIT License | 5 votes |
package asura.core.redis

import java.util

import asura.common.util.LogUtils
import asura.core.concurrent.ExecutionContextManager.cachedExecutor
import asura.core.redis.RedisClient.{redisson, toScala}
import com.typesafe.scalalogging.Logger
import org.redisson.client.codec.StringCodec

import scala.collection.JavaConverters.setAsJavaSet
import scala.concurrent.Future
import scala.util.{Failure, Success}

object RedisJobState {

  val logger = Logger("RedisJobState")
  val KEY_JOB_STATE = "asura_job_state"

  def updateJobState(scheduler: String, jobGroup: String, jobName: String, state: String)(successBlock: => Unit): Unit = {
    val jobStates = redisson.getMap[String, String](KEY_JOB_STATE, StringCodec.INSTANCE)
    jobStates.fastPutAsync(jobName, state).onComplete {
      case Failure(t) => logger.error(LogUtils.stackTraceToString(t))
      case Success(_) =>
        logger.debug(s"update $jobName to $state successful.")
        successBlock
    }
  }

  def deleteJobState(scheduler: String, jobGroup: String, jobName: String)(successBlock: => Unit): Unit = {
    val jobStates = redisson.getMap[String, String](KEY_JOB_STATE, StringCodec.INSTANCE)
    jobStates.fastRemoveAsync(jobName).onComplete {
      case Failure(t) => logger.error(LogUtils.stackTraceToString(t))
      case Success(_) =>
        logger.debug(s"delete $jobName state.")
        successBlock
    }
  }

  def getJobState(keys: Set[String]): Future[util.Map[String, String]] = {
    val jobStates = redisson.getMap[String, String](KEY_JOB_STATE, StringCodec.INSTANCE)
    jobStates.getAllAsync(setAsJavaSet(keys))
  }
}
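A minimal sketch, assuming a Redis instance is reachable on 127.0.0.1:6379 (the scheduler, group and job names are placeholders): states live in a single Redis hash keyed by job name, and the success callback fires only after the write completes.

import asura.core.redis.{RedisClient, RedisJobState}
import scala.concurrent.Await
import scala.concurrent.duration._

object JobStateExample extends App {
  RedisClient.init() // defaults to redis://127.0.0.1:6379
  RedisJobState.updateJobState("scheduler-1", "group-a", "job-42", "RUNNING") {
    // invoked only after the hash entry has been written
  }
  val states = Await.result(RedisJobState.getJobState(Set("job-42")), 5.seconds)
  println(states.get("job-42"))
  RedisClient.shutdown()
}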
Example 92
Source File: EntityUtils.scala From asura with MIT License | 5 votes |
package asura.core.http import java.net.URLEncoder import akka.http.scaladsl.model.{ContentType, ContentTypes, HttpEntity, RequestEntity} import akka.util.ByteString import asura.common.util.{LogUtils, StringUtils} import asura.core.es.model.{HttpCaseRequest, KeyValueObject} import asura.core.http.UriUtils.UTF8 import asura.core.runtime.RuntimeContext import asura.core.util.JacksonSupport import com.fasterxml.jackson.core.`type`.TypeReference import com.typesafe.scalalogging.Logger object EntityUtils { val logger = Logger("EntityUtils") def toEntity(cs: HttpCaseRequest, context: RuntimeContext): RequestEntity = { val request = cs.request var contentType: ContentType = ContentTypes.`text/plain(UTF-8)` var byteString: ByteString = ByteString.empty if (StringUtils.isNotEmpty(request.contentType) && null != request.body && request.body.nonEmpty) { request.contentType match { case HttpContentTypes.JSON => contentType = ContentTypes.`application/json` val body = request.body.find(_.contentType == HttpContentTypes.JSON) if (body.nonEmpty) { byteString = ByteString(context.renderBodyAsString(body.get.data)) } case HttpContentTypes.X_WWW_FORM_URLENCODED => contentType = ContentTypes.`application/x-www-form-urlencoded` val body = request.body.find(_.contentType == HttpContentTypes.X_WWW_FORM_URLENCODED) if (body.nonEmpty) { var bodyStr: String = null try { val sb = StringBuilder.newBuilder val params = JacksonSupport.parse(body.get.data, new TypeReference[Seq[KeyValueObject]]() {}) for (pair <- params if (pair.enabled && StringUtils.isNotEmpty(pair.key))) { val rendered = context.renderBodyAsString(pair.value) sb.append(pair.key).append("=").append(URLEncoder.encode(rendered, UTF8)).append("&") } if (sb.nonEmpty) { sb.deleteCharAt(sb.length - 1) } bodyStr = sb.toString } catch { case t: Throwable => val errLog = LogUtils.stackTraceToString(t) logger.warn(errLog) bodyStr = errLog } byteString = ByteString(bodyStr) } case HttpContentTypes.TEXT_PLAIN => contentType = ContentTypes.`text/plain(UTF-8)` val body = request.body.find(_.contentType == HttpContentTypes.TEXT_PLAIN) if (body.nonEmpty) { byteString = ByteString(context.renderBodyAsString(body.get.data)) } case _ => } } HttpEntity(contentType, byteString) } }
Example 93
Source File: HeaderUtils.scala From asura with MIT License | 5 votes |
package asura.core.http

import akka.http.scaladsl.model.HttpHeader.ParsingResult.{Error, Ok}
import akka.http.scaladsl.model.headers.{Cookie, RawHeader}
import akka.http.scaladsl.model.{ErrorInfo, HttpHeader}
import asura.common.util.StringUtils
import asura.core.es.model.{Environment, HttpCaseRequest}
import asura.core.runtime.RuntimeContext
import asura.core.{CoreConfig, ErrorMessages}
import com.typesafe.scalalogging.Logger

import scala.collection.immutable
import scala.collection.mutable.ArrayBuffer

object HeaderUtils {

  val logger = Logger("HeaderUtils")

  def toHeaders(cs: HttpCaseRequest, context: RuntimeContext): immutable.Seq[HttpHeader] = {
    val headers = ArrayBuffer[HttpHeader]()
    val request = cs.request
    val env = if (null != context.options) context.options.getUsedEnv() else null
    if (null != request) {
      val headerSeq = request.header
      if (null != headerSeq) {
        for (h <- headerSeq if (h.enabled && StringUtils.isNotEmpty(h.key))) {
          HttpHeader.parse(h.key, context.renderSingleMacroAsString(h.value)) match {
            case Ok(header: HttpHeader, errors: List[ErrorInfo]) =>
              if (errors.nonEmpty) logger.warn(errors.mkString(","))
              headers += header
            case Error(error: ErrorInfo) =>
              logger.warn(error.detail)
          }
        }
      }
      val cookieSeq = request.cookie
      if (null != cookieSeq) {
        val cookies = ArrayBuffer[(String, String)]()
        for (c <- cookieSeq if (c.enabled && StringUtils.isNotEmpty(c.key))) {
          cookies += ((c.key, context.renderSingleMacroAsString(c.value)))
        }
        if (cookies.nonEmpty) headers += Cookie(cookies: _*)
      }
    }
    if (null != env && null != env.headers && env.headers.nonEmpty) {
      for (h <- env.headers if (h.enabled && StringUtils.isNotEmpty(h.key))) {
        HttpHeader.parse(h.key, context.renderSingleMacroAsString(h.value)) match {
          case Ok(header: HttpHeader, errors: List[ErrorInfo]) =>
            if (errors.nonEmpty) logger.warn(errors.mkString(","))
            headers += header
          case Error(error: ErrorInfo) =>
            logger.warn(error.detail)
        }
      }
    }
    if (null != env && env.enableProxy) {
      val headerIdentifier = validateProxyVariables(env)
      val dst = StringBuilder.newBuilder
      dst.append("/").append(cs.group).append("/").append(cs.project).append("/").append(env.namespace)
      headers += RawHeader(headerIdentifier, dst.toString)
    }
    headers.toList
  }

  def validateProxyVariables(env: Environment): String = {
    if (!CoreConfig.linkerdConfig.enabled) {
      throw ErrorMessages.error_ProxyDisabled.toException
    }
    if (StringUtils.isEmpty(env.namespace)) {
      throw ErrorMessages.error_EmptyNamespace.toException
    }
    if (StringUtils.isEmpty(env.server)) {
      throw ErrorMessages.error_EmptyProxyServer.toException
    }
    val proxyServerOpt = CoreConfig.linkerdConfig.servers.find(_.tag.equals(env.server))
    // `||` (the original used `&&`, which would call `.get` on an empty Option and
    // throw NoSuchElementException instead of the intended error): reject a missing
    // server before dereferencing it, and reject an empty headerIdentifier as well
    if (proxyServerOpt.isEmpty || StringUtils.isEmpty(proxyServerOpt.get.headerIdentifier)) {
      throw ErrorMessages.error_InvalidProxyConfig.toException
    } else {
      proxyServerOpt.get.headerIdentifier
    }
  }

  def isApplicationJson(header: HttpHeader): Boolean = {
    if (header.lowercaseName().equals("content-type")) {
      header.value().contains(HttpContentTypes.JSON)
    } else {
      false
    }
  }
}
Example 94
Source File: MySqlConnector.scala From asura with MIT License | 5 votes |
package asura.core.sql import java.sql._ import java.util import java.util.Base64 import asura.common.util.{LogUtils, RSAUtils, StringUtils} import asura.core.CoreConfig import asura.core.es.model.SqlRequest.SqlRequestBody import com.typesafe.scalalogging.Logger object MySqlConnector { val logger = Logger("MySqlConnectors") @throws[Throwable] def connect(sql: SqlRequestBody): Connection = { val url = s"jdbc:mysql://${sql.host}:${sql.port}/${sql.database}?autoReconnect=true&useCursorFetch=true&useUnicode=true&characterEncoding=utf-8" try { val password = if (StringUtils.isNotEmpty(sql.encryptedPass)) { val bytes = Base64.getDecoder.decode(sql.encryptedPass) new String(RSAUtils.decryptByPublicKey(bytes, CoreConfig.securityConfig.pubKeyBytes)) } else { sql.password } DriverManager.getConnection(url, sql.username, password) } catch { case t: Throwable => logger.error(LogUtils.stackTraceToString(t)) throw t } } @throws[Throwable] def executeUpdate(conn: Connection, sql: String): Integer = { val statement = conn.createStatement() try { statement.executeUpdate(sql) } catch { case t: Throwable => logger.error(LogUtils.stackTraceToString(t)) throw t } finally { statement.close() } } @throws[Throwable] def executeQuery(conn: Connection, sql: String): java.util.List[java.util.HashMap[String, Object]] = { var statement: Statement = null try { // https://stackoverflow.com/questions/26046234/is-there-a-mysql-jdbc-that-will-respect-fetchsize statement = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY) statement.setFetchSize(SqlConfig.MAX_ROWS_SIZE) val rs = statement.executeQuery(sql) val list = new util.ArrayList[util.HashMap[String, Object]]() val metaData = rs.getMetaData val count = metaData.getColumnCount while (rs.next() && list.size() < SqlConfig.MAX_ROWS_SIZE) { val row = new util.HashMap[String, Object]() for (i <- 1 to count) { val value = preserve(metaData, rs, i) if (null != value) row.put(metaData.getColumnName(i), value) } list.add(row) } list } finally { if (null != statement) statement.close() } } private def preserve(meta: ResultSetMetaData, rs: ResultSet, col: Int): Object = { val className = meta.getColumnClassName(col) className match { case "java.lang.Long" | "java.lang.Integer" | "java.lang.Short" | "java.lang.Byte" | "java.lang.Boolean" => rs.getObject(col) case _ => rs.getObject(col).toString } } }
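A minimal usage sketch, assuming `sqlBody` is a SqlRequestBody pointing at a reachable MySQL instance with a plain-text password (empty `encryptedPass`) and that a `users` table exists; both are assumptions for illustration:

import asura.core.es.model.SqlRequest.SqlRequestBody
import asura.core.sql.MySqlConnector

object MySqlConnectorSketch {
  def querySketch(sqlBody: SqlRequestBody): Unit = {
    val conn = MySqlConnector.connect(sqlBody)
    try {
      // returns at most SqlConfig.MAX_ROWS_SIZE rows as column-name -> value maps
      val rows = MySqlConnector.executeQuery(conn, "SELECT id, name FROM users LIMIT 10")
      rows.forEach(row => println(row))
    } finally {
      conn.close()
    }
  }
}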
Example 95
Source File: SqlParserUtils.scala From asura with MIT License | 5 votes |
package asura.core.sql import asura.common.util.{LogUtils, StringUtils} import com.alibaba.druid.sql.SQLUtils import com.alibaba.druid.sql.ast.statement.{SQLAlterTableStatement, SQLDropTableStatement, SQLExprTableSource, SQLSelectStatement} import com.alibaba.druid.sql.dialect.mysql.ast.statement._ import com.typesafe.scalalogging.Logger object SqlParserUtils { val logger = Logger("SqlParserUtils") def isSelectStatement(sql: String): (Boolean, String) = { try { val statement = SQLUtils.parseSingleMysqlStatement(sql) statement match { case _: SQLSelectStatement => (true, null) case _ => (false, null) } } catch { case t: Throwable => (false, t.getMessage) } } @throws[Throwable] def getStatementTable(sql: String): String = { try { val statement = SQLUtils.parseSingleMysqlStatement(sql) val tableName = statement match { case statement: MySqlCreateTableStatement => val name = statement.getTableSource.getName if (null == name) StringUtils.EMPTY else name.getSimpleName case statement: SQLDropTableStatement => val sb = StringBuilder.newBuilder statement.getTableSources.forEach(s => { val name = s.getName if (null != name) sb.append(name.getSimpleName).append(",") }) if (sb.length > 0) sb.substring(0, sb.length - 1) else StringUtils.EMPTY case statement: SQLAlterTableStatement => val table = statement.getTableName if (null != table) table else StringUtils.EMPTY case statement: MySqlInsertStatement => val name = statement.getTableName if (null == name) StringUtils.EMPTY else name.getSimpleName case statement: MySqlDeleteStatement => val table = statement.getTableName if (null != table) table.getSimpleName else StringUtils.EMPTY case statement: MySqlUpdateStatement => val name = statement.getTableName if (null == name) StringUtils.EMPTY else name.getSimpleName case statement: SQLSelectStatement => val name = statement.getSelect.getQuery.asInstanceOf[MySqlSelectQueryBlock] .getFrom.asInstanceOf[SQLExprTableSource] .getName if (null == name) StringUtils.EMPTY else name.getSimpleName case _ => StringUtils.EMPTY } tableName.toLowerCase } catch { case t: Throwable => logger.warn(LogUtils.stackTraceToString(t)) throw t } } }
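A quick usage sketch of the two entry points above; the SQL statements are illustrative:

import asura.core.sql.SqlParserUtils

object SqlParserUtilsSketch {
  def main(args: Array[String]): Unit = {
    println(SqlParserUtils.isSelectStatement("SELECT * FROM t_user"))          // (true, null)
    println(SqlParserUtils.isSelectStatement("DELETE FROM t_user WHERE id=1")) // (false, null)
    println(SqlParserUtils.getStatementTable("UPDATE t_user SET name = 'a'"))  // t_user
  }
}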
Example 96
Source File: SqlRunner.scala From asura with MIT License | 5 votes |
package asura.core.sql import akka.pattern.ask import akka.util.Timeout import asura.common.exceptions.WithDataException import asura.core.concurrent.ExecutionContextManager.sysGlobal import asura.core.es.model.SqlRequest import asura.core.es.model.SqlRequest.SqlRequestBody import asura.core.runtime.{ContextOptions, RuntimeContext, RuntimeMetrics} import asura.core.sql.SqlReportModel.{SqlRequestReportModel, SqlResponseReportModel} import asura.core.{CoreConfig, RunnerActors} import com.typesafe.scalalogging.Logger import scala.concurrent.Future object SqlRunner { val logger = Logger("SqlRunner") implicit val timeout: Timeout = CoreConfig.DEFAULT_ACTOR_ASK_TIMEOUT lazy val sqlInvoker = RunnerActors.sqlInvoker def test(docId: String, request: SqlRequest, context: RuntimeContext = RuntimeContext()): Future[SqlResult] = { implicit val metrics = RuntimeMetrics() metrics.start() context.eraseCurrentData() var options = context.options if (null != options) { options.caseEnv = request.env } else { options = ContextOptions(caseEnv = request.env) context.options = options } metrics.renderRequestStart() context.evaluateOptions().flatMap(_ => { renderRequest(request.request, context) .flatMap(tuple => { metrics.performRequestStart() (sqlInvoker ? tuple._1).flatMap(responseObj => { context.setCurrentEntity(responseObj.asInstanceOf[Object]) metrics.evalAssertionBegin() context.setCurrentMetrics(metrics) SqlResult.evaluate( docId, request.assert, context, tuple._2, SqlResponseReportModel(responseObj.asInstanceOf[Object]) ) }).recover { case t: Throwable => throw WithDataException(t, tuple._2) } }) .map(result => { metrics.evalAssertionEnd() metrics.theEnd() result.metrics = metrics.toReportStepItemMetrics() result }) }) } def renderRequest(request: SqlRequestBody, context: RuntimeContext) (implicit metrics: RuntimeMetrics): Future[(SqlRequestBody, SqlRequestReportModel)] = { val host = request.host val port = request.port val database = request.database val sql = context.renderBodyAsString(request.sql) metrics.renderRequestEnd() metrics.renderAuthBegin() metrics.renderAuthEnd() val renderedRequest = request.copyFrom(host, port, database, sql) val reportModel = SqlRequestReportModel( host = request.host, port = request.port, username = request.username, database = request.database, table = request.table, sql = sql ) Future.successful((renderedRequest, reportModel)) } }
Example 97
Source File: AssertionContext.scala From asura with MIT License | 5 votes |
package asura.core.assertion.engine import asura.common.util.LogUtils import asura.core.concurrent.ExecutionContextManager.cachedExecutor import asura.core.assertion._ import asura.core.util.JsonPathUtils import com.jayway.jsonpath.PathNotFoundException import com.typesafe.scalalogging.Logger import scala.concurrent.Future case class AssertionContext( assert: Map[String, Any], context: Object, statis: Statistic ) object AssertionContext { val logger = Logger(classOf[AssertionContext]) def eval(assert: Map[String, Any], context: Object, statis: Statistic): Future[java.util.Map[String, Any]] = { val result = new java.util.concurrent.ConcurrentHashMap[String, Any]() if (null != assert && null != context) { val resultFutures = for ((k, v) <- assert) yield { if (null == v) { statis.failOnce() val assertionResult = FailAssertResult(1, s"null assert: $assert") result.put(k, assertionResult) Future.successful(result) } else { if (k.startsWith("$.") || k.startsWith("$[")) { // path handleJsonPath(context, k, v, result, statis) } else if (k.startsWith("$")) { // assertion val assertion = Assertions.get(k) if (assertion.nonEmpty) { assertion.get.assert(context, v).map(assertionResult => { if (null != assertionResult) { statis.countAndSetState(assertionResult) result.put(k, assertionResult.toReport) } result }) } else { result.put(k, FailAssertResult(1, AssertResult.MSG_UNSUPPORTED_ASSERTION)) Future.successful(result) } } else { statis.failOnce() result.put(k, FailAssertResult(1, AssertResult.MSG_UNRECOGNIZED_KEY)) Future.successful(result) } } } Future.sequence(resultFutures).map(_ => result) } else { Future.successful(result) } } private def handleJsonPath( context: Object, k: String, v: Any, result: java.util.Map[String, Any], statis: Statistic ): Future[java.util.Map[String, Any]] = { try { val subContext = JsonPathUtils.read[Object](context, k) AssertionContext.eval(v.asInstanceOf[Map[String, Any]], subContext, statis).map { subAssert => result.put(k, subAssert) result }.recover { case _: ClassNotFoundException => statis.failOnce() statis.isSuccessful = false result.put(k, FailAssertResult(1, AssertResult.MSG_UNSUPPORTED_ASSERT_FORMAT)) result case t: Throwable => statis.failOnce() statis.isSuccessful = false logger.warn(LogUtils.stackTraceToString(t)) result.put(k, FailAssertResult(1, t.getMessage)) result } } catch { case _: PathNotFoundException => statis.failOnce() statis.isSuccessful = false result.put(k, FailAssertResult(1, AssertResult.pathNotFound(k))) Future.successful(result) case t: Throwable => statis.failOnce() statis.isSuccessful = false logger.warn(LogUtils.stackTraceToString(t)) result.put(k, FailAssertResult(1, t.getMessage)) Future.successful(result) } } }
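For reference, a sketch of the two kinds of keys the evaluator above distinguishes. The `$eq` and `$isDefined` assertion names are assumptions about what the `Assertions` registry contains, used only to show the shape:

// keys starting with "$." or "$[" are JSON paths: the evaluator extracts the
// sub-document and recurses into the nested map; other "$..." keys are looked
// up in the Assertions registry and applied to the current context
val assert: Map[String, Any] = Map(
  "$.status" -> Map("$eq" -> 200),   // hypothetical "$eq" assertion on a path
  "$isDefined" -> true               // hypothetical top-level assertion
)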
Example 98
Source File: StringTemplate.scala From asura with MIT License | 5 votes |
package asura.core.util

import asura.common.exceptions.InvalidStatusException
import asura.common.util.{LogUtils, StringUtils}
import com.typesafe.scalalogging.Logger
import jodd.util.StringTemplateParser

import scala.collection.{immutable, mutable}

object StringTemplate {

  // NOTE: this declaration was elided by the example page along with other members;
  // a default jodd parser is assumed here so that `uriPathParse` below compiles
  private val uriPathParser = new StringTemplateParser()

  def uriPathParse(tpl: String, context: immutable.Map[String, String]): String = {
    uriPathParser.parse(tpl, macroName => {
      context.get(macroName) match {
        case None => throw InvalidStatusException(s"${macroName}: path template variable not found")
        case Some(value) => value
      }
    })
  }
}
Example 99
Source File: EsClient.scala From asura with MIT License | 5 votes |
package asura.core.es import asura.common.util.StringUtils import asura.core.CoreConfig.EsOnlineLogConfig import asura.core.es.model._ import asura.core.es.service.IndexService import com.sksamuel.elastic4s.http.{JavaClient, NoOpHttpClientConfigCallback} import com.sksamuel.elastic4s.{ElasticClient, ElasticProperties} import com.typesafe.scalalogging.Logger import org.apache.http.client.config.RequestConfig import org.elasticsearch.client.RestClientBuilder.RequestConfigCallback import scala.collection.mutable object EsClient { val logger = Logger("EsClient") private var client: ElasticClient = _ private val onlineLogClient = mutable.Map[String, EsOnlineLogConfig]() def esClient: ElasticClient = client def esOnlineLogClient(tag: String) = onlineLogClient.get(tag) def esOnlineLogClients = onlineLogClient.values def init(url: String): Boolean = { client = ElasticClient(JavaClient(ElasticProperties(url))) var isAllOk = true val indices: Seq[IndexSetting] = Seq( HttpCaseRequest, RestApi, Job, Project, Environment, Group, JobReport, JobNotify, Scenario, UserProfile, Activity, DomainOnlineLog, ProjectApiCoverage, DomainOnlineConfig, DubboRequest, SqlRequest, Favorite, CiTrigger, TriggerEventLog ) for (index <- indices if isAllOk) { logger.info(s"check es index ${index.Index}") isAllOk = IndexService.initCheck(index) } isAllOk = IndexService.checkTemplate() isAllOk } def initOnlineLogClient(configs: Seq[EsOnlineLogConfig]): Unit = { if (null != configs && configs.nonEmpty) { val clientCache = mutable.Map[String, ElasticClient]() configs.foreach(config => { if (StringUtils.isNotEmpty(config.url)) { config.onlineLogClient = clientCache.get(config.url).getOrElse({ val client = ElasticClient(JavaClient(ElasticProperties(config.url), new CusRequestConfigCallback(), NoOpHttpClientConfigCallback)) clientCache += (config.url -> client) client }) onlineLogClient += (config.tag -> config) } }) } } def closeClient(): Unit = { if (null != esClient) esClient.close() if (esOnlineLogClients.nonEmpty) esOnlineLogClients.foreach(config => { if (null != config.onlineLogClient) config.onlineLogClient.close() }) } class CusRequestConfigCallback extends RequestConfigCallback { val connectionTimeout = 600000 val socketTimeout = 600000 override def customizeRequestConfig(requestConfigBuilder: RequestConfig.Builder): RequestConfig.Builder = { // See https://github.com/elastic/elasticsearch/issues/24069 // It's fixed in master now but still yet to release to 6.3.1 requestConfigBuilder.setConnectionRequestTimeout(0) .setConnectTimeout(connectionTimeout) .setSocketTimeout(socketTimeout) } } }
Example 100
Source File: IndexService.scala From asura with MIT License | 5 votes |
package asura.core.es.service import asura.common.util.StringUtils import asura.core.concurrent.ExecutionContextManager.sysGlobal import asura.core.es.EsClient import asura.core.es.model.{FieldKeys, IndexSetting, JobReportDataItem, RestApiOnlineLog} import com.sksamuel.elastic4s.ElasticDsl._ import com.sksamuel.elastic4s.Indexes import com.sksamuel.elastic4s.requests.delete.DeleteByQueryRequest import com.sksamuel.elastic4s.requests.searches.queries.Query import com.typesafe.scalalogging.Logger import scala.collection.mutable.ArrayBuffer import scala.concurrent.Future object IndexService extends CommonService { val logger = Logger("IndexService") def initCheck(idx: IndexSetting): Boolean = { val cli = EsClient.esClient val res = cli.execute(indexExists(idx.Index)).await if (res.isSuccess) { if (res.result.exists) { true } else { val res2 = cli.execute { createIndex(idx.Index) .shards(idx.shards) .replicas(idx.replicas) .mapping(idx.mappings) }.await if (res2.isSuccess) { true } else { logger.error(res2.error.reason) false } } } else { logger.error(res.error.reason) false } } def checkTemplate(): Boolean = { checkIndexTemplate(JobReportDataItem).await && checkIndexTemplate(RestApiOnlineLog).await } def checkIndexTemplate(idxSetting: IndexSetting): Future[Boolean] = { logger.info(s"check es template ${idxSetting.Index}") val cli = EsClient.esClient cli.execute { getIndexTemplate(idxSetting.Index) }.map { res => if (res.status != 404) true else false }.recover { case _ => false }.flatMap(hasTpl => { if (!hasTpl) { cli.execute { createIndexTemplate(idxSetting.Index, s"${idxSetting.Index}-*") .settings(Map( "number_of_replicas" -> idxSetting.replicas, "number_of_shards" -> idxSetting.shards )) .mappings(idxSetting.mappings) }.map(tplIndex => { if (tplIndex.result.acknowledged) true else false }) } else { Future.successful(true) } }) } def delIndex(indices: Seq[String]) = { EsClient.esClient.execute { deleteIndex(indices) }.map(toDeleteIndexResponse(_)) } def deleteByGroupOrProject(indices: Seq[String], group: String, project: String) = { val esQueries = ArrayBuffer[Query]() if (StringUtils.isNotEmpty(group)) esQueries += termQuery(FieldKeys.FIELD_GROUP, group) if (StringUtils.isNotEmpty(project)) esQueries += termQuery(FieldKeys.FIELD_PROJECT, project) EsClient.esClient.execute { DeleteByQueryRequest( Indexes(indices), boolQuery().must(esQueries) ).refreshImmediately }.map(toDeleteByQueryResponse(_)) } }
Example 101
Source File: PackagePlatformExtensions.scala From qamr with MIT License | 5 votes |
package qamr.util import java.io.StringWriter import java.io.PrintWriter import scala.util.{Try, Success, Failure} import com.typesafe.scalalogging.Logger trait PackagePlatformExtensions { implicit class RichTry[A](val t: Try[A]) { def toOptionLogging(logger: Logger): Option[A] = t match { case Success(a) => Some(a) case Failure(e) => val sw = new StringWriter() val pw = new PrintWriter(sw, true) e.printStackTrace(pw) logger.error(e.getLocalizedMessage + "\n" + sw.getBuffer.toString) None } } }
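A short usage sketch of the `RichTry` extension above; input strings are illustrative:

import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

import scala.util.Try

object RichTrySketch extends qamr.util.PackagePlatformExtensions {
  private val logger = Logger(LoggerFactory.getLogger(getClass))

  def main(args: Array[String]): Unit = {
    println(Try("42".toInt).toOptionLogging(logger))   // Some(42), nothing logged
    println(Try("oops".toInt).toOptionLogging(logger)) // None, stack trace logged as error
  }
}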
Example 102
Source File: PersistentBufferBase.scala From squbs with Apache License 2.0 | 5 votes |
package org.squbs.pattern.stream import java.io.File import akka.actor.{ActorSystem, Props} import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler} import akka.stream.{Attributes, FlowShape, Inlet, Outlet} import com.typesafe.config.Config import com.typesafe.scalalogging.Logger import org.slf4j.LoggerFactory abstract class PersistentBufferBase[T, S] (private[stream] val queue: PersistentQueue[T], onPushCallback: () => Unit = () => {}) (implicit serializer: QueueSerializer[T], system: ActorSystem) extends GraphStage[FlowShape[T, S]] { def this(config: Config)(implicit serializer: QueueSerializer[T], system: ActorSystem) = this(new PersistentQueue[T](config)) def this(persistDir: File)(implicit serializer: QueueSerializer[T], system: ActorSystem) = this(new PersistentQueue[T](persistDir)) private[stream] val in = Inlet[T]("PersistentBuffer.in") private[stream] val out = Outlet[S]("PersistentBuffer.out") val shape: FlowShape[T, S] = FlowShape.of(in, out) val defaultOutputPort = 0 @volatile protected var upstreamFailed = false @volatile protected var upstreamFinished = false protected val queueCloserActor = system.actorOf(Props(classOf[PersistentQueueCloserActor[T]], queue)) protected def elementOut(e: Event[T]): S protected def autoCommit(index: Long) = {} def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { private var lastPushed = 0L var downstreamWaiting = false override def preStart(): Unit = { // Start upstream demand pull(in) } setHandler(in, new InHandler { override def onPush(): Unit = { val element = grab(in) queue.enqueue(element) onPushCallback() if (downstreamWaiting) { queue.dequeue() foreach { element => push(out, elementOut(element)) downstreamWaiting = false lastPushed = element.index autoCommit(element.index) } } pull(in) } override def onUpstreamFinish(): Unit = { upstreamFinished = true if (downstreamWaiting) { queueCloserActor ! PushedAndCommitted(defaultOutputPort, lastPushed, queue.read(defaultOutputPort)) queueCloserActor ! UpstreamFinished completeStage() } } override def onUpstreamFailure(ex: Throwable): Unit = { val logger = Logger(LoggerFactory.getLogger(this.getClass)) logger.error("Received upstream failure signal: " + ex) upstreamFailed = true queueCloserActor ! UpstreamFailed completeStage() } }) setHandler(out, new OutHandler { override def onPull(): Unit = { queue.dequeue() match { case Some(element) => push(out, elementOut(element)) lastPushed = element.index autoCommit(element.index) case None => if (upstreamFinished) { queueCloserActor ! PushedAndCommitted(defaultOutputPort, lastPushed, queue.read(defaultOutputPort)) queueCloserActor ! UpstreamFinished completeStage() } else downstreamWaiting = true } } }) } }
Example 103
Source File: package.scala From squbs with Apache License 2.0 | 5 votes |
package org.squbs import java.net.{URLDecoder, URLEncoder} import java.nio.ByteBuffer import java.nio.charset.Charset import akka.actor.{Address, AddressFromURIString} import akka.util.ByteString import com.typesafe.scalalogging.Logger import org.apache.curator.framework.CuratorFramework import org.apache.zookeeper.CreateMode import org.apache.zookeeper.KeeperException.NodeExistsException import scala.language.implicitConversions import scala.util.Try import scala.util.control.NonFatal import scala.collection.JavaConverters._ package object cluster { trait SegmentationLogic { val segmentsSize:Int def segmentation(partitionKey:ByteString): String = s"segment-${Math.abs(partitionKey.hashCode()) % segmentsSize}" def partitionZkPath(partitionKey:ByteString): String = s"/segments/${segmentation(partitionKey)}/${keyToPath(partitionKey)}" def sizeOfParZkPath(partitionKey:ByteString): String = s"${partitionZkPath(partitionKey)}/$$size" def servantsOfParZkPath(partitionKey:ByteString): String = s"${partitionZkPath(partitionKey)}/servants" } case class DefaultSegmentationLogic(segmentsSize:Int) extends SegmentationLogic def guarantee(path:String, data:Option[Array[Byte]], mode:CreateMode = CreateMode.EPHEMERAL) (implicit zkClient:CuratorFramework, logger:Logger):String = { try{ data match { case None => zkClient.create.withMode(mode).forPath(path) case Some(bytes) => zkClient.create.withMode(mode).forPath(path, bytes) } } catch{ case e: NodeExistsException => if(data.nonEmpty && data.get.length > 0){ zkClient.setData().forPath(path, data.get) } path case NonFatal(e) => logger.info("leader znode creation failed due to %s\n", e) path } } def safelyDiscard(path:String, recursive: Boolean = true)(implicit zkClient: CuratorFramework): String = Try { if(recursive) zkClient.getChildren.forPath(path).asScala.foreach(child => safelyDiscard(s"$path/$child", recursive)) zkClient.delete.forPath(path) path } getOrElse path def keyToPath(name:String):String = URLEncoder.encode(name, "utf-8") def pathToKey(name:String):String = URLDecoder.decode(name, "utf-8") private[cluster] val BYTES_OF_INT = Integer.SIZE / java.lang.Byte.SIZE implicit def intToBytes(integer:Int):Array[Byte] = { val buf = ByteBuffer.allocate(BYTES_OF_INT) buf.putInt(integer) buf.rewind buf.array() } val UTF_8 = Charset.forName("utf-8") implicit class ByteConversions(val bytes: Array[Byte]) extends AnyVal { def toAddress: Option[Address] = Option(bytes) flatMap (b => if (b.length <= 0) None else Some(AddressFromURIString(new String(b, UTF_8)))) def toInt: Int = ByteBuffer.wrap(bytes).getInt def toUtf8: String = new String(bytes, UTF_8) def toByteString: ByteString = ByteString(bytes) def toAddressSet: Set[Address] = Try { new String(bytes, UTF_8).split("[,]").map(seg => AddressFromURIString(seg.trim)).toSet } getOrElse Set.empty } implicit def byteStringToUtf8(bs:ByteString):String = new String(bs.toArray, UTF_8) implicit def addressToBytes(address:Address):Array[Byte] = { address.toString.getBytes(UTF_8) } implicit def addressSetToBytes(members: Set[Address]): Array[Byte] = { members.mkString(",").getBytes(UTF_8) } }
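A minimal sketch of using `guarantee` and `safelyDiscard` from this package object, assuming a ZooKeeper instance at localhost:2181 (the address and znode path are illustrative):

import com.typesafe.scalalogging.Logger
import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.CreateMode
import org.slf4j.LoggerFactory
import org.squbs.cluster._

object GuaranteeSketch {
  def main(args: Array[String]): Unit = {
    implicit val zkClient: CuratorFramework =
      CuratorFrameworkFactory.newClient("localhost:2181", new ExponentialBackoffRetry(1000, 3))
    implicit val logger: Logger = Logger(LoggerFactory.getLogger(getClass))
    zkClient.start()
    zkClient.blockUntilConnected()
    // idempotent: returns the path whether the znode was created or already existed
    val path = guarantee("/leader", Some("node-1".getBytes(UTF_8)), CreateMode.EPHEMERAL)
    println(s"guaranteed: $path")
    safelyDiscard("/leader")
    zkClient.close()
  }
}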
Example 104
Source File: StreamEnvImpl.scala From swave with Mozilla Public License 2.0 | 5 votes |
package swave.core.impl import java.util.concurrent.{ConcurrentHashMap, TimeoutException} import scala.annotation.tailrec import scala.util.Try import scala.concurrent.{Future, Promise} import scala.concurrent.duration._ import com.typesafe.config.Config import com.typesafe.scalalogging.Logger import org.slf4j.LoggerFactory import swave.core.macros._ import swave.core._ private[core] final class StreamEnvImpl(val name: String, val config: Config, val settings: StreamEnv.Settings, val classLoader: ClassLoader) extends StreamEnv { val startTime = System.currentTimeMillis() val log = Logger(LoggerFactory.getLogger(name)) val dispatchers = DispatchersImpl(settings.dispatcherSettings) val scheduler = SchedulerImpl(settings.schedulerSettings) if (settings.logConfigOnStart) log.info(settings.toString) // TODO: improve rendering def defaultDispatcher = dispatchers.defaultDispatcher def shutdown(): StreamEnv.Termination = new StreamEnv.Termination { val schedulerTermination = scheduler.shutdown() val dispatchersTermination = dispatchers.shutdownAll() def isTerminated: Boolean = schedulerTermination.isCompleted && unterminatedDispatchers.isEmpty def unterminatedDispatchers: List[String] = dispatchersTermination() def awaitTermination(timeout: FiniteDuration): Unit = { requireArg(timeout >= Duration.Zero, "`timeout` must be > 0") var deadline = System.nanoTime() + timeout.toNanos if (deadline < 0) deadline = Long.MaxValue // overflow protection @tailrec def await(): Unit = if (!isTerminated) { if (System.nanoTime() < deadline) { Thread.sleep(1L) await() } else { val unterminated = if (schedulerTermination.isCompleted) unterminatedDispatchers else "scheduler" :: unterminatedDispatchers throw new TimeoutException( s"StreamEnv did not shut down within specified timeout of $timeout.\n" + s"Unterminated dispatchers: [${unterminated.mkString(", ")}]") } } await() } } private[this] val _extensions = new ConcurrentHashMap[ExtensionId[_], Future[_ <: Extension]] @tailrec def getOrLoadExtension[T <: Extension](id: ExtensionId[T]): Future[T] = _extensions.get(id) match { case null ⇒ val promise = Promise[T]() _extensions.putIfAbsent(id, promise.future) match { case null ⇒ val tryValue = Try(id.createExtension(this)) promise.complete(tryValue) val future = Promise.fromTry(tryValue).future _extensions.put(id, future) // speed up future accesses somewhat future case _ ⇒ getOrLoadExtension(id) } case x ⇒ x.asInstanceOf[Future[T]] } }
Example 105
Source File: StreamActor.scala From swave with Mozilla Public License 2.0 | 5 votes |
package swave.core.impl

import java.util.concurrent.atomic.AtomicBoolean

import scala.util.control.NonFatal
import scala.annotation.tailrec
import com.typesafe.scalalogging.Logger
import org.jctools.queues.MpscLinkedQueue8
import swave.core.Dispatcher

// NOTE: the class header and mailbox declaration were elided by the example page;
// they are reconstructed here from the member references below (log, dispatcher,
// throughput, mailbox), so the exact modifiers and parameter names are assumptions
private[impl] abstract class StreamActor(protected val log: Logger,
                                         protected val dispatcher: Dispatcher,
                                         protected val throughput: Int) {

  protected type MessageType <: AnyRef

  private[this] val mailbox = new MpscLinkedQueue8[MessageType]()

  private[this] val scheduled: AtomicBoolean with Runnable =
    new AtomicBoolean(true) with Runnable {
      final def run(): Unit =
        try {
          @tailrec def processMailbox(maxRemainingMessages: Int): Unit =
            if (maxRemainingMessages > 0) {
              val nextMsg = mailbox.poll()
              if (nextMsg ne null) {
                receive(nextMsg)
                processMailbox(maxRemainingMessages - 1)
              }
            }
          processMailbox(throughput)
        } catch {
          // Non-fatal exceptions thrown in user code should never bubble up to here!
          // Non-fatal exceptions thrown in (async) swave code should always bubble up to here!
          case NonFatal(e) ⇒ log.error("Uncaught exception in StreamActor::receive", e)
        } finally {
          startMessageProcessing()
        }
    }

  protected def receive(msg: MessageType): Unit

  private[impl] final def enqueue(msg: MessageType): Unit = {
    mailbox.offer(msg)
    trySchedule()
  }

  private def trySchedule(): Unit =
    if (scheduled.compareAndSet(false, true)) {
      try dispatcher.execute(scheduled)
      catch {
        case NonFatal(e) ⇒
          scheduled.set(false)
          throw e
      }
    }

  // must be called at the end of the outermost constructor
  protected final def startMessageProcessing(): Unit = {
    scheduled.set(false)
    if (!mailbox.isEmpty) trySchedule()
  }
}
Example 106
Source File: Main.scala From ProxyCrawler with Apache License 2.0 | 5 votes |
package org.crowdcrawler.proxycrawler

import java.nio.charset.StandardCharsets
import java.nio.file.{Paths, Files}

import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.fasterxml.jackson.module.scala.experimental.ScalaObjectMapper
import com.typesafe.scalalogging.Logger
import org.crowdcrawler.proxycrawler.checker.ProxyChecker
import org.slf4j.LoggerFactory

object Main {
  private val LOGGER = Logger(LoggerFactory.getLogger(Main.getClass))
  val OBJECT_MAPPER = new ObjectMapper() with ScalaObjectMapper
  OBJECT_MAPPER.registerModule(DefaultScalaModule)

  def main(args: Array[String]): Unit = {
    val usage = "Usage: \n\tcrawl [pluginClassName]* OutputFile\n" +
      "\tcheck proxies.json valid_proxies.json\n" +
      "\tfilter valid_proxies.json <HTTP|HTTPS|SOCKS> output.json\n" +
      "For example:\n" +
      "\t1. Crawl all supported websites and save proxies to proxies.json\n" +
      "\t\tcrawl proxies.json\n" +
      "\t2. Crawl www.cnproxy.com and save proxies to proxies.json:\n" +
      "\t\tcrawl CnProxyComPlugin proxies.json\n" +
      "\t3. Check the speed of proxies.\n" +
      "\t\tcheck proxies.json valid_proxies.json\n" +
      "\t4. Filter proxies by schema\n" +
      "\t\tfilter valid_proxies.json HTTP http.json\n"
    if (args.length < 2) {
      println(usage)
      return
    }

    val start = System.currentTimeMillis
    if (args(0) == "crawl") {
      val classNames = if (args.length == 2) {
        Array("CnProxyComPlugin", "CoolProxyNetPlugin", "GatherproxyComPlugin", "IpcnOrgPlugin",
          "ProxyListOrg", "SocksProxyNet", "USProxyOrgPlugin")
      } else {
        args.slice(1, args.length - 1)
      }
      val crawler = ProxyCrawler(classNames: _*)
      val proxies = crawler.crawl()
      LOGGER.info("Writing to disk, " + proxies.size + " proxies")
      val json = OBJECT_MAPPER.writerWithDefaultPrettyPrinter.writeValueAsString(proxies)
      Files.write(Paths.get(args.last), json.getBytes(StandardCharsets.UTF_8))
    } else if (args(0) == "check") {
      val json = io.Source.fromFile(args(1), "utf-8").mkString
      val list = OBJECT_MAPPER.readValue[List[ProxyInfo]](json)
      // sort by speed desc
      val validProxies = ProxyChecker.check(list).filter(_.speed > 0)
        .sortWith((p1, p2) => p1.speed > p2.speed)
      LOGGER.info("Writing to disk, " + validProxies.size + " valid proxies out of " + list.size + " proxies")
      val newJson = OBJECT_MAPPER.writerWithDefaultPrettyPrinter.writeValueAsString(validProxies)
      Files.write(Paths.get(args(2)), newJson.getBytes(StandardCharsets.UTF_8))
    } else if (args(0) == "filter") {
      val json = io.Source.fromFile(args(1), "utf-8").mkString
      val list = OBJECT_MAPPER.readValue[List[ProxyInfo]](json)
      val filtered = if (args(2) == "SOCKS") {
        // `||` throughout (the original mixed in a non-short-circuit `|`)
        list.filter(p => p.schema == "SOCKS" || p.schema == "SOCKS4" || p.schema == "SOCKS5")
      } else {
        list.filter(p => p.schema == args(2))
      }
      val newJson = OBJECT_MAPPER.writerWithDefaultPrettyPrinter.writeValueAsString(filtered)
      Files.write(Paths.get(args(3)), newJson.getBytes(StandardCharsets.UTF_8))
    } else {
      println(usage)
      return
    }
    val end = System.currentTimeMillis
    LOGGER.info("Time elapsed " + (end - start) / 1000 + " seconds.")
  }
}
Example 107
Source File: ProxyCrawler.scala From ProxyCrawler with Apache License 2.0 | 5 votes |
package org.crowdcrawler.proxycrawler

import java.io.IOException
import java.net.URI
import java.security.cert.X509Certificate

import com.typesafe.scalalogging.Logger
import org.apache.http.client.methods.HttpGet
import org.apache.http.impl.client.HttpClients
import org.apache.http.ssl.{TrustStrategy, SSLContexts}
import org.apache.http.conn.ssl.{NoopHostnameVerifier, SSLConnectionSocketFactory}
import org.apache.http.util.EntityUtils
import org.crowdcrawler.proxycrawler.crawler.plugins.AbstractPlugin
import org.apache.http.HttpHeaders
import org.slf4j.LoggerFactory

import scala.collection.immutable
import scala.collection.mutable

class ProxyCrawler(plugins: List[AbstractPlugin]) {
  // crawl() and the other instance members are elided by the example page
}

object ProxyCrawler {

  // the opening of this header map was garbled in the page extraction; the ACCEPT
  // value below is a reconstruction (a typical browser Accept header of the era)
  private val DEFAULT_HEADERS = immutable.Map(
    (HttpHeaders.ACCEPT, "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"),
    (HttpHeaders.ACCEPT_ENCODING, "gzip, deflate, sdch"),
    (HttpHeaders.ACCEPT_LANGUAGE, "en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4"),
    (HttpHeaders.CONNECTION, "keep-alive")
  )

  private val CLIENT = {
    // trust all certificates including self-signed certificates
    val sslContext = SSLContexts.custom().loadTrustMaterial(null, new TrustStrategy() {
      def isTrusted(chain: Array[X509Certificate], authType: String) = true
    }).build()
    val connectionFactory = new SSLConnectionSocketFactory(sslContext, NoopHostnameVerifier.INSTANCE)
    HttpClients.custom().setSSLSocketFactory(connectionFactory).build()
  }

  // placed on the companion object so that `ProxyCrawler(classNames: _*)` in
  // Main.scala above resolves; the flattened page text had merged it into the class
  def apply(classNames: String*): ProxyCrawler = {
    val plugins = mutable.ListBuffer.empty[AbstractPlugin]
    for (className <- classNames) {
      val clazz = Class.forName("org.crowdcrawler.proxycrawler.crawler.plugins." + className)
      plugins += clazz.newInstance().asInstanceOf[AbstractPlugin]
    }
    new ProxyCrawler(plugins.toList)
  }

  private def createRequest(uri: URI, headers: immutable.Map[String, String]): HttpGet = {
    val request = new HttpGet(uri)
    for (header <- headers) {
      request.setHeader(header._1, header._2)
    }
    request
  }
}
Example 108
Source File: ProxyChecker.scala From ProxyCrawler with Apache License 2.0 | 5 votes |
package org.crowdcrawler.proxycrawler.checker import java.io.IOException import java.net.SocketTimeoutException import com.typesafe.scalalogging.Logger import org.apache.http.annotation.ThreadSafe import org.apache.http.conn.ConnectTimeoutException import org.crowdcrawler.proxycrawler.ProxyInfo import org.slf4j.LoggerFactory import scala.collection.parallel.ForkJoinTaskSupport import scala.concurrent.forkjoin.ForkJoinPool @ThreadSafe object ProxyChecker { private val LOGGER = Logger(LoggerFactory.getLogger(ProxyChecker.getClass.getName)) private def check(proxyInfo: ProxyInfo): ProxyInfo = { val start = System.currentTimeMillis try { LOGGER.info("Executing request via proxy " + proxyInfo) val (statusCode, bytes) = proxyInfo.schema match { case "HTTP" => HttpProxyChecker.check(proxyInfo.host, proxyInfo.port) case "HTTPS" => HttpsProxyChecker.check(proxyInfo.host, proxyInfo.port) case "SOCKS" | "SOCKS4" | "SOCKS5" => SocksProxyChecker.check(proxyInfo.host, proxyInfo.port) case other => throw new IllegalArgumentException("Unsupported schema " + other) } val end = System.currentTimeMillis LOGGER.info("Time elapsed " + (end - start) + " milliseconds") if (statusCode != 200) { LOGGER.error("HTTP status code is " + statusCode) ProxyInfo(proxyInfo.host, proxyInfo.port, proxyInfo.schema, -1, proxyInfo.location, proxyInfo.from) } else { if (bytes > 0) { val speed = (bytes / ((end - start) / 1000.0)).toInt LOGGER.info("Speed is " + speed + " bytes/s") ProxyInfo(proxyInfo.host, proxyInfo.port, proxyInfo.schema, speed, proxyInfo.location, proxyInfo.from) } else { LOGGER.error("HTTP status code is 200 but the proxy failed to retrieve HTML source code") if (proxyInfo.speed >= 0) { ProxyInfo(proxyInfo.host, proxyInfo.port, proxyInfo.schema, -1, proxyInfo.location, proxyInfo.from) } else { ProxyInfo(proxyInfo.host, proxyInfo.port, proxyInfo.schema, proxyInfo.speed - 1, proxyInfo.location, proxyInfo.from) } } } } catch { case e: IOException => val end = System.currentTimeMillis if (e.isInstanceOf[ConnectTimeoutException] || e.isInstanceOf[SocketTimeoutException]) { LOGGER.info(e.getClass.getName + " : " + e.getMessage) LOGGER.info("Time elapsed " + (end - start) + " milliseconds") } else { LOGGER.error(e.getClass.getName + " : " + e.getMessage) LOGGER.error("Time elapsed " + (end - start) + " milliseconds") } if (proxyInfo.speed >= 0) { ProxyInfo(proxyInfo.host, proxyInfo.port, proxyInfo.schema, -1, proxyInfo.location, proxyInfo.from) } else { ProxyInfo(proxyInfo.host, proxyInfo.port, proxyInfo.schema, proxyInfo.speed - 1, proxyInfo.location, proxyInfo.from) } } } }
Example 109
Source File: HttpApiHandlers.scala From vamp with Apache License 2.0 | 5 votes |
package io.vamp.common.http

import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.model.{ ExceptionWithErrorInfo, StatusCode }
import akka.http.scaladsl.server._
import com.typesafe.scalalogging.Logger
import io.vamp.common.notification.NotificationErrorException
import org.slf4j.LoggerFactory

trait HttpApiHandlers {
  this: HttpApiDirectives ⇒

  private val logger = Logger(LoggerFactory.getLogger(getClass))

  implicit def exceptionHandler: ExceptionHandler = ExceptionHandler {
    case e: NotificationErrorException ⇒ respondWithError(BadRequest, s"${e.message}")
    case e: Exception ⇒ extractUri { uri ⇒
      logger.error("Request to {} could not be handled normally: {}", uri, e.getMessage)
      e match {
        case _: ExceptionWithErrorInfo ⇒ respondWithError(BadRequest)
        case _                         ⇒ respondWithError(InternalServerError)
      }
    }
  }

  implicit def rejectionHandler: RejectionHandler = RejectionHandler.newBuilder()
    .handle {
      case MalformedRequestContentRejection(message, e: NotificationErrorException) ⇒ respondWithError(BadRequest, s"$message")
    }
    .handle {
      case MalformedRequestContentRejection(message, ex) ⇒
        logger.error(ex.getMessage)
        respondWithError(BadRequest)
    }
    // note: this case is unreachable — the handler above already matches every
    // MalformedRequestContentRejection; kept as in the source
    .handle {
      case MalformedRequestContentRejection(message, _) ⇒ respondWithError(BadRequest)
    }
    .handle {
      case MalformedHeaderRejection(_, message, _) ⇒ respondWithError(BadRequest, s"$message")
    }
    .handle {
      case ValidationRejection(message, _) ⇒ respondWithError(BadRequest, s"$message")
    }
    .result().withFallback(RejectionHandler.default)

  private def respondWithError(status: StatusCode, message: String = "") = {
    logger.warn("There has been an error with status code {} with message {}", status.value, message)
    respondWith(
      status = status,
      response = "message" → (if (status == InternalServerError) "Internal server error." else message)
    )
  }
}
Example 110
Source File: ActorBootstrap.scala From vamp with Apache License 2.0 | 5 votes |
package io.vamp.common.akka import akka.actor.{ ActorRef, ActorSystem, PoisonPill } import akka.util.Timeout import com.typesafe.scalalogging.Logger import io.vamp.common.{ ClassProvider, Namespace } import org.slf4j.{ LoggerFactory, MDC } import scala.concurrent.Future import scala.reflect.{ ClassTag, classTag } trait Bootstrap extends BootstrapLogger { def start(): Future[Unit] = Future.successful(()) def stop(): Future[Unit] = Future.successful(()) } trait ActorBootstrap extends BootstrapLogger { private var actors: Future[List[ActorRef]] = Future.successful(Nil) def createActors(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[List[ActorRef]] def start(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[Unit] = { info(s"Starting ${getClass.getSimpleName}") actors = createActors(actorSystem, namespace, timeout) actors.map(_ ⇒ ())(actorSystem.dispatcher) } def restart(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[Unit] = { stop.flatMap(_ ⇒ start)(actorSystem.dispatcher) } def stop(implicit actorSystem: ActorSystem, namespace: Namespace): Future[Unit] = { info(s"Stopping ${getClass.getSimpleName}") actors.map(_.reverse.foreach(_ ! PoisonPill))(actorSystem.dispatcher) } def alias[T: ClassTag](name: String, default: String ⇒ Future[ActorRef])(implicit actorSystem: ActorSystem, namespace: Namespace, timeout: Timeout): Future[ActorRef] = { ClassProvider.find[T](name).map { clazz ⇒ IoC.alias(classTag[T].runtimeClass, clazz) IoC.createActor(clazz) } getOrElse default(name) } } trait BootstrapLogger { protected val logger = Logger(LoggerFactory.getLogger(getClass)) protected def info(message: String)(implicit namespace: Namespace): Unit = { MDC.put("namespace", namespace.name) try logger.info(message) finally MDC.remove("namespace") } }
Example 111
Source File: MessageResolver.scala From vamp with Apache License 2.0 | 5 votes |
package io.vamp.common.notification import com.typesafe.scalalogging.Logger import org.slf4j.LoggerFactory import org.yaml.snakeyaml.Yaml import scala.collection.JavaConverters._ import scala.collection.mutable import scala.io.Source import scala.language.postfixOps trait MessageResolverProvider { val messageResolver: MessageResolver trait MessageResolver { def resolve(implicit notification: Notification): String } } trait DefaultPackageMessageResolverProvider extends MessageResolverProvider { val messageResolver: MessageResolver = new DefaultPackageMessageResolver() private class DefaultPackageMessageResolver extends MessageResolver { protected case class Message(parts: Seq[String], args: Seq[String]) private val logger = Logger(LoggerFactory.getLogger(classOf[Notification])) private val messages = new mutable.LinkedHashMap[String, mutable.Map[String, Any]]() def resolve(implicit notification: Notification): String = { try { val name = notification.getClass.getSimpleName val messageSource = resolveMessageSource messageSource.get(name) match { case None ⇒ logger.warn(s"No mapping for ${notification.getClass}") defaultMapping(error = false) case Some(value: Message) ⇒ resolveMessageValue(value) case Some(value: Any) ⇒ val message = parseMessage(value.toString) messageSource.put(name, message) resolveMessageValue(message) } } catch { case e: NoSuchMethodException ⇒ val field = e.getMessage.substring(e.getMessage.lastIndexOf('.') + 1, e.getMessage.length - 2) logger.error(s"Message mapping error: field '$field' not defined for ${notification.getClass}") defaultMapping() case e: Exception ⇒ logger.error(e.getMessage, e) defaultMapping() } } protected def defaultMapping(error: Boolean = true)(implicit notification: Notification): String = if (error) "Error." else "Notification." protected def resolveMessageSource(implicit notification: Notification): mutable.Map[String, Any] = { val packageName = notification.getClass.getPackage.toString messages.get(packageName) match { case None ⇒ val reader = Source.fromURL(notification.getClass.getResource("messages.yml")).bufferedReader() try { val input = new Yaml().load(reader).asInstanceOf[java.util.Map[String, Any]].asScala messages.put(packageName, input) input } finally { reader.close() } case Some(map) ⇒ map } } protected def parseMessage(message: String)(implicit notification: Notification): Message = { val pattern = "\\{[^}]+\\}" r val parts = pattern split message val args = (pattern findAllIn message).map(s ⇒ s.substring(1, s.length - 1)).toList Message(parts, args) } protected def resolveMessageValue(message: Message)(implicit notification: Notification): String = { val pi = message.parts.iterator val ai = message.args.iterator val sb = new StringBuilder() while (ai.hasNext) { sb append pi.next sb append ai.next().split('.').foldLeft(notification.asInstanceOf[AnyRef])((arg1, arg2) ⇒ arg1.getClass.getDeclaredMethod(arg2).invoke(arg1)).toString } if (pi.hasNext) sb append pi.next sb.toString() } } }
Example 112
Source File: EventSource.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.client

import java.util.UUID

import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.headers.OAuth2BearerToken
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.persistence.query.{NoOffset, Offset, Sequence, TimeBasedUUID}
import akka.stream.Materializer
import akka.stream.alpakka.sse.scaladsl.{EventSource => SSESource}
import akka.stream.scaladsl.Source
import ch.epfl.bluebrain.nexus.rdf.implicits._
import ch.epfl.bluebrain.nexus.iam.client.types.AuthToken
import ch.epfl.bluebrain.nexus.rdf.Iri.AbsoluteIri
import com.typesafe.scalalogging.Logger
import io.circe.Decoder
import io.circe.parser.decode

import scala.concurrent.{ExecutionContext, Future}
import scala.util.Try

trait EventSource[A] {

  // the abstract member was elided by the example page; its signature is
  // reconstructed here from the override in the factory below
  def apply(iri: AbsoluteIri, offset: Option[String])(
      implicit cred: Option[AuthToken]
  ): Source[(Offset, A), NotUsed]
}

object EventSource {

  // the page text had merged this factory into the trait body (note the shadowed
  // type parameter); it is restored to a companion object here
  def apply[A: Decoder](
      config: KgClientConfig
  )(implicit as: ActorSystem, mt: Materializer, ec: ExecutionContext): EventSource[A] =
    new EventSource[A] {
      private val logger = Logger[this.type]
      private val http   = Http()

      private def addCredentials(request: HttpRequest)(implicit cred: Option[AuthToken]): HttpRequest =
        cred.map(token => request.addCredentials(OAuth2BearerToken(token.value))).getOrElse(request)

      private def send(request: HttpRequest)(implicit cred: Option[AuthToken]): Future[HttpResponse] =
        http.singleRequest(addCredentials(request)).map { resp =>
          if (!resp.status.isSuccess())
            logger.warn(s"HTTP response when performing SSE request: status = '${resp.status}'")
          resp
        }

      private def toOffset(id: String): Offset =
        Try(TimeBasedUUID(UUID.fromString(id))).orElse(Try(Sequence(id.toLong))).getOrElse(NoOffset)

      override def apply(iri: AbsoluteIri, offset: Option[String])(
          implicit cred: Option[AuthToken]
      ): Source[(Offset, A), NotUsed] =
        SSESource(iri.asAkka, send, offset, config.sseRetryDelay).flatMapConcat { sse =>
          val offset = sse.id.map(toOffset).getOrElse(NoOffset)
          decode[A](sse.data) match {
            case Right(ev) => Source.single(offset -> ev)
            case Left(err) =>
              logger.error(s"Failed to decode admin event '$sse'", err)
              Source.empty
          }
        }
    }
}
Example 113
Source File: SparkOperationTestPimpers.scala From sparkplug with MIT License | 5 votes |
package springnz.sparkplug.testkit import com.typesafe.scalalogging.{ LazyLogging, Logger } import org.apache.spark.rdd.RDD import org.apache.spark.sql.{ DataFrame, SQLContext } import springnz.sparkplug.core.SparkOperation import springnz.sparkplug.util.Logging import scala.reflect.ClassTag object SparkOperationTestPimpers extends LazyLogging { private def persistTestResource[A: ClassTag](rdd: RDD[A], rddName: String, overwrite: Boolean = false)( implicit projectName: ProjectName): RDD[A] = { val path = RDDPersister.getPath(projectName.name, rddName) if (overwrite || (!overwrite && !path.exists)) { if (path.exists) { logger.info(s"deleting existing RDD at ${path.pathAsString}") path.delete() } RDDPersister.persistRDD(path.pathAsString, rdd) } else { // (!overwrite && path.exists) logger.info(s"Not persisting RDD that already exists at path [${path.pathAsString}]") rdd } } class RDDExtensions[A: ClassTag](operation: SparkOperation[RDD[A]]) { import RDDSamplers._ def saveTo(rddName: String, sampler: RDD[A] ⇒ RDD[A] = identitySampler)( implicit projectName: ProjectName): SparkOperation[RDD[A]] = operation.map { rdd ⇒ val sampled = sampler(rdd) persistTestResource(sampled, rddName, overwrite = false) sampled } def sourceFrom(rddName: String, sampler: RDD[A] ⇒ RDD[A] = identitySampler)( implicit projectName: ProjectName): SparkOperation[RDD[A]] = SparkOperation { ctx ⇒ val path = RDDPersister.getPath(projectName.name, rddName) if (path.exists) ctx.objectFile[A](path.pathAsString) else { val rdd = operation.run(ctx) val sampled = sampler(rdd) persistTestResource(sampled, rddName, overwrite = false) sampled } } } class DataFrameExtensions(operation: SparkOperation[DataFrame]) { import RDDSamplers._ def saveTo(rddName: String, overwrite: Boolean = false, sampler: RDD[String] ⇒ RDD[String] = identitySampler)( implicit projectName: ProjectName): SparkOperation[DataFrame] = operation.map { df ⇒ val rdd: RDD[String] = df.toJSON val sampled = sampler(rdd) persistTestResource(sampled, rddName, overwrite) val sqlContext = new SQLContext(sampled.sparkContext) sqlContext.read.json(sampled) } def sourceFrom(dataFrameName: String, overwrite: Boolean = false, sampler: RDD[String] ⇒ RDD[String] = rdd ⇒ rdd)( implicit projectName: ProjectName, log: Logger): SparkOperation[DataFrame] = SparkOperation { ctx ⇒ val path = RDDPersister.getPath(projectName.name, dataFrameName) val sampledRDD = if (path.exists) ctx.objectFile[String](path.pathAsString) else { val df = operation.run(ctx) val rdd: RDD[String] = df.toJSON val sampled = sampler(rdd) persistTestResource(sampled, dataFrameName, overwrite) sampled } val sqlContext = new SQLContext(ctx) sqlContext.read.json(sampledRDD) } } }
Example 114
Source File: Pimpers.scala From sparkplug with MIT License | 5 votes |
package springnz.sparkplug.util import com.typesafe.scalalogging.Logger import scala.concurrent.{ ExecutionContext, Future } import scala.language.implicitConversions import scala.util.{ Failure, Try } private[sparkplug] object Pimpers { implicit class TryPimper[A](t: Try[A]) { def withErrorLog(msg: String)(implicit log: Logger): Try[A] = t.recoverWith { case e ⇒ log.error(msg, e) Failure(e) } def withFinally[T](block: ⇒ T): Try[A] = { block t } } implicit class FuturePimper[T](f: Future[T]) { def withErrorLog(msg: String)(implicit log: Logger, ec: ExecutionContext): Future[T] = { f.onFailure { case e ⇒ log.error(msg, e) } f } } implicit def map2Properties(map: Map[String, String]): java.util.Properties = { (new java.util.Properties /: map) { case (props, (k, v)) ⇒ props.put(k, v); props } } }
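A short usage sketch of the `Try` pimps above. Since Pimpers is `private[sparkplug]`, the sketch lives in a hypothetical sub-package of springnz.sparkplug:

package springnz.sparkplug.example // hypothetical sub-package; Pimpers is private[sparkplug]

import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory
import springnz.sparkplug.util.Pimpers._

import scala.util.Try

object PimpersSketch {
  implicit val log: Logger = Logger(LoggerFactory.getLogger(getClass))

  def main(args: Array[String]): Unit = {
    val result = Try("not-a-number".toInt)
      .withErrorLog("failed to parse input")    // logs the error, keeps the Failure
      .withFinally(println("attempt finished")) // side effect runs in either case
    println(result) // Failure(java.lang.NumberFormatException: ...)
  }
}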
Example 115
Source File: AutoTestSuite.scala From jvm-toxcore-c with GNU General Public License v3.0 | 5 votes |
package im.tox.tox4j.testing.autotest import com.typesafe.scalalogging.Logger import im.tox.tox4j.TestConstants import im.tox.tox4j.core.data.ToxFriendNumber import im.tox.tox4j.core.enums.ToxConnection import im.tox.tox4j.core.options.ToxOptions import im.tox.tox4j.impl.jni.{ ToxAvImplFactory, ToxCoreImplFactory } import im.tox.tox4j.testing.autotest.AutoTest.ClientState import org.scalatest.FunSuite import org.scalatest.concurrent.Timeouts import org.slf4j.LoggerFactory import shapeless.<:!< import scala.util.Random object AutoTestSuite { sealed abstract class Timed[A, R] { protected def wrap(time: Int, result: A): R def timed(block: => A): R = { val start = System.currentTimeMillis() val result = block val end = System.currentTimeMillis() wrap((end - start).toInt, result) } } implicit def otherTimed[A](implicit notUnit: A <:!< Unit): Timed[A, (Int, A)] = new Timed[A, (Int, A)] { protected def wrap(time: Int, result: A): (Int, A) = (time, result) } implicit val unitTimed: Timed[Unit, Int] = new Timed[Unit, Int] { protected def wrap(time: Int, result: Unit): Int = time } def timed[A, R](block: => A)(implicit timed: Timed[A, R]): R = timed.timed(block) } abstract class AutoTestSuite extends FunSuite with Timeouts { private val logger = Logger(LoggerFactory.getLogger(getClass)) protected def maxParticipantCount: Int = 2 type S abstract class EventListener(val initial: S) extends AutoTest.EventListener[S] { override def selfConnectionStatus( connectionStatus: ToxConnection )(state: State): State = { debug(state, s"Our connection: $connectionStatus") state } override def friendConnectionStatus( friendNumber: ToxFriendNumber, connectionStatus: ToxConnection )(state: State): State = { debug(state, s"Friend ${state.id(friendNumber)}'s connection: $connectionStatus") state } } def Handler: EventListener // scalastyle:ignore method.name protected def debug(state: ClientState[S], message: String): Unit = { logger.debug(s"[${state.id}] $message") } @SuppressWarnings(Array("org.wartremover.warts.Equals")) def run(ipv6Enabled: Boolean = true, udpEnabled: Boolean = true): Unit = { failAfter(TestConstants.Timeout) { val participantCount = if (maxParticipantCount == 2) { maxParticipantCount } else { new Random().nextInt(maxParticipantCount - 2) + 2 } AutoTest(ToxCoreImplFactory, ToxAvImplFactory).run(participantCount, ToxOptions(ipv6Enabled, udpEnabled), Handler) } } test("UDP")(run(ipv6Enabled = true, udpEnabled = true)) }
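The `timed` helper above picks its return type from the block's result type via the two implicits. A quick sketch of both shapes:

import im.tox.tox4j.testing.autotest.AutoTestSuite.timed

object TimedSketch {
  def main(args: Array[String]): Unit = {
    // Unit block: only the elapsed milliseconds come back (unitTimed)
    val slept: Int = timed(Thread.sleep(50))
    // non-Unit block: elapsed milliseconds paired with the result (otherTimed)
    val (elapsed, sum) = timed((1 to 1000000).sum)
    println(s"slept ~$slept ms; summing took $elapsed ms and returned $sum")
  }
}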
Example 116
Source File: AliceBobTestBase.scala From jvm-toxcore-c with GNU General Public License v3.0 | 5 votes |
package im.tox.tox4j.testing.autotest import com.typesafe.scalalogging.Logger import im.tox.tox4j.OptimisedIdOps._ import im.tox.tox4j.av.ToxAv import im.tox.tox4j.core.ToxCore import im.tox.tox4j.core.data.ToxFriendNumber import im.tox.tox4j.testing.ToxTestMixin import im.tox.tox4j.testing.autotest.AliceBobTestBase.Chatter import org.scalatest.FunSuite import org.slf4j.LoggerFactory import scala.annotation.tailrec object AliceBobTestBase { val FriendNumber: ToxFriendNumber = ToxFriendNumber.fromInt(10).get final case class Chatter[T]( tox: ToxCore, av: ToxAv, client: ChatClientT[T], state: ChatStateT[T] ) } abstract class AliceBobTestBase extends FunSuite with ToxTestMixin { protected val logger = Logger(LoggerFactory.getLogger(classOf[AliceBobTestBase])) protected type State protected type ChatState = ChatStateT[State] protected type ChatClient = ChatClientT[State] protected def initialState: State protected def newChatClient(name: String, expectedFriendName: String): ChatClient @SuppressWarnings(Array("org.wartremover.warts.Equals")) private def getTopLevelMethod(stackTrace: Seq[StackTraceElement]): String = { stackTrace .filter(_.getClassName == classOf[AliceBobTest].getName) .lastOption .fold("<unknown>")(_.getMethodName) } @tailrec private def mainLoop(clients: Seq[Chatter[State]]): Unit = { val nextState = clients.map { case Chatter(tox, av, client, state) => Chatter[State](tox, av, client, state |> tox.iterate(client) |> (_.runTasks(tox, av))) } val interval = (nextState.map(_.tox.iterationInterval) ++ nextState.map(_.av.iterationInterval)).min Thread.sleep(interval) if (nextState.exists(_.state.chatting)) { mainLoop(nextState) } } @SuppressWarnings(Array("org.wartremover.warts.Equals")) protected def runAliceBobTest( withTox: (ToxCore => Unit) => Unit, withToxAv: ToxCore => (ToxAv => Unit) => Unit ): Unit = { val method = getTopLevelMethod(Thread.currentThread.getStackTrace) logger.info(s"[${Thread.currentThread.getId}] --- ${getClass.getSimpleName}.$method") val aliceChat = newChatClient("Alice", "Bob") val bobChat = newChatClient("Bob", "Alice") withTox { alice => withTox { bob => withToxAv(alice) { aliceAv => withToxAv(bob) { bobAv => assert(alice ne bob) addFriends(alice, AliceBobTestBase.FriendNumber.value) addFriends(bob, AliceBobTestBase.FriendNumber.value) alice.addFriendNorequest(bob.getPublicKey) bob.addFriendNorequest(alice.getPublicKey) aliceChat.expectedFriendAddress = bob.getAddress bobChat.expectedFriendAddress = alice.getAddress val aliceState = aliceChat.setup(alice)(ChatStateT[State](initialState)) val bobState = bobChat.setup(bob)(ChatStateT[State](initialState)) mainLoop(Seq( Chatter(alice, aliceAv, aliceChat, aliceState), Chatter(bob, bobAv, bobChat, bobState) )) } } } } } }
Example 117
Source File: DhtNodeSelector.scala From jvm-toxcore-c with GNU General Public License v3.0 | 5 votes |
package im.tox.tox4j

import java.io.IOException
import java.net.{ InetAddress, Socket }

import com.typesafe.scalalogging.Logger
import im.tox.tox4j.core.ToxCore
import im.tox.tox4j.impl.jni.ToxCoreImplFactory
import org.scalatest.Assertions
import org.slf4j.LoggerFactory

object DhtNodeSelector extends Assertions {

  private val logger = Logger(LoggerFactory.getLogger(this.getClass))
  private var selectedNode: Option[DhtNode] = Some(ToxCoreTestBase.nodeCandidates(0))

  @SuppressWarnings(Array("org.wartremover.warts.Equals"))
  private def tryConnect(node: DhtNode): Option[DhtNode] = {
    var socket: Socket = null
    try {
      socket = new Socket(InetAddress.getByName(node.ipv4), node.udpPort.value)
      assume(socket.getInputStream != null)
      Some(node)
    } catch {
      case e: IOException =>
        logger.info(s"TCP connection failed (${e.getMessage})")
        None
    } finally {
      if (socket != null) {
        socket.close()
      }
    }
  }

  private def tryBootstrap(
    withTox: (Boolean, Boolean) => (ToxCore => Option[DhtNode]) => Option[DhtNode],
    node: DhtNode,
    udpEnabled: Boolean
  ): Option[DhtNode] = {
    val protocol = if (udpEnabled) "UDP" else "TCP"
    val port = if (udpEnabled) node.udpPort else node.tcpPort
    logger.info(s"Trying to bootstrap with ${node.ipv4}:$port using $protocol")

    withTox(true, udpEnabled) { tox =>
      val status = new ConnectedListener
      if (!udpEnabled) {
        tox.addTcpRelay(node.ipv4, port, node.dhtId)
      }
      tox.bootstrap(node.ipv4, port, node.dhtId)

      // Try bootstrapping for 10 seconds.
      (0 to 10000 / tox.iterationInterval) find { _ =>
        tox.iterate(status)(())
        Thread.sleep(tox.iterationInterval)
        status.isConnected
      } match {
        case Some(time) =>
          logger.info(s"Bootstrapped successfully after ${time * tox.iterationInterval}ms using $protocol")
          Some(node)
        case None =>
          logger.info(s"Unable to bootstrap with $protocol")
          None
      }
    }
  }

  private def findNode(withTox: (Boolean, Boolean) => (ToxCore => Option[DhtNode]) => Option[DhtNode]): DhtNode = {
    DhtNodeSelector.selectedNode match {
      case Some(node) => node
      case None =>
        logger.info("Looking for a working bootstrap node")

        DhtNodeSelector.selectedNode = ToxCoreTestBase.nodeCandidates find { node =>
          logger.info(s"Trying to establish a TCP connection to ${node.ipv4}")

          (for {
            node <- tryConnect(node)
            node <- tryBootstrap(withTox, node, udpEnabled = true)
            node <- tryBootstrap(withTox, node, udpEnabled = false)
          } yield node).isDefined
        }

        assume(DhtNodeSelector.selectedNode.nonEmpty, "No viable nodes for bootstrap found; cannot test")
        DhtNodeSelector.selectedNode.get
    }
  }

  def node: DhtNode = findNode(ToxCoreImplFactory.withToxUnit[Option[DhtNode]])

}
Example 118
Source File: ToxImplBase.scala From jvm-toxcore-c with GNU General Public License v3.0 | 5 votes |
package im.tox.tox4j.impl

import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

import scala.util.control.NonFatal

object ToxImplBase {

  private val logger = Logger(LoggerFactory.getLogger(this.getClass))

  @SuppressWarnings(Array("org.brianmckenna.wartremover.warts.Throw"))
  def tryAndLog[ToxCoreState, T](fatal: Boolean, state: ToxCoreState, eventHandler: T)(callback: T => ToxCoreState => ToxCoreState): ToxCoreState = {
    if (!fatal) {
      try {
        callback(eventHandler)(state)
      } catch {
        case NonFatal(e) =>
          logger.warn("Exception caught while executing " + eventHandler.getClass.getName, e)
          state
      }
    } else {
      callback(eventHandler)(state)
    }
  }

}
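This example leans on two details of the Logger API: the warn overload that takes a Throwable, which records the stack trace alongside the message, and the fact that the logging methods are macros, so their arguments are only evaluated when the corresponding level is enabled. A minimal sketch of the same catch-and-log pattern (Fallback and orElse are hypothetical names):

import com.typesafe.scalalogging.Logger

import scala.util.control.NonFatal

object Fallback {
  private val logger = Logger[Fallback.type]

  def orElse[A](default: A)(block: => A): A =
    try block
    catch {
      case NonFatal(e) =>
        // Throwable overload: logs the message plus the stack trace; the
        // interpolated string is only built if WARN is enabled, thanks to the macro.
        logger.warn(s"Falling back to $default", e)
        default
    }
}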
Example 119
Source File: RepairFromMessages.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg

import java.net.URLDecoder
import java.util.UUID

import akka.actor.ActorSystem
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.PersistenceQuery
import ch.epfl.bluebrain.nexus.kg.resources.{Id, Repo, ResId}
import ch.epfl.bluebrain.nexus.kg.resources.ProjectIdentifier.ProjectRef
import ch.epfl.bluebrain.nexus.rdf.Iri
import com.typesafe.scalalogging.Logger
import monix.eval.Task
import monix.execution.Scheduler
import monix.execution.schedulers.CanBlock

import scala.concurrent.Future
import scala.util.Try

object RepairFromMessages {
  // $COVERAGE-OFF$
  private val log = Logger[RepairFromMessages.type]

  def repair(repo: Repo[Task])(implicit as: ActorSystem, sc: Scheduler, pm: CanBlock): Unit = {
    log.info("Repairing dependent tables from messages.")
    val pq = PersistenceQuery(as).readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)
    Task
      .fromFuture {
        pq.currentPersistenceIds()
          .mapAsync(1) {
            case ResourceId(id) => (repo.get(id, None).value >> Task.unit).runToFuture
            case other =>
              log.warn(s"Unknown persistence id '$other'")
              Future.successful(())
          }
          .runFold(0) {
            case (acc, _) =>
              if (acc % 1000 == 0) log.info(s"Processed '$acc' persistence ids.")
              acc + 1
          }
          .map(_ => ())
      }
      .runSyncUnsafe()
    log.info("Finished repairing dependent tables from messages.")
  }

  object ResourceId {
    private val regex =
      "^resources\\-([0-9a-fA-F]{8}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{12})\\-(.+)$".r
    def unapply(arg: String): Option[ResId] =
      arg match {
        case regex(stringUuid, stringId) =>
          for {
            uuid <- Try(UUID.fromString(stringUuid)).toOption
            iri  <- Iri.absolute(URLDecoder.decode(stringId, "UTF-8")).toOption
          } yield Id(ProjectRef(uuid), iri)
        case _ => None
      }
  }
  // $COVERAGE-ON$
}
Example 120
Source File: package.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg

import cats.Monad
import cats.implicits._
import ch.epfl.bluebrain.nexus.kg.resources.syntax._
import ch.epfl.bluebrain.nexus.admin.client.types.Project
import ch.epfl.bluebrain.nexus.iam.client.types.{AccessControlList, AccessControlLists}
import ch.epfl.bluebrain.nexus.kg.cache.ProjectCache
import com.typesafe.scalalogging.Logger

package object async {

  def resolveProjects[F[_]](
      acls: AccessControlLists
  )(implicit projectCache: ProjectCache[F], log: Logger, F: Monad[F]): F[Map[Project, AccessControlList]] =
    acls.value.foldLeft(F.pure(Map.empty[Project, AccessControlList])) {
      case (fProjectsMap, (path, resourceAcl)) =>
        val acl = resourceAcl.value
        for {
          projectMap <- fProjectsMap
          projects   <- path.resolveProjects
        } yield projects.foldLeft(projectMap)(
          (acc, project) => acc + (project -> acc.get(project).map(_ ++ acl).getOrElse(acl))
        )
    }
}
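Here the Logger is not created at the use site but threaded through as an implicit parameter, so the caller decides which named logger resolveProjects writes to. A minimal sketch of the same pattern (Tracing, traced, and the labels are hypothetical):

import com.typesafe.scalalogging.Logger

object Tracing {
  // Logs entry and exit around an arbitrary block, using whichever
  // Logger the caller has in implicit scope.
  def traced[A](label: String)(body: => A)(implicit log: Logger): A = {
    log.debug(s"entering $label")
    val result = body
    log.debug(s"leaving $label")
    result
  }
}

// usage: the caller supplies the logger implicitly
// implicit val log: Logger = Logger("async")
// Tracing.traced("resolveProjects") { doWork() }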
Example 121
Source File: NotificationProvider.scala From vamp with Apache License 2.0 | 5 votes |
package io.vamp.common.notification

import akka.actor.{ AbstractLoggingActor, Actor, ActorRef }
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory

trait NotificationProvider {
  def message(notification: Notification): String

  def info(notification: Notification)

  def reportException(notification: Notification): Exception

  def throwException(notification: Notification) = throw reportException(notification)
}

trait LoggingNotificationProvider extends NotificationProvider {
  this: MessageResolverProvider ⇒

  private val logger = Logger(LoggerFactory.getLogger(classOf[Notification]))

  def message(notification: Notification) = messageResolver.resolve(notification)

  def info(notification: Notification) = logger.info(message(notification))

  def reportException(notification: Notification): Exception = {
    val msg = message(notification)
    logger.error(msg)

    notification match {
      case error: ErrorNotification ⇒ error.reason match {
        case reason: Throwable ⇒ logger.error(reason.getMessage, reason)
        case reason            ⇒ logger.error(reason.toString)
      }
      case _ ⇒
    }

    NotificationErrorException(notification, msg)
  }
}

trait ActorNotificationProvider extends NotificationProvider {
  this: Actor with MessageResolverProvider ⇒

  protected val notificationActor: ActorRef

  def message(notification: Notification) = messageResolver.resolve(notification)

  def info(notification: Notification) = {
    notificationActor ! Info(notification, message(notification))
  }

  def reportException(notification: Notification): Exception = {
    val msg = message(notification)
    notificationActor ! Error(notification, msg)
    NotificationErrorException(notification, msg)
  }
}

trait ActorLoggingNotificationProvider extends NotificationProvider {
  this: AbstractLoggingActor with MessageResolverProvider ⇒

  protected val notificationActor: ActorRef

  def message(notification: Notification) = messageResolver.resolve(notification)

  def info(notification: Notification) = {
    val msg = message(notification)
    log.info(msg)
    notificationActor ! Info(notification, msg)
  }

  def reportException(notification: Notification): Exception = {
    val msg = message(notification)
    log.error(msg)
    notificationActor ! Error(notification, msg)
    NotificationErrorException(notification, msg)
  }
}
Example 122
Source File: ViewIndexer.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.indexing

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Source}
import akka.util.Timeout
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.AdminClient
import ch.epfl.bluebrain.nexus.kg.cache.{ProjectCache, ViewCache}
import ch.epfl.bluebrain.nexus.kg.config.AppConfig
import ch.epfl.bluebrain.nexus.kg.config.AppConfig._
import ch.epfl.bluebrain.nexus.kg.config.Vocabulary.nxv
import ch.epfl.bluebrain.nexus.kg.resources._
import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.{PairMsg, ProgressFlowElem}
import ch.epfl.bluebrain.nexus.sourcing.projections._
import com.typesafe.scalalogging.Logger

import scala.concurrent.ExecutionContext

// $COVERAGE-OFF$
object ViewIndexer {

  private implicit val log = Logger[ViewIndexer.type]

  def start[F[_]: Timer](views: Views[F], viewCache: ViewCache[F])(
      implicit projectCache: ProjectCache[F],
      F: Effect[F],
      as: ActorSystem,
      projectInitializer: ProjectInitializer[F],
      adminClient: AdminClient[F],
      config: AppConfig
  ): StreamSupervisor[F, Unit] = {

    implicit val authToken                = config.iam.serviceAccountToken
    implicit val indexing: IndexingConfig = config.keyValueStore.indexing
    implicit val ec: ExecutionContext     = as.dispatcher
    implicit val tm: Timeout              = Timeout(config.keyValueStore.askTimeout)

    val name = "view-indexer"

    def toView(event: Event): F[Option[View]] =
      fetchProject(event.organization, event.id.parent, event.subject).flatMap { implicit project =>
        views.fetchView(event.id).value.map {
          case Left(err) =>
            log.error(s"Error on event '${event.id.show} (rev = ${event.rev})', cause: '${err.msg}'")
            None
          case Right(view) => Some(view)
        }
      }

    val source: Source[PairMsg[Any], _] = cassandraSource(s"type=${nxv.View.value.show}", name)
    val flow: Flow[PairMsg[Any], Unit, _] = ProgressFlowElem[F, Any]
      .collectCast[Event]
      .groupedWithin(indexing.batch, indexing.batchTimeout)
      .distinct()
      .mergeEmit()
      .mapAsync(toView)
      .collectSome[View]
      .runAsync(viewCache.put)()
      .flow
      .map(_ => ())

    StreamSupervisor.startSingleton(F.delay(source.via(flow)), name)
  }
}
// $COVERAGE-ON$
Example 123
Source File: ResolverIndexer.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.indexing

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Source}
import akka.util.Timeout
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.AdminClient
import ch.epfl.bluebrain.nexus.kg.cache.{ProjectCache, ResolverCache}
import ch.epfl.bluebrain.nexus.kg.config.AppConfig
import ch.epfl.bluebrain.nexus.kg.config.AppConfig._
import ch.epfl.bluebrain.nexus.kg.config.Vocabulary.nxv
import ch.epfl.bluebrain.nexus.kg.resolve.Resolver
import ch.epfl.bluebrain.nexus.kg.resources._
import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.{PairMsg, ProgressFlowElem}
import ch.epfl.bluebrain.nexus.sourcing.projections._
import com.typesafe.scalalogging.Logger

import scala.concurrent.ExecutionContext

// $COVERAGE-OFF$
object ResolverIndexer {

  private implicit val log = Logger[ResolverIndexer.type]

  final def start[F[_]: Timer](resolvers: Resolvers[F], resolverCache: ResolverCache[F])(
      implicit projectCache: ProjectCache[F],
      as: ActorSystem,
      F: Effect[F],
      projectInitializer: ProjectInitializer[F],
      adminClient: AdminClient[F],
      config: AppConfig
  ): StreamSupervisor[F, Unit] = {

    implicit val authToken                = config.iam.serviceAccountToken
    implicit val indexing: IndexingConfig = config.keyValueStore.indexing
    implicit val ec: ExecutionContext     = as.dispatcher
    implicit val tm: Timeout              = Timeout(config.keyValueStore.askTimeout)

    val name = "resolver-indexer"

    def toResolver(event: Event): F[Option[Resolver]] =
      fetchProject(event.organization, event.id.parent, event.subject).flatMap { implicit project =>
        resolvers.fetchResolver(event.id).value.map {
          case Left(err) =>
            log.error(s"Error on event '${event.id.show} (rev = ${event.rev})', cause: '${err.msg}'")
            None
          case Right(resolver) => Some(resolver)
        }
      }

    val source: Source[PairMsg[Any], _] = cassandraSource(s"type=${nxv.Resolver.value.show}", name)
    val flow: Flow[PairMsg[Any], Unit, _] = ProgressFlowElem[F, Any]
      .collectCast[Event]
      .groupedWithin(indexing.batch, indexing.batchTimeout)
      .distinct()
      .mergeEmit()
      .mapAsync(toResolver)
      .collectSome[Resolver]
      .runAsync(resolverCache.put)()
      .flow
      .map(_ => ())

    StreamSupervisor.startSingleton(F.delay(source.via(flow)), name)
  }
}
// $COVERAGE-ON$
Example 124
Source File: StorageIndexer.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.indexing

import java.time.Instant

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Source}
import akka.util.Timeout
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.AdminClient
import ch.epfl.bluebrain.nexus.kg.cache.{ProjectCache, StorageCache}
import ch.epfl.bluebrain.nexus.kg.config.AppConfig
import ch.epfl.bluebrain.nexus.kg.config.AppConfig._
import ch.epfl.bluebrain.nexus.kg.config.Vocabulary.nxv
import ch.epfl.bluebrain.nexus.kg.resources._
import ch.epfl.bluebrain.nexus.kg.storage.Storage
import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.{PairMsg, ProgressFlowElem}
import ch.epfl.bluebrain.nexus.sourcing.projections._
import com.typesafe.scalalogging.Logger

import scala.concurrent.ExecutionContext

// $COVERAGE-OFF$
object StorageIndexer {

  private implicit val log = Logger[StorageIndexer.type]

  def start[F[_]: Timer](storages: Storages[F], storageCache: StorageCache[F])(
      implicit projectCache: ProjectCache[F],
      F: Effect[F],
      as: ActorSystem,
      projectInitializer: ProjectInitializer[F],
      adminClient: AdminClient[F],
      config: AppConfig
  ): StreamSupervisor[F, Unit] = {

    implicit val authToken                = config.iam.serviceAccountToken
    implicit val indexing: IndexingConfig = config.keyValueStore.indexing
    implicit val ec: ExecutionContext     = as.dispatcher
    implicit val tm: Timeout              = Timeout(config.keyValueStore.askTimeout)

    val name = "storage-indexer"

    def toStorage(event: Event): F[Option[(Storage, Instant)]] =
      fetchProject(event.organization, event.id.parent, event.subject).flatMap { implicit project =>
        storages.fetchStorage(event.id).value.map {
          case Left(err) =>
            log.error(s"Error on event '${event.id.show} (rev = ${event.rev})', cause: '${err.msg}'")
            None
          case Right(timedStorage) => Some(timedStorage)
        }
      }

    val source: Source[PairMsg[Any], _] = cassandraSource(s"type=${nxv.Storage.value.show}", name)
    val flow: Flow[PairMsg[Any], Unit, _] = ProgressFlowElem[F, Any]
      .collectCast[Event]
      .groupedWithin(indexing.batch, indexing.batchTimeout)
      .distinct()
      .mergeEmit()
      .mapAsync(toStorage)
      .collectSome[(Storage, Instant)]
      .runAsync { case (storage, instant) => storageCache.put(storage)(instant) }()
      .flow
      .map(_ => ())

    StreamSupervisor.startSingleton(F.delay(source.via(flow)), name)
  }
}
// $COVERAGE-ON$
Example 125
Source File: ElasticSearchIndexer.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.indexing

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.stream.scaladsl.Source
import akka.util.Timeout
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.types.Project
import ch.epfl.bluebrain.nexus.commons.es.client.ElasticSearchClient
import ch.epfl.bluebrain.nexus.commons.es.client.ElasticSearchClient.BulkOp
import ch.epfl.bluebrain.nexus.kg.config.AppConfig
import ch.epfl.bluebrain.nexus.kg.config.AppConfig._
import ch.epfl.bluebrain.nexus.kg.indexing.View.ElasticSearchView
import ch.epfl.bluebrain.nexus.kg.resources._
import ch.epfl.bluebrain.nexus.kg.routes.Clients
import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.ProgressFlowElem
import ch.epfl.bluebrain.nexus.sourcing.projections.ProjectionProgress.NoProgress
import ch.epfl.bluebrain.nexus.sourcing.projections._
import com.typesafe.scalalogging.Logger

import scala.concurrent.ExecutionContext

// $COVERAGE-OFF$
@SuppressWarnings(Array("MaxParameters"))
object ElasticSearchIndexer {

  private implicit val log: Logger = Logger[ElasticSearchIndexer.type]

  final def start[F[_]: Timer](
      view: ElasticSearchView,
      resources: Resources[F],
      project: Project,
      restartOffset: Boolean
  )(
      implicit as: ActorSystem,
      actorInitializer: (Props, String) => ActorRef,
      projections: Projections[F, String],
      F: Effect[F],
      clients: Clients[F],
      config: AppConfig
  ): StreamSupervisor[F, ProjectionProgress] = {

    implicit val ec: ExecutionContext          = as.dispatcher
    implicit val p: Project                    = project
    implicit val indexing: IndexingConfig      = config.elasticSearch.indexing
    implicit val metadataOpts: MetadataOptions = MetadataOptions(linksAsIri = true, expandedLinks = true)
    implicit val tm: Timeout                   = Timeout(config.elasticSearch.askTimeout)

    val client: ElasticSearchClient[F] = clients.elasticSearch.withRetryPolicy(config.elasticSearch.indexing.retry)

    def deleteOrIndex(res: ResourceV): Option[BulkOp] =
      if (res.deprecated && !view.filter.includeDeprecated) Some(delete(res))
      else view.toDocument(res).map(doc => BulkOp.Index(view.index, res.id.value.asString, doc))

    def delete(res: ResourceV): BulkOp =
      BulkOp.Delete(view.index, res.id.value.asString)

    val initFetchProgressF: F[ProjectionProgress] =
      if (restartOffset)
        projections.recordProgress(view.progressId, NoProgress) >> view.createIndex >> F.pure(NoProgress)
      else view.createIndex >> projections.progress(view.progressId)

    val sourceF: F[Source[ProjectionProgress, _]] = initFetchProgressF.map { initial =>
      val flow = ProgressFlowElem[F, Any]
        .collectCast[Event]
        .groupedWithin(indexing.batch, indexing.batchTimeout)
        .distinct()
        .mapAsync(view.toResource(resources, _))
        .collectSome[ResourceV]
        .collect {
          case res if view.allowedSchemas(res) && view.allowedTypes(res) => deleteOrIndex(res)
          case res if view.allowedSchemas(res)                           => Some(delete(res))
        }
        .collectSome[BulkOp]
        .runAsyncBatch(client.bulk(_))()
        .mergeEmit()
        .toPersistedProgress(view.progressId, initial)

      cassandraSource(s"project=${view.ref.id}", view.progressId, initial.minProgress.offset)
        .via(flow)
        .via(kamonViewMetricsFlow(view, project))
    }

    StreamSupervisor.start(sourceF, view.progressId, actorInitializer)
  }
}
// $COVERAGE-ON$
Example 126
Source File: AuthDirectives.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.directives

import akka.http.scaladsl.model.headers.OAuth2BearerToken
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.directives.FutureDirectives.onComplete
import akka.http.scaladsl.server.{Directive0, Directive1}
import ch.epfl.bluebrain.nexus.admin.client.types.Project
import ch.epfl.bluebrain.nexus.iam.client.types._
import ch.epfl.bluebrain.nexus.iam.client.{IamClient, IamClientError}
import ch.epfl.bluebrain.nexus.kg.KgError.{AuthenticationFailed, AuthorizationFailed, InternalError}
import ch.epfl.bluebrain.nexus.kg.resources.syntax._
import com.typesafe.scalalogging.Logger
import monix.eval.Task
import monix.execution.Scheduler.Implicits.global

import scala.util.{Failure, Success}

object AuthDirectives {

  private val logger = Logger[this.type]

  def extractCaller(implicit iam: IamClient[Task], token: Option[AuthToken]): Directive1[Caller] =
    onComplete(iam.identities.runToFuture).flatMap {
      case Success(caller)                         => provide(caller)
      case Failure(_: IamClientError.Unauthorized) => failWith(AuthenticationFailed)
      case Failure(_: IamClientError.Forbidden)    => failWith(AuthorizationFailed)
      case Failure(err) =>
        val message = "Error when trying to extract the subject"
        logger.error(message, err)
        failWith(InternalError(message))
    }
}
Example 127
Source File: MigrateV12ToV13.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg

import akka.actor.ActorSystem
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.{EventEnvelope, NoOffset, PersistenceQuery}
import cats.implicits._
import ch.epfl.bluebrain.nexus.admin.client.AdminClient
import ch.epfl.bluebrain.nexus.admin.client.types.Project
import ch.epfl.bluebrain.nexus.commons.test.Resources
import ch.epfl.bluebrain.nexus.iam.client.types._
import ch.epfl.bluebrain.nexus.kg.config.AppConfig
import ch.epfl.bluebrain.nexus.kg.config.Vocabulary.nxv
import ch.epfl.bluebrain.nexus.kg.resources.Event.{Created, Updated}
import ch.epfl.bluebrain.nexus.kg.resources.{OrganizationRef, ResId, Views}
import com.typesafe.scalalogging.Logger
import io.circe.Json
import io.circe.parser.parse
import monix.eval.Task
import monix.execution.Scheduler
import monix.execution.schedulers.CanBlock

import scala.concurrent.Future

object MigrateV12ToV13 extends Resources {

  private val log         = Logger[MigrateV12ToV13.type]
  private val newMapping  = jsonContentOf("/elasticsearch/mapping.json")
  private val defaultEsId = nxv.defaultElasticSearchIndex.value

  private implicit val mockedAcls: AccessControlLists = AccessControlLists.empty

  def migrate(
      views: Views[Task],
      adminClient: AdminClient[Task]
  )(implicit config: AppConfig, as: ActorSystem, sc: Scheduler, pm: CanBlock): Unit = {

    implicit val token: Option[AuthToken] = config.iam.serviceAccountToken

    def checkAndUpdateMapping(id: ResId, rev: Long, source: Json)(
        implicit project: Project,
        caller: Caller
    ): Task[Unit] = {
      source.hcursor.get[String]("mapping").flatMap(parse) match {
        case Left(err) =>
          log.error(s"Error while fetching mapping for view id ${id.show}. Reason: '$err'")
          Task.unit
        case Right(mapping) if mapping == newMapping =>
          Task.unit
        case _ =>
          views.update(id, rev, source deepMerge Json.obj("mapping" -> newMapping)).value.flatMap {
            case Left(err) =>
              log.error(s"Error updating the view with id '${id.show}' and rev '$rev'. Reason: '$err'")
              Task.unit
            case _ =>
              log.info(s"View with id '${id.show}' and rev '$rev' was successfully updated.")
              Task.unit
          }
      }
    }

    def fetchProject(orgRef: OrganizationRef, id: ResId)(f: Project => Task[Unit]): Task[Unit] = {
      adminClient.fetchProject(orgRef.id, id.parent.id).flatMap {
        case Some(project) => f(project)
        case None =>
          log.error(s"Project with id '${id.parent.id}' was not found for view with id '${id.show}'")
          Task.unit
      }
    }

    log.info("Migrating views mappings.")
    val pq = PersistenceQuery(as).readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)
    Task
      .fromFuture {
        pq.currentEventsByTag(s"type=${nxv.ElasticSearchView.value.asString}", NoOffset)
          .mapAsync(1) {
            case EventEnvelope(_, _, _, Created(id, orgRef, _, _, source, _, subject)) if id.value == defaultEsId =>
              fetchProject(orgRef, id) { project =>
                checkAndUpdateMapping(id, 1L, source)(project, Caller(subject, Set(subject)))
              }.runToFuture
            case EventEnvelope(_, _, _, Updated(id, orgRef, rev, _, source, _, subject)) if id.value == defaultEsId =>
              fetchProject(orgRef, id) { project =>
                checkAndUpdateMapping(id, rev, source)(project, Caller(subject, Set(subject)))
              }.runToFuture
            case _ => Future.unit
          }
          .runFold(0) {
            case (acc, _) =>
              if (acc % 10 == 0) log.info(s"Processed '$acc' persistence ids.")
              acc + 1
          }
          .map(_ => ())
      }
      .runSyncUnsafe()
    log.info("Finished migrating views mappings.")
  }
}
Example 128
Source File: RegenerateConfiguration.scala From lift with MIT License | 5 votes |
package prog_gen

import java.io.File

import com.typesafe.scalalogging.Logger
import exploration.ParameterRewrite
import ir.TypeChecker
import ir.ast.Lambda
import rewriting.utils.{DumpToFile, Utils}
import scopt.OParser
import utils.CommandLineParser

object RegenerateConfiguration {

  private val logger = Logger(this.getClass)

  case class Config(input: File = null)

  val builder = OParser.builder[Config]
  var cmdArgs: Option[Config] = None

  val parser = {
    import builder._
    OParser.sequence(
      programName("RegenerateConfiguration"),
      opt[File]("input").text("Input files to read").required()
        .validate(f => if (f.exists) success else failure("File \"" + f.getName + "\" does not exist"))
        .action((arg, c) => c.copy(input = arg)),
      help("help").text("Show this message.")
    )
  }

  def main(args: Array[String]): Unit = {
    cmdArgs = Some(CommandLineParser(parser, args, Config()))
    logger.info(s"Arguments: ${args.mkString(" ")}")

    val topFolder = cmdArgs.get.input

    val programPaths = topFolder.listFiles().flatMap(firstSubDir =>
      firstSubDir.listFiles().flatMap(secondSubDir =>
        secondSubDir.listFiles().filter(_.isFile)))

    val concretePrograms = programPaths.par.map(program =>
      try {
        Some(ParameterRewrite.readLambdaFromFile(program.getAbsolutePath))
      } catch {
        case _: Throwable => None
      }).collect({ case Some(lambda) => lambda })

    logger.info(s"Read ${concretePrograms.length} programs...")

    saveConfigurations(concretePrograms.toArray.toSeq)
  }

  private def saveConfigurations(concretePrograms: Seq[Lambda]) = {
    val configurationDirectory = "newConfiguration"

    concretePrograms.foreach(lambda =>
      try {
        TypeChecker(lambda)

        val vars = lambda.getVarsInParams()
        val sizes = GeneratePrograms.getInputSizeCombinations(vars.length)

        val lambdaString = DumpToFile.dumpLambdaToString(lambda)
        val hash = DumpToFile.Sha256Hash(lambdaString)
        val hashPrefix = hash(0) + "/" + hash(1)
        val thisLambdaConf = s"$configurationDirectory/$hashPrefix/$hash"

        GeneratePrograms.generateConfigurations(sizes, hash, thisLambdaConf, lambda)
      } catch {
        case t: Throwable =>
          logger.warn(t.toString)
      })
  }
}
Example 129
Source File: LoggingBootstrap.scala From vamp with Apache License 2.0 | 5 votes |
package io.vamp.bootstrap

import com.typesafe.scalalogging.Logger
import io.vamp.common.akka.Bootstrap
import io.vamp.model.Model
import org.slf4j.LoggerFactory
import org.slf4j.bridge.SLF4JBridgeHandler

import scala.concurrent.Future

abstract class LoggingBootstrap extends Bootstrap {

  def logo: String

  def clazz: Class[_] = classOf[Vamp]

  lazy val version: String = if (Model.version.nonEmpty) s"version ${Model.version}" else ""

  override def start(): Future[Unit] = Future.successful {
    val logger = Logger(LoggerFactory.getLogger(clazz))
    if (!SLF4JBridgeHandler.isInstalled) {
      SLF4JBridgeHandler.removeHandlersForRootLogger()
      SLF4JBridgeHandler.install()
    }
    logger.info(logo)
  }

  override def stop(): Future[Unit] = Future.successful {
    if (SLF4JBridgeHandler.isInstalled) SLF4JBridgeHandler.uninstall()
  }
}
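This bootstrap builds its Logger from a configurable Class[_]. When the logger should simply be named after the enclosing class, scala-logging also offers the LazyLogging and StrictLogging mixins, which declare the logger member for you. A minimal sketch (Worker is a hypothetical class):

import com.typesafe.scalalogging.LazyLogging

class Worker extends LazyLogging {
  def run(): Unit = logger.info("worker started") // logger is derived from Worker's class
}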