java.net.InetAddress Scala Examples

The following examples show how to use java.net.InetAddress in Scala. Each example is taken from an open-source project; the source file, project, and license are noted above it.
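As a primer, here is a minimal, self-contained sketch of the core InetAddress calls that recur throughout the examples below (name resolution, the local host, and the loopback address); the host name is only illustrative.

import java.net.InetAddress

object InetAddressBasics extends App {
  // Resolve a host name to an address (throws UnknownHostException if it cannot be resolved).
  val resolved: InetAddress = InetAddress.getByName("example.com")
  println(s"${resolved.getHostName} -> ${resolved.getHostAddress}")

  // The address of this machine, as seen by the local name service.
  println(InetAddress.getLocalHost.getHostAddress)

  // The loopback address (127.0.0.1 or ::1), available without any lookup.
  println(InetAddress.getLoopbackAddress.getHostAddress)
}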
Example 1
Source File: Launcher.scala    From sparkplug   with MIT License
package springnz.sparkplug.client

import java.net.{ URLEncoder, InetAddress }

import better.files._
import com.typesafe.config.{ ConfigRenderOptions, Config }
import org.apache.spark.launcher.SparkLauncher
import springnz.sparkplug.util.{ BuilderOps, ConfigUtils, Logging, Pimpers }

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.{ Properties, Try }

object Launcher extends Logging {
  import BuilderOps._
  import Pimpers._

  def startProcess(launcher: SparkLauncher): Future[Unit] = {
    val processFuture = Future {
      launcher.launch()
    }.withErrorLog("Failed to launch: ")
    processFuture.flatMap {
      process ⇒ executeProcess(process)
    }
  }

  private def executeProcess(process: Process): Future[Unit] = Future {
    val outStream = scala.io.Source.fromInputStream(process.getInputStream)
    for (line ← outStream.getLines()) {
      log.info(line)
    }
    val errorStream = scala.io.Source.fromInputStream(process.getErrorStream)
    for (line ← errorStream.getLines()) {
      log.info(line)
    }
    process.waitFor()
  }

  def launch(clientAkkaAddress: String,
    jarPath: File,
    mainJarPattern: String,
    mainClass: String,
    sparkConfig: Config,
    akkaRemoteConfig: Option[Config],
    sendJars: Boolean = true): Try[Future[Unit]] = Try {

    val fullExtraJarFolder = jarPath.pathAsString

    val sparkHome = Properties.envOrNone("SPARK_HOME")
    val sparkMaster = Properties.envOrElse("SPARK_MASTER", s"spark://${InetAddress.getLocalHost.getHostAddress}:7077")
    log.debug(s"Spark master set to: $sparkMaster")

    // TODO: enable this functionality (need Spark 1.5 for this)
    //    val sparkArgs: Array[String] = config.getString("spark.submit.sparkargs").split(' ')

    if (!sparkMaster.startsWith("local[") && !sparkHome.isDefined)
      throw new RuntimeException("If 'SPARK_MASTER' is not set to local, 'SPARK_HOME' must be set.")

    val appName = mainClass.split('.').last

    val mainJar = jarPath.glob(mainJarPattern).collectFirst { case f ⇒ f.pathAsString }

    val configVars: Seq[(String, String)] = ConfigUtils.configFields(sparkConfig).toSeq

    val akkaRemoteConfigString = akkaRemoteConfig.map { config ⇒
      val configString = config.root().render(ConfigRenderOptions.concise())
      URLEncoder.encode(configString, "UTF-8")
    }

    val launcher = (new SparkLauncher)
      .setIfSome[String](mainJar) { (l, mj) ⇒ l.setAppResource(mj) }
      .setMainClass(mainClass)
      .setAppName(appName)
      .setMaster(sparkMaster)
      .setIfSome[String](sparkHome) { (l, sh) ⇒ l.setSparkHome(sh) }
      .addAppArgs("appName", appName)
      .addAppArgs("clientAkkaAddress", clientAkkaAddress)
      .setIfSome(akkaRemoteConfigString) { (l, config) ⇒ l.addAppArgs("remoteAkkaConfig", config) }
      .setFoldLeft(configVars) { case (launcher, (key, value)) ⇒ launcher.setConf(key, value) }
      .setDeployMode(sparkConfig.getString("spark.deploymode"))

    val extraJarFiles = jarPath.glob("*.jar")
      .map { case f ⇒ f.pathAsString }
      .filterNot(_.contains("/akka-"))

    val launcherWithJars =
      if (sendJars)
        extraJarFiles.foldLeft(launcher) { case (l, jarFile) ⇒ l.addJar(jarFile) }
      else if (extraJarFiles.length == 0) launcher
      else launcher
        .setConf(SparkLauncher.DRIVER_EXTRA_CLASSPATH, s"$fullExtraJarFolder/*")
        .setConf(SparkLauncher.EXECUTOR_EXTRA_CLASSPATH, s"$fullExtraJarFolder/*")

    startProcess(launcherWithJars)
  }

} 
Example 2
Source File: GrpcServerOwner.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver

import java.io.IOException
import java.net.{BindException, InetAddress, InetSocketAddress}
import java.util.concurrent.TimeUnit.SECONDS

import com.daml.metrics.Metrics
import com.daml.platform.apiserver.GrpcServerOwner._
import com.daml.ports.Port
import com.daml.resources.{Resource, ResourceOwner}
import com.google.protobuf.Message
import io.grpc.netty.NettyServerBuilder
import io.grpc._
import io.netty.channel.socket.nio.NioServerSocketChannel
import io.netty.handler.ssl.SslContext

import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NoStackTrace

final class GrpcServerOwner(
    address: Option[String],
    desiredPort: Port,
    maxInboundMessageSize: Int,
    sslContext: Option[SslContext] = None,
    interceptors: List[ServerInterceptor] = List.empty,
    metrics: Metrics,
    eventLoopGroups: ServerEventLoopGroups,
    services: Iterable[BindableService],
) extends ResourceOwner[Server] {
  override def acquire()(implicit executionContext: ExecutionContext): Resource[Server] = {
    val host = address.map(InetAddress.getByName).getOrElse(InetAddress.getLoopbackAddress)
    Resource(Future {
      val builder = NettyServerBuilder.forAddress(new InetSocketAddress(host, desiredPort.value))
      builder.sslContext(sslContext.orNull)
      builder.channelType(classOf[NioServerSocketChannel])
      builder.permitKeepAliveTime(10, SECONDS)
      builder.permitKeepAliveWithoutCalls(true)
      builder.directExecutor()
      builder.maxInboundMessageSize(maxInboundMessageSize)
      interceptors.foreach(builder.intercept)
      builder.intercept(new MetricsInterceptor(metrics))
      eventLoopGroups.populate(builder)
      services.foreach { service =>
        builder.addService(service)
        toLegacyService(service).foreach(builder.addService)
      }
      val server = builder.build()
      try {
        server.start()
      } catch {
        case e: IOException if e.getCause != null && e.getCause.isInstanceOf[BindException] =>
          throw new UnableToBind(desiredPort, e.getCause)
      }
      server
    })(server => Future(server.shutdown().awaitTermination()))
  }

  // This exposes the existing services under com.daml also under com.digitalasset.
  // This is necessary to allow applications built with an earlier version of the SDK
  // to still work.
  // The "proxy" services will not show up on the reflection service, because of the way it
  // processes service definitions via protobuf file descriptors.
  private def toLegacyService(service: BindableService): Option[ServerServiceDefinition] = {
    val `com.daml` = "com.daml"
    val `com.digitalasset` = "com.digitalasset"

    val damlDef = service.bindService()
    val damlDesc = damlDef.getServiceDescriptor
    // Only add "proxy" services if it actually contains com.daml in the service name.
    // There are other services registered like the reflection service, that doesn't need the special treatment.
    if (damlDesc.getName.contains(`com.daml`)) {
      val digitalassetName = damlDesc.getName.replace(`com.daml`, `com.digitalasset`)
      val digitalassetDef = ServerServiceDefinition.builder(digitalassetName)
      damlDef.getMethods.forEach { methodDef =>
        val damlMethodDesc = methodDef.getMethodDescriptor
        val digitalassetMethodName =
          damlMethodDesc.getFullMethodName.replace(`com.daml`, `com.digitalasset`)
        val digitalassetMethodDesc =
          damlMethodDesc.toBuilder.setFullMethodName(digitalassetMethodName).build()
        val _ = digitalassetDef.addMethod(
          digitalassetMethodDesc.asInstanceOf[MethodDescriptor[Message, Message]],
          methodDef.getServerCallHandler.asInstanceOf[ServerCallHandler[Message, Message]]
        )
      }
      Option(digitalassetDef.build())
    } else None
  }
}

object GrpcServerOwner {

  final class UnableToBind(port: Port, cause: Throwable)
      extends RuntimeException(
        s"The API server was unable to bind to port $port. Terminate the process occupying the port, or choose a different one.",
        cause)
      with NoStackTrace

} 
Example 3
Source File: LauncherBackend.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.launcher

import java.net.{InetAddress, Socket}

import org.apache.spark.SPARK_VERSION
import org.apache.spark.launcher.LauncherProtocol._
import org.apache.spark.util.{ThreadUtils, Utils}


  protected def onDisconnected() : Unit = { }

  private def fireStopRequest(): Unit = {
    val thread = LauncherBackend.threadFactory.newThread(new Runnable() {
      override def run(): Unit = Utils.tryLogNonFatalError {
        onStopRequest()
      }
    })
    thread.start()
  }

  private class BackendConnection(s: Socket) extends LauncherConnection(s) {

    override protected def handle(m: Message): Unit = m match {
      case _: Stop =>
        fireStopRequest()

      case _ =>
        throw new IllegalArgumentException(s"Unexpected message type: ${m.getClass().getName()}")
    }

    override def close(): Unit = {
      try {
        super.close()
      } finally {
        onDisconnected()
        _isConnected = false
      }
    }

  }

}

private object LauncherBackend {

  val threadFactory = ThreadUtils.namedThreadFactory("LauncherBackend")

} 
Example 4
Source File: package.scala    From mantis   with Apache License 2.0
package io.iohk.ethereum

import java.io.{File, PrintWriter}
import java.net.{Inet6Address, InetAddress}
import java.security.SecureRandom

import io.iohk.ethereum.crypto._
import org.spongycastle.crypto.AsymmetricCipherKeyPair
import org.spongycastle.crypto.params.ECPublicKeyParameters
import org.spongycastle.math.ec.ECPoint
import org.spongycastle.util.encoders.Hex

import scala.io.Source

package object network {

  val ProtocolVersion = 4

  implicit class ECPublicKeyParametersNodeId(val pubKey: ECPublicKeyParameters) extends AnyVal {
    def toNodeId: Array[Byte] =
      pubKey.asInstanceOf[ECPublicKeyParameters].getQ
      .getEncoded(false)
      .drop(1) // drop type info
  }

  def publicKeyFromNodeId(nodeId: String): ECPoint = {
    val bytes = ECDSASignature.uncompressedIndicator +: Hex.decode(nodeId)
    curve.getCurve.decodePoint(bytes)
  }

  def loadAsymmetricCipherKeyPair(filePath: String, secureRandom: SecureRandom): AsymmetricCipherKeyPair = {
    val file = new File(filePath)
    if(!file.exists()){
      val keysValuePair = generateKeyPair(secureRandom)

      //Write keys to file
      val (priv, _) = keyPairToByteArrays(keysValuePair)
      require(file.getParentFile.exists() || file.getParentFile.mkdirs(), "Key's file parent directory creation failed")
      val writer = new PrintWriter(filePath)
      try {
        writer.write(Hex.toHexString(priv))
      } finally {
        writer.close()
      }

      keysValuePair
    } else {
      val reader = Source.fromFile(filePath)
      try {
        val privHex = reader.mkString
        keyPairFromPrvKey(Hex.decode(privHex))
      } finally {
        reader.close()
      }
    }
  }

  
  def getHostName(address: InetAddress): String = {
    val hostName = address.getHostAddress
    address match {
      case _: Inet6Address => s"[$hostName]"
      case _ => hostName
    }
  }

} 
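A brief usage sketch of getHostName (assuming the package object above is imported): IPv6 literals are wrapped in brackets so the result can be embedded directly in host:port strings. The exact IPv6 rendering may vary by JVM.

import java.net.InetAddress
import io.iohk.ethereum.network.getHostName

getHostName(InetAddress.getByName("127.0.0.1")) // "127.0.0.1"
getHostName(InetAddress.getByName("::1"))       // "[0:0:0:0:0:0:0:1]"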
Example 5
Source File: MetricsReporter.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.configuration

import java.net.{InetAddress, InetSocketAddress}
import java.nio.file.{Path, Paths}

import com.codahale.metrics
import com.codahale.metrics.{MetricRegistry, ScheduledReporter}
import com.daml.platform.sandbox.config.InvalidConfigException
import com.google.common.net.HostAndPort
import scopt.Read

import scala.util.Try

sealed trait MetricsReporter {
  def register(registry: MetricRegistry): ScheduledReporter
}

object MetricsReporter {

  case object Console extends MetricsReporter {
    override def register(registry: MetricRegistry): ScheduledReporter =
      metrics.ConsoleReporter
        .forRegistry(registry)
        .build()
  }

  final case class Csv(directory: Path) extends MetricsReporter {
    override def register(registry: MetricRegistry): ScheduledReporter =
      metrics.CsvReporter
        .forRegistry(registry)
        .build(directory.toFile)
  }

  final case class Graphite(address: InetSocketAddress) extends MetricsReporter {
    override def register(registry: MetricRegistry): ScheduledReporter =
      metrics.graphite.GraphiteReporter
        .forRegistry(registry)
        .build(new metrics.graphite.Graphite(address))
  }

  object Graphite {
    val defaultHost: InetAddress = InetAddress.getLoopbackAddress
    val defaultPort: Int = 2003

    def apply(): Graphite =
      Graphite(new InetSocketAddress(defaultHost, defaultPort))

    def apply(port: Int): Graphite =
      Graphite(new InetSocketAddress(defaultHost, port))
  }

  implicit val metricsReporterRead: Read[MetricsReporter] = Read.reads {
    _.split(":", 2).toSeq match {
      case Seq("console") => Console
      case Seq("csv", directory) => Csv(Paths.get(directory))
      case Seq("graphite") =>
        Graphite()
      case Seq("graphite", address) =>
        Try(address.toInt)
          .map(port => Graphite(port))
          .recover {
            case _: NumberFormatException =>
              //noinspection UnstableApiUsage
              val hostAndPort = HostAndPort
                .fromString(address)
                .withDefaultPort(Graphite.defaultPort)
              Graphite(new InetSocketAddress(hostAndPort.getHost, hostAndPort.getPort))
          }
          .get
      case _ =>
        throw new InvalidConfigException(
          """Must be one of "console", "csv:PATH", or "graphite[:HOST][:PORT]".""")
    }
  }

} 
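For illustration, the scopt Read instance above can be exercised directly; the argument strings below are hypothetical command-line values, not part of the original source.

import com.daml.platform.configuration.MetricsReporter
import scopt.Read

val read = implicitly[Read[MetricsReporter]]
read.reads("console")                 // MetricsReporter.Console
read.reads("csv:/tmp/metrics")        // Csv(Paths.get("/tmp/metrics"))
read.reads("graphite:127.0.0.1:2003") // Graphite(new InetSocketAddress("127.0.0.1", 2003))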
Example 6
Source File: AbstractSandboxFixture.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox

import java.io.File
import java.net.InetAddress

import akka.stream.Materializer
import com.daml.api.util.TimeProvider
import com.daml.bazeltools.BazelRunfiles._
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.ledger.api.auth.client.LedgerCallCredentials
import com.daml.ledger.api.domain
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
import com.daml.ledger.api.v1.ledger_identity_service.{
  GetLedgerIdentityRequest,
  LedgerIdentityServiceGrpc
}
import com.daml.ledger.api.v1.testing.time_service.TimeServiceGrpc
import com.daml.ledger.client.services.testing.time.StaticTime
import com.daml.ledger.participant.state.v1.SeedService.Seeding
import com.daml.platform.common.LedgerIdMode
import com.daml.platform.sandbox.config.SandboxConfig
import com.daml.platform.sandbox.services.DbInfo
import com.daml.platform.services.time.TimeProviderType
import com.daml.ports.Port
import com.daml.resources.ResourceOwner
import io.grpc.Channel
import org.scalatest.Suite
import scalaz.syntax.tag._

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Try

trait AbstractSandboxFixture extends AkkaBeforeAndAfterAll {
  self: Suite =>

  protected def darFile = new File(rlocation("ledger/test-common/model-tests.dar"))

  protected def ledgerId(token: Option[String] = None): domain.LedgerId =
    domain.LedgerId(
      LedgerIdentityServiceGrpc
        .blockingStub(channel)
        .withCallCredentials(token.map(new LedgerCallCredentials(_)).orNull)
        .getLedgerIdentity(GetLedgerIdentityRequest())
        .ledgerId)

  protected def getTimeProviderForClient(
      implicit mat: Materializer,
      esf: ExecutionSequencerFactory
  ): TimeProvider = {
    Try(TimeServiceGrpc.stub(channel))
      .map(StaticTime.updatedVia(_, ledgerId().unwrap)(mat, esf))
      .fold[TimeProvider](_ => TimeProvider.UTC, Await.result(_, 30.seconds))
  }

  protected def config: SandboxConfig =
    SandboxConfig.default.copy(
      port = Port.Dynamic,
      damlPackages = packageFiles,
      timeProviderType = Some(TimeProviderType.Static),
      scenario = scenario,
      ledgerIdMode = LedgerIdMode.Static(LedgerId("sandbox-server")),
      seeding = Some(Seeding.Weak),
    )

  protected def packageFiles: List[File] = List(darFile)

  protected def scenario: Option[String] = None

  protected def database: Option[ResourceOwner[DbInfo]] = None

  protected def serverHost: String = InetAddress.getLoopbackAddress.getHostName

  protected def serverPort: Port

  protected def channel: Channel
} 
Example 7
Source File: GrpcClientResource.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.services

import java.net.{InetAddress, InetSocketAddress}
import java.util.concurrent.TimeUnit

import com.daml.platform.apiserver.EventLoopGroupOwner
import com.daml.ports.Port
import com.daml.resources.{Resource, ResourceOwner}
import io.grpc.Channel
import io.grpc.netty.NettyChannelBuilder
import io.netty.channel.EventLoopGroup

import scala.concurrent.{ExecutionContext, Future}

object GrpcClientResource {
  def owner(port: Port): ResourceOwner[Channel] =
    for {
      eventLoopGroup <- new EventLoopGroupOwner("api-client", sys.runtime.availableProcessors())
      channel <- channelOwner(port, EventLoopGroupOwner.clientChannelType, eventLoopGroup)
    } yield channel

  private def channelOwner(
      port: Port,
      channelType: Class[_ <: io.netty.channel.Channel],
      eventLoopGroup: EventLoopGroup,
  ): ResourceOwner[Channel] =
    new ResourceOwner[Channel] {
      override def acquire()(implicit executionContext: ExecutionContext): Resource[Channel] = {
        Resource(Future {
          NettyChannelBuilder
            .forAddress(new InetSocketAddress(InetAddress.getLoopbackAddress, port.value))
            .channelType(channelType)
            .eventLoopGroup(eventLoopGroup)
            .usePlaintext()
            .directExecutor()
            .build()
        })(channel =>
          Future {
            channel.shutdownNow()
            if (!channel.awaitTermination(5, TimeUnit.SECONDS)) {
              sys.error(
                "Unable to shutdown channel to a remote API under tests. Unable to recover. Terminating.")
            }
        })
      }
    }
} 
Example 8
Source File: FreePort.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.testing.postgresql

import java.net.{InetAddress, ServerSocket}

import com.daml.ports.Port

import scala.annotation.tailrec

private[postgresql] object FreePort {

  @tailrec
  def find(tries: Int = 10): PortLock.Locked = {
    val socket = new ServerSocket(0, 0, InetAddress.getLoopbackAddress)
    val portLock = try {
      val port = Port(socket.getLocalPort)
      PortLock.lock(port)
    } finally {
      socket.close()
    }
    portLock match {
      case Right(locked) =>
        socket.close()
        locked
      case Left(failure) =>
        socket.close()
        if (tries <= 1) {
          throw failure
        } else {
          find(tries - 1)
        }
    }
  }

} 
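The essential technique here is binding a ServerSocket to port 0 on the loopback address and reading back the port the OS assigned. A stripped-down sketch without the PortLock dependency (and therefore without its protection against another process grabbing the port before you bind it again) might look like this:

import java.net.{InetAddress, ServerSocket}

def freeLoopbackPort(): Int = {
  // Port 0 asks the OS for any free ephemeral port; a backlog of 0 means the default queue length.
  val socket = new ServerSocket(0, 0, InetAddress.getLoopbackAddress)
  try socket.getLocalPort
  finally socket.close()
}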
Example 9
Source File: ChaosCommands.scala    From eventuate-chaos   with Apache License 2.0
package com.rbmhtechnology.eventuate.chaos

import java.net.InetAddress

import scala.collection.immutable.Seq
import scala.sys.process._
import scala.util.Try

trait ChaosCommands {
  private lazy val environment: Array[(String, String)] =
    Seq("boot2docker", "shellinit").lineStream
      .map(_.trim)
      .filter(_.startsWith("export "))
      .map(_.substring(7))
      .map(_.split("="))
      .foldLeft(Seq[(String, String)]()) { case (acc, kv) => acc :+ (kv(0) -> kv(1)) }.toArray

  def seedAddress(): Try[InetAddress] =
    runCommand(Seq("docker", "inspect", "--format='{{ .NetworkSettings.IPAddress }}'", "cassandra-1"), _.!!.trim).map(InetAddress.getByName)

  private def runCommand[A](command: Seq[String], f: ProcessBuilder => A): Try[A] =
    Try(f(Process(command, None, environment: _*)))

} 
Example 10
Source File: ProxyServer.scala    From devbox   with Apache License 2.0
package cmdproxy

import java.io.BufferedReader
import java.io.InputStreamReader
import java.io.OutputStreamWriter
import java.io.PrintWriter
import java.net.InetAddress
import java.net.ServerSocket
import java.net.Socket

import scala.util.Using

import devbox.logger.FileLogger
import os.RelPath
import ujson.ParseException
import upickle.default.{macroRW, ReadWriter}

case class Request(workingDir: String, cmd: Seq[String])
object Request {
  implicit val rw: ReadWriter[Request] = macroRW
}


  val localDir: Map[os.RelPath, os.Path] = dirMapping.map(_.swap).toMap

  def start(): Unit = {
    logger.info(s"Starting command proxy server, listening at ${socket.getInetAddress}:${socket.getLocalPort}")
    (new Thread("Git Proxy Thread") {
      override def run(): Unit = {
        while (!socket.isClosed) {
          Using(socket.accept()) { handleConnection } recover {
            // The specific SocketException case must come before the generic Exception case,
            // otherwise it would be unreachable.
            case e: java.net.SocketException if e.getMessage == "Socket closed" =>
              logger.error(s"Git proxy socket closed")
            case e: Exception =>
              logger.error(s"Error handling request ${e.getMessage}")
          }
        }
      }
    }).start()

  }

  def handleConnection(conn: Socket): Unit = try {
    logger.info(s"Accepting connection from ${conn.getInetAddress}")
    val in = new BufferedReader(new InputStreamReader(conn.getInputStream, ProxyServer.CHARSET_NAME))
    val out = new PrintWriter(new OutputStreamWriter(conn.getOutputStream, ProxyServer.CHARSET_NAME))

    upickle.default.read[Request](in.readLine()) match {
      case Request(dir, args) =>
        val workingDir = localDir
          .collect{case (remote, local) if RelPath(dir).startsWith(remote) =>
            local / RelPath(dir).relativeTo(remote)
          }
          .head

        // Be cautious here and only execute "git" commands.
        if (args.headOption.contains("git")) {
          logger.info(s"Executing `${args.mkString(" ")}` in $workingDir")

          val proc = os.proc(args).call(
            workingDir,
            mergeErrIntoOut = true,
            stdout = os.ProcessOutput.Readlines(str =>
              out.println(upickle.default.write(Left[String, Int](str)))
            ),
            check = false,
            timeout = 10000
          )

          out.println(upickle.default.write(Right[String, Int](proc.exitCode)))
        } else {
          val msg = s"Not executing non-git commend: `${args.mkString(" ")}`."
          logger.info(msg)
          out.println(upickle.default.write(Right[String, Int](1)))
        }

        out.flush()
    }
  } catch {
    case e: ParseException => logger.error(s"Error parsing incoming json request: ${e.getMessage}")
  }
}

object ProxyServer {
  val DEFAULT_PORT = 20280
  val CHARSET_NAME = "UTF-8"
} 
Example 11
Source File: SinkFunctionExample.scala    From examples-scala   with Apache License 2.0
package io.github.streamingwithflink.chapter8

import java.io.PrintStream
import java.net.{InetAddress, Socket}

import io.github.streamingwithflink.util.{SensorReading, SensorSource, SensorTimeAssigner}
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.sink.{RichSinkFunction, SinkFunction}
import org.apache.flink.streaming.api.scala._

/**
  * Example program that writes sensor readings to a socket.
  *
  * NOTE: Before starting the program, you need to start a process that listens on a socket at localhost:9191.
  * On Linux, you can do that with nc (netcat) with the following command:
  *
  * nc -l localhost 9191
  */
object SinkFunctionExample {

  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // use event time for the application
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
    // configure watermark interval
    env.getConfig.setAutoWatermarkInterval(1000L)

    // ingest sensor stream
    val readings: DataStream[SensorReading] = env
      // SensorSource generates random temperature readings
      .addSource(new SensorSource)
      // assign timestamps and watermarks which are required for event time
      .assignTimestampsAndWatermarks(new SensorTimeAssigner)

    // write the sensor readings to a socket
    readings.addSink(new SimpleSocketSink("localhost", 9191))
      // set parallelism to 1 because only one thread can write to a socket
      .setParallelism(1)

    env.execute()
  }
}

/**
  * Writes a stream of [[SensorReading]] to a socket.
  */
class SimpleSocketSink(val host: String, val port: Int)
    extends RichSinkFunction[SensorReading] {

  var socket: Socket = _
  var writer: PrintStream = _

  override def open(config: Configuration): Unit = {
    // open socket and writer
    socket = new Socket(InetAddress.getByName(host), port)
    writer = new PrintStream(socket.getOutputStream)
  }

  override def invoke(
      value: SensorReading,
      ctx: SinkFunction.Context[_]): Unit = {
    // write sensor reading to socket
    writer.println(value.toString)
    writer.flush()
  }

  override def close(): Unit = {
    // close writer and socket
    writer.close()
    socket.close()
  }
} 
Example 12
Source File: DockerUtils.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.util

import java.net.{Inet4Address, InetAddress, NetworkInterface}

import scala.collection.JavaConverters._
import scala.sys.process._
import scala.util.Try

private[spark] object DockerUtils {

  def getDockerIp(): String = {
    
    def findFromDockerMachine(): Option[String] = {
      sys.env.get("DOCKER_MACHINE_NAME").flatMap { name =>
        Try(Seq("/bin/bash", "-c", s"docker-machine ip $name 2>/dev/null").!!.trim).toOption
      }
    }
    sys.env.get("DOCKER_IP")
      .orElse(findFromDockerMachine())
      .orElse(Try(Seq("/bin/bash", "-c", "boot2docker ip 2>/dev/null").!!.trim).toOption)
      .getOrElse {
        // This block of code is based on Utils.findLocalInetAddress(), but is modified to blacklist
        // certain interfaces.
        val address = InetAddress.getLocalHost
        // Address resolves to something like 127.0.1.1, which happens on Debian; try to find
        // a better address using the local network interfaces
        // getNetworkInterfaces returns ifs in reverse order compared to ifconfig output order
        // on unix-like system. On windows, it returns in index order.
        // It's more proper to pick ip address following system output order.
        val blackListedIFs = Seq(
          "vboxnet0",  // Mac
          "docker0"    // Linux
        )
        val activeNetworkIFs = NetworkInterface.getNetworkInterfaces.asScala.toSeq.filter { i =>
          !blackListedIFs.contains(i.getName)
        }
        val reOrderedNetworkIFs = activeNetworkIFs.reverse
        for (ni <- reOrderedNetworkIFs) {
          val addresses = ni.getInetAddresses.asScala
            .filterNot(addr => addr.isLinkLocalAddress || addr.isLoopbackAddress).toSeq
          if (addresses.nonEmpty) {
            val addr = addresses.find(_.isInstanceOf[Inet4Address]).getOrElse(addresses.head)
            // because of Inet6Address.toHostName may add interface at the end if it knows about it
            val strippedAddress = InetAddress.getByAddress(addr.getAddress)
            return strippedAddress.getHostAddress
          }
        }
        address.getHostAddress
      }
  }
} 
Example 13
Source File: CollectionCache.scala    From incubator-s2graph   with Apache License 2.0
package org.apache.s2graph.counter.util

import java.net.InetAddress
import java.util.concurrent.TimeUnit

import com.google.common.cache.{Cache, CacheBuilder}
import org.slf4j.LoggerFactory

import scala.concurrent.{ExecutionContext, Future}
import scala.language.{postfixOps, reflectiveCalls}

case class CollectionCacheConfig(maxSize: Int, ttl: Int, negativeCache: Boolean = false, negativeTTL: Int = 600)

class CollectionCache[C <: { def nonEmpty: Boolean; def isEmpty: Boolean } ](config: CollectionCacheConfig) {
  private val cache: Cache[String, C] = CacheBuilder.newBuilder()
    .expireAfterWrite(config.ttl, TimeUnit.SECONDS)
    .maximumSize(config.maxSize)
    .build[String, C]()

//  private lazy val cache = new SynchronizedLruMap[String, (C, Int)](config.maxSize)
  private lazy val className = this.getClass.getSimpleName

  private lazy val log = LoggerFactory.getLogger(this.getClass)
  val localHostname = InetAddress.getLocalHost.getHostName

  def size = cache.size
  val maxSize = config.maxSize

  // cache statistics
  def getStatsString: String = {
    s"$localHostname ${cache.stats().toString}"
  }

  def withCache(key: String)(op: => C): C = {
    Option(cache.getIfPresent(key)) match {
      case Some(r) => r
      case None =>
        val r = op
        if (r.nonEmpty || config.negativeCache) {
          cache.put(key, r)
        }
        r
    }
  }

  def withCacheAsync(key: String)(op: => Future[C])(implicit ec: ExecutionContext): Future[C] = {
    Option(cache.getIfPresent(key)) match {
      case Some(r) => Future.successful(r)
      case None =>
        op.map { r =>
          if (r.nonEmpty || config.negativeCache) {
            cache.put(key, r)
          }
          r
        }
    }
  }

  def purgeKey(key: String) = {
    cache.invalidate(key)
  }

  def contains(key: String): Boolean = {
    Option(cache.getIfPresent(key)).nonEmpty
  }
} 
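A hypothetical usage sketch: any collection-like type with nonEmpty and isEmpty satisfies the structural bound, so a Seq works. The config values and lookup below are illustrative only.

import org.apache.s2graph.counter.util.{CollectionCache, CollectionCacheConfig}

val cache = new CollectionCache[Seq[String]](CollectionCacheConfig(maxSize = 1000, ttl = 60))

def expensiveLookup(key: String): Seq[String] = Seq(s"value-for-$key")

cache.withCache("k1")(expensiveLookup("k1")) // computed and cached
cache.withCache("k1")(expensiveLookup("k1")) // served from the cache until the TTL expires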
Example 14
Source File: NTP.scala    From matcher   with MIT License
package com.wavesplatform.dex.time

import java.net.{InetAddress, SocketTimeoutException}
import java.util.concurrent.RejectedExecutionException
import java.util.concurrent.atomic.AtomicBoolean

import com.wavesplatform.dex.domain.utils.ScorexLogging
import monix.eval.Task
import monix.execution.schedulers.SchedulerService
import monix.execution.{ExecutionModel, Scheduler}
import org.apache.commons.net.ntp.NTPUDPClient

import scala.concurrent.duration.DurationInt

class NTP(ntpServer: String) extends Time with ScorexLogging with AutoCloseable {

  private val offsetPanicThreshold = 1000000L
  private val ExpirationTimeout    = 60.seconds
  private val RetryDelay           = 10.seconds
  private val ResponseTimeout      = 10.seconds

  private val duringShutdown = new AtomicBoolean(false)

  private implicit val scheduler: SchedulerService = Scheduler.singleThread(
    name = "time-impl",
    daemonic = false,
    executionModel = ExecutionModel.AlwaysAsyncExecution,
    reporter = {
      case _: RejectedExecutionException if duringShutdown.get() => // ignore
      case e: Throwable                                          => log.error("An uncaught error", e)
    }
  )

  private val client = new NTPUDPClient()
  client.setDefaultTimeout(ResponseTimeout.toMillis.toInt)

  @volatile private var offset = 0L
  private val updateTask: Task[Unit] = {
    def newOffsetTask: Task[Option[(InetAddress, java.lang.Long)]] = Task {
      try {
        val info = client.getTime(InetAddress.getByName(ntpServer))
        info.computeDetails()
        Option(info.getOffset).map { offset =>
          val r = if (Math.abs(offset) > offsetPanicThreshold) throw new Exception("Offset is suspiciously large") else offset
          (info.getAddress, r)
        }
      } catch {
        case _: SocketTimeoutException =>
          None
        case t: Throwable =>
          log.warn("Problems with NTP: ", t)
          None
      }
    }

    newOffsetTask.flatMap {
      case None if !scheduler.isShutdown => updateTask.delayExecution(RetryDelay)
      case Some((server, newOffset)) if !scheduler.isShutdown =>
        log.trace(s"Adjusting time with $newOffset milliseconds, source: ${server.getHostAddress}.")
        offset = newOffset
        updateTask.delayExecution(ExpirationTimeout)
      case _ => Task.unit
    }
  }

  def correctedTime(): Long = System.currentTimeMillis() + offset

  private var txTime: Long = 0

  def getTimestamp(): Long = {
    txTime = Math.max(correctedTime(), txTime + 1)
    txTime
  }

  private val taskHandle = updateTask.runAsync {
    case Left(e) => log.error(s"Error executing task", e)
    case _       =>
  }

  override def close(): Unit = if (duringShutdown.compareAndSet(false, true)) {
    log.info("Shutting down Time")
    taskHandle.cancel()
    if (client.isOpen) client.close()
    scheduler.shutdown()
  }
} 
Example 15
Source File: WavesBlockchainCachingClient.scala    From matcher   with MIT License
package com.wavesplatform.dex.grpc.integration.clients

import java.net.InetAddress
import java.time.Duration

import com.wavesplatform.dex.domain.account.Address
import com.wavesplatform.dex.domain.asset.Asset
import com.wavesplatform.dex.domain.bytes.ByteStr
import com.wavesplatform.dex.domain.order.Order
import com.wavesplatform.dex.domain.transaction.ExchangeTransaction
import com.wavesplatform.dex.domain.utils.ScorexLogging
import com.wavesplatform.dex.grpc.integration.caches.{AssetDescriptionsCache, FeaturesCache}
import com.wavesplatform.dex.grpc.integration.clients.WavesBlockchainClient.SpendableBalanceChanges
import com.wavesplatform.dex.grpc.integration.dto.BriefAssetDescription
import monix.execution.Scheduler
import monix.reactive.Observable

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}

class WavesBlockchainCachingClient(underlying: WavesBlockchainClient[Future], defaultCacheExpiration: FiniteDuration, monixScheduler: Scheduler)(
    implicit grpcExecutionContext: ExecutionContext)
    extends WavesBlockchainClient[Future]
    with ScorexLogging {

  private val cacheExpiration: Duration = Duration.ofMillis(defaultCacheExpiration.toMillis)

  private val featuresCache          = new FeaturesCache(underlying.isFeatureActivated, invalidationPredicate = !_) // we don't keep knowledge about unactivated features
  private val assetDescriptionsCache = new AssetDescriptionsCache(underlying.assetDescription, cacheExpiration)

  // TODO remove after release 2.1.3
  override def spendableBalance(address: Address, asset: Asset): Future[Long] = underlying.spendableBalance(address, asset)

  override def spendableBalanceChanges: Observable[SpendableBalanceChanges]                      = underlying.spendableBalanceChanges
  override def realTimeBalanceChanges: Observable[WavesBlockchainClient.BalanceChanges]          = underlying.realTimeBalanceChanges
  override def spendableBalances(address: Address, assets: Set[Asset]): Future[Map[Asset, Long]] = underlying.spendableBalances(address, assets)
  override def allAssetsSpendableBalance(address: Address): Future[Map[Asset, Long]]             = underlying.allAssetsSpendableBalance(address)

  override def isFeatureActivated(id: Short): Future[Boolean] = featuresCache.get(id) map Boolean2boolean

  override def assetDescription(asset: Asset.IssuedAsset): Future[Option[BriefAssetDescription]] = assetDescriptionsCache.get(asset)

  override def hasScript(asset: Asset.IssuedAsset): Future[Boolean]                                     = underlying.hasScript(asset)
  override def runScript(asset: Asset.IssuedAsset, input: ExchangeTransaction): Future[RunScriptResult] = underlying.runScript(asset, input)

  override def hasScript(address: Address): Future[Boolean]                       = underlying.hasScript(address)
  override def runScript(address: Address, input: Order): Future[RunScriptResult] = underlying.runScript(address, input)

  override def wereForged(txIds: Seq[ByteStr]): Future[Map[ByteStr, Boolean]] = underlying.wereForged(txIds)
  override def broadcastTx(tx: ExchangeTransaction): Future[Boolean]          = underlying.broadcastTx(tx)

  override def forgedOrder(orderId: ByteStr): Future[Boolean] = underlying.forgedOrder(orderId)

  override def getNodeAddress: Future[InetAddress] = underlying.getNodeAddress

  override def close(): Future[Unit] = underlying.close()
} 
Example 16
Source File: AkkaClusterConfig.scala    From akka-tools   with MIT License
package no.nextgentel.oss.akkatools.cluster

import java.net.{UnknownHostException, InetAddress}
import java.util.{List => JList}
import scala.collection.JavaConverters._

object AkkaClusterConfig {

  // Java API
  def create(hostname:String, port:Int, seedNodes:JList[String]):AkkaClusterConfig = {
    AkkaClusterConfig(Option(hostname), port, seedNodes.asScala.toList)
  }
}

case class AkkaClusterConfig
(
  // Will be resolved if null
  private val hostname:Option[String],
  // the tcp port used on this node
  val port:Int,
  // list of all seed-nodes - basically single01 and single02 - on the following form: <host>:<port>
  // e.g in yaml:
  //    - "single01-testing.nextgentel.net:9091"
  //    - "single02-testing.nextgentel.net:9091"
  val seedNodes:List[String]
  ) {

  lazy val thisHostname:String = hostname.getOrElse(resolveHostName())

  def thisHostnameAndPort():String = thisHostname+":"+port


  /**
   * Generates the akka config string used to configure remote listening on this node,
   * plus info about all other (seed) nodes.
   * It is VERY important that 'actorSystemName' matches the name used in ActorSystem.create( [name] ).
   * If this name is not correct, connecting to the remote actor system will not work,
   * even though the host and port are correct.
   * @param actorSystemName the same name as used in ActorSystem.create( [name] ) on all nodes.
   * @return akka config string that can be used in ConfigFactory.parseString()
   */
  def generateAkkaConfig(actorSystemName:String):String = {
    if (port == 0) throw new Exception("port is not specified")
    if (seedNodes == null || seedNodes.isEmpty) throw new Exception("seedNodes is not specified")

    val seedNodesString = "[" + seedNodes.map {
      hostAndPort => "\"akka.tcp://" + actorSystemName + "@" + hostAndPort + "\""
    }.mkString(",") + "]"

    s"""akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
      |akka.remote.enabled-transports = ["akka.remote.netty.tcp"]
      |akka.remote.netty.tcp.hostname="$thisHostname"
      |akka.remote.netty.tcp.port=$port
      |akka.cluster.seed-nodes = $seedNodesString
    """.stripMargin
  }


  protected def resolveHostName(): String = {
    try {
      InetAddress.getLocalHost.getCanonicalHostName
    } catch {
      case ex: UnknownHostException => throw new Exception("Error resolving hostName", ex)
    }
  }

  def withSeedNodeList(newSeedNodeList:List[String]):AkkaClusterConfig = {
    new AkkaClusterConfig(Some(thisHostname), port, newSeedNodeList)
  }
} 
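A usage sketch (assuming akka-cluster and Typesafe Config are on the classpath; the seed-node host names are the illustrative ones from the comments above): generate the config string, parse it, and create the ActorSystem with the same name that was passed to generateAkkaConfig.

import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory

val clusterConfig = AkkaClusterConfig(
  hostname = None, // resolved via InetAddress.getLocalHost
  port = 9091,
  seedNodes = List("single01-testing.nextgentel.net:9091", "single02-testing.nextgentel.net:9091"))

val akkaConfig = ConfigFactory
  .parseString(clusterConfig.generateAkkaConfig("MyActorSystem"))
  .withFallback(ConfigFactory.load())

val system = ActorSystem("MyActorSystem", akkaConfig)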
Example 17
Source File: SelectHiveQLByJDBCTest.scala    From piflow   with BSD 2-Clause "Simplified" License
package cn.piflow.bundle.hive

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class SelectHiveQLByJDBCTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/hive/SelectHiveQLByJDBC.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()

    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())

    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort", "50001").start()

    //execute flow
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("SelectHiveQLByJdbcTest")
      .config("spark.driver.memory", "1g")
      .config("spark.executor.memory", "2g")
      .config("spark.cores.max", "2")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }

} 
Example 18
Source File: JsonFolderParserTest.scala    From piflow   with BSD 2-Clause "Simplified" License
package cn.piflow.bundle.Json

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class JsonFolderParserTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/json/jsonFolder.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 19
Source File: JsonParserTest.scala    From piflow   with BSD 2-Clause "Simplified" License
package cn.piflow.bundle.Json

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class JsonParserTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/json/jsonParser.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 20
Source File: JsonSaveTest.scala    From piflow   with BSD 2-Clause "Simplified" License
package cn.piflow.bundle.Json

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class JsonSaveTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/json/jsonSave.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 21
Source File: JsonStringParserTest.scala    From piflow   with BSD 2-Clause "Simplified" License
package cn.piflow.bundle.Json

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class JsonStringParserTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/json/jsonStringParser.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 22
Source File: MysqlReadTest.scala    From piflow   with BSD 2-Clause "Simplified" License
package cn.piflow.bundle.JDBC

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class MysqlReadTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/jdbc/MysqlRead.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()
    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())

    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort", "50001").start()

    //execute flow
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("MysqlReadTest")
      .config("spark.driver.memory", "1g")
      .config("spark.executor.memory", "2g")
      .config("spark.cores.max", "2")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }

} 
Example 23
Source File: JdbcReadFromOracleTest.scala    From piflow   with BSD 2-Clause "Simplified" License
package cn.piflow.bundle.JDBC

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.PropertyUtil
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class JdbcReadFromOracleTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/jdbc/JdbcReadFromOracle.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()

    val ip = InetAddress.getLocalHost.getHostAddress

    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort", "50001").start()

    //execute flow
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("JdbcReadFromOracleTest")
      .config("spark.driver.memory", "1g")
      .config("spark.executor.memory", "2g")
      .config("spark.cores.max", "2")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }

} 
Example 24
Source File: OracleReadTest.scala    From piflow   with BSD 2-Clause "Simplified" License
package cn.piflow.bundle.JDBC

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class OracleReadTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/jdbc/OracleRead.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()

    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())

    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort", "50001").start()

    //execute flow
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("OracleReadTest")
      .config("spark.driver.memory", "1g")
      .config("spark.executor.memory", "2g")
      .config("spark.cores.max", "2")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }

} 
Example 25
Source File: ExecuteShellTest.scala    From piflow   with BSD 2-Clause "Simplified" License
package cn.piflow.bundle.script

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class ExecuteShellTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/script/shell.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 26
Source File: PythonTest.scala    From piflow   with BSD 2-Clause "Simplified" License
package cn.piflow.bundle.script

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.ServerIpUtil
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON


class PythonTest {

  @Test
  def testPython() : Unit = {
    //parse flow json
    val file = "src/main/resources/flow/script/python.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()

    //execute flow
    val spark = SparkSession.builder()
      .master("local")
      //.master("spark://10.0.86.89:7077")
      .config("spark.driver.memory", "1g")
      .config("spark.executor.memory", "2g")
      .config("spark.cores.max", "2")
      //.config("spark.yarn.appMasterEnv.PYSPARK_PYTHON","/usr/bin/python3")
      //.config("spark.jars","/opt/project/piflow/piflow-bundle/lib/jython-standalone-2.7.1.jar")
      .enableHiveSupport()
      .getOrCreate()

    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    spark.close();
    h2Server.stop()
  }

 } 
Example 27
Source File: ExecuteScalaTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.script

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil, ScalaExecutorUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class ExecuteScalaTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/script/scala.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()

    val scalaExecutorJarList = ScalaExecutorUtil.buildScalaExcutorJar(flowBean)

    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[3]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
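ExecuteScalaTest, like most tests in this listing, starts both a SparkSession and an H2 TCP server but only closes the SparkSession, and only when the flow finishes normally; if the flow throws, nothing is released and port 50001 stays bound. The wrapper below is a hypothetical refactor, not piflow API; it assumes that constructFlow() returns a cn.piflow.Flow, which is what Runner.create().start(flow) suggests.

import cn.piflow.{Flow, Runner}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server

object FlowRunSupport {

  // Run a flow exactly as the tests above do, but release the SparkSession
  // and the embedded H2 server even when the flow throws.
  // Flow is assumed here to be the type returned by FlowBean.constructFlow().
  def runAndCleanUp(flow: Flow, spark: SparkSession, h2Server: Server): Unit = {
    try {
      val process = Runner.create()
        .bind(classOf[SparkSession].getName, spark)
        .bind("checkpoint.path", "")
        .bind("debug.path", "")
        .start(flow)
      process.awaitTermination()
    } finally {
      spark.close()
      h2Server.stop()
    }
  }
}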
Example 28
Source File: PythonWithDataFrameTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.script

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.ServerIpUtil
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON


class PythonWithDataFrameTest {


  @Test
  def testPythonWithDataFrame() : Unit = {
    //parse flow json
    val file = "src/main/resources/flow/script/pythonWithDataFrame.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()

    //execute flow
    val spark = SparkSession.builder()
      .master("local")
      //.master("spark://10.0.86.89:7077")
      .config("spark.driver.memory", "1g")
      .config("spark.executor.memory", "2g")
      .config("spark.cores.max", "2")
      //.config("spark.yarn.appMasterEnv.PYSPARK_PYTHON","/usr/bin/python3")
      //.config("spark.jars","/opt/project/piflow/piflow-bundle/lib/jython-standalone-2.7.1.jar")
      .enableHiveSupport()
      .getOrCreate()

    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    spark.close();
    h2Server.stop()
  }


} 
Example 29
Source File: RunCypherTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.neo4j

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class RunCypherTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/neo4j/RunCypher.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()

    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())

    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort", "50001").start()

    //execute flow
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("RunCypherTest")
      .config("spark.driver.memory", "1g")
      .config("spark.executor.memory", "2g")
      .config("spark.cores.max", "2")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }

} 
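Every example here hard-codes port 50001 for the H2 TCP server, so two of these tests cannot run at the same time on one machine. One illustrative alternative, assuming H2 accepts the port number as a string argument exactly as in the calls above, is to let the operating system pick a free port first:

import java.net.ServerSocket

import org.h2.tools.Server

object H2TestServer {

  // Bind an ephemeral port to discover a free one, release it, then start H2 on it.
  // There is a small race between closing the probe socket and H2 re-binding the port.
  def startOnFreePort(): (Server, Int) = {
    val probe = new ServerSocket(0)
    val port = try probe.getLocalPort finally probe.close()
    val server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort", port.toString).start()
    (server, port)
  }
}

The chosen port would then have to be published wherever the rest of the flow expects to find the H2 server, which is why the fixed port may still be the simpler choice for purely local runs.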
Example 30
Source File: PutNeo4jTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.neo4j

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class PutNeo4jTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/neo4j/PutNeo4j.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()

    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())

    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort", "50001").start()

    //execute flow
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("HiveToNeo4jTest")
      .config("spark.driver.memory", "1g")
      .config("spark.executor.memory", "2g")
      .config("spark.cores.max", "2")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }

} 
Example 31
Source File: HiveToNeo4jTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.neo4j

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class HiveToNeo4jTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/neo4j/HiveToNeo4j.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()

    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())

    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort", "50001").start()

    //execute flow
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("HiveToNeo4jTest")
      .config("spark.driver.memory", "1g")
      .config("spark.executor.memory", "2g")
      .config("spark.cores.max", "2")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }

} 
Example 32
Source File: LabelPropagationTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.graphx

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class LabelPropagationTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/graphx/labelpropagation.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 33
Source File: LoadGraph.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.graphx

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class LoadGraph {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/graphx/LoadGraph.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 34
Source File: ReadHbaseTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.hbase

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class ReadHbaseTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/hbase/ReadHbase.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()
    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())

    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort", "50001").start()

    //execute flow
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("MysqlReadTest")
      .config("spark.driver.memory", "1g")
      .config("spark.executor.memory", "2g")
      .config("spark.cores.max", "2")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }

} 
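The three setup lines that write the server.ip file and start H2 are repeated verbatim in nearly every test in this listing. A hypothetical helper, assembled only from the calls these tests already make, would keep that boilerplate in one place; FlowTestEnv and prepare are illustrative names, not part of piflow.

import java.net.InetAddress

import cn.piflow.util.{FileUtil, ServerIpUtil}
import org.h2.tools.Server

object FlowTestEnv {

  // Record the local address where piflow expects it, then start the embedded H2 server.
  // The caller is responsible for stopping the returned server when the test finishes.
  def prepare(tcpPort: String = "50001"): Server = {
    val ip = InetAddress.getLocalHost.getHostAddress
    FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort", tcpPort).start()
  }
}

Each test could then replace the ip, writeFile and createTcpServer lines with val h2Server = FlowTestEnv.prepare().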
Example 35
Source File: PutHbaseTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.hbase

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class PutHbaseTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/hbase/PutHbase.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()
    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())

    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort", "50001").start()

    //execute flow
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("MysqlReadTest")
      .config("spark.driver.memory", "1g")
      .config("spark.executor.memory", "2g")
      .config("spark.cores.max", "2")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }

} 
Example 36
Source File: ReadFromRedisTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.redis

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.{SaveMode, SparkSession}
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class ReadFromRedisTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/redis/ReadFromRedis.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()

    val ip = InetAddress.getLocalHost.getHostAddress

    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort", "50001").start()

    //execute flow
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("CsvParserTest")
      .config("spark.driver.memory", "1g")
      .config("spark.executor.memory", "2g")
      .config("spark.cores.max", "2")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 37
Source File: WriteToRedisTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.redis

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class WriteToRedisTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/redis/WriteToRedis.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()

    val ip = InetAddress.getLocalHost.getHostAddress

    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort", "50001").start()

    //execute flow
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("CsvParserTest")
      .config("spark.driver.memory", "1g")
      .config("spark.executor.memory", "2g")
      .config("spark.cores.max", "2")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }

} 
Example 38
Source File: GetUrlTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.http

import java.io.{BufferedReader, InputStreamReader, PrintWriter}
import java.net.{HttpURLConnection, InetAddress, URL, URLConnection}

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.http.client.methods.{CloseableHttpResponse, HttpGet}
import org.apache.http.impl.client.HttpClients
import org.apache.http.util.EntityUtils
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class GetUrlTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/http/getUrl.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }

} 
Example 39
Source File: PostUrlTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.http

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class PostUrlTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/http/postUrl.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }

} 
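All of these tests parse the flow definition with scala.util.parsing.json.JSON, which has long been deprecated and is not shipped with recent Scala versions. When porting these examples to a newer Scala, the parse step needs a replacement; the sketch below is one possibility, assuming the third-party ujson library (com.lihaoyi) is on the classpath, and it is not how piflow itself parses flows.

import cn.piflow.conf.util.FileUtil

object FlowJson {

  // Parse a flow definition file into the Map[String, Any] shape that FlowBean(map) expects.
  // Numbers come back as Double, matching what JSON.parseFull used to produce.
  def load(path: String): Map[String, Any] = {
    def toAny(v: ujson.Value): Any = v match {
      case ujson.Obj(fields) => fields.map { case (k, x) => k -> toAny(x) }.toMap
      case ujson.Arr(items)  => items.map(toAny).toList
      case ujson.Str(s)      => s
      case ujson.Num(n)      => n
      case ujson.Bool(b)     => b
      case ujson.Null        => null
    }
    toAny(ujson.read(FileUtil.fileReader(path))).asInstanceOf[Map[String, Any]]
  }
}

The call site would change from OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]] to FlowJson.load(file).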
Example 40
Source File: XmlSaveTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.xml

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class XmlSaveTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/xml/xmlSave.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 41
Source File: XmlParserTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.xml

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class XmlParserTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/xml/xmlParser.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 42
Source File: XmlStringTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.xml

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class XmlStringTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/xml/xmlStringParser.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 43
Source File: XmlParserColumnsTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.xml

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class XmlParserColumnsTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/xml/xmlParserColumns.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 44
Source File: XmlParserFolderTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.xml

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class XmlParserFolderTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/xml/xmlParserFolder.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 45
Source File: FileTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.file

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class FileTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/file/file.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 46
Source File: RegexTextProcessTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.file

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class RegexTextProcessTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/file/regexTextProcess.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 47
Source File: PutEsTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.elasticsearch

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class PutEsTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/es/PutEs.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 48
Source File: QueryEsTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.elasticsearch

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class QueryEsTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/es/QueryEs.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
//      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 49
Source File: SubtractTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.common

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class SubtractTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/common/subtract.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 50
Source File: DropFieldTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.common

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class DropFieldTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/common/dropField.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 51
Source File: MergeTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.common

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class MergeTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/common/merge.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 52
Source File: MockDataTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.common

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class MockDataTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/common/mockData.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 53
Source File: AddUUIDTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle

import java.net.InetAddress
import java.util.ArrayList

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class AddUUIDTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/common/uuid.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 54
Source File: SelectFieldTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.common

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class SelectFieldTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/common/selectField.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 55
Source File: ConvertSchemaTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.common

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class ConvertSchemaTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/common/convertSchema.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 56
Source File: JoinTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.common

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class JoinTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/common/join.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 57
Source File: FilterTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.common

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class FilterTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/common/filter.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 58
Source File: ForkTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.common

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class ForkTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/common/fork.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 59
Source File: ExecuteSQLTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.common

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class ExecuteSQLTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/common/executeSql.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 60
Source File: DistinctTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.common

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class DistinctTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/common/distinct.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 61
Source File: RouteTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.common

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class RouteTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/common/route.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 62
Source File: GetFromSolrTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.solr

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class GetFromSolrTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/solr/GetFromSolr.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()

    val ip = InetAddress.getLocalHost.getHostAddress

    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort", "50001").start()

    //execute flow
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("GetFromSolrTest")
      .config("spark.driver.memory", "1g")
      .config("spark.executor.memory", "2g")
      .config("spark.cores.max", "2")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }

} 
Example 63
Source File: PutIntoSolrTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.solr

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class PutIntoSolrTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/solr/PutIntoSolr.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()

    val ip = InetAddress.getLocalHost.getHostAddress

    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort", "50001").start()

    //execute flow
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("GetFromSolrTest")
      .config("spark.driver.memory", "1g")
      .config("spark.executor.memory", "2g")
      .config("spark.cores.max", "2")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }

} 
Example 64
Source File: DeleteHdfsTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.hdfs

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class DeleteHdfsTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/hdfs/deleteHdfs.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 65
Source File: GetHdfsTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.hdfs

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class GetHdfsTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/hdfs/getHdfs.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 66
Source File: SelectFilesByNameTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.hdfs

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class SelectFilesByNameTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/hdfs/selectFileByName.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 67
Source File: FileDownhdfsHdfsTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.hdfs

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class FileDownhdfsHdfsTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/hdfs/fileDownHdfs.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 68
Source File: UnzipFilesonHdfsTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.hdfs

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class UnzipFilesonHdfsTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/hdfs/unzipFilesOnHdfs.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 69
Source File: SaveToHdfsTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.hdfs

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class SaveToHdfsTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/hdfs/saveToHdfs.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 70
Source File: ListHdfsTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.hdfs

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class ListHdfsTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/hdfs/listHdfs.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 71
Source File: PutHdfsTest.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.bundle.hdfs

import java.net.InetAddress

import cn.piflow.Runner
import cn.piflow.conf.bean.FlowBean
import cn.piflow.conf.util.{FileUtil, OptionUtil}
import cn.piflow.util.{PropertyUtil, ServerIpUtil}
import org.apache.spark.sql.SparkSession
import org.h2.tools.Server
import org.junit.Test

import scala.util.parsing.json.JSON

class PutHdfsTest {

  @Test
  def testFlow(): Unit ={

    //parse flow json
    val file = "src/main/resources/flow/hdfs/putHdfs.json"
    val flowJsonStr = FileUtil.fileReader(file)
    val map = OptionUtil.getAny(JSON.parseFull(flowJsonStr)).asInstanceOf[Map[String, Any]]
    println(map)

    //create flow
    val flowBean = FlowBean(map)
    val flow = flowBean.constructFlow()


    val ip = InetAddress.getLocalHost.getHostAddress
    cn.piflow.util.FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    val h2Server = Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort","50001").start()
    //execute flow
    val spark = SparkSession.builder()
      .master("local[12]")
      .appName("hive")
      .config("spark.driver.memory", "4g")
      .config("spark.executor.memory", "8g")
      .config("spark.cores.max", "8")
      .config("hive.metastore.uris",PropertyUtil.getPropertyValue("hive.metastore.uris"))
      .enableHiveSupport()
      .getOrCreate()

    val process = Runner.create()
      .bind(classOf[SparkSession].getName, spark)
      .bind("checkpoint.path", "")
      .bind("debug.path","")
      .start(flow);

    process.awaitTermination();
    val pid = process.pid();
    println(pid + "!!!!!!!!!!!!!!!!!!!!!")
    spark.close();
  }


} 
Example 72
Source File: ServerIpUtil.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.util

import java.io.{File, FileInputStream, InputStream}
import java.net.InetAddress
import java.util.Properties

object ServerIpUtil {
  private val prop: Properties = new Properties()
  var fis: InputStream = null
  var path: String = ""

  try {
    val userDir = System.getProperty("user.dir")
    path = userDir + "/server.ip"
    val file = new File(path)
    if(!file.exists()){
      file.createNewFile()
    }
    prop.load(new FileInputStream(path))
  } catch{
    case ex: Exception => ex.printStackTrace()
  }

  def getServerIpFile() : String = {
    path
  }


  def getServerIp(): String ={
    val obj = prop.get("server.ip")
    if(obj != null){
      return obj.toString
    }
    null
  }

  def main(args: Array[String]): Unit = {

    val ip = InetAddress.getLocalHost.getHostAddress
    //write ip to server.ip file
    FileUtil.writeFile("server.ip=" + ip, ServerIpUtil.getServerIpFile())
    println(ServerIpUtil.getServerIp())
  }
} 
Example 73
Source File: V1ClientConfigurationUtils.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.utils

import java.net.InetAddress

import akka.actor.DynamicAccess
import com.amazonaws.{ ClientConfiguration, Protocol, ProxyAuthenticationMethod }
import com.github.j5ik2o.akka.persistence.dynamodb.client.v1.{
  DnsResolverProvider,
  RetryPolicyProvider,
  SecureRandomProvider
}
import com.github.j5ik2o.akka.persistence.dynamodb.config.PluginConfig

import scala.concurrent.duration.Duration
import scala.jdk.CollectionConverters._

object V1ClientConfigurationUtils {

  def setup(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig): ClientConfiguration = {
    val result = new ClientConfiguration()
    import pluginConfig.clientConfig.v1ClientConfig.clientConfiguration._
    protocol.foreach { v => result.setProtocol(v) }
    result.setMaxConnections(maxConnections)
    userAgentPrefix.foreach { v => result.setUserAgentPrefix(v) }
    userAgentSuffix.foreach { v => result.setUserAgentSuffix(v) }
    localAddress.foreach { v => result.setLocalAddress(InetAddress.getByName(v)) }
    proxyProtocol.foreach { v => result.setProtocol(Protocol.valueOf(v)) }
    proxyHost.foreach { v => result.setProxyHost(v) }
    proxyPort.foreach { v => result.setProxyPort(v) }
    disableSocketProxy.foreach { v => result.setDisableSocketProxy(v) }
    proxyUsername.foreach { v => result.setProxyUsername(v) }
    proxyPassword.foreach { v => result.setProxyPassword(v) }
    proxyDomain.foreach { v => result.setProxyDomain(v) }
    proxyWorkstation.foreach { v => result.setProxyWorkstation(v) }
    nonProxyHosts.foreach { v => result.setNonProxyHosts(v) }
    if (proxyAuthenticationMethods.nonEmpty) {
      val seq = proxyAuthenticationMethods.map(ProxyAuthenticationMethod.valueOf)
      result.setProxyAuthenticationMethods(seq.asJava)
    }
    RetryPolicyProvider.create(dynamicAccess, pluginConfig).foreach { p => result.setRetryPolicy(p.create) }
    maxErrorRetry.foreach { v => result.setMaxErrorRetry(v) }
    retryMode.foreach { v => result.setRetryMode(v) }
    if (socketTimeout != Duration.Zero)
      result.setSocketTimeout(socketTimeout.toMillis.toInt)
    if (connectionTimeout != Duration.Zero)
      result
        .setConnectionTimeout(connectionTimeout.toMillis.toInt)
    result
      .setRequestTimeout(requestTimeout.toMillis.toInt)
    if (clientExecutionTimeout != Duration.Zero)
      result
        .setClientExecutionTimeout(clientExecutionTimeout.toMillis.toInt)
    result.setUseReaper(useReaper)

    //* public ClientConfiguration withThrottledRetries(boolean use) {
    result.setMaxConsecutiveRetriesBeforeThrottling(maxConsecutiveRetriesBeforeThrottling)
    result.setUseGzip(useGzip)
    socketBufferSizeHint.foreach { v => result.setSocketBufferSizeHints(v.send, v.receive) }
    signerOverride.foreach { v => result.setSignerOverride(v) }
    // * public ClientConfiguration withPreemptiveBasicProxyAuth(boolean preemptiveBasicProxyAuth) {
    connectionTtl.foreach { v =>
      if (v != Duration.Zero)
        result.setConnectionTTL(v.toMillis)
    }
    if (connectionMaxIdle != Duration.Zero)
      result.setConnectionMaxIdleMillis(connectionMaxIdle.toMillis)
    if (validateAfterInactivity != Duration.Zero)
      result.setValidateAfterInactivityMillis(validateAfterInactivity.toMillis.toInt)
    result.setUseTcpKeepAlive(tcpKeepAlive)
    val dnsResolverProvider = DnsResolverProvider.create(dynamicAccess, pluginConfig)
    dnsResolverProvider.create.foreach { dnsResolver => result.setDnsResolver(dnsResolver) }
    result.setCacheResponseMetadata(cacheResponseMetadata)
    result.setResponseMetadataCacheSize(responseMetadataCacheSize)
    if (useSecureRandom) {
      val secureRandomProvider = SecureRandomProvider.create(dynamicAccess, pluginConfig)
      result.setSecureRandom(secureRandomProvider.create)
    }
    result.setUseExpectContinue(useExpectContinue)
    headers.foreach {
      case (k, v) =>
        result.addHeader(k, v)
    }
    disableHostPrefixInjection.foreach { v => result.setDisableHostPrefixInjection(v) }
    // * public ClientConfiguration withTlsKeyManagersProvider(TlsKeyManagersProvider tlsKeyManagersProvider) {

    result
  }

} 
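The only InetAddress usage above is the local bind address: the plugin stores it as a string in its configuration and resolves it with InetAddress.getByName before handing it to the AWS SDK v1 ClientConfiguration. Stripped of the plugin machinery, that step looks like this (a standalone sketch, not the plugin's code path; the address value is arbitrary):

import java.net.InetAddress

import com.amazonaws.ClientConfiguration

object V1LocalAddressSketch {
  def main(args: Array[String]): Unit = {
    val conf = new ClientConfiguration()
    // Resolve the configured host string once and bind outgoing connections to it.
    conf.setLocalAddress(InetAddress.getByName("127.0.0.1"))
    println(conf.getLocalAddress)
  }
}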
Example 74
Source File: V2HttpClientBuilderUtils.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.utils

import java.net.InetAddress
import java.time.{ Duration => JavaDuration }

import com.github.j5ik2o.akka.persistence.dynamodb.config.PluginConfig
import software.amazon.awssdk.http.Protocol
import software.amazon.awssdk.http.apache.ApacheHttpClient
import software.amazon.awssdk.http.nio.netty.{ Http2Configuration, NettyNioAsyncHttpClient, SdkEventLoopGroup }

import scala.concurrent.duration.Duration

object V2HttpClientBuilderUtils {

  def setupSync(pluginConfig: PluginConfig): ApacheHttpClient.Builder = {
    import pluginConfig.clientConfig.v2ClientConfig.syncClientConfig._
    val result = ApacheHttpClient.builder()

    if (socketTimeout != Duration.Zero)
      result.socketTimeout(JavaDuration.ofMillis(socketTimeout.toMillis))
    if (connectionTimeout != Duration.Zero)
      result.connectionTimeout(JavaDuration.ofMillis(connectionTimeout.toMillis))
    if (connectionAcquisitionTimeout != Duration.Zero)
      result.connectionAcquisitionTimeout(JavaDuration.ofMillis(connectionAcquisitionTimeout.toMillis))

    result.maxConnections(maxConnections)

    localAddress.foreach { v => result.localAddress(InetAddress.getByName(v)) }
    expectContinueEnabled.foreach { v => result.expectContinueEnabled(v) }

    if (connectionTimeToLive != Duration.Zero)
      result.connectionTimeToLive(JavaDuration.ofMillis(connectionTimeToLive.toMillis))
    if (maxIdleConnectionTimeout != Duration.Zero)
      result.connectionMaxIdleTime(JavaDuration.ofMillis(maxIdleConnectionTimeout.toMillis))

    result.useIdleConnectionReaper(useConnectionReaper)

//    Builder httpRoutePlanner(HttpRoutePlanner proxyConfiguration);
//    Builder credentialsProvider(CredentialsProvider credentialsProvider);
//    Builder tlsKeyManagersProvider(TlsKeyManagersProvider tlsKeyManagersProvider);
//    Builder tlsTrustManagersProvider(TlsTrustManagersProvider tlsTrustManagersProvider);
    result
  }

  def setupAsync(pluginConfig: PluginConfig): NettyNioAsyncHttpClient.Builder = {
    val result = NettyNioAsyncHttpClient.builder()
    import pluginConfig.clientConfig.v2ClientConfig.asyncClientConfig._
    result.maxConcurrency(maxConcurrency)
    result.maxPendingConnectionAcquires(maxPendingConnectionAcquires)

    if (readTimeout != Duration.Zero)
      result.readTimeout(JavaDuration.ofMillis(readTimeout.toMillis))
    if (writeTimeout != Duration.Zero)
      result.writeTimeout(JavaDuration.ofMillis(writeTimeout.toMillis))
    if (connectionTimeout != Duration.Zero)
      result.connectionTimeout(JavaDuration.ofMillis(connectionTimeout.toMillis))
    if (connectionAcquisitionTimeout != Duration.Zero)
      result.connectionAcquisitionTimeout(JavaDuration.ofMillis(connectionAcquisitionTimeout.toMillis))
    if (connectionTimeToLive != Duration.Zero)
      result.connectionTimeToLive(JavaDuration.ofMillis(connectionTimeToLive.toMillis))
    if (maxIdleConnectionTimeout != Duration.Zero)
      result.connectionMaxIdleTime(JavaDuration.ofMillis(maxIdleConnectionTimeout.toMillis))

    result.useIdleConnectionReaper(useConnectionReaper)
    if (useHttp2)
      result.protocol(Protocol.HTTP2)
    else
      result.protocol(Protocol.HTTP1_1)
    val http2Builder = Http2Configuration.builder()
    http2Builder.maxStreams(http2MaxStreams)
    http2Builder.initialWindowSize(http2InitialWindowSize)
    http2HealthCheckPingPeriod.foreach(v => http2Builder.healthCheckPingPeriod(JavaDuration.ofMillis(v.toMillis)))
    result.http2Configuration(http2Builder.build())
    threadsOfEventLoopGroup.foreach(v => result.eventLoopGroup(SdkEventLoopGroup.builder().numberOfThreads(v).build()))
    result

  }

} 
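As in the v1 utility above, the synchronous v2 client resolves its optional local bind address from a configuration string with InetAddress.getByName. A standalone sketch of just that call (not the plugin's code path; the address value is arbitrary):

import java.net.InetAddress

import software.amazon.awssdk.http.apache.ApacheHttpClient

object V2LocalAddressSketch {
  def main(args: Array[String]): Unit = {
    // The v2 Apache builder accepts the resolved InetAddress directly.
    val client = ApacheHttpClient.builder()
      .localAddress(InetAddress.getByName("127.0.0.1"))
      .build()
    client.close()
  }
}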
Example 75
Source File: Partitioner.scala    From spark-solr   with Apache License 2.0 5 votes vote down vote up
package com.lucidworks.spark

import java.net.InetAddress

import com.lucidworks.spark.rdd.SolrRDD
import com.lucidworks.spark.util.SolrSupport
import org.apache.solr.client.solrj.SolrQuery
import org.apache.spark.Partition

import scala.collection.mutable.ArrayBuffer

// Is there a need to override {@code Partitioner.scala} and define our own partition ids?
object SolrPartitioner {

  def getShardPartitions(shards: List[SolrShard], query: SolrQuery) : Array[Partition] = {
    shards.zipWithIndex.map{ case (shard, i) =>
      // Choose any of the replicas as the active shard to query
      SelectSolrRDDPartition(i, "*", shard, query, SolrRDD.randomReplica(shard))}.toArray
  }

  def getSplitPartitions(
      shards: List[SolrShard],
      query: SolrQuery,
      splitFieldName: String,
      splitsPerShard: Int): Array[Partition] = {
    var splitPartitions = ArrayBuffer.empty[SelectSolrRDDPartition]
    var counter = 0
    shards.foreach(shard => {
      val splits = SolrSupport.getShardSplits(query, shard, splitFieldName, splitsPerShard)
      splits.foreach(split => {
        splitPartitions += SelectSolrRDDPartition(counter, "*", shard, split.query, split.replica)
        counter = counter + 1
      })
    })
    splitPartitions.toArray
  }

  // Workaround for SOLR-10490. TODO: Remove once fixed
  def getExportHandlerPartitions(
      shards: List[SolrShard],
      query: SolrQuery): Array[Partition] = {
    shards.zipWithIndex.map{ case (shard, i) =>
      // Choose any of the replicas as the active shard to query
      ExportHandlerPartition(i, shard, query, SolrRDD.randomReplica(shard), 0, 0)}.toArray
  }

  // Workaround for SOLR-10490. TODO: Remove once fixed
  def getExportHandlerPartitions(
      shards: List[SolrShard],
      query: SolrQuery,
      splitFieldName: String,
      splitsPerShard: Int): Array[Partition] = {
    val splitPartitions = ArrayBuffer.empty[ExportHandlerPartition]
    var counter = 0
    shards.foreach(shard => {
      // Form a continuous iterator list so that we can pick different replicas for different partitions in round-robin mode
      val splits = SolrSupport.getExportHandlerSplits(query, shard, splitFieldName, splitsPerShard)
      splits.foreach(split => {
        splitPartitions += ExportHandlerPartition(counter, shard, split.query, split.replica, split.numWorkers, split.workerId)
        counter = counter+1
      })
    })
    splitPartitions.toArray
  }

}

case class SolrShard(shardName: String, replicas: List[SolrReplica])

case class SolrReplica(
    replicaNumber: Int,
    replicaName: String,
    replicaUrl: String,
    replicaHostName: String,
    locations: Array[InetAddress]) {
  def getHostAndPort(): String = {replicaHostName.substring(0, replicaHostName.indexOf('_'))}
  override def toString(): String = {
    return s"SolrReplica(${replicaNumber}) ${replicaName}: url=${replicaUrl}, hostName=${replicaHostName}, locations="+locations.mkString(",")
  }
} 
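Here InetAddress only appears in the SolrReplica descriptor, whose locations field carries the replica's resolved addresses, and whose replicaHostName is expected in the "host:port_context" form that getHostAndPort splits on. A small sketch of constructing one (all field values are hypothetical; only the case class itself comes from the example above):

import java.net.InetAddress

import com.lucidworks.spark.SolrReplica

object SolrReplicaSketch {
  def main(args: Array[String]): Unit = {
    val replica = SolrReplica(
      replicaNumber = 0,
      replicaName = "core_node1",
      replicaUrl = "http://localhost:8983/solr/collection1",
      replicaHostName = "localhost:8983_solr",
      locations = InetAddress.getAllByName("localhost")
    )
    println(replica)                  // uses the overridden toString
    println(replica.getHostAndPort()) // "localhost:8983"
  }
}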
Example 76
Source File: HttpPayload.scala    From algoliasearch-client-scala   with MIT License 5 votes vote down vote up
package algolia.http

import java.net.InetAddress

import algolia.objects.RequestOptions
import io.netty.resolver.NameResolver
import io.lemonlabs.uri.dsl._
import org.asynchttpclient.{Request, RequestBuilder}

private[algolia] sealed trait HttpVerb

private[algolia] case object GET extends HttpVerb {
  override def toString: String = "GET"
}

private[algolia] case object POST extends HttpVerb {
  override def toString: String = "POST"
}

private[algolia] case object PUT extends HttpVerb {
  override def toString: String = "PUT"
}

private[algolia] case object DELETE extends HttpVerb {
  override def toString: String = "DELETE"
}

private[algolia] case class HttpPayload(
    verb: HttpVerb,
    path: Seq[String],
    queryParameters: Option[Map[String, String]] = None,
    body: Option[String] = None,
    isSearch: Boolean,
    isAnalytics: Boolean = false,
    isInsights: Boolean = false,
    isRecommendation: Boolean = false,
    requestOptions: Option[RequestOptions]
) {

  def apply(
      host: String,
      headers: Map[String, String],
      dnsNameResolver: NameResolver[InetAddress]
  ): Request = {
    val uri = path.foldLeft(host)((url, p) => url / p)

    var builder: RequestBuilder =
      new RequestBuilder().setMethod(verb.toString).setUrl(uri)

    headers.foreach { case (k, v) => builder = builder.addHeader(k, v) }

    // Needed to properly request the Analytics API that is behind GCP and
    // which always expects to have the Content-Length header of POST
    // requests set, even the ones whose body is empty.
    if (verb == POST && body.isEmpty) {
      builder.addHeader("Content-Length", "0")
    }

    queryParameters.foreach(
      _.foreach { case (k, v) => builder = builder.addQueryParam(k, v) }
    )

    requestOptions.foreach { r =>
      r.generateExtraHeaders().foreach {
        case (k, v) => builder = builder.addHeader(k, v)
      }
      r.generateExtraQueryParameters().foreach {
        case (k, v) => builder = builder.addQueryParam(k, v)
      }
    }

    body.foreach(b => builder = builder.setBody(b))

    builder.setNameResolver(dnsNameResolver).build()
  }

  def toString(host: String): String = {
    val _path = path.foldLeft("")(_ + "/" + _)
    val _query = queryParameters.fold("")(_.foldLeft("") {
      case (acc, (k, v)) => s"$acc&$k=$v"
    })
    val _body = body.map(b => s", '$b'").getOrElse("")

    s"$verb $host${_path}${_query}${_body}"
  }

} 
Example 77
Source File: Server.scala    From seals   with Apache License 2.0 5 votes vote down vote up
package com.example.server

import java.net.{ InetSocketAddress, InetAddress }
import java.nio.channels.AsynchronousChannelGroup
import java.util.concurrent.Executors

import scala.concurrent.duration._

import cats.implicits._
import cats.effect.{ IO, IOApp, ExitCode, Resource, Blocker }

import fs2.{ Stream, Chunk }
import fs2.io.tcp

import scodec.bits.BitVector
import scodec.Codec

import dev.tauri.seals.scodec.Codecs._

import com.example.proto._

object Server extends IOApp {

  final val bufferSize = 32 * 1024
  final val timeout = Some(2.seconds)
  final val maxClients = 200
  final val port = 8080

  val rnd = new scala.util.Random

  def addr(port: Int): InetSocketAddress =
    new InetSocketAddress(InetAddress.getLoopbackAddress, port)

  override def run(args: List[String]): IO[ExitCode] = {
    Blocker[IO].use { bl =>
      tcp.SocketGroup[IO](bl).use { sg =>
        serve(port, sg).compile.drain.as(ExitCode.Success)
      }
    }
  }

  def serve(port: Int, sg: tcp.SocketGroup): Stream[IO, Unit] = {
    Stream.resource(sg.serverResource[IO](addr(port))).flatMap {
      case (localAddr, sockets) =>
        val s = sockets.map { socket =>
          Stream.resource(socket).flatMap { socket =>
            val bvs: Stream[IO, BitVector] = socket.reads(bufferSize, timeout).chunks.map(ch => BitVector.view(ch.toArray))
            val tsk: IO[BitVector] = bvs.compile.toVector.map(_.foldLeft(BitVector.empty)(_ ++ _))
            val request: IO[Request] = tsk.flatMap { bv =>
              Codec[Request].decode(bv).fold(
                err => IO.raiseError(new Exception(err.toString)),
                result => IO.pure(result.value)
              )
            }
            val response: IO[Response] = request.flatMap(logic)
            val encoded: Stream[IO, Byte] = Stream.eval(response)
              .map(r => Codec[Response].encode(r).require)
              .flatMap { bv => Stream.chunk(Chunk.bytes(bv.bytes.toArray)) }
            encoded.through(socket.writes(timeout)).onFinalize(socket.endOfOutput)
          }
        }
        s.parJoin[IO, Unit](maxClients)
    }
  }

  def logic(req: Request): IO[Response] = req match {
    case RandomNumber(min, max) =>
      if (min < max) {
        IO {
          val v = rnd.nextInt(max - min + 1) + min
          Number(v)
        }
      } else if (min === max) {
        IO.pure(Number(min))
      } else {
        IO.raiseError(new IllegalArgumentException("min must not be greater than max"))
      }
    case ReSeed(s) =>
      IO {
        rnd.setSeed(s)
        Ok
      }
  }
} 
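Unlike the earlier examples, this server binds to InetAddress.getLoopbackAddress rather than InetAddress.getLocalHost. The loopback address is returned without any name resolution, so the demo works even when the machine's own host name does not resolve; getLocalHost, by contrast, looks up the local host name. A two-line illustration of the difference (the printed values are indicative only):

import java.net.InetAddress

object LoopbackVsLocalHost {
  def main(args: Array[String]): Unit = {
    println(InetAddress.getLoopbackAddress) // e.g. localhost/127.0.0.1, no lookup involved
    println(InetAddress.getLocalHost)       // e.g. myhost/192.168.1.10, resolved from the host name
  }
}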
Example 78
Source File: Client.scala    From seals   with Apache License 2.0 5 votes vote down vote up
package com.example.lib

import java.net.{ InetAddress, InetSocketAddress }

import scala.concurrent.Future
import scala.concurrent.Await
import scala.concurrent.duration._

import cats.effect.{ IO, ContextShift }

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream._
import akka.stream.scaladsl._
import akka.util.{ ByteString }

import scodec.bits.BitVector
import scodec.stream.{ StreamEncoder, StreamDecoder }

import fs2.interop.reactivestreams._

import dev.tauri.seals.scodec.StreamCodecs._
import dev.tauri.seals.scodec.StreamCodecs.{ pipe => decPipe }

import Protocol.v1.{ Request, Response, Seed, Random }

object Client {

  val reqCodec: StreamEncoder[Request] = streamEncoderFromReified[Request]
  val resCodec: StreamDecoder[Response] = streamDecoderFromReified[Response]

  def main(args: Array[String]): Unit = {
    implicit val sys: ActorSystem = ActorSystem("ClientSystem")
    implicit val mat: Materializer = ActorMaterializer()
    try {
      val resp = Await.result(client(1234), 10.seconds)
      println(resp)
    } finally {
      sys.terminate()
    }
  }

  def client(port: Int)(implicit sys: ActorSystem, mat: Materializer): Future[Vector[Response]] = {
    val addr = new InetSocketAddress(InetAddress.getLoopbackAddress, port)
    Tcp().outgoingConnection(addr).joinMat(logic)(Keep.right).run()
  }

  def logic(implicit sys: ActorSystem): Flow[ByteString, ByteString, Future[Vector[Response]]] = {

    implicit val cs: ContextShift[IO] = IO.contextShift(sys.dispatcher)

    val requests = fs2.Stream(Seed(0xabcdL), Random(1, 100)).covary[IO]
    val source = Source
      .fromPublisher(reqCodec.encode(requests).toUnicastPublisher())
      .map(bv => ByteString.fromArrayUnsafe(bv.toByteArray))

    // TODO: this would be much less ugly, if we had a decoder `Flow`
    val buffer = fs2.concurrent.Queue.unbounded[IO, Option[BitVector]].unsafeRunSync()
    val decode: Flow[ByteString, Response, NotUsed] = Flow.fromSinkAndSource(
      Sink.onComplete { _ =>
        buffer.enqueue1(None).unsafeRunSync()
      }.contramap[ByteString] { x =>
        buffer.enqueue1(Some(BitVector.view(x.toArray))).unsafeRunSync()
      },
      Source.fromPublisher(buffer
        .dequeue
        .unNoneTerminate
        .through(decPipe[IO, Response])
        .toUnicastPublisher()
      )
    )
    val sink: Sink[ByteString, Future[Vector[Response]]] = decode.toMat(
      Sink.fold(Vector.empty[Response])(_ :+ _)
    )(Keep.right)

    Flow.fromSinkAndSourceMat(sink, source)(Keep.left)
  }
} 
Example 79
Source File: Server.scala    From seals   with Apache License 2.0 5 votes vote down vote up
package com.example.lib

import java.net.{ InetSocketAddress, InetAddress }

import scala.concurrent.duration._

import cats.implicits._
import cats.effect.{ IO, IOApp, ExitCode, Blocker }

import fs2.{ Stream, Chunk }
import fs2.io.tcp

import scodec.bits.BitVector
import scodec.stream.{ StreamEncoder, StreamDecoder }

import dev.tauri.seals.scodec.StreamCodecs._
import dev.tauri.seals.scodec.StreamCodecs.{ pipe => decPipe }

import Protocol.v1.{ Request, Response, Random, Seed, RandInt, Seeded }

object Server extends IOApp {

  final val bufferSize = 32 * 1024
  final val timeout = Some(2.seconds)
  final val maxClients = 200

  val rnd = new scala.util.Random

  def addr(port: Int): InetSocketAddress =
    new InetSocketAddress(InetAddress.getLoopbackAddress, port)

  val reqCodec: StreamDecoder[Request] = streamDecoderFromReified[Request]
  val resCodec: StreamEncoder[Response] = streamEncoderFromReified[Response]

  override def run(args: List[String]): IO[ExitCode] = {
    Blocker[IO].use { bl =>
      tcp.SocketGroup[IO](bl).use { sg =>
        serve(1234, sg).compile.drain.as(ExitCode.Success)
      }
    }
  }

  def serve(port: Int, sg: tcp.SocketGroup): Stream[IO, Unit] =
    serveAddr(port, sg).as(())

  def serveAddr(port: Int, sg: tcp.SocketGroup): Stream[IO, InetSocketAddress] = {
    Stream.resource(sg.serverResource[IO](addr(port))).flatMap {
      case (localAddr, sockets) =>
        val x = sockets.flatMap { socket =>
          Stream.resource(socket).map { socket =>
            val bvs: Stream[IO, BitVector] = socket.reads(bufferSize, timeout).chunks.map(ch => BitVector.view(ch.toArray))
            val requests: Stream[IO, Request] = bvs.through(decPipe[IO, Request])
            val responses: Stream[IO, Response] = requests.flatMap(req => Stream.eval(logic(req)))
            val encoded: Stream[IO, Byte] = resCodec.encode(responses).flatMap { bv =>
              Stream.chunk(Chunk.bytes(bv.bytes.toArray))
            }

            encoded.through(socket.writes(timeout)).onFinalize(socket.endOfOutput)
          }
        }

        Stream.emit(localAddr) ++ x.parJoin(maxClients).drain
    }
  }

  def logic(req: Request): IO[Response] = req match {
    case Random(min, max) =>
      if (min < max) {
        IO {
          val v = rnd.nextInt(max - min + 1) + min
          RandInt(v)
        }
      } else if (min === max) {
        IO.pure(RandInt(min))
      } else {
        IO.raiseError(new IllegalArgumentException("min must not be greater than max"))
      }
    case Seed(s) =>
      IO {
        rnd.setSeed(s)
        Seeded
      }
  }
} 
Example 80
Source File: SparkCassSSTableLoaderClientManager.scala    From Spark2Cassandra   with Apache License 2.0 5 votes vote down vote up
package com.github.jparkie.spark.cassandra.client

import java.net.InetAddress

import com.datastax.spark.connector.cql.{ AuthConf, CassandraConnector }
import com.github.jparkie.spark.cassandra.conf.SparkCassServerConf
import org.apache.spark.Logging

import scala.collection.mutable

private[cassandra] trait SparkCassSSTableLoaderClientManager extends Serializable with Logging {
  case class SessionKey(
    hosts:               Set[InetAddress],
    port:                Int,
    authConf:            AuthConf,
    sparkCassServerConf: SparkCassServerConf
  ) extends Serializable

  @transient
  private[client] val internalClients = mutable.HashMap.empty[SessionKey, SparkCassSSTableLoaderClient]

  private[client] def buildSessionKey(
    cassandraConnector:  CassandraConnector,
    sparkCassServerConf: SparkCassServerConf
  ): SessionKey = {
    SessionKey(cassandraConnector.hosts, cassandraConnector.port, cassandraConnector.authConf, sparkCassServerConf)
  }

  private[client] def buildClient(
    cassandraConnector:  CassandraConnector,
    sparkCassServerConf: SparkCassServerConf
  ): SparkCassSSTableLoaderClient = {
    val newSession = cassandraConnector.openSession()

    logInfo(s"Created SSTableLoaderClient to the following Cassandra nodes: ${cassandraConnector.hosts}")

    val sparkCassSSTableLoaderClient = new SparkCassSSTableLoaderClient(newSession, sparkCassServerConf)

    sys.addShutdownHook {
      logInfo("Closed Cassandra Session for SSTableLoaderClient.")

      sparkCassSSTableLoaderClient.stop()
    }

    sparkCassSSTableLoaderClient
  }

  
  private[cassandra] def evictAll(): Unit = synchronized {
    internalClients.values.foreach(_.stop())
    internalClients.clear()
  }
}

object SparkCassSSTableLoaderClientManager extends SparkCassSSTableLoaderClientManager 
Example 81
Source File: CassandraServerSpecLike.scala    From Spark2Cassandra   with Apache License 2.0 5 votes vote down vote up
package com.github.jparkie.spark.cassandra

import java.net.{ InetAddress, InetSocketAddress }

import com.datastax.driver.core.Session
import com.datastax.spark.connector.cql.CassandraConnector
import org.cassandraunit.utils.EmbeddedCassandraServerHelper
import org.scalatest.{ BeforeAndAfterAll, Suite }

trait CassandraServerSpecLike extends BeforeAndAfterAll { this: Suite =>
  // Remove protected modifier because of SharedSparkContext.
  override def beforeAll(): Unit = {
    super.beforeAll()

    EmbeddedCassandraServerHelper.startEmbeddedCassandra()
  }

  // Remove protected modifier because of SharedSparkContext.
  override def afterAll(): Unit = {
    EmbeddedCassandraServerHelper.cleanEmbeddedCassandra()

    super.afterAll()
  }

  def getClusterName: String = {
    EmbeddedCassandraServerHelper.getClusterName
  }

  def getHosts: Set[InetAddress] = {
    val temporaryAddress =
      new InetSocketAddress(EmbeddedCassandraServerHelper.getHost, EmbeddedCassandraServerHelper.getNativeTransportPort)
        .getAddress

    Set(temporaryAddress)
  }

  def getNativeTransportPort: Int = {
    EmbeddedCassandraServerHelper.getNativeTransportPort
  }

  def getRpcPort: Int = {
    EmbeddedCassandraServerHelper.getRpcPort
  }

  def getCassandraConnector: CassandraConnector = {
    CassandraConnector(hosts = getHosts, port = getNativeTransportPort)
  }

  def createKeyspace(session: Session, keyspace: String): Unit = {
    session.execute(
      s"""CREATE KEYSPACE "$keyspace"
          |WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };
       """.stripMargin
    )
  }
} 
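getHosts above goes through an InetSocketAddress and calls getAddress on it to turn the embedded server's host string into an InetAddress. A more direct equivalent is InetAddress.getByName on the same host string, shown here only as an alternative sketch (it assumes the same cassandraunit helper used in the example):

import java.net.InetAddress

import org.cassandraunit.utils.EmbeddedCassandraServerHelper

object EmbeddedCassandraHosts {
  // Resolve the embedded server's host string directly.
  def getHostsDirect: Set[InetAddress] =
    Set(InetAddress.getByName(EmbeddedCassandraServerHelper.getHost))
}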
Example 82
Source File: Timing.scala    From logging   with Apache License 2.0 5 votes vote down vote up
package demo.test

import java.net.InetAddress
import akka.actor.ActorSystem
import com.persist.logging._
import logging_demo.BuildInfo
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.concurrent.{Future, Await}
import scala.concurrent.ExecutionContext.Implicits._

case class TimeClass() extends ClassLogging with Timing {
  def b(id: RequestId): Unit = {
    Time(id, "bbb") {
      Thread.sleep(100)
    }
  }

  def a(id: RequestId): Unit = {
    Time(id, "top") {
      Time(id, "aaa1") {
        Thread.sleep(200)
      }
      b(id)
      Time(id, "aaa2") {
        Thread.sleep(300)
      }
      b(id)
    }
  }
}

case class FutureClass() extends ClassLogging with Timing {
  def demo(id: RequestId): Future[Int] = {
    val token = time.start(id, "top")
    val f1 = Future {
      Time(id, "f1") {
        Thread.sleep(100)
        100
      }
    }
    val f2 = f1.map { i =>
      val result = Time(id, "f2") {
        Thread.sleep(200)
        Time(id, "f2a") {
           i * 2
        }
      }
      result
    }
    val f3 = f2.recover{ case ex:Throwable =>
      log.error("Timing test failed", ex)
      -1
    }
    f3.map {  i =>
      time.end(id, "top", token)
      i
    }
  }
}

object Timing {

  case class F() extends ClassLogging {
    val fc = FutureClass()
    val f = fc.demo(RequestId())
    val i = Await.result(f, 3 seconds)
    log.info(Map("@msg" -> "Future result", "val" -> i))
  }

  def main(args: Array[String]) {
    val system = ActorSystem("test")

    val host = InetAddress.getLocalHost.getHostName
    val loggingSystem = LoggingSystem(system, BuildInfo.name, BuildInfo.version, host)

    val tc = new TimeClass()
    tc.a(RequestId())
    tc.a(RequestId())

    F()


    Await.result(loggingSystem.stop, 30 seconds)
    Await.result(system.terminate(), 20 seconds)
  }
} 
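This and the remaining logging demos (Examples 82 through 88) bootstrap the persist-logging LoggingSystem the same way: the host name obtained from InetAddress.getLocalHost is passed in so that every log message is tagged with the machine it came from. The shared setup, extracted into one place as a sketch (LoggingBootstrap is a hypothetical name; the calls are exactly those used in each demo):

import java.net.InetAddress

import akka.actor.ActorSystem
import com.persist.logging._
import logging_demo.BuildInfo

object LoggingBootstrap {
  // Create the actor system and a LoggingSystem tagged with this machine's host name.
  def start(): (ActorSystem, LoggingSystem) = {
    val system = ActorSystem("test")
    val host = InetAddress.getLocalHost.getHostName
    val loggingSystem = LoggingSystem(system, BuildInfo.name, BuildInfo.version, host)
    (system, loggingSystem)
  }
}

Each demo then differs only in what it logs before awaiting loggingSystem.stop and system.terminate().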
Example 83
Source File: ActorDemo.scala    From logging   with Apache License 2.0 5 votes vote down vote up
package demo.test

import java.net.InetAddress
import akka.actor.{Props, Actor, ActorSystem}
import com.persist.logging._
import logging_demo.BuildInfo
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.concurrent.Await


object DemoActor {
  def props() = Props(new DemoActor())
}

class DemoActor() extends Actor with ActorLogging {
  println(this.getClass.getSimpleName)

  def receive = {
    case "foo" => log.info("Saw foo")
    case "done" => context.stop(self)
    case x: Any => log.error(Map("@msg" -> "Unexpected actor message",
      "message" -> x.toString))
  }
}

case class ActorDemo(system: ActorSystem) {
  def demo(): Unit = {
    val a = system.actorOf(DemoActor.props(), name = "Demo")
    a ! "foo"
    a ! "bar"
    a ! "done"
  }
}

object ActorDemo {
  def main(args: Array[String]) {
    val system = ActorSystem("test")

    val host = InetAddress.getLocalHost.getHostName
    val loggingSystem = LoggingSystem(system, BuildInfo.name, BuildInfo.version, host)

    val act = ActorDemo(system)
    act.demo()

    Await.result(loggingSystem.stop, 30 seconds)
    Await.result(system.terminate(), 20 seconds)
  }
} 
Example 84
Source File: Exceptions.scala    From logging   with Apache License 2.0 5 votes vote down vote up
package demo.test

import java.net.InetAddress
import akka.actor.ActorSystem
import com.persist.logging._
import logging_demo.BuildInfo
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps

case class MyException(msg: RichMsg) extends RichException(msg)

case class ExceptionClass() extends ClassLogging {
  def demo(): Unit = {
    log.error("Test", new Exception("Bad Code"))
    log.warn("Rich", RichException(Map("@msg" -> "Fail", "count" -> 23)))
    log.error("Special", MyException(Map("@msg" -> "Fail", "count" -> 23)))
    try {
      throw MyException(Map("@msg" -> "Die", "name" -> "abc"))
    } catch {
      case ex: Exception => log.error("Caught exception", ex)
    }
  }
}

object Exceptions {
  def main(args: Array[String]) {
    val system = ActorSystem("test")

    val host = InetAddress.getLocalHost.getHostName
    val loggingSystem = LoggingSystem(system, BuildInfo.name, BuildInfo.version, host)

    val ec = new ExceptionClass()
    ec.demo()

    Await.result(loggingSystem.stop, 30 seconds)
    Await.result(system.terminate(), 20 seconds)
  }
} 
Example 85
Source File: OtherApis.scala    From logging   with Apache License 2.0 5 votes vote down vote up
package demo.test

import java.net.InetAddress
import akka.actor.{Props, Actor, ActorSystem}
import com.persist.logging._
import logging_demo.BuildInfo
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.concurrent.Await
import org.slf4j.LoggerFactory

case class Slf4jDemo() {
  val slf4jlog = LoggerFactory.getLogger(classOf[Slf4jDemo])

  def demo(): Unit = {
    slf4jlog.warn("slf4j")
  }
}

object AkkaActor {
  def props() = Props(new AkkaActor())
}

class AkkaActor() extends Actor with akka.actor.ActorLogging {
  def receive = {
    case "foo" => log.warning("Saw foo")
    case "done" => context.stop(self)
    case x: Any => log.error(s"Unexpected actor message: ${x}")
  }
}

case class AkkaDemo(system: ActorSystem) {
  def demo(): Unit = {
    val a = system.actorOf(AkkaActor.props(), name="Demo")
    a ! "foo"
    a ! "bar"
    a ! "done"
  }

}

object OtherApis {
  def main(args: Array[String]) {
    val system = ActorSystem("test")

    val host = InetAddress.getLocalHost.getHostName
    val loggingSystem = LoggingSystem(system, BuildInfo.name, BuildInfo.version, host)

    val slf = Slf4jDemo()
    slf.demo()

    val act = AkkaDemo(system)
    act.demo()

    Await.result(loggingSystem.stop, 30 seconds)
    Await.result(system.terminate(), 20 seconds)
  }
} 
Example 86
Source File: Appender.scala    From logging   with Apache License 2.0 5 votes vote down vote up
package demo.test

import java.net.InetAddress
import akka.actor.{ActorRefFactory, ActorSystem}
import com.persist.logging._
import logging_demo.BuildInfo
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.concurrent.Future

case class AppenderClass() extends ClassLogging {

  def demo(): Unit = {
    log.info("Test")
    log.error("Foo failed")
    log.warn(Map("@msg" -> "fail", "value" -> 23))
  }
}

object FlatAppender extends LogAppenderBuilder {
  def apply(factory: ActorRefFactory, stdHeaders: Map[String, RichMsg])
  = new FlatAppender(factory, stdHeaders)
}

class FlatAppender(factory: ActorRefFactory, stdHeaders: Map[String, RichMsg]) extends LogAppender {

  def append(msg: Map[String, RichMsg], category: String) {
    if (category == "common") {
      val level = msg.get("@severity") match {
        case Some(s: String) => s
        case _ => "???"
      }
      val time = msg.get("@timestamp") match {
        case Some(s: String) => s
        case _ => "???"
      }
      val message = richToString(msg.getOrElse("msg","???"))
      println(s"$time\t$level\t$message")
    }
  }

  def finish(): Future[Unit] = Future.successful(())

  def stop(): Future[Unit] = Future.successful(())
}

object Appender {
  def main(args: Array[String]) {
    val system = ActorSystem("test")

    val host = InetAddress.getLocalHost.getHostName
    val loggingSystem = LoggingSystem(system, BuildInfo.name,
      BuildInfo.version, host, appenderBuilders = Seq(FileAppender, FlatAppender))

    val sc = new AppenderClass()
    sc.demo()

    Await.result(loggingSystem.stop, 30 seconds)
    Await.result(system.terminate(), 20 seconds)
  }
} 
Example 87
Source File: Alternative.scala    From logging   with Apache License 2.0 5 votes vote down vote up
package demo.test

import java.net.InetAddress
import akka.actor.ActorSystem
import com.persist.logging._
import logging_demo.BuildInfo
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.concurrent.Await

case class AltClass() extends ClassLogging {

  def demo(): Unit = {
    log.alternative("foo", Map("message"->"test"))
    log.alternative("foo", Map("a" -> "x", "b" -> false, "c" -> 65))
    log.alternative("bar", Map("message"->"btest"))
  }
}

object Alternative {
  def main(args: Array[String]) {
    val system = ActorSystem("test")

    val host = InetAddress.getLocalHost.getHostName
    val loggingSystem = LoggingSystem(system, BuildInfo.name, BuildInfo.version, host)

    val alt = new AltClass()
    alt.demo()

    Await.result(loggingSystem.stop, 30 seconds)
    Await.result(system.terminate(), 20 seconds)
  }
} 
Example 88
Source File: RequestIds.scala    From logging   with Apache License 2.0 5 votes vote down vote up
package demo.test

import java.net.InetAddress
import akka.actor.ActorSystem
import com.persist.logging._
import logging_demo.BuildInfo
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.concurrent.Await

case class RequestB() extends ClassLogging {

  def demo(id: AnyId): Unit = {
    log.trace("In B", id = id)
    log.info("BBB", id = id)
  }
}

case class RequestC() extends ClassLogging {
  def demo(id: AnyId): Unit = {
    log.trace("In C", id = id)
    log.info("CCC", id = id)
  }
}

case class RequestA() extends ClassLogging {
  val b = RequestB()
  val c = RequestC()

  def demo(id: AnyId): Unit = {
    log.trace("Enter A", id = id)
    b.demo(id)
    log.info("AAA", id = id)
    c.demo(id)
    log.trace("Exit A", id = id)
  }
}

object RequestIds {
  def main(args: Array[String]) {
    val system = ActorSystem("test")

    val host = InetAddress.getLocalHost.getHostName
    val loggingSystem = LoggingSystem(system, BuildInfo.name, BuildInfo.version, host)

    val a = new RequestA()
    a.demo(noId)
    a.demo(RequestId())
    a.demo(RequestId(level = Some(LoggingLevels.TRACE)))

    Await.result(loggingSystem.stop, 30 seconds)
    Await.result(system.terminate(), 20 seconds)
  }
} 
Example 89
Source File: Simple.scala    From logging   with Apache License 2.0 5 votes vote down vote up
package demo.test

import java.net.InetAddress
import akka.actor.ActorSystem
import com.persist.logging._
import logging_demo.BuildInfo
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.concurrent.Await

case class SimpleClass() extends ClassLogging {

  def demo(): Unit = {
    log.info("Test")
    log.error("Foo failed")
    log.warn(Map("@msg" -> "fail", "value" -> 23))
  }
}

object Simple {
  def main(args: Array[String]) {
    val system = ActorSystem("test")

    val host = InetAddress.getLocalHost.getHostName
    val loggingSystem = LoggingSystem(system, BuildInfo.name, BuildInfo.version, host)

    val sc = new SimpleClass()
    sc.demo()

    Await.result(loggingSystem.stop, 30 seconds)
    Await.result(system.terminate(), 20 seconds)
  }
} 
Example 90
Source File: Filter.scala    From logging   with Apache License 2.0 5 votes vote down vote up
package demo.test

import java.net.InetAddress
import akka.actor.ActorSystem
import com.persist.logging._
import logging_demo.BuildInfo
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.concurrent.Await

case class Class1() extends ClassLogging {
  def demo(name: String): Unit = {
    log.debug(name)
    log.warn(name)
  }
}

case class Class2() extends ClassLogging {
  def demo(name: String): Unit = {
    log.debug(name)
    log.warn(name)
  }
}

object Filter {

  import LoggingLevels._


  def filter(fields: Map[String, RichMsg], level:Level): Boolean = {
    val cls = fields.get("class") match {
      case Some(s: String) => s
      case _ => ""
    }
    if (cls == "demo.test.Class1") {
      level >= DEBUG
    } else {
      level >= WARN
    }
  }

  def main(args: Array[String]) {
    val system = ActorSystem("test")

    val host = InetAddress.getLocalHost.getHostName
    val loggingSystem = LoggingSystem(system, BuildInfo.name, BuildInfo.version, host)

    val c1 = new Class1()
    val c2 = new Class2()

    c1.demo("no filter")
    c2.demo("no filter")

    // Add filter and change level
    val oldLevel = loggingSystem.getLevel.current
    loggingSystem.setFilter(Some(filter))
    loggingSystem.setLevel(DEBUG)

    c1.demo("filter")
    c2.demo("filter")

    // Reset it back
    loggingSystem.setLevel(oldLevel)
    loggingSystem.setFilter(None)

    Await.result(loggingSystem.stop, 30 seconds)
    Await.result(system.terminate(), 20 seconds)
  }
} 
Example 91
Source File: TestKafka.scala    From logging   with Apache License 2.0 5 votes vote down vote up
package com.persist.logging.test

import java.net.InetAddress
import akka.actor.ActorSystem
import com.persist.logging.kafka.KafkaAppender
import com.persist.logging._
import kafka_logging_demo.BuildInfo
import scala.language.postfixOps
import scala.concurrent.duration._
import scala.concurrent.Await

case class TestKafka() extends ClassLogging {
  def send: Unit = {
    for (i <- 1 to 5) {
      log.warn(Map("msg" -> "test", "i" -> i))
      Thread.sleep(500)
    }
  }
}

object TestKafka {
  def main(args: Array[String]): Unit = {
    val system = ActorSystem("test")

    val host = InetAddress.getLocalHost.getHostName
    val loggingSystem = LoggingSystem(system, BuildInfo.name,
      BuildInfo.version, host, appenderBuilders = Seq(StdOutAppender, KafkaAppender))

    val tc = TestKafka()
    tc.send

    //Thread.sleep(60000)

    Await.result(loggingSystem.stop, 30 seconds)
    Await.result(system.terminate(), 20 seconds)
  }
} 
Example 92
Source File: BlackList.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.network

import java.net.InetAddress
import encry.network.BlackList.BanType.{PermanentBan, TemporaryBan}
import encry.network.BlackList._
import encry.settings.EncryAppSettings

final case class BlackList(settings: EncryAppSettings,
                           private val blackList: Map[InetAddress, (BanReason, BanTime, BanType)]) {

  def contains(peer: InetAddress): Boolean = blackList.contains(peer)

  def banPeer(reason: BanReason, peer: InetAddress): BlackList =
    BlackList(settings, blackList.updated(peer, (reason, BanTime(System.currentTimeMillis()), reason match {
      case _ => TemporaryBan
    })))

  def cleanupBlackList: BlackList = BlackList(settings, blackList.filterNot { case (_, (_, banTime, banType)) =>
    banType != PermanentBan && (System.currentTimeMillis() - banTime.time >= settings.blackList.banTime.toMillis)
  })

  def remove(peer: InetAddress): BlackList = BlackList(settings, blackList - peer)

  def collect[T](p: (InetAddress, BanReason, BanTime, BanType) => Boolean,
                 f: (InetAddress, BanReason, BanTime, BanType) => T): Seq[T] = blackList
    .collect { case (add, (r, t, bt)) if p(add, r, t, bt) => f(add, r, t, bt) }
    .toSeq

  def getAll: Seq[(InetAddress, (BanReason, BanTime, BanType))] = blackList.toSeq
}

object BlackList {

  sealed trait BanReason
  object BanReason {
    case object SemanticallyInvalidPersistentModifier extends BanReason
    case object SyntacticallyInvalidPersistentModifier extends BanReason
    case object SyntacticallyInvalidTransaction extends BanReason
    case object CorruptedSerializedBytes extends BanReason
    case object SpamSender extends BanReason
    case object SentPeersMessageWithoutRequest extends BanReason
    case object SentInvForPayload extends BanReason
    case object ExpiredNumberOfConnections extends BanReason
    final case class InvalidNetworkMessage(msgName: String) extends BanReason
    final case class InvalidResponseManifestMessage(error: String) extends BanReason
    final case class InvalidChunkMessage(error: String) extends BanReason
    final case class InvalidManifestHasChangedMessage(error: String) extends BanReason
    case object ExpiredNumberOfReRequestAttempts extends BanReason
    case object ExpiredNumberOfRequests extends BanReason
    final case class InvalidStateAfterFastSync(error: String) extends BanReason
    final case class PreSemanticInvalidModifier(error: String) extends BanReason
  }

  sealed trait BanType
  object BanType {
    case object PermanentBan extends BanType
    case object TemporaryBan extends BanType
  }

  final case class BanTime(time: Long) extends AnyVal

  def apply(settings: EncryAppSettings): BlackList =
    BlackList(settings, Map.empty[InetAddress, (BanReason, BanTime, BanType)])
} 
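A minimal usage sketch for the BlackList above (not part of EncryCore itself): `settings` is assumed to be an already-loaded EncryAppSettings, and the peer address is purely illustrative.

import java.net.InetAddress
import encry.network.BlackList
import encry.network.BlackList.BanReason.SpamSender
import encry.settings.EncryAppSettings

object BlackListSketch {
  // Ban a peer, check membership, then rely on cleanupBlackList to drop expired temporary bans.
  def demo(settings: EncryAppSettings): Unit = {
    val peer: InetAddress = InetAddress.getByName("10.0.0.7") // illustrative address
    val banned: BlackList = BlackList(settings).banPeer(SpamSender, peer)
    println(banned.contains(peer))                  // true
    println(banned.cleanupBlackList.contains(peer)) // still true until banTime has elapsed
  }
}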
Example 93
Source File: PeersApiRoute.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.api.http.routes

import java.net.{InetAddress, InetSocketAddress}
import akka.actor.{ActorRef, ActorRefFactory}
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import encry.api.http.DataHolderForApi._
import encry.api.http.routes.PeersApiRoute.PeerInfoResponse
import encry.network.BlackList.{BanReason, BanTime, BanType}
import encry.network.ConnectedPeersCollection.PeerInfo
import encry.settings.RESTApiSettings
import io.circe.Encoder
import io.circe.generic.semiauto._
import io.circe.syntax._
import scala.util.{Failure, Success, Try}

case class PeersApiRoute(override val settings: RESTApiSettings, dataHolder: ActorRef)(
  implicit val context: ActorRefFactory
) extends EncryBaseApiRoute {

  override lazy val route: Route = pathPrefix("peers") {
    connectedPeers ~ allPeers ~ bannedList ~ WebRoute.authRoute(connectPeer ~ removeFromBan, settings)
  }

  def allPeers: Route = (path("all") & get) {
    (dataHolder ? GetAllPeers)
      .mapTo[Seq[InetSocketAddress]]
      .map(_.map(_.toString).asJson).okJson()
  }

  def connectedPeers: Route = (path("connected") & get) {
    (dataHolder ? GetConnectedPeersHelper)
      .mapTo[Seq[PeerInfoResponse]].map(_.asJson).okJson()
  }

  def bannedList: Route = (path("banned") & get) {
    (dataHolder ? GetBannedPeersHelper).mapTo[Seq[(InetAddress, (BanReason, BanTime, BanType))]]
    .map(_.map(_.toString).asJson).okJson()
  }

  def connectPeer: Route = path("add") {
    post(entity(as[String]) { str =>
      complete {
        Try {
          val split = str.split(':')
          (split(0), split(1).toInt)
        } match {
          case Success((host, port)) =>
            dataHolder ! UserAddPeer(new InetSocketAddress(host, port))
            StatusCodes.OK
          case Failure(_) =>
            StatusCodes.BadRequest
        }
      }
    })
  }

  def removeFromBan: Route = path("remove") {
    post(entity(as[String]) { str =>
      complete {
        Try {
          val split = str.split(':')
          (split(0), split(1).toInt)
        } match {
          case Success((host, port)) =>
            dataHolder ! RemovePeerFromBanList(new InetSocketAddress(host, port))
            StatusCodes.OK
          case Failure(_) =>
            StatusCodes.BadRequest
        }
      }
    })
  }
}

object PeersApiRoute {

  case class PeerInfoResponse(address: String, name: Option[String], connectionType: Option[String])

  object PeerInfoResponse {

    def fromAddressAndInfo(address: InetSocketAddress, peerInfo: PeerInfo): PeerInfoResponse = PeerInfoResponse(
      address.toString,
      Some(peerInfo.connectedPeer.toString),
      Some(peerInfo.connectionType.toString)
    )
  }

  implicit val encodePeerInfoResponse: Encoder[PeerInfoResponse] = deriveEncoder
} 
Example 94
Source File: NetworkTime.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.utils

import java.net.InetAddress

import com.typesafe.scalalogging.StrictLogging
import encry.utils.NetworkTime.Time
import org.apache.commons.net.ntp.{NTPUDPClient, TimeInfo}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.Future
import scala.util.Left
import scala.util.control.NonFatal

object NetworkTime {
  def localWithOffset(offset: Long): Long = System.currentTimeMillis() + offset

  type Offset = Long
  type Time = Long
}

protected case class NetworkTime(offset: NetworkTime.Offset, lastUpdate: NetworkTime.Time)

case class NetworkTimeProviderSettings(server: String, updateEvery: FiniteDuration, timeout: FiniteDuration)

class NetworkTimeProvider(ntpSettings: NetworkTimeProviderSettings) extends StrictLogging {

  private var state: State = Right(NetworkTime(0L, 0L))
  private var delta: Time = 0L

  private type State = Either[(NetworkTime, Future[NetworkTime]), NetworkTime]

  private def updateOffSet(): Option[NetworkTime.Offset] = {
    val client: NTPUDPClient = new NTPUDPClient()
    client.setDefaultTimeout(ntpSettings.timeout.toMillis.toInt)
    try {
      client.open()
      val info: TimeInfo = client.getTime(InetAddress.getByName(ntpSettings.server))
      info.computeDetails()
      Option(info.getOffset)
    } catch {
      case NonFatal(_) => None
    } finally {
      client.close()
    }
  }

  private def timeAndState(currentState: State): Future[(NetworkTime.Time, State)] =
    currentState match {
      case Right(nt) =>
        val time: Long = NetworkTime.localWithOffset(nt.offset)
        val state: Either[(NetworkTime, Future[NetworkTime]), NetworkTime] =
          if (time > nt.lastUpdate + ntpSettings.updateEvery.toMillis) {
            Left(nt -> Future(updateOffSet()).map { mbOffset =>
              logger.info("New offset adjusted: " + mbOffset)
              val offset = mbOffset.getOrElse(nt.offset)
              NetworkTime(offset, NetworkTime.localWithOffset(offset))
            })
          } else Right(nt)
        Future.successful((time, state))
      case Left((nt, networkTimeFuture)) =>
        networkTimeFuture
          .map(networkTime => NetworkTime.localWithOffset(networkTime.offset) -> Right(networkTime))
          .recover {
            case NonFatal(th) =>
              logger.warn(s"Failed to evaluate networkTimeFuture $th")
              NetworkTime.localWithOffset(nt.offset) -> Left(nt -> networkTimeFuture)
          }
    }

  def estimatedTime: Time = state match {
    case Right(nt) if NetworkTime.localWithOffset(nt.offset) <= nt.lastUpdate + ntpSettings.updateEvery.toMillis =>
      NetworkTime.localWithOffset(nt.offset)
    case _ => System.currentTimeMillis() + delta
  }

  def time(): Future[NetworkTime.Time] =
    timeAndState(state)
      .map { case (timeFutureResult, stateFutureResult) =>
        state = stateFutureResult
        delta = timeFutureResult - System.currentTimeMillis()
        timeFutureResult
      }

} 
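A short sketch (not from EncryCore) of driving the NetworkTimeProvider above; the NTP server name and the durations are assumptions chosen for illustration.

import scala.concurrent.Await
import scala.concurrent.duration._
import encry.utils.{NetworkTime, NetworkTimeProvider, NetworkTimeProviderSettings}

object NetworkTimeSketch {
  def main(args: Array[String]): Unit = {
    val ntpSettings = NetworkTimeProviderSettings("pool.ntp.org", updateEvery = 30.minutes, timeout = 10.seconds)
    val provider = new NetworkTimeProvider(ntpSettings)
    // time() answers from the cached offset and schedules a background refresh when it is stale.
    val now: NetworkTime.Time = Await.result(provider.time(), 15.seconds)
    println(s"NTP-adjusted epoch millis: $now (estimatedTime: ${provider.estimatedTime})")
  }
}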
Example 95
Source File: GetBannedPeers.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.cli.commands.peer

import java.net.InetAddress
import akka.actor.ActorRef
import encry.cli.Response
import encry.cli.commands.Command
import encry.settings.EncryAppSettings
import encry.utils.NetworkTimeProvider
import akka.pattern._
import akka.util.Timeout
import encry.api.http.DataHolderForApi.GetBannedPeersHelper
import encry.network.BlackList.{BanReason, BanTime, BanType}
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global

object GetBannedPeers extends Command {

  
  override def execute(args: Command.Args,
                       settings: EncryAppSettings,
                       dataHolder: ActorRef,
                       nodeId: Array[Byte],
                       ntp: NetworkTimeProvider): Future[Option[Response]] = {
    implicit val timeout: Timeout = Timeout(settings.restApi.timeout)
    (dataHolder ? GetBannedPeersHelper).mapTo[Seq[(InetAddress, (BanReason, BanTime, BanType))]]
      .map(x => Some(Response(x.toString)))
  }
} 
Example 96
Source File: BlackListTests.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.network

import java.net.{InetAddress, InetSocketAddress}

import akka.actor.ActorSystem
import akka.testkit.{TestActorRef, TestProbe}
import encry.modifiers.InstanceFactory
import encry.network.BlackList.BanReason._
import encry.network.PeerConnectionHandler.{ConnectedPeer, Outgoing}
import encry.network.PeerConnectionHandler.ReceivableMessages.CloseConnection
import encry.network.PeersKeeper.BanPeer
import encry.settings.TestNetSettings
import org.encryfoundation.common.network.BasicMessagesRepo.Handshake
import org.scalatest.{BeforeAndAfterAll, Matchers, OneInstancePerTest, WordSpecLike}
import scala.concurrent.duration._

class BlackListTests extends WordSpecLike
  with Matchers
  with BeforeAndAfterAll
  with InstanceFactory
  with OneInstancePerTest
  with TestNetSettings {

  implicit val system: ActorSystem = ActorSystem()

  override def afterAll(): Unit = system.terminate()

  val knowPeersSettings = testNetSettings.copy(
    network = settings.network.copy(
      knownPeers = List(new InetSocketAddress("172.16.11.11", 9001)),
      connectOnlyWithKnownPeers = Some(true)
    ),
    blackList = settings.blackList.copy(
      banTime = 2 seconds,
      cleanupTime = 3 seconds
    ))

  
  "Peers keeper" should {
    "handle ban peer message correctly" in {
      val peersKeeper: TestActorRef[PeersKeeper] = TestActorRef[PeersKeeper](PeersKeeper.props(knowPeersSettings, TestProbe().ref, TestProbe().ref))
      val address: InetSocketAddress = new InetSocketAddress("0.0.0.0", 9000)
      val peerHandler: TestProbe = TestProbe()
      val connectedPeer: ConnectedPeer = ConnectedPeer(
        address,
        peerHandler.ref,
        Outgoing,
        Handshake(protocolToBytes(knowPeersSettings.network.appVersion), "test node", Some(address), System.currentTimeMillis())
      )
      peersKeeper ! BanPeer(connectedPeer, SpamSender)
      peerHandler.expectMsg(CloseConnection)
      peersKeeper.underlyingActor.blackList.contains(address.getAddress) shouldBe true
    }
    "cleanup black list by scheduler correctly" in {
      val peersKeeper: TestActorRef[PeersKeeper] = TestActorRef[PeersKeeper](PeersKeeper.props(knowPeersSettings, TestProbe().ref, TestProbe().ref))
      val address: InetSocketAddress = new InetSocketAddress("0.0.0.0", 9000)
      val peerHandler: TestProbe = TestProbe()
      val connectedPeer: ConnectedPeer = ConnectedPeer(
        address,
        peerHandler.ref,
        Outgoing,
        Handshake(protocolToBytes(knowPeersSettings.network.appVersion), "test node", Some(address), System.currentTimeMillis())
      )
      peersKeeper ! BanPeer(connectedPeer, SentPeersMessageWithoutRequest)
      Thread.sleep(6000)
      peersKeeper.underlyingActor.blackList.contains(address.getAddress) shouldBe false
    }
    "don't remove peer from black list before ban time expired" in {
      val peersKeeper: TestActorRef[PeersKeeper] = TestActorRef[PeersKeeper](PeersKeeper.props(knowPeersSettings, TestProbe().ref, TestProbe().ref))
      val address: InetSocketAddress = new InetSocketAddress("0.0.0.0", 9000)
      val peerHandler: TestProbe = TestProbe()
      val connectedPeer: ConnectedPeer = ConnectedPeer(
        address,
        peerHandler.ref,
        Outgoing,
        Handshake(protocolToBytes(knowPeersSettings.network.appVersion), "test node", Some(address), System.currentTimeMillis())
      )
      Thread.sleep(4000)
      peersKeeper ! BanPeer(connectedPeer, CorruptedSerializedBytes)
      Thread.sleep(2000)
      peersKeeper.underlyingActor.blackList.contains(address.getAddress) shouldBe true
    }
  }
} 
Example 97
Source File: CIDRRulesSpec.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.http

import java.net.InetAddress

import com.webtrends.harness.authentication.CIDRRules
import org.specs2.mutable.SpecificationWithJUnit

class CIDRRulesSpec extends SpecificationWithJUnit {
  sequential

  "CIDRRules " should {
    "allow ipv4 requests through" in {
      val rules = CIDRRules(Seq("127.0.0.1/30"), Seq())
      rules.checkCidrRules(InetAddress.getByName("localhost")) equals true
    }

    "allow ipv4 requests through" in {
      val rules = CIDRRules(Seq("127.0.0.1/30"), Seq())
      rules.checkCidrRules(InetAddress.getByName("127.0.0.1")) equals true
    }

    "deny ipv4 requests on wrong address" in {
      val rules = CIDRRules(Seq("127.0.0.1/30"), Seq())
      rules.checkCidrRules(InetAddress.getByName("15.12.13.12")) equals false
    }

    "deny ipv4 requests in the deny subnet" in {
      val rules = CIDRRules(Seq("127.0.0.1/30"), Seq("127.10.0.1/30"))
      rules.checkCidrRules(InetAddress.getByName("127.10.0.1")) equals false
    }

    "allow ipv6 requests through" in {
      val rules = CIDRRules(Seq("1:0:0:0:0:0:0:1"), Seq())
      rules.checkCidrRules(InetAddress.getByName("1:0:0:0:0:0:0:1")) equals true
    }

    "deny ipv6 requests in the deny subnet" in {
      val rules = CIDRRules(Seq("1:0:0:0:0:0:0:1"), Seq("1:0:0:0:0:5:0:1"))
      rules.checkCidrRules(InetAddress.getByName("1:0:0:0:0:5:0:1")) equals false
    }
  }
} 
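The same CIDRRules API can be exercised outside a test; a small sketch (not part of wookiee) is shown below, with illustrative allow and deny ranges.

import java.net.InetAddress
import com.webtrends.harness.authentication.CIDRRules

object CidrCheckSketch {
  def main(args: Array[String]): Unit = {
    val rules = CIDRRules(Seq("10.0.0.0/8", "127.0.0.1/30"), Seq("10.5.0.0/16"))
    // In the allow range and not in the deny range, so this should be permitted.
    println(rules.checkCidrRules(InetAddress.getByName("10.1.2.3")))
    // Inside the deny range, so this should be rejected.
    println(rules.checkCidrRules(InetAddress.getByName("10.5.0.9")))
  }
}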
Example 98
Source File: HttpRequestConversionSupport.scala    From rokku   with Apache License 2.0 5 votes vote down vote up
package com.ing.wbaa.rokku.proxy.persistence.serializers

import java.net.InetAddress

import akka.http.scaladsl.model.HttpHeader.ParsingResult
import akka.http.scaladsl.model.{ HttpEntity, HttpHeader, HttpMethod, HttpMethods, HttpProtocol, HttpRequest, RemoteAddress, Uri }
import com.ing.wbaa.rokku.proxy.data.{ UserAssumeRole, UserRawJson }
import spray.json.DefaultJsonProtocol

import scala.collection.immutable

trait HttpRequestConversionSupport extends DefaultJsonProtocol {

  case class SimplifiedRemoteAddress(host: String) {
    def toRemoteAddr: RemoteAddress = {
      val a = host.split(":")
      RemoteAddress(InetAddress.getByName(a(0)), Some(a(1).toInt))
    }
  }

  case class SimplifiedHttpRequest(method: String, uri: String, headers: List[String], entity: String, httpProtocol: String)

  implicit val httpRequestF = jsonFormat5(SimplifiedHttpRequest)
  implicit val userRoleF = jsonFormat1(UserAssumeRole)
  implicit val userSTSF = jsonFormat5(UserRawJson)
  implicit val remoteAddressF = jsonFormat1(SimplifiedRemoteAddress)

  private[persistence] def convertAkkaHeadersToStrings(headers: Seq[HttpHeader]): List[String] = headers.map(h => s"${h.name()}=${h.value()}").toList

  private def convertStringsToAkkaHeaders(headers: List[String]): immutable.Seq[HttpHeader] = headers.map { p =>
    val kv = p.split("=")
    HttpHeader.parse(kv(0), kv(1)) match {
      case ParsingResult.Ok(header, _) => header
      case ParsingResult.Error(error)  => throw new Exception(s"Unable to convert to HttpHeader: ${error.summary}")
    }
  }

  private def httpMethodFrom(m: String): HttpMethod = m match {
    case "GET"    => HttpMethods.GET
    case "HEAD"   => HttpMethods.HEAD
    case "PUT"    => HttpMethods.PUT
    case "POST"   => HttpMethods.POST
    case "DELETE" => HttpMethods.DELETE
  }

  private[persistence] def toAkkaHttpRequest(s: SimplifiedHttpRequest): HttpRequest =
    HttpRequest(
      httpMethodFrom(s.method),
      Uri(s.uri),
      convertStringsToAkkaHeaders(s.headers),
      HttpEntity(s.entity),
      HttpProtocol(s.httpProtocol)
    )
} 
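A minimal sketch (an assumption, not part of rokku) showing the SimplifiedRemoteAddress helper defined in the trait above turning a "host:port" string back into an akka-http RemoteAddress via InetAddress.getByName.

import akka.http.scaladsl.model.RemoteAddress
import com.ing.wbaa.rokku.proxy.persistence.serializers.HttpRequestConversionSupport

object RemoteAddressSketch extends HttpRequestConversionSupport {
  def main(args: Array[String]): Unit = {
    val remote: RemoteAddress = SimplifiedRemoteAddress("127.0.0.1:8010").toRemoteAddr
    val rendered = remote.toIP.map(ip => s"${ip.ip.getHostAddress}:${ip.port.getOrElse(0)}")
    println(rendered) // Some(127.0.0.1:8010)
  }
}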
Example 99
Source File: AuditLogProviderItTest.scala    From rokku   with Apache License 2.0 5 votes vote down vote up
package com.ing.wbaa.rokku.proxy.provider

import java.net.InetAddress

import akka.actor.ActorSystem
import akka.http.scaladsl.model.{HttpMethods, HttpRequest, RemoteAddress, StatusCodes}
import com.ing.wbaa.rokku.proxy.config.KafkaSettings
import com.ing.wbaa.rokku.proxy.data._
import com.ing.wbaa.rokku.proxy.handler.parsers.RequestParser.RequestTypeUnknown
import net.manub.embeddedkafka.{EmbeddedKafka, EmbeddedKafkaConfig}
import org.scalatest.diagrams.Diagrams
import org.scalatest.wordspec.AnyWordSpecLike

import scala.concurrent.ExecutionContext

class AuditLogProviderItTest extends AnyWordSpecLike with Diagrams with EmbeddedKafka with AuditLogProvider {

  implicit val testSystem: ActorSystem = ActorSystem("kafkaTest")

  private val testKafkaPort = 9093

  override def auditEnabled = true

  override implicit val kafkaSettings: KafkaSettings = new KafkaSettings(testSystem.settings.config) {
    override val bootstrapServers: String = s"localhost:$testKafkaPort"
  }

  override implicit val executionContext: ExecutionContext = testSystem.dispatcher

  implicit val requestId: RequestId = RequestId("test")

  val s3Request = S3Request(AwsRequestCredential(AwsAccessKey("a"), None), Some("demobucket"), Some("s3object"), Read())
    .copy(headerIPs = HeaderIPs(Some(RemoteAddress(InetAddress.getByName("127.0.0.1"))),
      Some(Seq(RemoteAddress(InetAddress.getByName("1.1.1.1")))),
      Some(RemoteAddress(InetAddress.getByName("2.2.2.2")))))

  "AuditLogProvider" should {
    "send audit" in {
      implicit val config = EmbeddedKafkaConfig(kafkaPort = testKafkaPort)

      withRunningKafka {
        Thread.sleep(3000)
        val createEventsTopic = "audit_events"
        createCustomTopic(createEventsTopic)
        auditLog(s3Request, HttpRequest(HttpMethods.PUT, "http://localhost", Nil), "testUser", RequestTypeUnknown(), StatusCodes.Processing)
        val result = consumeFirstStringMessageFrom(createEventsTopic)
        assert(result.contains("\"eventName\":\"PUT\""))
        assert(result.contains("\"sourceIPAddress\":\"ClientIp=unknown|X-Real-IP=127.0.0.1|X-Forwarded-For=1.1.1.1|Remote-Address=2.2.2.2\""))
        assert(result.contains("\"x-amz-request-id\":\"test\""))
        assert(result.contains("\"principalId\":\"testUser\""))
      }
    }
  }

} 
Example 100
Source File: MessageProviderKafkaItTest.scala    From rokku   with Apache License 2.0 5 votes vote down vote up
package com.ing.wbaa.rokku.proxy.provider

import java.net.InetAddress

import akka.actor.ActorSystem
import akka.http.scaladsl.model.{HttpMethods, RemoteAddress}
import com.ing.wbaa.rokku.proxy.config.KafkaSettings
import com.ing.wbaa.rokku.proxy.data._
import com.ing.wbaa.rokku.proxy.handler.parsers.RequestParser.RequestTypeUnknown
import net.manub.embeddedkafka.{EmbeddedKafka, EmbeddedKafkaConfig}
import org.scalatest.RecoverMethods._
import org.scalatest.diagrams.Diagrams
import org.scalatest.wordspec.AnyWordSpecLike

import scala.concurrent.ExecutionContext

class MessageProviderKafkaItTest extends AnyWordSpecLike with Diagrams with EmbeddedKafka with MessageProviderKafka {

  implicit val testSystem: ActorSystem = ActorSystem("kafkaTest")

  private val testKafkaPort = 9093

  override implicit val kafkaSettings: KafkaSettings = new KafkaSettings(testSystem.settings.config) {
    override val bootstrapServers: String = s"localhost:$testKafkaPort"
  }

  override implicit val executionContext: ExecutionContext = testSystem.dispatcher

  implicit val requestId: RequestId = RequestId("test")

  val s3Request = S3Request(AwsRequestCredential(AwsAccessKey("a"), None), Some("demobucket"), Some("s3object"), Read())
    .copy(clientIPAddress = RemoteAddress(InetAddress.getByName("127.0.0.1")))

  "KafkaMessageProvider" should {
    "Send message to correct topic with Put or Post" in {
      implicit val config = EmbeddedKafkaConfig(kafkaPort = testKafkaPort)

      withRunningKafka {
        Thread.sleep(3000)
        val createEventsTopic = "create_events"
        createCustomTopic(createEventsTopic)
        emitEvent(s3Request, HttpMethods.PUT, "testUser", RequestTypeUnknown())
        val result = consumeFirstStringMessageFrom(createEventsTopic)
        assert(result.contains("s3:ObjectCreated:PUT"))
      }
    }

    "Send message to correct topic with Delete" in {
      implicit val config = EmbeddedKafkaConfig(kafkaPort = testKafkaPort)

      withRunningKafka {
        Thread.sleep(3000)
        val deleteEventsTopic = "delete_events"
        createCustomTopic(deleteEventsTopic)
        emitEvent(s3Request, HttpMethods.DELETE, "testUser", RequestTypeUnknown())
        assert(consumeFirstStringMessageFrom(deleteEventsTopic).contains("s3:ObjectRemoved:DELETE"))
      }
    }

    "fail on incomplete data" in {
      recoverToSucceededIf[Exception](emitEvent(s3Request.copy(s3Object = None), HttpMethods.PUT, "testUser", RequestTypeUnknown()))
    }
  }

} 
Example 101
Source File: HeaderIPsSpec.scala    From rokku   with Apache License 2.0 5 votes vote down vote up
package com.ing.wbaa.rokku.proxy.data

import java.net.InetAddress

import akka.http.scaladsl.model.RemoteAddress
import org.scalatest.diagrams.Diagrams
import org.scalatest.wordspec.AnyWordSpec

class HeaderIPsSpec extends AnyWordSpec with Diagrams {

  private[this] val address1 = RemoteAddress(InetAddress.getByName("1.1.1.1"), None)
  private[this] val address2 = RemoteAddress(InetAddress.getByName("1.1.1.2"), None)
  private[this] val address3 = RemoteAddress(InetAddress.getByName("1.1.1.3"), None)
  private[this] val address4 = RemoteAddress(InetAddress.getByName("1.1.1.4"), None)

  val headerIPs = HeaderIPs(
    `X-Real-IP` = Some(address1),
    `X-Forwarded-For` = Some(Seq(address2, address3)),
    `Remote-Address` = Some(address4)
  )

  "HeaderIPs" should {
    "return all IPs" that {
      "are in X-Real-IP" in {
        assert(headerIPs.allIPs.contains(address1))
      }
      "are in X-Forwarded-For" in {
        assert(headerIPs.allIPs.contains(address2))
        assert(headerIPs.allIPs.contains(address3))
      }
      "are in Remote-Address" in {
        assert(headerIPs.allIPs.contains(address4))
      }
      "in toString method" in {
        assert(headerIPs.toStringList contains "X-Real-IP=1.1.1.1")
        assert(headerIPs.toStringList contains "X-Forwarded-For=1.1.1.2,1.1.1.3")
        assert(headerIPs.toStringList contains "Remote-Address=1.1.1.4")
      }
    }
  }
} 
Example 102
Source File: HttpRequestRecorderSpec.scala    From rokku   with Apache License 2.0 5 votes vote down vote up
package com.ing.wbaa.rokku.proxy.persistence

import java.net.InetAddress

import akka.actor.{ ActorSystem, PoisonPill, Props }
import akka.http.scaladsl.model.HttpHeader.ParsingResult
import akka.http.scaladsl.model._
import akka.testkit.{ ImplicitSender, TestKit }
import com.ing.wbaa.rokku.proxy.data._
import com.ing.wbaa.rokku.proxy.persistence.HttpRequestRecorder.{ ExecutedRequestCmd, LatestRequests, LatestRequestsResult }
import org.scalatest.BeforeAndAfterAll
import org.scalatest.diagrams.Diagrams
import org.scalatest.wordspec.AnyWordSpecLike

import scala.collection.immutable

class HttpRequestRecorderSpec extends TestKit(ActorSystem("RequestRecorderTest")) with ImplicitSender
  with AnyWordSpecLike with Diagrams with BeforeAndAfterAll {

  override def afterAll: Unit = {
    TestKit.shutdownActorSystem(system)
  }

  private def convertStringsToAkkaHeaders(headers: List[String]): immutable.Seq[HttpHeader] = headers.map { p =>
    val kv = p.split("=")
    HttpHeader.parse(kv(0), kv(1)) match {
      case ParsingResult.Ok(header, _) => header
      case ParsingResult.Error(error)  => throw new Exception(s"Unable to convert to HttpHeader: ${error.summary}")
    }
  }

  val requestRecorder = system.actorOf(Props(classOf[HttpRequestRecorder]), "localhost-1")

  val headers = List("Remote-Address=0:0:0:0:0:0:0:1:58170", "Host=localhost:8987",
    "X-Amz-Content-SHA256=02502914aca52472205417e4c418ee499ba39ca1b283d99da26e295df2eccf32",
    "User-Agent=aws-cli/1.16.30 Python/2.7.5 Linux/3.10.0-862.14.4.el7.x86_64 botocore/1.12.20",
    "Content-MD5=Wf7l+rCPsVw8eqc34kVJ1g==",
    "Authorization=AWS4-HMAC-SHA256 Credential=6r24619bHVWvrxR5AMHNkGZ6vNRXoGCP/20190704/us-east-1/s3/aws4_request",
    "SignedHeaders=content-md5;host;x-amz-content-sha256;x-amz-date;x-amz-security-token",
    "Signature=271dda503da6fcf04cc058cb514b28a6d522a9b712ab553bfb88fb7814ab082f")

  val httpRequest = HttpRequest(
    HttpMethods.PUT,
    Uri("http://127.0.0.1:8010/home/testuser/file34"),
    convertStringsToAkkaHeaders(headers),
    HttpEntity.Empty.withContentType(ContentTypes.`application/octet-stream`).toString(),
    HttpProtocols.`HTTP/1.1`
  )
  val userSTS = User(UserName("okUser"), Set(UserGroup("okGroup")), AwsAccessKey("accesskey"), AwsSecretKey("secretkey"), UserAssumeRole(""))
  val clientIPAddress = RemoteAddress(InetAddress.getByName("localhost"), Some(1234))

  "RequestRecorder" should {
    "persist Http request event" in {
      requestRecorder ! ExecutedRequestCmd(httpRequest, userSTS, clientIPAddress)
      requestRecorder ! LatestRequests(1)
      expectMsg(LatestRequestsResult(List(ExecutedRequestEvt(httpRequest, userSTS, clientIPAddress))))
      requestRecorder ! PoisonPill

      val requestRecorder1 = system.actorOf(Props(classOf[HttpRequestRecorder]), "localhost-2")
      requestRecorder1 ! LatestRequests(1)
      expectMsg(LatestRequestsResult(List(ExecutedRequestEvt(httpRequest, userSTS, clientIPAddress))))
    }
  }

} 
Example 103
Source File: RecommenderController.scala    From spark_recommender   with Apache License 2.0 5 votes vote down vote up
package es.alvsanand.spark_recommender.recommender

import java.net.InetAddress

import akka.actor.ActorSystem
import com.mongodb.casbah.{MongoClient, MongoClientURI}
import es.alvsanand.spark_recommender.model._
import es.alvsanand.spark_recommender.utils.{ESConfig, Logging, MongoConfig}
import org.elasticsearch.common.settings.Settings
import org.elasticsearch.common.transport.InetSocketTransportAddress
import org.elasticsearch.transport.client.PreBuiltTransportClient
import spray.httpx.SprayJsonSupport
import spray.json.{DefaultJsonProtocol, NullOptions}
import spray.routing.SimpleRoutingApp


object RecommenderControllerProtocol extends DefaultJsonProtocol with NullOptions with SprayJsonSupport {
  implicit val productRecommendationRequestFormat = jsonFormat1(ProductRecommendationRequest)
  implicit val userRecommendationRequestFormat = jsonFormat1(UserRecommendationRequest)
  implicit val searchRecommendationRequestFormat = jsonFormat1(SearchRecommendationRequest)
  implicit val productHybridRecommendationRequestFormat = jsonFormat1(ProductHybridRecommendationRequest)
  implicit val recommendationFormat = jsonFormat2(Recommendation)
  implicit val hybridRecommendationFormat = jsonFormat3(HybridRecommendation)
}


object RecommenderController extends SimpleRoutingApp with Logging{
  val ES_HOST_PORT_REGEX = "(.+):(\\d+)".r

  import RecommenderControllerProtocol._

  implicit val system = ActorSystem("ActorSystem")

  def run(serverPort: Int)(implicit mongoConf: MongoConfig, esConf: ESConfig): Unit = {
    implicit val mongoClient = MongoClient(MongoClientURI(mongoConf.uri))
    implicit val esClient = new PreBuiltTransportClient(Settings.EMPTY)
    esConf.transportHosts.split(";")
      .foreach { case ES_HOST_PORT_REGEX(host: String, port: String) => esClient.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName(host), port.toInt)) }

    logger.info("Launching REST serves[port=%d]".format(serverPort))

    startServer(interface = "localhost", port = serverPort) {
      path("recs" / "cf" / "pro") {
        post(
          entity(as[ProductRecommendationRequest]) { request =>
            complete {
              RecommenderService.getCollaborativeFilteringRecommendations(request).toStream
            }
          }
        )
      } ~
      path("recs" / "cf" / "usr") {
        post(
          entity(as[UserRecommendationRequest]) { request =>
            complete {
              RecommenderService.getCollaborativeFilteringRecommendations(request).toStream
            }
          }
        )
      } ~
      path("recs" / "cb" / "mrl") {
        post(
          entity(as[ProductRecommendationRequest]) { request =>
            complete {
              RecommenderService.getContentBasedMoreLikeThisRecommendations(request).toStream
            }
          }
        )
      } ~
      path("recs" / "cb" / "sch") {
        post(
          entity(as[SearchRecommendationRequest]) { request =>
            complete {
              RecommenderService.getContentBasedSearchRecommendations(request).toStream
            }
          }
        )
      } ~
        path("recs" / "hy" / "pro") {
          post(
            entity(as[ProductHybridRecommendationRequest]) { request =>
              complete {
                RecommenderService.getHybridRecommendations(request).toStream
              }
            }
          )
        }
    }
  }
} 
Example 104
Source File: JGitSystemReader.scala    From sbt-dynver   with Apache License 2.0 5 votes vote down vote up
package sbtdynver

import java.io.{ File, IOException }
import java.net.{ InetAddress, UnknownHostException }
import java.nio.file.{ Files, InvalidPathException, Path, Paths }

import org.eclipse.jgit.internal.JGitText
import org.eclipse.jgit.lib.{ Config, Constants }
import org.eclipse.jgit.storage.file.FileBasedConfig
import org.eclipse.jgit.util.{ FS, StringUtils, SystemReader }
import org.slf4j.LoggerFactory

// Copy of org.eclipse.jgit.util.SystemReader.Default with:
// * calls to Files.createDirectories guarded by if !Files.isDirectory
//   necessary because my ~/.config is a symlink to a directory
//   which Files.createDirectories isn't happy with
object JGitSystemReader extends SystemReader {
  private val LOG = LoggerFactory.getLogger(getClass)

  lazy val init: Unit = SystemReader.setInstance(this)

  override lazy val getHostname = {
    try InetAddress.getLocalHost.getCanonicalHostName
    catch { case _: UnknownHostException => "localhost" }
  }.ensuring(_ != null)

  override def getenv(variable: String): String = System.getenv(variable)
  override def getProperty(key: String): String = System.getProperty(key)
  override def getCurrentTime: Long             = System.currentTimeMillis
  override def getTimezone(when: Long): Int     = getTimeZone.getOffset(when) / (60 * 1000)

  override def openUserConfig(parent: Config, fs: FS) =
    new FileBasedConfig(parent, new File(fs.userHome, ".gitconfig"), fs)

  override def openSystemConfig(parent: Config, fs: FS): FileBasedConfig = {
    if (StringUtils.isEmptyOrNull(getenv(Constants.GIT_CONFIG_NOSYSTEM_KEY))) {
      val configFile = fs.getGitSystemConfig
      if (configFile != null) return new FileBasedConfig(parent, configFile, fs)
    }
    new FileBasedConfig(parent, null, fs) {
      override def load(): Unit = () // do not load
      override def isOutdated   = false // regular class would bomb here
    }
  }

  override def openJGitConfig(parent: Config, fs: FS): FileBasedConfig = {
    val xdgPath = getXDGConfigHome(fs)
    if (xdgPath != null) {
      var configPath: Path = null
      try {
        configPath = xdgPath.resolve("jgit")
        if (!Files.isDirectory(configPath))
          Files.createDirectories(configPath)
        configPath = configPath.resolve(Constants.CONFIG)
        return new FileBasedConfig(parent, configPath.toFile, fs)
      } catch {
        case e: IOException =>
          LOG.error(JGitText.get.createJGitConfigFailed, configPath: Any, e)
      }
    }
    new FileBasedConfig(parent, new File(fs.userHome, ".jgitconfig"), fs)
  }

  private def getXDGConfigHome(fs: FS): Path = {
    var configHomePath = getenv(Constants.XDG_CONFIG_HOME)
    if (StringUtils.isEmptyOrNull(configHomePath))
      configHomePath = new File(fs.userHome, ".config").getAbsolutePath
    try {
      val xdgHomePath = Paths.get(configHomePath)
      if (!Files.isDirectory(xdgHomePath))
        Files.createDirectories(xdgHomePath)
      xdgHomePath
    } catch {
      case e @ (_: IOException | _: InvalidPathException) =>
        LOG.error(JGitText.get.createXDGConfigHomeFailed, configHomePath: Any, e)
        null
    }
  }
} 
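The hostname lookup pattern used by getHostname above can be distilled into a standalone sketch (illustrative only, not part of sbt-dynver):

import java.net.{InetAddress, UnknownHostException}

object HostnameSketch {
  // Prefer the canonical host name, but fall back to "localhost" when resolution fails.
  def hostnameOrLocalhost: String =
    try InetAddress.getLocalHost.getCanonicalHostName
    catch { case _: UnknownHostException => "localhost" }

  def main(args: Array[String]): Unit =
    println(hostnameOrLocalhost)
}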
Example 105
Source File: HttpUpload.scala    From temperature-machine   with Apache License 2.0 5 votes vote down vote up
package bad.robot.temperature.client

import java.net.{InetAddress, NetworkInterface}

import bad.robot.temperature.IpAddress._
import bad.robot.temperature._
import cats.effect.IO
import org.http4s.Status.Successful
import org.http4s.Uri.{Authority, IPv4, Scheme}
import org.http4s.client.dsl.Http4sClientDsl.WithBodyOps
import org.http4s.client.{Client => Http4sClient}
import org.http4s.dsl.io._
import org.http4s.headers.`X-Forwarded-For`
import org.http4s.{Uri, _}
import scalaz.{-\/, \/, \/-}

import scala.collection.JavaConverters._

case class HttpUpload(address: InetAddress, client: Http4sClient[IO]) extends TemperatureWriter {

  private implicit val encoder = jsonEncoder[Measurement]

  private val decoder = EntityDecoder.text[IO]
  
  def write(measurement: Measurement): Error \/ Unit = {
    val uri = Uri(
      scheme = Some(Scheme.http),
      authority = Some(Authority(host = IPv4(address.getHostAddress), port = Some(11900))),
      path = "/temperature"
    )

    val request: IO[Request[IO]] = PUT.apply(uri, measurement, `X-Forwarded-For`(currentIpAddress))

    val fetch: IO[Error \/ Unit] = client.fetch(request) {
      case Successful(_) => IO.pure(\/-(()))
      case error @ _     => IO(-\/(UnexpectedError(s"Failed to PUT temperature data to ${uri.renderString}, response was ${error.status}: ${error.as[String](implicitly, decoder).attempt.unsafeRunSync}")))
    }

    // why no leftMap?
    fetch.attempt.map {
      case Left(t)      => -\/(UnexpectedError(s"Failed attempting to connect to $address to send $measurement\n\nError was: $t"))
      case Right(value) => value
    }.unsafeRunSync()
  }
}

object HttpUpload {

  val allNetworkInterfaces: List[NetworkInterface] = {
    NetworkInterface.getNetworkInterfaces
      .asScala
      .toList
      .filter(_.isUp)
      .filterNot(_.isLoopback)
  }

} 
Example 106
Source File: Client.scala    From temperature-machine   with Apache License 2.0 5 votes vote down vote up
package bad.robot.temperature.client

import java.net.InetAddress

import bad.robot.logging.{error, _}
import bad.robot.temperature.ds18b20.SensorFile
import bad.robot.temperature.ds18b20.SensorFile._
import bad.robot.temperature.rrd.Host
import bad.robot.temperature.rrd.RrdFile._
import bad.robot.temperature.task.IOs._
import cats.implicits._
import cats.effect.IO
import fs2.StreamApp._
import fs2.Stream
import scalaz.{-\/, \/-}

object Client {

  private val clientHttpPort = 11900

  private val client: List[SensorFile] => Stream[IO, ExitCode] = sensors => {
    for {
      _        <- Stream.eval(info(s"Initialising client '${Host.local.name}' (with ${sensors.size} of a maximum of $MaxSensors sensors)..."))
      server   <- Stream.eval(IO(DiscoveryClient.discover))
      _        <- Stream.eval(info(s"Server discovered on ${server.getHostAddress}, monitoring temperatures..."))
      _        <- Stream.eval(record(Host.local, sensors, HttpUpload(server, BlazeHttpClient())))
      exitCode <- ClientsLogHttpServer(clientHttpPort)
      _        <- Stream.eval(info(s"HTTP Server started to serve logs on http://${InetAddress.getLocalHost.getHostAddress}:$clientHttpPort"))
    } yield exitCode
  }

  def stream(args: List[String], requestShutdown: IO[Unit]): Stream[IO, ExitCode] = {
    findSensors match {
      case \/-(sensors) => client(sensors)
      case -\/(cause)   => Stream.eval(error(cause.message)).flatMap(_ => Stream.emit(ExitCode(1)))
    }
  }
} 
Example 107
Source File: Server.scala    From temperature-machine   with Apache License 2.0 5 votes vote down vote up
package bad.robot.temperature.server

import java.net.InetAddress

import bad.robot.logging._
import bad.robot.temperature.Error
import bad.robot.temperature.CommandLineError
import bad.robot.temperature.client.{BlazeHttpClient, HttpUpload}
import bad.robot.temperature.ds18b20.SensorFile
import bad.robot.temperature.ds18b20.SensorFile._
import bad.robot.temperature.rrd.{Host, Rrd}
import bad.robot.temperature.task.IOs._
import bad.robot.temperature.task.TemperatureMachineThreadFactory
import cats.effect.IO
import fs2.Stream
import fs2.StreamApp.ExitCode
import scalaz.{-\/, \/, \/-}
import scalaz.syntax.either.ToEitherOps

object Server {

  private def discovery(implicit hosts: List[Host]): IO[Unit] = {
    for {
      _        <- info(s"Starting Discovery Server, listening for ${hosts.map(_.name).mkString("'", "', '", "'")}...")
      listener <- IO(TemperatureMachineThreadFactory("machine-discovery-server").newThread(new DiscoveryServer()).start())
    } yield ()
  }

  private def http(temperatures: AllTemperatures, connections: Connections)(implicit hosts: List[Host]): Stream[IO, ExitCode] = {
    val port = 11900
    for {
      server <- HttpServer(port, hosts).asStream(temperatures, connections)
      _      <- Stream.eval(info(s"HTTP Server started on http://${InetAddress.getLocalHost.getHostAddress}:$port"))
    } yield server
  }

  private def server(temperatures: AllTemperatures, connections: Connections, sensors: List[SensorFile])(implicit hosts: List[Host]): Stream[IO, ExitCode] = {
    for {
      _        <- Stream.eval(info("Starting temperature-machine (server mode)..."))
      _        <- Stream.eval(init(hosts))
      _        <- Stream.eval(discovery)
      _        <- Stream.eval(gather(temperatures, Rrd(hosts)))
      _        <- Stream.eval(record(Host.local, sensors, HttpUpload(InetAddress.getLocalHost, BlazeHttpClient())))
      _        <- Stream.eval(graphing)
      _        <- Stream.eval(exportJson)
      exitCode <- http(temperatures, connections)
    } yield exitCode
  }

  private def extractHosts(args: List[String]): Error \/ List[Host] = args match {
    case Nil   => CommandLineError().left
    case hosts => hosts.map(host => Host(host)).right
  }

  def stream(args: List[String], requestShutdown: IO[Unit]): Stream[IO, ExitCode] = {
    val application = for {
      hosts        <- extractHosts(args)
      sensors      <- findSensors
      temperatures  = AllTemperatures()
      connections   = Connections()
    } yield server(temperatures, connections, sensors)(hosts)

    application match {
      case \/-(ok)    => ok
      case -\/(cause) => Stream.eval(error(cause.message)).flatMap(_ => Stream.emit(ExitCode(1)))
    }
  }
} 
Example 108
Source File: ConnectionsEndpoint.scala    From temperature-machine   with Apache License 2.0 5 votes vote down vote up
package bad.robot.temperature.server

import java.net.InetAddress
import java.time.Clock

import bad.robot.temperature.IpAddress._
import bad.robot.temperature._
import cats.data.NonEmptyList
import cats.effect.IO
import cats.implicits._
import org.http4s._
import org.http4s.dsl.io._

object ConnectionsEndpoint {

  private implicit val encoder = jsonEncoder[List[Connection]]

  def apply(connections: Connections, ipAddresses: => NonEmptyList[Option[InetAddress]] = currentIpAddress)(implicit clock: Clock) = HttpService[IO] {
    case GET -> Root / "connections" => {
      Ok(connections.all).map(_.putHeaders(xForwardedHost(ipAddresses)))
    }

    case GET -> Root / "connections" / "active" / "within" / LongVar(period) / "mins" => {
      Ok(connections.allWithin(period)).map(_.putHeaders(xForwardedHost(ipAddresses)))
    }
  }

  private def xForwardedHost(ipAddresses: NonEmptyList[Option[InetAddress]]): Header = {
    Header("X-Forwarded-Host", ipAddresses.map(_.fold("unknown")(_.getHostAddress)).mkString_("", ", ", ""))
  }

} 
Example 109
Source File: HttpUploadTest.scala    From temperature-machine   with Apache License 2.0 5 votes vote down vote up
package bad.robot.temperature.client

import java.net.InetAddress

import bad.robot.temperature.rrd.{Host, Seconds}
import bad.robot.temperature.{IpAddress, Measurement, SensorReading, Temperature, UnexpectedError, jsonEncoder}
import cats.data.Kleisli
import cats.effect.IO
import org.http4s.Method.PUT
import org.http4s.client.{DisposableResponse, Client => Http4sClient}
import org.http4s.dsl.io._
import org.http4s.{EntityDecoder, Request}
import org.specs2.matcher.DisjunctionMatchers._
import org.specs2.mutable.Specification

class HttpUploadTest extends Specification {

  "Ip address pre-check" >> {
    IpAddress.currentIpAddress.size must be_>(0)
  }
  
  "Encode a measurement for the wire" >> {
    def encodeMessageViaEntityEncoder(measurement: Measurement): String = {
      implicit val encoder = jsonEncoder[Measurement]
      val request: IO[Request[IO]] = Request(PUT).withBody(measurement)
      EntityDecoder.decodeString(request.unsafeRunSync()).unsafeRunSync()
    }

    val measurement = Measurement(Host("example"), Seconds(1509221361), List(SensorReading("28-0115910f5eff", Temperature(19.75))))
    encodeMessageViaEntityEncoder(measurement) must_== """|{
                                                          |  "host" : {
                                                          |    "name" : "example",
                                                          |    "utcOffset" : null,
                                                          |    "timezone" : null
                                                          |  },
                                                          |  "seconds" : 1509221361,
                                                          |  "sensors" : [
                                                          |    {
                                                          |      "name" : "28-0115910f5eff",
                                                          |      "temperature" : {
                                                          |        "celsius" : 19.75
                                                          |      }
                                                          |    }
                                                          |  ]
                                                          |}""".stripMargin
  }
  
  "Error response from server" >> {
    val measurement = Measurement(Host("example"), Seconds(1509221361), List(SensorReading("28-0115910f5eff", Temperature(19.75))))
    
    val error = InternalServerError("I'm an error").map(DisposableResponse(_, IO.pure(())))
    val willError: Kleisli[IO, Request[IO], DisposableResponse[IO]] = new Kleisli[IO, Request[IO], DisposableResponse[IO]](_ => error)
    
    val client = Http4sClient[IO](willError, IO.pure(()))

    val upload = HttpUpload(InetAddress.getLoopbackAddress, client)
    val value = upload.write(measurement)
    value must be_-\/.like {
      case UnexpectedError("""Failed to PUT temperature data to http://127.0.0.1:11900/temperature, response was 500 Internal Server Error: Right(I'm an error)""") => ok
    }
  }
  
  "Request has headers" >> {
    val measurement = Measurement(Host("example"), Seconds(1509221361), List(SensorReading("28-0115910f5eff", Temperature(19.75))))
    
    var headers = List[String]()
    
    val client = Http4sClient[IO](new Kleisli[IO, Request[IO], DisposableResponse[IO]](request => {
      headers = request.headers.map(_.name.toString()).toList
      Ok().map(DisposableResponse(_, IO.pure(())))
    }), IO.pure(()))

    val upload = HttpUpload(InetAddress.getLoopbackAddress, client)
    upload.write(measurement)
    
    headers must_== List(
      "Content-Type",
      "X-Forwarded-For",
      "Content-Length"
    )
  }
} 
Example 110
Source File: HostTest.scala    From temperature-machine   with Apache License 2.0 5 votes vote down vote up
package bad.robot.temperature.rrd

import java.net.InetAddress

import bad.robot.temperature._
import org.specs2.matcher.DisjunctionMatchers.be_\/-
import org.specs2.mutable.Specification

class HostTest extends Specification {

  "Trims to max 20 characters (including the 'sensor-n' postfix)" >> {
    Host("cheetah.local") must_== Host("cheetah.loc") // aka the host name is equal
    "cheetah.loc-sensor-1".length must_== 20
  }

  "Doesn't trim" >> {
    Host("kitchen", None) must_== Host("kitchen", None, None)
  }

  "Local host" >> {
    Host.local.name must_== InetAddress.getLocalHost.getHostName.take(11)
  }

  "Encode Json" >> {
    encode(Host("local", None, None)).spaces2ps must_==
      """{
        |  "name" : "local",
        |  "utcOffset" : null,
        |  "timezone" : null
        |}""".stripMargin

    encode(Host("local", Some("+03:00"), Some("Tehran"))).spaces2ps must_==
      """{
        |  "name" : "local",
        |  "utcOffset" : "+03:00",
        |  "timezone" : "Tehran"
        |}""".stripMargin
  }

  "Decode Json (what happens if a UTC offset isn't supplied?)" >> {
    val json = """{ "name" : "local" }"""
    decodeAsDisjunction[Host](json) must be_\/-(Host("local", utcOffset = None, timezone = None))
  }
  
} 
Example 111
Source File: Dns.scala    From perf_tester   with Apache License 2.0 5 votes vote down vote up
package akka.io

import java.net.{ Inet4Address, Inet6Address, InetAddress, UnknownHostException }

import akka.actor._
import akka.routing.ConsistentHashingRouter.ConsistentHashable
import com.typesafe.config.Config

import scala.collection.{ breakOut, immutable }

abstract class Dns {
  def cached(name: String): Option[Dns.Resolved] = None
  def resolve(name: String)(system: ActorSystem, sender: ActorRef): Option[Dns.Resolved] = {
    val ret = cached(name)
    if (ret.isEmpty)
      IO(Dns)(system).tell(Dns.Resolve(name), sender)
    ret
  }
}

object Dns extends ExtensionId[DnsExt] with ExtensionIdProvider {
  sealed trait Command

  case class Resolve(name: String) extends Command with ConsistentHashable {
    override def consistentHashKey = name
  }

  case class Resolved(name: String, ipv4: immutable.Seq[Inet4Address], ipv6: immutable.Seq[Inet6Address]) extends Command {
    val addrOption: Option[InetAddress] = IpVersionSelector.getInetAddress(ipv4.headOption, ipv6.headOption)

    @throws[UnknownHostException]
    def addr: InetAddress = addrOption match {
      case Some(ipAddress) ⇒ ipAddress
      case None ⇒ throw new UnknownHostException(name)
    }
  }

  object Resolved {
    def apply(name: String, addresses: Iterable[InetAddress]): Resolved = {
      val ipv4: immutable.Seq[Inet4Address] = addresses.collect({
        case a: Inet4Address ⇒ a
      })(breakOut)
      val ipv6: immutable.Seq[Inet6Address] = addresses.collect({
        case a: Inet6Address ⇒ a
      })(breakOut)
      Resolved(name, ipv4, ipv6)
    }
  }

  def cached(name: String)(system: ActorSystem): Option[Resolved] = {
    Dns(system).cache.cached(name)
  }

  def resolve(name: String)(system: ActorSystem, sender: ActorRef): Option[Resolved] = {
    Dns(system).cache.resolve(name)(system, sender)
  }

  override def lookup() = Dns

  override def createExtension(system: ExtendedActorSystem): DnsExt = new DnsExt(system)

  
  override def get(system: ActorSystem): DnsExt = super.get(system)
}

class DnsExt(system: ExtendedActorSystem) extends IO.Extension {
  val Settings = new Settings(system.settings.config.getConfig("akka.io.dns"))

  class Settings private[DnsExt] (_config: Config) {

    import _config._

    val Dispatcher: String = getString("dispatcher")
    val Resolver: String = getString("resolver")
    val ResolverConfig: Config = getConfig(Resolver)
    val ProviderObjectName: String = ResolverConfig.getString("provider-object")
  }

  val provider: DnsProvider = system.dynamicAccess.getClassFor[DnsProvider](Settings.ProviderObjectName).get.newInstance()
  val cache: Dns = provider.cache

  val manager: ActorRef = {
    system.systemActorOf(
      props = Props(provider.managerClass, this).withDeploy(Deploy.local).withDispatcher(Settings.Dispatcher),
      name = "IO-DNS")
  }

  def getResolver: ActorRef = manager
}

object IpVersionSelector {
  def getInetAddress(ipv4: Option[Inet4Address], ipv6: Option[Inet6Address]): Option[InetAddress] =
    System.getProperty("java.net.preferIPv6Addresses") match {
      case "true" ⇒ ipv6 orElse ipv4
      case _ ⇒ ipv4 orElse ipv6
    }
} 
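A minimal usage sketch for the Dns extension above. The actor system name and the host being resolved are illustrative assumptions; the lookup API itself (Dns.cached and Dns.Resolved.addrOption) comes from the code shown.

import akka.actor.ActorSystem
import akka.io.Dns

object DnsLookupSketch extends App {
  implicit val system: ActorSystem = ActorSystem("dns-sketch")
  // Returns the cached answer if present; a miss is resolved asynchronously by the IO-DNS manager actor.
  val cached: Option[Dns.Resolved] = Dns.cached("example.com")(system)
  println(cached.flatMap(_.addrOption))
  system.terminate()
}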
Example 112
Source File: InetAddressDnsResolver.scala    From perf_tester   with Apache License 2.0 5 votes
package akka.io

import java.net.{ InetAddress, UnknownHostException }
import java.security.Security
import java.util.concurrent.TimeUnit

import akka.actor.Actor
import com.typesafe.config.Config

import scala.collection.immutable
import akka.util.Helpers.Requiring

import scala.util.Try


class InetAddressDnsResolver(cache: SimpleDnsCache, config: Config) extends Actor {

  // Controls the cache policy for successful lookups only
  private final val CachePolicyProp = "networkaddress.cache.ttl"
  // Deprecated JVM property key, keeping for legacy compatibility; replaced by CachePolicyProp
  private final val CachePolicyPropFallback = "sun.net.inetaddr.ttl"

  // Controls the cache policy for negative lookups only
  private final val NegativeCachePolicyProp = "networkaddress.cache.negative.ttl"
  // Deprecated JVM property key, keeping for legacy compatibility; replaced by NegativeCachePolicyProp
  private final val NegativeCachePolicyPropFallback = "sun.net.inetaddr.negative.ttl"

  // default values (-1 and 0 are magic numbers, trust them)
  private final val Forever = -1
  private final val Never = 0
  private final val DefaultPositive = 30

  private lazy val cachePolicy: Int = {
    val n = Try(Security.getProperty(CachePolicyProp).toInt)
      .orElse(Try(Security.getProperty(CachePolicyPropFallback).toInt))
      .getOrElse(DefaultPositive) // default
    if (n < 0) Forever else n
  }

  private lazy val negativeCachePolicy = {
    val n = Try(Security.getProperty(NegativeCachePolicyProp).toInt)
      .orElse(Try(Security.getProperty(NegativeCachePolicyPropFallback).toInt))
      .getOrElse(0) // default
    if (n < 0) Forever else n
  }

  private def getTtl(path: String, positive: Boolean): Long =
    config.getString(path) match {
      case "default" ⇒
        (if (positive) cachePolicy else negativeCachePolicy) match {
          case Never ⇒ Never
          case n if n > 0 ⇒ TimeUnit.SECONDS.toMillis(n)
          case _ ⇒ Long.MaxValue // forever if negative
        }
      case "forever" ⇒ Long.MaxValue
      case "never" ⇒ Never
      case _ ⇒ config.getDuration(path, TimeUnit.MILLISECONDS)
        .requiring(_ > 0, s"akka.io.dns.$path must be 'default', 'forever', 'never' or positive duration")
    }

  val positiveTtl: Long = getTtl("positive-ttl", positive = true)
  val negativeTtl: Long = getTtl("negative-ttl", positive = false)

  override def receive = {
    case Dns.Resolve(name) ⇒
      val answer = cache.cached(name) match {
        case Some(a) ⇒ a
        case None ⇒
          try {
            val answer = Dns.Resolved(name, InetAddress.getAllByName(name))
            if (positiveTtl != Never) cache.put(answer, positiveTtl)
            answer
          } catch {
            case e: UnknownHostException ⇒
              val answer = Dns.Resolved(name, immutable.Seq.empty, immutable.Seq.empty)
              if (negativeTtl != Never) cache.put(answer, negativeTtl)
              answer
          }
      }
      sender() ! answer
  }
} 
Example 113
Source File: SerialIntegrationTest.scala    From finagle-serial   with Apache License 2.0 5 votes
package io.github.finagle.serial.tests

import com.twitter.finagle.{Client, ListeningServer, Server, Service}
import com.twitter.util.{Await, Future, Try}
import io.github.finagle.serial.Serial
import java.net.{InetAddress, InetSocketAddress}
import org.scalatest.Matchers
import org.scalatest.prop.Checkers
import org.scalacheck.{Arbitrary, Gen, Prop}


  def testFunctionService[I, O](
    f: I => O
  )(implicit
    inCodec: C[I],
    outCodec: C[O],
    arb: Arbitrary[I]
  ): Unit = {
    val (fServer, fClient) = createServerAndClient(f)(inCodec, outCodec)

    check(serviceFunctionProp(fClient)(f)(arb.arbitrary))

    Await.result(fServer.close())
  }
} 
Example 114
Source File: EmbeddedKafkaSpecSupport.scala    From embedded-kafka   with MIT License 5 votes
package net.manub.embeddedkafka

import java.net.{InetAddress, Socket}

import net.manub.embeddedkafka.EmbeddedKafkaSpecSupport.{
  Available,
  NotAvailable,
  ServerStatus
}
import org.scalatest.Assertion
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Milliseconds, Seconds, Span}
import org.scalatest.wordspec.AnyWordSpecLike

import scala.util.{Failure, Success, Try}

trait EmbeddedKafkaSpecSupport
    extends AnyWordSpecLike
    with Matchers
    with Eventually
    with IntegrationPatience {

  implicit val config: PatienceConfig =
    PatienceConfig(Span(1, Seconds), Span(100, Milliseconds))

  def expectedServerStatus(port: Int, expectedStatus: ServerStatus): Assertion =
    eventually {
      status(port) shouldBe expectedStatus
    }

  private def status(port: Int): ServerStatus = {
    Try(new Socket(InetAddress.getByName("localhost"), port)) match {
      case Failure(_) => NotAvailable
      case Success(_) => Available
    }
  }
}

object EmbeddedKafkaSpecSupport {
  sealed trait ServerStatus
  case object Available    extends ServerStatus
  case object NotAvailable extends ServerStatus
} 
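A hedged usage sketch for the support trait above: a concrete spec that waits for a broker port to open. The port number and spec name are illustrative and assume an embedded broker is started elsewhere in the test.

import net.manub.embeddedkafka.EmbeddedKafkaSpecSupport
import net.manub.embeddedkafka.EmbeddedKafkaSpecSupport.Available

class BrokerPortSpec extends EmbeddedKafkaSpecSupport {
  "an embedded broker" should {
    "eventually accept connections on its configured port" in {
      expectedServerStatus(6001, Available) // retried via Eventually until the socket opens or patience runs out
    }
  }
}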
Example 115
Source File: Money.scala    From money   with Apache License 2.0 5 votes
package com.comcast.money.core

import java.net.InetAddress

import com.comcast.money.api.{ SpanFactory, SpanHandler }
import com.comcast.money.core.async.{ AsyncNotificationHandler, AsyncNotifier }
import com.comcast.money.core.handlers.HandlerChain
import com.typesafe.config.{ Config, ConfigFactory }

case class Money(
  enabled: Boolean,
  handler: SpanHandler,
  applicationName: String,
  hostName: String,
  factory: SpanFactory,
  tracer: Tracer,
  logExceptions: Boolean = false,
  asyncNotifier: AsyncNotifier = new AsyncNotifier(Seq()))

object Money {

  lazy val Environment = apply(ConfigFactory.load().getConfig("money"))

  def apply(conf: Config): Money = {
    val applicationName = conf.getString("application-name")
    val enabled = conf.getBoolean("enabled")
    val hostName = InetAddress.getLocalHost.getCanonicalHostName

    if (enabled) {
      val handler = HandlerChain(conf.getConfig("handling"))
      val factory: SpanFactory = new CoreSpanFactory(handler)
      val tracer = new Tracer {
        override val spanFactory: SpanFactory = factory
      }
      val logExceptions = conf.getBoolean("log-exceptions")
      val asyncNotificationHandlerChain = AsyncNotifier(conf.getConfig("async-notifier"))
      Money(enabled, handler, applicationName, hostName, factory, tracer, logExceptions, asyncNotificationHandlerChain)
    } else {
      disabled(applicationName, hostName)
    }

  }

  private def disabled(applicationName: String, hostName: String): Money =
    Money(enabled = false, DisabledSpanHandler, applicationName, hostName, DisabledSpanFactory, DisabledTracer)
} 
Example 116
Source File: MoneySpec.scala    From money   with Apache License 2.0 5 votes
package com.comcast.money.core

import java.net.InetAddress

import com.comcast.money.core.handlers.AsyncSpanHandler
import com.typesafe.config.ConfigFactory
import org.scalatest.{ Matchers, WordSpec }

class MoneySpec extends WordSpec with Matchers {

  val defaultConfig = ConfigFactory.load().getConfig("money")
  "Money" should {
    "load the reference config by default" in {
      val result = Money.Environment

      result.applicationName shouldBe "unknown"
      result.enabled shouldBe true
      result.factory shouldBe a[CoreSpanFactory]
      result.handler shouldBe an[AsyncSpanHandler]
      result.hostName shouldBe InetAddress.getLocalHost.getCanonicalHostName
      result.tracer should not be DisabledTracer
    }

    "load a Disabled Environment if money is disabled" in {
      val config = ConfigFactory.parseString(
        """
          |money {
          | enabled = false
          | application-name = "unknown"
          |}
        """.stripMargin)

      val result = Money(config.getConfig("money"))
      result.tracer shouldBe DisabledTracer
      result.factory shouldBe DisabledSpanFactory
      result.handler shouldBe DisabledSpanHandler
      result.enabled shouldBe false
    }
  }
} 
Example 117
Source File: FileActorUtils.scala    From sparta   with Apache License 2.0 5 votes
package com.stratio.sparta.serving.api.utils

import java.io.{BufferedOutputStream, File, FileOutputStream}
import java.net.InetAddress
import java.text.DecimalFormat
import java.util.function.Predicate

import akka.event.slf4j.SLF4JLogging
import com.stratio.sparta.serving.api.constants.HttpConstant
import com.stratio.sparta.serving.core.config.SpartaConfig
import com.stratio.sparta.serving.core.models.files.SpartaFile
import spray.http.BodyPart

import scala.util.{Failure, Success, Try}

trait FileActorUtils extends SLF4JLogging {

  //The dir where the files will be saved
  val targetDir: String
  val apiPath: String

  //Regexp for name validation
  val patternFileName: Option[Predicate[String]] = None

  def deleteFiles(): Try[_] =
    Try {
      val directory = new File(targetDir)
      if (directory.exists && directory.isDirectory)
        directory.listFiles.filter(_.isFile).toList.foreach { file =>
          if (patternFileName.isEmpty || (patternFileName.isDefined && patternFileName.get.test(file.getName)))
            file.delete()
        }
    }

  def deleteFile(fileName: String): Try[_] =
    Try {
      val plugin = new File(s"$targetDir/$fileName")
      if (plugin.exists && !plugin.isDirectory)
        plugin.delete()
    }

  def browseDirectory(): Try[Seq[SpartaFile]] =
    Try {
      val directory = new File(targetDir)
      if (directory.exists && directory.isDirectory) {
        directory.listFiles.filter(_.isFile).toList.flatMap { file =>
          if (patternFileName.isEmpty || (patternFileName.isDefined && patternFileName.get.test(file.getName)))
            Option(SpartaFile(file.getName, s"$url/${file.getName}", file.getAbsolutePath,
              sizeToMbFormat(file.length())))
          else None
        }
      } else Seq.empty[SpartaFile]
    }

  def uploadFiles(files: Seq[BodyPart]): Try[Seq[SpartaFile]] =
    Try {
      files.flatMap { file =>
        val fileNameOption = file.filename.orElse(file.name.orElse {
          log.warn("A file name is required to upload files")
          None
        })
        fileNameOption.flatMap { fileName =>
          if (patternFileName.isEmpty || (patternFileName.isDefined && patternFileName.get.test(fileName))) {
            val localMachineDir = s"$targetDir/$fileName"

            Try(saveFile(file.entity.data.toByteArray, localMachineDir)) match {
              case Success(newFile) =>
                Option(SpartaFile(fileName, s"$url/$fileName", localMachineDir, sizeToMbFormat(newFile.length())))
              case Failure(e) =>
                log.error(s"Error saving file in path $localMachineDir", e)
                None
            }
          } else {
            log.warn(s"$fileName is not a valid file name")
            None
          }
        }
      }
    }

  private def sizeToMbFormat(size: Long): String = {
    val formatter = new DecimalFormat("####.##")
    s"${formatter.format(size.toDouble / (1024 * 1024))} MB"
  }

  private def saveFile(array: Array[Byte], fileName: String): File = {
    log.info(s"Saving file to: $fileName")
    new File(fileName).getParentFile.mkdirs
    val bos = new BufferedOutputStream(new FileOutputStream(fileName))
    bos.write(array)
    bos.close()
    new File(fileName)
  }

  private def url: String = {
    val host = Try(InetAddress.getLocalHost.getHostName).getOrElse(SpartaConfig.apiConfig.get.getString("host"))
    val port = SpartaConfig.apiConfig.get.getInt("port")

    s"http://$host:$port/${HttpConstant.SpartaRootPath}/$apiPath"
  }
} 
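A hedged sketch of a concrete object mixing in the trait above. The target directory and API path are illustrative values, not taken from the project.

import com.stratio.sparta.serving.api.utils.FileActorUtils

object PluginFileUtils extends FileActorUtils {
  override val targetDir: String = "/tmp/sparta/plugins" // illustrative upload directory
  override val apiPath: String = "plugin"
}

// PluginFileUtils.browseDirectory() would then list SpartaFile entries whose download URLs are built from `url`.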
Example 118
Source File: DistServiceExecutor.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes
package org.apache.gearpump.experiments.distributeservice

import java.io.{File, FileWriter}
import java.net.InetAddress
import scala.collection.JavaConverters._
import scala.io.Source
import scala.sys.process._
import scala.util.{Failure, Success, Try}

import akka.actor.Actor
import org.apache.commons.io.FileUtils
import org.apache.commons.lang.text.StrSubstitutor
import org.slf4j.Logger

import org.apache.gearpump.cluster.{ExecutorContext, UserConfig}
import org.apache.gearpump.experiments.distributeservice.DistServiceAppMaster.InstallService
import org.apache.gearpump.util.{ActorUtil, LogUtil}

class DistServiceExecutor(executorContext: ExecutorContext, userConf: UserConfig) extends Actor {
  import executorContext._
  private val LOG: Logger = LogUtil.getLogger(getClass, executor = executorId, app = appId)

  override def receive: Receive = {
    case InstallService(url, zipFileName, targetPath, scriptData, serviceName, serviceSettings) =>
      LOG.info(s"Executor $executorId receive command to install " +
        s"service $serviceName to $targetPath")
      unzipFile(url, zipFileName, targetPath)
      installService(scriptData, serviceName, serviceSettings)
  }

  private def unzipFile(url: String, zipFileName: String, targetPath: String) = {
    val zipFile = File.createTempFile(System.currentTimeMillis().toString, zipFileName)
    val dir = new File(targetPath)
    if (dir.exists()) {
      FileUtils.forceDelete(dir)
    }
    val bytes = FileServer.newClient.get(url).get
    FileUtils.writeByteArrayToFile(zipFile, bytes)
    val result = Try(s"unzip ${zipFile.getAbsolutePath} -d $targetPath".!!)
    result match {
      case Success(msg) => LOG.info(s"Executor $executorId unzip file to $targetPath")
      case Failure(ex) => throw ex
    }
  }

  private def installService(
      scriptData: Array[Byte], serviceName: String, serviceSettings: Map[String, Any]) = {
    val tempFile = File.createTempFile("gearpump", serviceName)
    FileUtils.writeByteArrayToFile(tempFile, scriptData)
    val script = new File("/etc/init.d", serviceName)
    writeFileWithEnvVariables(tempFile, script, serviceSettings ++ getEnvSettings)
    val result = Try(s"chkconfig --add $serviceName".!!)
    result match {
      case Success(msg) => LOG.info(s"Executor install service $serviceName successfully!")
      case Failure(ex) => throw ex
    }
  }

  private def getEnvSettings: Map[String, Any] = {
    Map("workerId" -> worker,
      "localhost" -> ActorUtil.getSystemAddress(context.system).host.get,
      "hostname" -> InetAddress.getLocalHost.getHostName)
  }

  private def writeFileWithEnvVariables(source: File, target: File, envs: Map[String, Any]) = {
    val writer = new FileWriter(target)
    val sub = new StrSubstitutor(envs.asJava)
    sub.setEnableSubstitutionInVariables(true)
    Source.fromFile(source).getLines().foreach(line => writer.write(sub.replace(line) + "\r\n"))
    writer.close()
  }
} 
Example 119
Source File: VersionMessage.scala    From bitcoin-s-spv-node   with MIT License 5 votes
package org.bitcoins.spvnode.messages.control

import java.net.InetAddress

import org.bitcoins.core.config.NetworkParameters
import org.bitcoins.core.number.{Int32, Int64, UInt64}
import org.bitcoins.core.protocol.CompactSizeUInt
import org.bitcoins.core.util.Factory
import org.bitcoins.spvnode.messages._
import org.bitcoins.spvnode.serializers.messages.control.RawVersionMessageSerializer
import org.bitcoins.spvnode.versions.ProtocolVersion
import org.bitcoins.spvnode.constant.Constants
import org.joda.time.DateTime




object VersionMessage extends Factory[VersionMessage] {

  private case class VersionMessageImpl(version : ProtocolVersion, services : ServiceIdentifier, timestamp : Int64,
                                               addressReceiveServices : ServiceIdentifier, addressReceiveIpAddress : InetAddress,
                                               addressReceivePort : Int, addressTransServices : ServiceIdentifier,
                                               addressTransIpAddress : InetAddress, addressTransPort : Int,
                                               nonce : UInt64, userAgentSize : CompactSizeUInt, userAgent : String,
                                               startHeight : Int32, relay : Boolean) extends VersionMessage

  override def fromBytes(bytes : Seq[Byte]) : VersionMessage = RawVersionMessageSerializer.read(bytes)

  def apply(version : ProtocolVersion, services : ServiceIdentifier, timestamp : Int64,
            addressReceiveServices : ServiceIdentifier, addressReceiveIpAddress : InetAddress,
            addressReceivePort : Int, addressTransServices : ServiceIdentifier,
            addressTransIpAddress : InetAddress, addressTransPort : Int,
            nonce : UInt64,  userAgent : String,
            startHeight : Int32, relay : Boolean) : VersionMessage = {
    val userAgentSize : CompactSizeUInt = CompactSizeUInt.calculateCompactSizeUInt(userAgent.getBytes)
    VersionMessageImpl(version, services, timestamp, addressReceiveServices, addressReceiveIpAddress,
      addressReceivePort, addressTransServices, addressTransIpAddress, addressTransPort,
      nonce, userAgentSize, userAgent, startHeight, relay)
  }

  def apply(network : NetworkParameters, receivingIpAddress : InetAddress) : VersionMessage = {
    val transmittingIpAddress = InetAddress.getLocalHost
    VersionMessage(network,receivingIpAddress,transmittingIpAddress)
  }

  def apply(network : NetworkParameters, receivingIpAddress : InetAddress, transmittingIpAddress : InetAddress) : VersionMessage = {
    val nonce = UInt64.zero
    val userAgent = Constants.userAgent
    val startHeight = Int32.zero
    val relay = false
    VersionMessage(Constants.version, UnnamedService, Int64(DateTime.now.getMillis), UnnamedService, receivingIpAddress,
      network.port, NodeNetwork, transmittingIpAddress, network.port, nonce, userAgent, startHeight, relay)
  }

  def apply(network: NetworkParameters): VersionMessage = {
    val transmittingIpAddress = InetAddress.getByName(network.dnsSeeds(0))
    VersionMessage(network,transmittingIpAddress)
  }
} 
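A hedged construction sketch for the companion object above: build a version message for a peer we are connecting to. It assumes the surrounding imports from the file above are in scope; the NetworkParameters value is supplied by the caller, and the local host is used as the transmitting address, as in the two-argument apply shown.

def versionFor(network: NetworkParameters, peer: InetAddress): VersionMessage =
  VersionMessage(network, peer) // timestamp, nonce and user agent come from the three-argument apply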
Example 120
Source File: ContainerInfo.scala    From service-container   with Apache License 2.0 5 votes
package com.github.vonnagy.service.container.health

import java.net.InetAddress
import java.util.jar.Attributes.Name
import java.util.jar.{Attributes, JarFile, Manifest}

import com.github.vonnagy.service.container.log.LoggingAdapter


  private[health] def getMainClass: Option[Class[_]] = {
    import scala.collection.JavaConverters._

    def checkStack(elem: StackTraceElement): Option[Class[_]] = try {
      if (elem.getMethodName.equals("main")) Some(Class.forName(elem.getClassName)) else None
    } catch {
      case _: ClassNotFoundException => {
        // Swallow the exception
        None
      }
    }

    Thread.getAllStackTraces.asScala.values.flatMap(currStack => {
      if (!currStack.isEmpty)
        checkStack(currStack.last)
      else
        None
    }).headOption match {
      case None =>
        sys.props.get("sun.java.command") match {
          case Some(command) if !command.isEmpty =>
            try {
              Some(Class.forName(command))
            } catch {
              // Swallow the exception
              case _: ClassNotFoundException =>
                None
            }

          // Nothing could be located
          case _ => None
        }
      case c => c
    }
  }

  private[health] def getManifest(clazz: Class[_]): Manifest = {
    val file: String = clazz.getProtectionDomain.getCodeSource.getLocation.getFile

    try {
      if (file.endsWith(".jar")) {
        new JarFile(file).getManifest
      }
      else {
        val manifest: Manifest = new Manifest
        manifest.getMainAttributes.put(Name.IMPLEMENTATION_TITLE, "Container Service")
        manifest.getMainAttributes.put(Name.IMPLEMENTATION_VERSION, "1.0.0")
        manifest.getMainAttributes.put(new Attributes.Name("Implementation-Build"), "N/A")
        manifest
      }
    }
    catch {
      case _: Exception => {
        val manifest: Manifest = new Manifest
        manifest.getMainAttributes.put(Name.IMPLEMENTATION_TITLE, "Container Service")
        manifest.getMainAttributes.put(Name.IMPLEMENTATION_VERSION, "1.0.0")
        manifest.getMainAttributes.put(new Attributes.Name("Implementation-Build"), "N/A")
        manifest
      }
    }
  }
} 
Example 121
Source File: UPnP.scala    From Waves   with MIT License 5 votes
package com.wavesplatform.network

import java.net.InetAddress

import com.wavesplatform.settings.UPnPSettings
import com.wavesplatform.utils.ScorexLogging
import org.bitlet.weupnp.{GatewayDevice, GatewayDiscover}

import scala.jdk.CollectionConverters._
import scala.util.Try

class UPnP(settings: UPnPSettings) extends ScorexLogging {

  private var gateway: Option[GatewayDevice] = None

  lazy val localAddress    = gateway.map(_.getLocalAddress)
  lazy val externalAddress = gateway.map(_.getExternalIPAddress).map(InetAddress.getByName)

  Try {
    log.info("Looking for UPnP gateway device...")
    val defaultHttpReadTimeout = settings.gatewayTimeout
    GatewayDevice.setHttpReadTimeout(defaultHttpReadTimeout.toMillis.toInt)
    val discover               = new GatewayDiscover()
    val defaultDiscoverTimeout = settings.discoverTimeout
    discover.setTimeout(defaultDiscoverTimeout.toMillis.toInt)

    val gatewayMap = Option(discover.discover).map(_.asScala.toMap).getOrElse(Map())
    if (gatewayMap.isEmpty) {
      log.debug("There are no UPnP gateway devices")
    } else {
      gatewayMap.foreach {
        case (addr, _) =>
          log.debug("UPnP gateway device found on " + addr.getHostAddress)
      }
      Option(discover.getValidGateway) match {
        case None => log.debug("There is no connected UPnP gateway device")
        case Some(device) =>
          gateway = Some(device)
          log.debug("Using UPnP gateway device on " + localAddress.map(_.getHostAddress).getOrElse("err"))
          log.info("External IP address is " + externalAddress.map(_.getHostAddress).getOrElse("err"))
      }
    }
  }.recover {
    case t: Throwable =>
      log.error("Unable to discover UPnP gateway devices: " + t.toString)
  }

  def addPort(port: Int): Try[Unit] =
    Try {
      if (gateway.get.addPortMapping(port, port, localAddress.get.getHostAddress, "TCP", "Scorex")) {
        log.debug("Mapped port [" + externalAddress.get.getHostAddress + "]:" + port)
      } else {
        log.debug("Unable to map port " + port)
      }
    }.recover {
      case t: Throwable =>
        log.error("Unable to map port " + port + ": " + t.toString)
    }

  def deletePort(port: Int): Try[Unit] =
    Try {
      if (gateway.get.deletePortMapping(port, "TCP")) {
        log.debug("Mapping deleted for port " + port)
      } else {
        log.debug("Unable to delete mapping for port " + port)
      }
    }.recover {
      case t: Throwable =>
        log.error("Unable to delete mapping for port " + port + ": " + t.toString)
    }
} 
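A hedged usage sketch for the class above: map a TCP port on the discovered gateway and remove the mapping on shutdown. The UPnPSettings value is assumed to come from node configuration, and the sketch assumes com.wavesplatform.network.UPnP is importable.

import com.wavesplatform.settings.UPnPSettings

def mapNodePort(settings: UPnPSettings, port: Int): Unit = {
  val upnp = new UPnP(settings)              // gateway discovery runs when the instance is created
  upnp.addPort(port)                         // Try[Unit]; failures are logged rather than thrown
  sys.addShutdownHook(upnp.deletePort(port)) // best-effort cleanup of the mapping
}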
Example 122
Source File: Handshake.scala    From Waves   with MIT License 5 votes
package com.wavesplatform.network

import java.net.{InetAddress, InetSocketAddress}

import com.google.common.base.Charsets
import io.netty.buffer.ByteBuf
import com.wavesplatform.utils._

case class Handshake(applicationName: String,
                     applicationVersion: (Int, Int, Int),
                     nodeName: String,
                     nodeNonce: Long,
                     declaredAddress: Option[InetSocketAddress]) {
  def encode(out: ByteBuf): out.type = {
    val applicationNameBytes = applicationName.utf8Bytes
    require(applicationNameBytes.length <= Byte.MaxValue, "The application name is too long!")
    out.writeByte(applicationNameBytes.length)
    out.writeBytes(applicationNameBytes)

    out.writeInt(applicationVersion._1)
    out.writeInt(applicationVersion._2)
    out.writeInt(applicationVersion._3)

    val nodeNameBytes = nodeName.utf8Bytes
    require(nodeNameBytes.length <= Byte.MaxValue, "A node name is too long!")
    out.writeByte(nodeNameBytes.length)
    out.writeBytes(nodeNameBytes)

    out.writeLong(nodeNonce)

    val peer = for {
      inetAddress <- declaredAddress
      address     <- Option(inetAddress.getAddress)
    } yield (address.getAddress, inetAddress.getPort)

    peer match {
      case None => out.writeInt(0)
      case Some((addressBytes, peerPort)) =>
        out.writeInt(addressBytes.length + Integer.BYTES)
        out.writeBytes(addressBytes)
        out.writeInt(peerPort)
    }

    out.writeLong(System.currentTimeMillis() / 1000)
    out
  }
}

object Handshake {
  class InvalidHandshakeException(msg: String) extends IllegalArgumentException(msg)

  def decode(in: ByteBuf): Handshake = {
    val appNameSize = in.readByte()

    if (appNameSize < 0 || appNameSize > Byte.MaxValue) {
      throw new InvalidHandshakeException(s"An invalid application name's size: $appNameSize")
    }
    val appName    = in.readSlice(appNameSize).toString(Charsets.UTF_8)
    val appVersion = (in.readInt(), in.readInt(), in.readInt())

    val nodeNameSize = in.readByte()
    if (nodeNameSize < 0 || nodeNameSize > Byte.MaxValue) {
      throw new InvalidHandshakeException(s"An invalid node name's size: $nodeNameSize")
    }
    val nodeName = in.readSlice(nodeNameSize).toString(Charsets.UTF_8)

    val nonce = in.readLong()

    val declaredAddressLength = in.readInt()
    // 0 for no declared address, 8 for ipv4 address + port, 20 for ipv6 address + port
    if (declaredAddressLength != 0 && declaredAddressLength != 8 && declaredAddressLength != 20) {
      throw new InvalidHandshakeException(s"An invalid declared address length: $declaredAddressLength")
    }
    val isa =
      if (declaredAddressLength == 0) None
      else {
        val addressBytes = new Array[Byte](declaredAddressLength - Integer.BYTES)
        in.readBytes(addressBytes)
        val address = InetAddress.getByAddress(addressBytes)
        val port    = in.readInt()
        Some(new InetSocketAddress(address, port))
      }
    in.readLong() // time is ignored

    Handshake(appName, appVersion, nodeName, nonce, isa)
  }
} 
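A small round-trip sketch for the encode/decode pair above, using an unpooled Netty buffer; the field values are arbitrary.

import com.wavesplatform.network.Handshake
import io.netty.buffer.Unpooled

object HandshakeRoundTrip extends App {
  val original = Handshake("wavesT", (1, 2, 3), "sketch-node", nodeNonce = 42L, declaredAddress = None)
  val buffer   = Unpooled.buffer()
  original.encode(buffer)
  val decoded  = Handshake.decode(buffer)
  assert(decoded == original) // the timestamp written last by encode is read and discarded by decode
}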
Example 123
Source File: DTLSConnectionFn.scala    From stream-reactor   with Apache License 2.0 5 votes
package com.datamountaineer.streamreactor.connect.coap.connection

import java.io.FileInputStream
import java.net.{ConnectException, InetAddress, InetSocketAddress, URI}
import java.security.cert.Certificate
import java.security.{KeyStore, PrivateKey}

import com.datamountaineer.streamreactor.connect.coap.configs.{CoapConstants, CoapSetting}
import com.typesafe.scalalogging.StrictLogging
import org.eclipse.californium.core.CoapClient
import org.eclipse.californium.core.coap.CoAP
import org.eclipse.californium.core.network.CoapEndpoint
import org.eclipse.californium.core.network.config.NetworkConfig
import org.eclipse.californium.scandium.DTLSConnector
import org.eclipse.californium.scandium.config.DtlsConnectorConfig
import org.eclipse.californium.scandium.dtls.cipher.CipherSuite
import org.eclipse.californium.scandium.dtls.pskstore.InMemoryPskStore


  def discoverServer(address: String, uri: URI): URI = {
    val client = new CoapClient(s"${uri.getScheme}://$address:${uri.getPort.toString}/.well-known/core")
    client.useNONs()
    val response = client.get()

    if (response != null) {
      logger.info(s"Discovered Server ${response.advanced().getSource.toString}.")
      new URI(uri.getScheme,
        uri.getUserInfo,
        response.advanced().getSource.getHostName,
        response.advanced().getSourcePort,
        uri.getPath,
        uri.getQuery,
        uri.getFragment)
    } else {
      logger.error(s"Unable to find any servers on local network with multicast address $address.")
      throw new ConnectException(s"Unable to find any servers on local network with multicast address $address.")
    }
  }
} 
Example 124
Source File: HadoopConfigurationExtension.scala    From stream-reactor   with Apache License 2.0 5 votes
package com.landoop.streamreactor.connect.hive

import java.net.InetAddress

import com.landoop.streamreactor.connect.hive.kerberos.{Kerberos, KeytabSettings, UserPasswordSettings}
import org.apache.hadoop.conf.Configuration

object HadoopConfigurationExtension {

  implicit class ConfigurationExtension(val configuration: Configuration) extends AnyVal {
    def withKerberos(kerberos: Kerberos): Unit = {
      configuration.set("hadoop.security.authentication", "kerberos")
      kerberos.auth match {
        case Left(keytab) => withKeyTab(keytab)
        case Right(userPwd) => withUserPassword(userPwd)
      }
    }

    def withUserPassword(settings: UserPasswordSettings): Unit = {
      System.setProperty("java.security.auth.login.config", settings.jaasPath)
      System.setProperty("java.security.krb5.conf", settings.krb5Path)
      System.setProperty("javax.security.auth.useSubjectCredsOnly", "false")

      settings.nameNodePrincipal.foreach(configuration.set("dfs.namenode.kerberos.principal", _))
      
    }
  }

} 
Example 125
Source File: FakeAbstractGettableByIndexData.scala    From troy   with Apache License 2.0 5 votes
package com.datastax.driver.core

import java.net.InetAddress
import java.nio.ByteBuffer
import java.util.Date

class FakeAbstractGettableByIndexData(pv: ProtocolVersion, data: (ByteBuffer, DataType)*) extends AbstractGettableByIndexData(pv) {
  override def getType(i: Int): DataType = data(i)._2

  override def getValue(i: Int): ByteBuffer = data(i)._1

  override def getName(i: Int): String = ""

  override def getCodecRegistry: CodecRegistry = ???
}
object FakeAbstractGettableByIndexData {
  def ascii(value: String, pv: ProtocolVersion = ProtocolVersion.V4) = new FakeAbstractGettableByIndexData(
    pv, TypeCodec.ascii.serialize(value, pv) -> DataType.ascii()
  )

  def timestamp(value: Date, pv: ProtocolVersion = ProtocolVersion.V4) = new FakeAbstractGettableByIndexData(
    pv, TypeCodec.timestamp.serialize(value, pv) -> DataType.timestamp()
  )

  def decimal(value: java.math.BigDecimal, pv: ProtocolVersion = ProtocolVersion.V4) = new FakeAbstractGettableByIndexData(
    pv, TypeCodec.decimal.serialize(value, pv) -> DataType.decimal()
  )

  def inet(value: InetAddress, pv: ProtocolVersion = ProtocolVersion.V4) = new FakeAbstractGettableByIndexData(
    pv, TypeCodec.inet.serialize(value, pv) -> DataType.inet()
  )

  def date(value: LocalDate, pv: ProtocolVersion = ProtocolVersion.V4) = new FakeAbstractGettableByIndexData(
    pv, TypeCodec.date.serialize(value, pv) -> DataType.date()
  )
} 
Example 126
Source File: HasTypeCodec.scala    From troy   with Apache License 2.0 5 votes
package troy
package driver.codecs

import java.net.InetAddress
import java.nio.ByteBuffer
import java.util.{ Date, UUID }

import com.datastax.driver.core.{ LocalDate, TypeCodec }
import troy.driver.{ CassandraDataType => CT }

import scala.annotation.implicitNotFound

case class HasTypeCodec[S, C <: CT](typeCodec: TypeCodec[S]) extends AnyVal

object HasTypeCodec {
  import TypeCodec._

  implicit val BooleanHasCodecAsBoolean = HasTypeCodec[java.lang.Boolean, CT.Boolean](cboolean)
  implicit val TinyIntHasCodecAsByte = HasTypeCodec[java.lang.Byte, CT.TinyInt](tinyInt)
  implicit val SmallIntHasCodecAsShort = HasTypeCodec[java.lang.Short, CT.SmallInt](smallInt)
  implicit val IntHasCodecAsInteger = HasTypeCodec[java.lang.Integer, CT.Int](cint)
  implicit val BigIntHasCodecAsLong = HasTypeCodec[java.lang.Long, CT.BigInt](bigint)
  implicit val CounterHasCodecAsLong = HasTypeCodec[java.lang.Long, CT.Counter](counter)
  implicit val FloatHasCodecAsFloat = HasTypeCodec[java.lang.Float, CT.Float](cfloat)
  implicit val DoubleHasCodecAsDouble = HasTypeCodec[java.lang.Double, CT.Double](cdouble)
  implicit val VarIntHasCodecAsBigInteger = HasTypeCodec[java.math.BigInteger, CT.VarInt](varint)
  implicit val DecimalHasCodecAsBigDecimal = HasTypeCodec[java.math.BigDecimal, CT.Decimal](decimal)
  implicit val AsciiHasCodecAsString = HasTypeCodec[String, CT.Ascii](ascii)
  implicit val VarCharHasCodecAsString = HasTypeCodec[String, CT.VarChar](varchar)
  implicit val TextHasCodecAsString = HasTypeCodec[String, CT.Text](varchar)
  implicit val BlobHasCodecAsByteBuffer = HasTypeCodec[ByteBuffer, CT.Blob](blob)
  implicit val DateHasCodecAsLocalDate = HasTypeCodec[LocalDate, CT.Date](date)
  implicit val TimeHasCodecAsLong = HasTypeCodec[java.lang.Long, CT.Time](time)
  implicit val TimestampHasCodecAsDate = HasTypeCodec[Date, CT.Timestamp](timestamp)
  implicit val UuidHasCodecAsUUID = HasTypeCodec[UUID, CT.Uuid](uuid)
  implicit val TimeUuidHasCodecAsUUID = HasTypeCodec[UUID, CT.TimeUuid](timeUUID)
  implicit val InetHasCodecAsInetAddress = HasTypeCodec[InetAddress, CT.Inet](inet)
} 
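A brief sketch of how the evidence above is typically summoned; the column type picked here (text) is arbitrary.

import com.datastax.driver.core.TypeCodec
import troy.driver.{ CassandraDataType => CT }
import troy.driver.codecs.HasTypeCodec

object CodecResolutionSketch {
  // Resolved from the implicit vals in the companion object: text columns map to String.
  val evidence: HasTypeCodec[String, CT.Text] = implicitly[HasTypeCodec[String, CT.Text]]
  val codec: TypeCodec[String] = evidence.typeCodec
}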
Example 127
Source File: TCPListener.scala    From scala-loci   with Apache License 2.0 5 votes
package loci
package communicator
package tcp

import java.io.IOException
import java.net.{InetAddress, ServerSocket, SocketException}
import java.util.concurrent.Executors
import java.util.concurrent.atomic.AtomicBoolean

import scala.util.{Failure, Success, Try}
import scala.util.control.NonFatal

private class TCPListener(
  port: Int, interface: String, properties: TCP.Properties)
    extends Listener[TCP] {

  protected def startListening(connectionEstablished: Connected[TCP]): Try[Listening] =
    try {
      val running = new AtomicBoolean(true)
      val socket = new ServerSocket(port, 0, InetAddress.getByName(interface))
      val executor = Executors.newCachedThreadPool()

      def terminate() = {
        try socket.close()
        catch { case _: IOException => }
        executor.shutdown()
      }

      new Thread() {
        override def run() =
          try
            while (true) {
              val connection = socket.accept()
              if (connection != null)
                executor.execute(new Runnable {
                  def run() = TCPHandler.handleConnection(
                    connection, properties, TCPListener.this, { connection =>
                      connectionEstablished.fire(Success(connection))
                    })
                })
            }
          catch {
            case exception: SocketException =>
              if (running.getAndSet(false)) {
                terminate()
                connectionEstablished.fire(Failure(exception))
              }
          }
      }.start()

      Success(new Listening {
        def stopListening(): Unit =
          if (running.getAndSet(false))
            terminate()
      })
    }
    catch {
      case NonFatal(exception) =>
        Failure(exception)
    }
} 
Example 128
Source File: HealthEndpointsSpec.scala    From service-container   with Apache License 2.0 5 votes
package com.github.vonnagy.service.container.health

import java.net.InetAddress

import akka.http.scaladsl.model.headers.{Accept, `Remote-Address`}
import akka.http.scaladsl.model.{ContentTypes, MediaTypes, RemoteAddress, StatusCodes}
import com.github.vonnagy.service.container.Specs2RouteTest
import com.github.vonnagy.service.container.http.routing.Rejection.NotFoundRejection
import org.specs2.mutable.Specification

import scala.concurrent.Future

class HealthEndpointsSpec extends Specification with Specs2RouteTest {

  sequential

  val endpoints = new HealthEndpoints()(system, system.dispatcher)
  def remoteAddress(ip: String) = RemoteAddress(InetAddress.getByName(ip))

  "The routing infrastructure" should {

    "support a call to /health" in {
      Get("/health").withHeaders(`Remote-Address`(remoteAddress("127.0.0.1"))) ~> endpoints.route ~> check {
        handled must beTrue
        contentType === ContentTypes.`application/json`
        status must be equalTo (StatusCodes.OK)
      }
    }

    "support a call to /health that should return json" in {
      Get("/health").withHeaders(Accept(MediaTypes.`application/json`),
        `Remote-Address`(remoteAddress("127.0.0.1"))) ~> endpoints.route ~> check {
        handled must beTrue
        mediaType === MediaTypes.`application/json`
        contentType === ContentTypes.`application/json`
      }
    }

    "a call to /health should return an error due to CIDR rules" in {
      Get("/health").withHeaders(Accept(MediaTypes.`application/json`),
        `Remote-Address`(remoteAddress("192.168.1.1"))) ~> endpoints.route ~> check {
        handled must beFalse
        rejections.size must beEqualTo(1)
        rejections.head must be equalTo(NotFoundRejection("The requested resource could not be found"))
      }
    }

    "support a call to /health/lb" in {
      Get("/health/lb").withHeaders(Accept(MediaTypes.`text/plain`),
        `Remote-Address`(remoteAddress("127.0.0.1"))) ~> endpoints.route ~> check {
        handled must beTrue
        mediaType === MediaTypes.`text/plain`
        responseAs[String].equals("UP")
      }
    }

    "support a call to health/lb that returns a status of `Ok` when a health check is marked as degraded" in {
      Health(system).addCheck(new HealthCheck {
        override def getHealth: Future[HealthInfo] = Future {
          HealthInfo("degraded", HealthState.DEGRADED, "")
        }
      })

      Get("/health/lb").withHeaders(Accept(MediaTypes.`text/plain`),
        `Remote-Address`(remoteAddress("127.0.0.1"))) ~> endpoints.route ~> check {
        handled must beTrue
        mediaType === MediaTypes.`text/plain`
        status === StatusCodes.OK
        responseAs[String].equals("UP")
      }
    }

    "support a call to health/lb that returns a status of `ServiceUnavailable` when a health check is marked as critical" in {
      Health(system).addCheck(new HealthCheck {
        override def getHealth: Future[HealthInfo] = Future {
          HealthInfo("critical", HealthState.CRITICAL, "")
        }
      })

      Get("/health/lb").withHeaders(Accept(MediaTypes.`text/plain`),
        `Remote-Address`(remoteAddress("127.0.0.1"))) ~> endpoints.route ~> check {
        //handled must beTrue
        mediaType === MediaTypes.`text/plain`
        status === StatusCodes.ServiceUnavailable
        responseAs[String].equals("DOWN")
      }
    }

  }
} 
Example 129
Source File: BaseEndpointsSpec.scala    From service-container   with Apache License 2.0 5 votes
package com.github.vonnagy.service.container.http

import java.net.InetAddress

import akka.http.scaladsl.model.headers.`Remote-Address`
import akka.http.scaladsl.model.{RemoteAddress, StatusCodes}
import akka.http.scaladsl.server.Directives
import com.github.vonnagy.service.container.Specs2RouteTest
import com.github.vonnagy.service.container.http.routing.Rejection.NotFoundRejection
import org.specs2.mutable.Specification

class BaseEndpointsSpec extends Specification with Directives with Specs2RouteTest {

  val endpoints = new BaseEndpoints

  "The base routing infrastructure" should {

    "return no content for favicon.ico" in {
      Get("/favicon.ico") ~> endpoints.route ~> check {
        status must be equalTo StatusCodes.NoContent
      }
    }

    "support a call to ping" in {
      Get("/ping") ~> endpoints.route ~> check {
        responseAs[String] must startWith("pong")
        status must beEqualTo(StatusCodes.OK)
      }
    }

    "a call to shutdown should return and error due to CIDR rules" in {
      Post("/shutdown").withHeaders(`Remote-Address`(RemoteAddress(InetAddress.getByName("192.168.1.1")))) ~> endpoints.route ~> check {
        handled must beFalse
        rejections.size must beEqualTo(1)
        rejections.head must be equalTo(NotFoundRejection("The requested resource could not be found"))
      }
    }

  }

} 
Example 130
Source File: CIDRDirectivesSpec.scala    From service-container   with Apache License 2.0 5 votes
package com.github.vonnagy.service.container.http.directives

import java.net.InetAddress

import akka.http.scaladsl.model.headers._
import akka.http.scaladsl.model.{RemoteAddress, StatusCodes}
import com.github.vonnagy.service.container.Specs2RouteTest
import com.github.vonnagy.service.container.http.routing.Rejection.NotFoundRejection
import org.specs2.mutable.Specification


class CIDRDirectivesSpec extends Specification with CIDRDirectives with Specs2RouteTest {

  val yeah = complete("Yeah!")

  def remoteAddress(ip: String) = RemoteAddress(InetAddress.getByName(ip))

  "CIDRDirectives" should {

    "allow call when no allows or denies" in {
      Get() ~> addHeaders(`Remote-Address`(remoteAddress("192.168.1.1"))) ~> {
        cidrFilter(Seq(), Seq()) {
          yeah
        }
      } ~> check {
        handled === true
        status === StatusCodes.OK
      }
    }

    "allow call when no denies, but matches allow" in {
      Get() ~> addHeaders(`Remote-Address`(remoteAddress("192.168.1.1"))) ~> {
        cidrFilter(Seq("192.168.1.1/1"), Seq()) {
          yeah
        }
      } ~> check {
        handled === true
        status === StatusCodes.OK
      }
    }

    "allow call when does not match deny, but matches allow" in {
      Get() ~> addHeaders(`Remote-Address`(remoteAddress("192.168.1.1"))) ~> {
        cidrFilter(Seq("192.168.1.1/1"), Seq("10.0.0.1/1")) {
          yeah
        }
      } ~> check {
        handled === true
        status === StatusCodes.OK
      }
    }

    "disallow call when no denies and does not match allow" in {
      Get() ~> addHeaders(`Remote-Address`(remoteAddress("192.168.1.1"))) ~> {
        cidrFilter(Seq("127.0.0.1/1"), Seq()) {
          yeah
        }
      } ~> check {
        handled must beFalse
        rejections.size must beEqualTo(1)
        rejections.head must be equalTo(NotFoundRejection("The requested resource could not be found"))
      }
    }

    "disallow call when matches a deny" in {
      Get() ~> addHeaders(`Remote-Address`(remoteAddress("10.0.0.1"))) ~> {
        cidrFilter(Seq("192.168.1.1/1"), Seq("10.0.0.1/1")) {
          yeah
        }
      } ~> check {
        handled must beFalse
        rejections.size must beEqualTo(1)
        rejections.head must be equalTo(NotFoundRejection("The requested resource could not be found"))
      }
    }

    "disallow because there is no remote address header that has been injected" in {
      Get() ~> {
        cidrFilter(Seq(), Seq()) {
          yeah
        }
      } ~> check {
        handled must beFalse
        rejections.size must beEqualTo(1)
        rejections.head must be equalTo(NotFoundRejection("The requested resource could not be found"))
      }
    }
  }
} 
Example 131
Source File: MetricsEndpointsSpec.scala    From service-container   with Apache License 2.0 5 votes
package com.github.vonnagy.service.container.metrics

import java.net.InetAddress

import akka.http.scaladsl.model.headers.{Accept, `Remote-Address`}
import akka.http.scaladsl.model.{ContentTypes, MediaTypes, RemoteAddress, StatusCodes}
import com.github.vonnagy.service.container.Specs2RouteTest
import com.github.vonnagy.service.container.http.routing.Rejection.NotFoundRejection
import org.specs2.mutable.Specification

class MetricsEndpointsSpec extends Specification with Specs2RouteTest {

  val endpoints = new MetricsEndpoints()(system, system.dispatcher)

  def remoteAddress(ip: String) = RemoteAddress(InetAddress.getByName(ip))

  "The routing infrastructure should support" should {

    "a call to /metrics to be handled" in {
      Get("/metrics").withHeaders(`Remote-Address`(remoteAddress("127.0.0.1"))) ~> endpoints.route ~> check {
        handled must beTrue
        contentType === ContentTypes.`application/json`
        status must be equalTo (StatusCodes.OK)
      }
    }

    "a call to /metrics should return json" in {
      Get("/metrics").withHeaders(Accept(MediaTypes.`application/json`),
        `Remote-Address`(remoteAddress("127.0.0.1"))) ~> endpoints.route ~> check {
        handled must beTrue
        contentType === ContentTypes.`application/json`
      }
    }

    "a call to /metrics should return an error due to CIDR rules" in {
      Get("/metrics").withHeaders(Accept(MediaTypes.`application/json`),
        `Remote-Address`(remoteAddress("192.168.1.1"))) ~> endpoints.route ~> check {
        handled must beFalse
        rejections.size must beEqualTo(1)
        rejections.head must be equalTo(NotFoundRejection("The requested resource could not be found"))
      }
    }
  }

} 
Example 132
Source File: InboundConnectionFilter.scala    From Waves   with MIT License 5 votes
package com.wavesplatform.network

import java.net.{InetAddress, InetSocketAddress}
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicInteger

import com.wavesplatform.utils.ScorexLogging
import io.netty.channel.ChannelHandler.Sharable
import io.netty.channel.{ChannelFuture, ChannelHandlerContext}
import io.netty.handler.ipfilter.AbstractRemoteAddressFilter

@Sharable
class InboundConnectionFilter(peerDatabase: PeerDatabase, maxInboundConnections: Int, maxConnectionsPerHost: Int)
    extends AbstractRemoteAddressFilter[InetSocketAddress]
    with ScorexLogging {
  private val inboundConnectionCount = new AtomicInteger(0)
  private val perHostConnectionCount = new ConcurrentHashMap[InetAddress, Int]
  private val emptyChannelFuture     = null.asInstanceOf[ChannelFuture]

  private def dec(remoteAddress: InetAddress) = {
    inboundConnectionCount.decrementAndGet()
    log.trace(s"Number of inbound connections: ${inboundConnectionCount.get()}")
    perHostConnectionCount.compute(remoteAddress, (_, cnt) => cnt - 1)
    emptyChannelFuture
  }

  override def accept(ctx: ChannelHandlerContext, remoteAddress: InetSocketAddress): Boolean = Option(remoteAddress.getAddress) match {
    case None =>
      log.debug(s"Can't obtain an address from $remoteAddress")
      false

    case Some(address) =>
      val newTotal        = inboundConnectionCount.incrementAndGet()
      val newCountPerHost = perHostConnectionCount.compute(address, (_, cnt) => Option(cnt).fold(1)(_ + 1))
      val isBlacklisted   = peerDatabase.blacklistedHosts.contains(address)

      val accepted = newTotal <= maxInboundConnections &&
        newCountPerHost <= maxConnectionsPerHost &&
        !isBlacklisted

      log.trace(
        s"Check inbound connection from $remoteAddress: new inbound total = $newTotal, " +
          s"connections with this host = $newCountPerHost, address ${if (isBlacklisted) "IS" else "is not"} blacklisted, " +
          s"${if (accepted) "is" else "is not"} accepted"
      )

      accepted
  }

  override def channelAccepted(ctx: ChannelHandlerContext, remoteAddress: InetSocketAddress): Unit =
    ctx.channel().closeFuture().addListener((_: ChannelFuture) => Option(remoteAddress.getAddress).foreach(dec))

  override def channelRejected(ctx: ChannelHandlerContext, remoteAddress: InetSocketAddress): ChannelFuture =
    Option(remoteAddress.getAddress).fold(emptyChannelFuture)(dec)
} 
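A hedged wiring sketch for the filter above. Because it is @Sharable, a single instance can be installed first in every accepted channel's pipeline; the PeerDatabase value (assumed in scope from com.wavesplatform.network) and the connection limits are assumptions, not values from the project.

import io.netty.bootstrap.ServerBootstrap
import io.netty.channel.ChannelInitializer
import io.netty.channel.socket.SocketChannel

def inboundServerBootstrap(peerDatabase: PeerDatabase): ServerBootstrap = {
  val filter = new InboundConnectionFilter(peerDatabase, maxInboundConnections = 30, maxConnectionsPerHost = 3)
  new ServerBootstrap().childHandler(new ChannelInitializer[SocketChannel] {
    override def initChannel(ch: SocketChannel): Unit =
      ch.pipeline().addFirst(filter) // accept/reject the connection before any other handler sees it
  })
}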
Example 133
Source File: Cassandra.scala    From unicorn   with Apache License 2.0 5 votes
package unicorn.bigtable.cassandra

import java.util.Properties
import java.net.{InetAddress, UnknownHostException}
import scala.collection.JavaConversions._
import org.apache.cassandra.locator.SimpleSnitch
import org.apache.cassandra.thrift.Cassandra.Client
import org.apache.cassandra.thrift.{ConsistencyLevel, KsDef, CfDef}
import org.apache.thrift.transport.TFramedTransport
import org.apache.thrift.transport.TSocket
import org.apache.thrift.protocol.TBinaryProtocol
import unicorn.bigtable._
import unicorn.util.Logging


  override def compactTable(name: String): Unit = {
    // fail silently
    log.warn("Cassandra client API doesn't support compaction")
  }
}

object Cassandra {
  def apply(host: String, port: Int): Cassandra = {
    // For ultra-wide row, we set the maxLength to 16MB.
    // Note that we also need to set the server side configuration
    // thrift_framed_transport_size_in_mb in cassandra.yaml
    // In case of ultra-wide row, it is better to use intra row scan.
    val transport = new TFramedTransport(new TSocket(host, port), 16 * 1024 * 1024)
    transport.open

    new Cassandra(transport)
  }
} 
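A hedged connection sketch for the companion object's apply above; the host is illustrative and 9160 is the traditional Cassandra Thrift RPC port.

import unicorn.bigtable.cassandra.Cassandra

object CassandraConnectSketch extends App {
  val cassandra = Cassandra("127.0.0.1", 9160) // opens a framed Thrift transport sized for ultra-wide rows
}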
Example 134
Source File: ServerHostTest.scala    From polynote   with Apache License 2.0 5 votes
package polynote.server

import java.net.{HttpURLConnection, InetAddress, InetSocketAddress, URL}

import org.scalamock.scalatest.MockFactory
import org.scalatest.{FreeSpec, Matchers}
import polynote.app.{App, Args, Environment, MainArgs}
import polynote.config._
import polynote.kernel.{BaseEnv, Kernel}
import polynote.kernel.environment.Config
import polynote.kernel.environment.Env.LayerOps
import polynote.kernel.interpreter.Interpreter
import polynote.kernel.logging.Logging
import polynote.server.auth.IdentityProvider
import polynote.server.repository.NotebookRepository
import polynote.server.repository.fs.FileSystems
import polynote.testing.{ConfiguredZIOSpec, ZIOSpec}
import zio.{RIO, Task, ZIO, ZLayer}
import zio.blocking.effectBlocking

class ServerHostTest extends FreeSpec with Matchers with ConfiguredZIOSpec with MockFactory {
  override val config: PolynoteConfig = PolynoteConfig(
    listen = Listen(host = "0.0.0.0", port = 0)
  )

  val configLayer: ZLayer[BaseEnv, Nothing, Config] = ZLayer.succeed(config)

  private def request(uri: String) = effectBlocking {
    val conn = new URL(uri).openConnection().asInstanceOf[HttpURLConnection]
    conn.setConnectTimeout(500)
    conn.connect()
    val responseCode = conn.getResponseCode
    responseCode shouldEqual 200
  }

  "Server" - {

    "listens on all interfaces when given listen=0.0.0.0" ignore {
      val kernel        = mock[Kernel]
      val kernelFactory = Kernel.Factory.const(kernel)
      val server        = new Server

      val serverEnv: ZLayer[BaseEnv, Throwable, server.MainEnv with MainArgs] =
        (configLayer andThen IdentityProvider.layer) ++
          Interpreter.Factories.load ++ ZLayer.succeed(kernelFactory) ++ ZLayer.succeed(Args(watchUI = true)) ++
          (configLayer ++ FileSystems.live >>> NotebookRepository.live)

      val run = server.server("TESTKEY").provideSomeLayer[BaseEnv](serverEnv).use {
        server =>
          for {
            localAddress <- effectBlocking(InetAddress.getLocalHost.getCanonicalHostName)
            _            <- server.awaitUp
            port         <- server.localAddress.map(_.asInstanceOf[InetSocketAddress].getPort)
            _            <- request(s"http://$localAddress:$port/")
            _            <- request(s"http://127.0.0.1:$port/")
            _            <- server.shutdown()
          } yield ()
      }

      run.runIO()
    }

  }

} 
Example 135
Source File: HogEvent.scala    From hogzilla   with GNU General Public License v2.0 5 votes
package org.hogzilla.event

import java.util.HashMap
import java.util.Map
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.util.Bytes
import org.hogzilla.hbase.HogHBaseRDD
import org.hogzilla.util.HogFlow
import java.net.InetAddress


class HogEvent(flow:HogFlow) 
{
	var sensorid:Int=0
	var signature_id:Double=0
	var priorityid:Int=0
	var text:String=""
	var data:Map[String,String]=new HashMap()
  var ports:String=""
  var title:String=""
  var username:String=""
  var coords:String=""
 
  
  def formatIPtoBytes(ip:String):Array[Byte] =
  {
    try {
       // Eca! Snorby doesn't support IPv6 yet. See https://github.com/Snorby/snorby/issues/65
    if(ip.contains(":"))
      InetAddress.getByName("255.255.6.6").getAddress
    else  
      InetAddress.getByName(ip).getAddress
    } catch {
      case t: Throwable => 
        // Bogus address!
        InetAddress.getByName("255.255.1.1").getAddress
    }   
   
  }

  
   def alert()
   {
	   val put = new Put(Bytes.toBytes(flow.get("flow:id")))
     put.add(Bytes.toBytes("event"), Bytes.toBytes("note"), Bytes.toBytes(text))
     put.add(Bytes.toBytes("event"), Bytes.toBytes("lower_ip"), formatIPtoBytes(flow.lower_ip))
     put.add(Bytes.toBytes("event"), Bytes.toBytes("upper_ip"), formatIPtoBytes(flow.upper_ip))
     put.add(Bytes.toBytes("event"), Bytes.toBytes("lower_ip_str"), Bytes.toBytes(flow.lower_ip))
     put.add(Bytes.toBytes("event"), Bytes.toBytes("upper_ip_str"), Bytes.toBytes(flow.upper_ip))
     put.add(Bytes.toBytes("event"), Bytes.toBytes("signature_id"), Bytes.toBytes("%.0f".format(signature_id)))
     put.add(Bytes.toBytes("event"), Bytes.toBytes("time"), Bytes.toBytes(System.currentTimeMillis))
     put.add(Bytes.toBytes("event"), Bytes.toBytes("ports"), Bytes.toBytes(ports))
     put.add(Bytes.toBytes("event"), Bytes.toBytes("title"), Bytes.toBytes(title))
     
     if(!username.equals(""))
       put.add(Bytes.toBytes("event"), Bytes.toBytes("username"), Bytes.toBytes(username))
     if(!coords.equals(""))
       put.add(Bytes.toBytes("event"), Bytes.toBytes("coords"), Bytes.toBytes(coords))
     
     HogHBaseRDD.hogzilla_events.put(put)

     //println(f"ALERT: $text%100s\n\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
   }
} 
Example 136
Source File: PacketProxy.scala    From Neutrino   with Apache License 2.0 5 votes
package com.ebay.neutrino

import java.net.{InetAddress, InetSocketAddress, SocketAddress}
import com.ebay.neutrino.util.Utilities

import scala.concurrent.Future
import scala.util.{Failure, Success}

import com.typesafe.scalalogging.slf4j.StrictLogging
import io.netty.bootstrap.{Bootstrap, ServerBootstrap}
import io.netty.channel.ChannelHandler.Sharable
import io.netty.channel._
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.nio.{NioServerSocketChannel, NioSocketChannel}
import io.netty.util.AttributeKey



  override def channelRead(ctx: ChannelHandlerContext, msg: AnyRef) {
    println("Writing packet from downstream to upstream...")
    upstream.writeAndFlush(msg)
    //ctx.fireChannelRead(msg)
  }

  override def channelInactive(ctx: ChannelHandlerContext): Unit = {
    println("Downstream closing..")
    upstream.close()
    ctx.fireChannelInactive()
  }
} 
Example 137
Source File: ServerContext.scala    From Neutrino   with Apache License 2.0 5 votes
package com.ebay.neutrino.util

import java.net.{UnknownHostException, InetAddress}

import com.typesafe.scalalogging.slf4j.StrictLogging


object ServerContext extends StrictLogging {


  val fullHostName =  {
    var hostName : String = "Not Available"
    try {
      hostName = InetAddress.getLocalHost.getHostName
    }
    catch {
      case ex : UnknownHostException =>
        logger.warn("Unable to get the hostname")
    }
    hostName
  }

  val canonicalHostName = {
    var hostName : String = "Not Available"
    try {
      hostName = InetAddress.getLocalHost.getCanonicalHostName
    }
    catch {
      case ex : UnknownHostException =>
        logger.warn("Unable to get the hostname")
    }
    hostName
  }

  val hostAddress = {
    var hostAddress : String = "Not Available"
    try {
      hostAddress = InetAddress.getLocalHost.getHostAddress
    }
    catch {
      case ex : UnknownHostException =>
        logger.warn("Unable to get the hostaddress")
    }

    hostAddress
  }


} 
Example 138
Source File: NetworkUtils.scala    From asura   with MIT License 5 votes
package asura.common.util

import java.net.{InetAddress, NetworkInterface}

import scala.collection.JavaConverters._

object NetworkUtils {

  def getLocalIpAddress(): String = {
    val interfaces = NetworkInterface.getNetworkInterfaces.asScala.toSeq
    val ipAddresses = interfaces.flatMap(_.getInetAddresses.asScala.toSeq)
    val address = ipAddresses.find(address => {
      val host = address.getHostAddress
      host.contains(".") && !address.isLoopbackAddress
    }).getOrElse(InetAddress.getLocalHost)
    address.getHostAddress
  }
} 
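A minimal usage sketch (not part of the original file; the printed address depends on the machine's network interfaces):

object NetworkUtilsDemo extends App {
  // Picks the first non-loopback address containing a '.', i.e. an IPv4-style address.
  println(s"local ip = ${NetworkUtils.getLocalIpAddress()}")
}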
Example 139
Source File: LauncherBackend.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.launcher

import java.net.{InetAddress, Socket}

import org.apache.spark.SPARK_VERSION
import org.apache.spark.launcher.LauncherProtocol._
import org.apache.spark.util.{ThreadUtils, Utils}


  protected def onDisconnected() : Unit = { }

  private def fireStopRequest(): Unit = {
    val thread = LauncherBackend.threadFactory.newThread(new Runnable() {
      override def run(): Unit = Utils.tryLogNonFatalError {
        onStopRequest()
      }
    })
    thread.start()
  }

  private class BackendConnection(s: Socket) extends LauncherConnection(s) {

    override protected def handle(m: Message): Unit = m match {
      case _: Stop =>
        fireStopRequest()

      case _ =>
        throw new IllegalArgumentException(s"Unexpected message type: ${m.getClass().getName()}")
    }

    override def close(): Unit = {
      try {
        super.close()
      } finally {
        onDisconnected()
        _isConnected = false
      }
    }

  }

}

private object LauncherBackend {

  val threadFactory = ThreadUtils.namedThreadFactory("LauncherBackend")

} 
Example 140
Source File: DockerUtils.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.util

import java.net.{Inet4Address, NetworkInterface, InetAddress}

import scala.collection.JavaConverters._
import scala.sys.process._
import scala.util.Try

private[spark] object DockerUtils {

  def getDockerIp(): String = {
    
    def findFromDockerMachine(): Option[String] = {
      sys.env.get("DOCKER_MACHINE_NAME").flatMap { name =>
        Try(Seq("/bin/bash", "-c", s"docker-machine ip $name 2>/dev/null").!!.trim).toOption
      }
    }
    sys.env.get("DOCKER_IP")
      .orElse(findFromDockerMachine())
      .orElse(Try(Seq("/bin/bash", "-c", "boot2docker ip 2>/dev/null").!!.trim).toOption)
      .getOrElse {
        // This block of code is based on Utils.findLocalInetAddress(), but is modified to blacklist
        // certain interfaces.
        val address = InetAddress.getLocalHost
        // Address resolves to something like 127.0.1.1, which happens on Debian; try to find
        // a better address using the local network interfaces
        // getNetworkInterfaces returns ifs in reverse order compared to ifconfig output order
        // on unix-like system. On windows, it returns in index order.
        // It's more proper to pick ip address following system output order.
        val blackListedIFs = Seq(
          "vboxnet0",  // Mac
          "docker0"    // Linux
        )
        val activeNetworkIFs = NetworkInterface.getNetworkInterfaces.asScala.toSeq.filter { i =>
          !blackListedIFs.contains(i.getName)
        }
        val reOrderedNetworkIFs = activeNetworkIFs.reverse
        for (ni <- reOrderedNetworkIFs) {
          val addresses = ni.getInetAddresses.asScala
            .filterNot(addr => addr.isLinkLocalAddress || addr.isLoopbackAddress).toSeq
          if (addresses.nonEmpty) {
            val addr = addresses.find(_.isInstanceOf[Inet4Address]).getOrElse(addresses.head)
            // because of Inet6Address.toHostName may add interface at the end if it knows about it
            val strippedAddress = InetAddress.getByAddress(addr.getAddress)
            return strippedAddress.getHostAddress
          }
        }
        address.getHostAddress
      }
  }
} 
Example 141
Source File: SocketGenerator.scala    From flink-demos   with Apache License 2.0 5 votes vote down vote up
package com.dataartisans.flink.example.eventpattern.Socket

import java.net.{InetAddress, InetSocketAddress}
import java.nio.{ByteOrder, ByteBuffer}
import java.nio.channels.SocketChannel

import com.dataartisans.flink.example.eventpattern.{StandaloneGeneratorBase, Event}
import org.apache.flink.util.Collector

object SocketGenerator extends StandaloneGeneratorBase {

  val BASE_PORT = 51762

  def main(args: Array[String]): Unit = {

    val numPartitions = 4 //args(0).toInt
    val collectors = new Array[SocketCollector](numPartitions)

    // create the generator threads
    for (i <- 0 until collectors.length) {
      collectors(i) = new SocketCollector(BASE_PORT + i)
    }

    runGenerator(collectors)
  }
}

class SocketCollector(val port: Int) extends Collector[Event] {

  val channel = SocketChannel.open(new InetSocketAddress(InetAddress.getByName("localhost"), port))
  channel.configureBlocking(true)
  channel.finishConnect()

  val buffer = ByteBuffer.allocateDirect(4096).order(ByteOrder.LITTLE_ENDIAN)

  override def collect(t: Event): Unit = {
    if (buffer.remaining() < 8) {
      buffer.flip()
      channel.write(buffer)
      buffer.clear()
    }

    buffer.putInt(t.sourceAddress)
    buffer.putInt(t.event)
  }

  override def close(): Unit = {
    if (buffer.position() > 0) {
      buffer.flip()
      channel.write(buffer)
    }
    channel.close()
  }
} 
Example 142
Source File: IpAddress.scala    From fastunfolding   with Apache License 2.0 5 votes vote down vote up
package com.soteradefense.dga.graphx.louvain

import java.net.InetAddress
import java.nio.ByteBuffer

object IpAddress {
  
  def toString(address: Long) = {
    val byteBuffer = ByteBuffer.allocate(8)
    val addressBytes = byteBuffer.putLong(address)
    // The below is needed because we don't have an unsigned Long, and passing a byte array
    // with more than 4 bytes causes InetAddress to interpret it as a (bad) IPv6 address
    val tmp = new Array[Byte](4)
    Array.copy(addressBytes.array, 4, tmp, 0, 4)
    InetAddress.getByAddress(tmp).getHostAddress()
  }
  
  
  def toLong(_address: String): Long = {
    val address = try {
      InetAddress.getByName(_address)
    } catch {
      case e: Throwable => throw new IllegalArgumentException("Could not parse address: " + e.getMessage)
    }
    val addressBytes = address.getAddress
    val bb = ByteBuffer.allocate(8)
    addressBytes.length match {
      case 4 =>
        bb.put(Array[Byte](0,0,0,0)) // Need a filler
        bb.put(addressBytes)
      case n =>
        throw new IndexOutOfBoundsException("Expected 4 byte address, got " + n)
    }
    bb.getLong(0)
  }

} 
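A usage sketch (not part of the original file) showing the round trip between the Long and dotted-quad representations:

object IpAddressDemo extends App {
  val asLong = IpAddress.toLong("192.168.1.10")
  // Only the low 32 bits carry the IPv4 address, hence the 4-byte copy in toString.
  println(asLong)                      // 3232235786
  println(IpAddress.toString(asLong))  // 192.168.1.10
}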
Example 143
Source File: MetadataService.scala    From kafka-with-akka-streams-kafka-streams-tutorial   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.scala.kafkastreams.queriablestate

import java.net.InetAddress
import java.util

import com.lightbend.java.configuration.kafka.ApplicationKafkaParameters
import com.lightbend.scala.kafkastreams.store.HostStoreInfo
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.streams.KafkaStreams
import org.apache.kafka.streams.state.{HostInfo, StreamsMetadata}

import scala.collection.JavaConverters._


  // Get metadata for all of the instances of this Kafka Streams application hosting the store
  def streamsMetadataForStore(store: String, port: Int): util.List[HostStoreInfo] = {
    val metadata = streams.allMetadataForStore(store).asScala.toSeq match {
      case list if list.nonEmpty => list
      case _ => Seq(new StreamsMetadata(
        new HostInfo("localhost", port),
        new util.HashSet[String](util.Arrays.asList(ApplicationKafkaParameters.STORE_NAME)),
        util.Collections.emptySet[TopicPartition]))
    }
    mapInstancesToHostStoreInfo(metadata)
  }


  private def mapInstancesToHostStoreInfo(metadatas: Seq[StreamsMetadata]) = metadatas.map(convertMetadata(_)).asJava

  private def convertMetadata(metadata: StreamsMetadata) : HostStoreInfo = {
    val currentHost = metadata.host match{
      case host if host equalsIgnoreCase("localhost") =>
        try{InetAddress.getLocalHost.getHostAddress}
        catch {case t: Throwable => ""}
      case host => host
    }
     new HostStoreInfo(currentHost, metadata.port, metadata.stateStoreNames.asScala.toSeq)
  }
} 
Example 144
Source File: System.scala    From sumobot   with Apache License 2.0 5 votes vote down vote up
package com.sumologic.sumobot.plugins.system

import java.net.InetAddress
import java.util.Date

import com.sumologic.sumobot.core.Bootstrap
import com.sumologic.sumobot.core.model.IncomingMessage
import com.sumologic.sumobot.plugins.{BotPlugin, OperatorLimits}

import scala.concurrent.duration._

class System
  extends BotPlugin
    with OperatorLimits {
  override protected def help =
    """Low-level system stuff:
      |
      |where are you running? - And I'll tell you which host I'm on.
      |when did you start? - I'll tell you when I was started.
      |die on <hostname> - Will cause me to shut down if you're allowed and I'm on that host.
    """.stripMargin

  private val WhereAreYou = matchText("where are you running.*")
  private val WhenDidYouStart = matchText("when did you (start|launch|boot).*")
  private val DieOn = matchText("die on ([a-zA-Z0-9\\.\\-]+)") // Per RFC952.

  private val hostname = InetAddress.getLocalHost.getHostName
  private val hostAddress = InetAddress.getLocalHost.getHostAddress
  private val startTime = new Date().toString

  override protected def receiveIncomingMessage = {
    case message@IncomingMessage(WhereAreYou(), true, _, _, _, _, _) =>
      message.respond(s"I'm running at $hostname ($hostAddress)")
    case message@IncomingMessage(WhenDidYouStart(_), true, _, _, _, _, _) =>
      message.respond(s"I started at $startTime")
    case message@IncomingMessage(DieOn(host), true, _, _, _, _, _) =>

      if (host.trim.equalsIgnoreCase(hostname)) {
        if (!sentByOperator(message)) {
          message.respond(s"Sorry, ${message.sentBy.slackReference}, I can't do that.")
        } else {
          message.respond(s"Sayonara, ${message.sentBy.slackReference}!")
          context.system.scheduler.scheduleOnce(2.seconds, new Runnable {
            override def run() = {
              Bootstrap.shutdown()
            }
          })
        }
      }
  }
} 
Example 145
Source File: HostAddressResolver.scala    From seahorse-workflow-executor   with Apache License 2.0 5 votes vote down vote up
package io.deepsense.workflowexecutor.executor

import java.net.{Inet6Address, InetAddress, NetworkInterface}

import scala.util.Try

import io.deepsense.commons.utils.Logging

object HostAddressResolver extends Logging {

  def findHostAddress(): InetAddress = {
    import collection.JavaConversions._
    Try {
      val interfaces = NetworkInterface.getNetworkInterfaces.toIterable
      interfaces.flatMap { n =>
        n.getInetAddresses.toIterable.filter {
          address =>
            !address.isInstanceOf[Inet6Address] &&
            !address.isLoopbackAddress &&
            !address.isSiteLocalAddress &&
            !address.isLinkLocalAddress &&
            !address.isAnyLocalAddress &&
            !address.isMulticastAddress &&
            !(address.getHostAddress == "255.255.255.255")
        }
      }
    }.get.headOption.getOrElse(InetAddress.getByName("127.0.0.1"))
  }
} 
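Usage is a single call; if no interface address passes all of the filters above, the helper falls back to 127.0.0.1. A small sketch, not part of the original file:

// Resolve the address once at startup and advertise it to the rest of the cluster.
val executorAddress: java.net.InetAddress = HostAddressResolver.findHostAddress()
println(s"Workflow executor reachable at ${executorAddress.getHostAddress}")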
Example 146
Source File: system_channel.scala    From libisabelle   with Apache License 2.0 5 votes vote down vote up
package isabelle


import java.io.{InputStream, OutputStream}
import java.net.{ServerSocket, InetAddress}


object System_Channel
{
  def apply(): System_Channel = new System_Channel
}

class System_Channel private
{
  private val server = new ServerSocket(0, 50, InetAddress.getByName("127.0.0.1"))

  val server_name: String = "127.0.0.1:" + server.getLocalPort
  override def toString: String = server_name

  def rendezvous(): (OutputStream, InputStream) =
  {
    val socket = server.accept
    socket.setTcpNoDelay(true)
    (socket.getOutputStream, socket.getInputStream)
  }

  def accepted() { server.close }
} 
Example 147
Source File: system_channel.scala    From libisabelle   with Apache License 2.0 5 votes vote down vote up
package isabelle


import java.io.{InputStream, OutputStream}
import java.net.{ServerSocket, InetAddress}


object System_Channel
{
  def apply(): System_Channel = new System_Channel
}

class System_Channel private
{
  private val server = new ServerSocket(0, 50, Server.localhost)

  val address: String = Server.print_address(server.getLocalPort)
  val password: String = UUID.random().toString

  override def toString: String = address

  def shutdown() { server.close }

  def rendezvous(): (OutputStream, InputStream) =
  {
    val socket = server.accept
    try {
      val out_stream = socket.getOutputStream
      val in_stream = socket.getInputStream

      if (Byte_Message.read_line(in_stream).map(_.text) == Some(password)) (out_stream, in_stream)
      else {
        out_stream.close
        in_stream.close
        error("Failed to connect system channel: bad password")
      }
    }
    finally { shutdown() }
  }
} 
Example 148
Source File: system_channel.scala    From libisabelle   with Apache License 2.0 5 votes vote down vote up
package isabelle


import java.io.{InputStream, OutputStream}
import java.net.{ServerSocket, InetAddress}


object System_Channel
{
  def apply(): System_Channel = new System_Channel
}

class System_Channel private
{
  private val server = new ServerSocket(0, 50, InetAddress.getByName("127.0.0.1"))

  val server_name: String = "127.0.0.1:" + server.getLocalPort
  override def toString: String = server_name

  def rendezvous(): (OutputStream, InputStream) =
  {
    val socket = server.accept
    socket.setTcpNoDelay(true)
    (socket.getOutputStream, socket.getInputStream)
  }

  def accepted() { server.close }
} 
Example 149
Source File: Main.scala    From spark-vector   with Apache License 2.0 5 votes vote down vote up
package com.actian.spark_vector.provider

import java.nio.channels.ServerSocketChannel

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

import resource.managed
import java.net.InetAddress

import com.actian.spark_vector.util.Logging

object Main extends App with Logging {
  import ProviderAuth._

  private val conf = new SparkConf()
    .setAppName("Spark-Vector external tables provider")
    .set("spark.task.maxFailures", "1")
    .set("spark.sql.caseSensitive", "false")
  logInfo(s"Starting Spark-Vector provider with config options: ${conf.getAll.toMap}")

  private var builder = SparkSession.builder.config(conf)
  if (conf.getBoolean("spark.vector.provider.hive", true)) {
    builder = builder.enableHiveSupport()
  }
  private val session = builder.getOrCreate()
  private lazy val handler = new RequestHandler(session, ProviderAuth(generateUsername, generatePassword))

  sys.addShutdownHook {
    session.close()
    logInfo("Shutting down Spark-Vector provider...")
  }
  for { server <- managed(ServerSocketChannel.open.bind(null)) } {
    logInfo(s"Spark-Vector provider initialized and starting listening for requests on port ${server.socket.getLocalPort}")
    println(s"vector_provider_hostname=${InetAddress.getLocalHost.getHostName}")
    println(s"vector_provider_port=${server.socket.getLocalPort}")
    println(s"vector_provider_username=${handler.auth.username}")
    println(s"vector_provider_password=${handler.auth.password}")
    while (true) handler.handle(server.accept)
  }
} 
Example 150
Source File: IPConversion.scala    From spark-vector   with Apache License 2.0 5 votes vote down vote up
package com.actian.spark_vector.colbuffer.util

import java.math.BigInteger
import java.net.InetAddress

object IPConversion {
  
  def ipv4IntegerToString(i: Int): String = {
    val p = i ^ (1 << 31)
    Array(((p >> 24) & 0xFF), ((p >> 16) & 0xFF), ((p >> 8) & 0xFF), ( p & 0xFF)).mkString(".")
  }
  
  def ipv4StringToInteger(ipstr: String): Int = {
    (ipstr.split("\\.").reverse.zipWithIndex.map(p => p._1.toInt * math.pow(256 , p._2).toLong).sum ^ (1 << 31)).toInt
  }
  
  def ipv6LongsToString(lower: Long, upper: Long): String = {
    var l1 = lower
    var l2 = upper ^ (1L << 63)
    val iparr = for (i <- 0 until 8) yield {
      if (i < 4) {
        val s = (l1 & 0xFFFF).toHexString
        l1 = l1 >> 16
        s
      } else {
        val s = (l2 & 0xFFFF).toHexString
        l2 = l2 >> 16
        s
      }
    }
    var ip = iparr.reverse.mkString(":")
    
    if (ip.contains("0:0")) {
      ip = ip.replaceAll("0(:0)+", ":")
      if (ip.length == 1)
        ip += ":"
    }
    ip
  }
  
  def ipv6StringToLongs(ipstr: String): (Long, Long) = {
    val addr = InetAddress.getByName(ipstr)
    val value = new BigInteger(1, addr.getAddress)
    val lower = value.longValue()
    val upper = (value.shiftRight(64)).longValue() ^ (1L << 63)
    (lower, upper)
  }
} 
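The XOR with the sign bit (1 << 31 for IPv4, 1L << 63 for the upper IPv6 long) appears to bias the unsigned address into Java's signed range while keeping the ordering intact. A small round-trip sketch, not from the original file:

object IPConversionDemo extends App {
  val encoded = IPConversion.ipv4StringToInteger("10.0.0.1")
  // 10.0.0.1 is 0x0A000001; XOR-ing the sign bit makes the stored Int negative,
  // and decoding XORs it back out.
  println(encoded)                                   // -1979711487
  println(IPConversion.ipv4IntegerToString(encoded)) // 10.0.0.1
}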
Example 151
Source File: LDAPAuthenticatorSpec.scala    From coral   with Apache License 2.0 5 votes vote down vote up
package io.coral.api.security

import java.net.InetAddress
import akka.actor.Props
import akka.testkit.TestActorRef
import com.typesafe.config.ConfigFactory
import com.unboundid.ldap.listener.{InMemoryListenerConfig, InMemoryDirectoryServer, InMemoryDirectoryServerConfig}
import io.coral.TestHelper
import io.coral.actors.RootActor
import io.coral.actors.RootActor.{CreateHelperActors, CreateTestActors}
import io.coral.api.CoralConfig
import io.coral.api.security.Authenticator.Invalidate
import io.coral.cluster.ClusterMonitor

class LDAPAuthenticatorSpec
	extends AuthenticatorSpec("ldap") {
	override def createActorSystem = {
		val c = new CoralConfig(ConfigFactory.parseString(
			s"""{
			   |akka.actor.provider = "akka.actor.LocalActorRefProvider"
			   |coral {
			   |  cluster.enable = false
			   |  authentication {
			   |    mode = "ldap"
			   |    ldap {
			   |      host = "${InetAddress.getLocalHost.getHostAddress}"
			   |      port = 1234
			   |      bind-dn = "uid={user},ou=People,dc=example,dc=com"
			   |      mfa {
			   |        enabled = false
			   |      }
			   |    }
			   |  }
			   |}}""".stripMargin).withFallback(ConfigFactory.load()))
		initiateWithConfig(c)
	}

	var ldapServer: InMemoryDirectoryServer = _

	override def beforeAll() {
		startInMemoryLDAPServer()

		root = TestActorRef[RootActor](new RootActor(), "root")
		admin = system.actorSelection("/user/root/admin")

		root ! CreateHelperActors()

		cassandra = TestHelper.createCassandraActor(
			config.coral.cassandra.contactPoints.head.getHostName,
			config.coral.cassandra.port)
		TestHelper.prepareTables()
		TestHelper.clearAllTables()

		system.actorOf(Props(new ClusterMonitor(config)), "clusterMonitor")
		authenticator = system.actorSelection("/user/root/authenticator")
		permissionHandler = system.actorSelection("/user/root/authenticator/permissionHandler")

		runtimeUUID1 = TestHelper.createStandardRuntime("runtime1", user1, admin)
		runtimeUUID2 = TestHelper.createStandardRuntime("runtime2", user2, admin)
		runtimeUUID3 = TestHelper.createStandardRuntime("runtime3", user3, admin)

		userUUID1 = TestHelper.getUserUUIDFromUniqueName(authenticator, user1)
		userUUID2 = TestHelper.getUserUUIDFromUniqueName(authenticator, user2)
		userUUID3 = TestHelper.getUserUUIDFromUniqueName(authenticator, user3)

		authenticator ! Invalidate()

		Thread.sleep(500)
	}

	def startInMemoryLDAPServer() {
		val config = new InMemoryDirectoryServerConfig("dc=example,dc=com")
		config.setSchema(null)
		config.setListenerConfigs(InMemoryListenerConfig.createLDAPConfig(
			"LDAP listener", InetAddress.getLocalHost, 1234, null))
		config.addAdditionalBindCredentials("cn=Directory Manager", "password")
		ldapServer = new InMemoryDirectoryServer(config)
		val testDataFile = getClass.getResource("testdata.ldif").getFile
		ldapServer.importFromLDIF(true, testDataFile)
		ldapServer.startListening()
	}

	override def afterAll() {
		ldapServer.shutDown(true)
	}

	"An LDAP actor" should {
		"Properly create a connection" in {
			
		}
	}
} 
Example 152
Source File: PortProvider.scala    From ksql-streams   with Apache License 2.0 5 votes vote down vote up
package com.landoop.kstreams.sql.cluster

import java.net.{InetAddress, ServerSocket}

object PortProvider {
  def appy(count: Int): Vector[Int] = {
    (1 to count).map { _ =>
      val serverSocket = new ServerSocket(0, 0, InetAddress.getLocalHost)
      val port = serverSocket.getLocalPort
      serverSocket.close()
      port
    }.toVector
  }

  def one: Int = appy(1).head
} 
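A usage sketch; the socket is opened only to discover a free port and closed immediately, so there is a short window in which another process could claim the port before the caller binds it:

// Reserve three ephemeral ports for test fixtures (sketch, not part of the original file).
val Vector(kafkaPort, zookeeperPort, registryPort) = PortProvider.appy(3)
println(s"kafka=$kafkaPort zookeeper=$zookeeperPort schema-registry=$registryPort")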
Example 153
Source File: Init.scala    From cave   with MIT License 5 votes vote down vote up
package init

import java.net.InetAddress
import java.util.UUID

import com.amazonaws.services.kinesis.clientlibrary.lib.worker.{InitialPositionInStream, KinesisClientLibConfiguration, Worker}
import com.amazonaws.services.kinesis.metrics.impl.NullMetricsFactory
import com.cave.metrics.data.AwsConfig
import com.cave.metrics.data.influxdb.{InfluxConfiguration, InfluxDataSink}
import com.cave.metrics.data.kinesis.RecordProcessorFactory
import com.typesafe.config.ConfigFactory
import org.apache.commons.logging.LogFactory
import play.api.Play

import scala.util.Try

object Init {

  // Docker should place the stream name in this environment variable
  final val EnvStreamName = "STREAM_NAME"

  // The name of this application for Kinesis Client Library
  final val ApplicationName = "cave-db-worker"

  // CloudWatch Reporter parameters
  final val MetricsNamespace = s"metrics-$ApplicationName"
  final val MetricsBufferTime = 1000L
  final val MetricsBufferSize = 200

  final val ThreadWaitTimeout = 10000L

  private val Log = LogFactory.getLog("db-writer-app")

  val worker = createWorker()
  val workerThread = new Thread(worker)

  def start(): Unit = {
    workerThread.start()
  }

  def shutdown(): Unit = {
    worker.shutdown()
    Try (workerThread.join(ThreadWaitTimeout)) recover {
      case e: Exception =>
        Log.info(s"Caught exception while joining worker thread: $e")
    }
  }

  
  private[this] def createWorker(): Worker = {
    val configuration = Play.current.configuration
    val serviceConfFile = configuration.getString("serviceConf").getOrElse("db-writer-service.conf")
    val kinesisAppName = configuration.getString("appName").getOrElse(ApplicationName)
    val appConfig = ConfigFactory.load(serviceConfFile).getConfig("db-writer")
    val awsConfig = new AwsConfig(appConfig)

    val streamName = System.getenv(EnvStreamName) match {
      case "processed" => awsConfig.processedStreamName
      case _ => awsConfig.rawStreamName
    }

    val workerId = s"${InetAddress.getLocalHost.getCanonicalHostName}:${UUID.randomUUID()}"

    Log.info(s"Running $ApplicationName for stream $streamName as worker $workerId")

    // a connection to the InfluxDB backend
    val influxConfig = appConfig.getConfig("influx")

    new Worker(
      // a factory for record processors
      new RecordProcessorFactory(
        awsConfig,
        new InfluxDataSink(InfluxConfiguration(influxConfig))),

      // a client library instance
      new KinesisClientLibConfiguration(kinesisAppName, streamName, awsConfig.awsCredentialsProvider, workerId)
        .withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON),

      new NullMetricsFactory)
      // TODO: check out the possibility to use CloudWatch Metrics
      // new CWMetricsFactory(awsConfig.awsCredentialsProvider, MetricsNamespace, MetricsBufferTime, MetricsBufferSize))
  }
} 
Example 154
Source File: EmbeddedKafkaSpecSupport.scala    From embedded-kafka-schema-registry   with MIT License 5 votes vote down vote up
package net.manub.embeddedkafka.schemaregistry

import java.net.{InetAddress, Socket}

import net.manub.embeddedkafka.schemaregistry.EmbeddedKafkaSpecSupport.{
  Available,
  NotAvailable,
  ServerStatus
}
import org.scalatest.Assertion
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Milliseconds, Seconds, Span}
import org.scalatest.wordspec.AnyWordSpecLike

import scala.util.{Failure, Success, Try}

trait EmbeddedKafkaSpecSupport
    extends AnyWordSpecLike
    with Matchers
    with Eventually
    with IntegrationPatience {

  implicit val config: PatienceConfig =
    PatienceConfig(Span(1, Seconds), Span(100, Milliseconds))

  def expectedServerStatus(port: Int, expectedStatus: ServerStatus): Assertion =
    eventually {
      status(port) shouldBe expectedStatus
    }

  private def status(port: Int): ServerStatus = {
    Try(new Socket(InetAddress.getByName("localhost"), port)) match {
      case Failure(_) => NotAvailable
      case Success(_) => Available
    }
  }
}

object EmbeddedKafkaSpecSupport {
  sealed trait ServerStatus
  case object Available    extends ServerStatus
  case object NotAvailable extends ServerStatus
} 
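In a concrete spec the helper is typically used to assert that the embedded broker eventually accepts TCP connections. A sketch, with the port number as an assumption rather than a value taken from the original project:

class BrokerStartupSpec extends EmbeddedKafkaSpecSupport {
  import EmbeddedKafkaSpecSupport.Available

  "An embedded Kafka broker" should {
    "accept connections once started" in {
      // 6001 is assumed here; use whatever port the embedded broker is configured with.
      expectedServerStatus(6001, Available)
    }
  }
}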
Example 155
Source File: PeerKey.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.network

import java.net.{InetAddress, SocketAddress}

import io.netty.channel.ChannelHandlerContext
import io.netty.channel.embedded.EmbeddedChannel
import io.netty.channel.socket.SocketChannel

sealed trait PeerKey
case object PeerKey {
  case class InetPeerKey(host: InetAddress, nonce: Long)     extends PeerKey
  case class SocketPeerKey(host: SocketAddress, nonce: Long) extends PeerKey

  def apply(ctx: ChannelHandlerContext, nodeNonce: Long): Option[PeerKey] = ctx.channel() match {
    case x: SocketChannel   => Option(x.remoteAddress()).map(_.getAddress).map(PeerKey.InetPeerKey(_, nodeNonce))
    case x: EmbeddedChannel => Option(x.remoteAddress()).map(PeerKey.SocketPeerKey(_, nodeNonce))
    case x                  => throw new IllegalArgumentException(s"Can't get PeerKey from ${id(ctx)}, $x")
  }
} 
Example 156
Source File: DhtNodeSelector.scala    From jvm-toxcore-c   with GNU General Public License v3.0 5 votes vote down vote up
package im.tox.tox4j

import java.io.IOException
import java.net.{ InetAddress, Socket }

import com.typesafe.scalalogging.Logger
import im.tox.tox4j.core.ToxCore
import im.tox.tox4j.impl.jni.ToxCoreImplFactory
import org.scalatest.Assertions
import org.slf4j.LoggerFactory

object DhtNodeSelector extends Assertions {

  private val logger = Logger(LoggerFactory.getLogger(this.getClass))
  private var selectedNode: Option[DhtNode] = Some(ToxCoreTestBase.nodeCandidates(0))

  @SuppressWarnings(Array("org.wartremover.warts.Equals"))
  private def tryConnect(node: DhtNode): Option[DhtNode] = {
    var socket: Socket = null
    try {
      socket = new Socket(InetAddress.getByName(node.ipv4), node.udpPort.value)
      assume(socket.getInputStream != null)
      Some(node)
    } catch {
      case e: IOException =>
        logger.info(s"TCP connection failed (${e.getMessage})")
        None
    } finally {
      if (socket != null) {
        socket.close()
      }
    }
  }

  private def tryBootstrap(
    withTox: (Boolean, Boolean) => (ToxCore => Option[DhtNode]) => Option[DhtNode],
    node: DhtNode,
    udpEnabled: Boolean
  ): Option[DhtNode] = {
    val protocol = if (udpEnabled) "UDP" else "TCP"
    val port = if (udpEnabled) node.udpPort else node.tcpPort
    logger.info(s"Trying to bootstrap with ${node.ipv4}:$port using $protocol")

    withTox(true, udpEnabled) { tox =>
      val status = new ConnectedListener
      if (!udpEnabled) {
        tox.addTcpRelay(node.ipv4, port, node.dhtId)
      }
      tox.bootstrap(node.ipv4, port, node.dhtId)

      // Try bootstrapping for 10 seconds.
      (0 to 10000 / tox.iterationInterval) find { _ =>
        tox.iterate(status)(())
        Thread.sleep(tox.iterationInterval)
        status.isConnected
      } match {
        case Some(time) =>
          logger.info(s"Bootstrapped successfully after ${time * tox.iterationInterval}ms using $protocol")
          Some(node)
        case None =>
          logger.info(s"Unable to bootstrap with $protocol")
          None
      }
    }
  }

  private def findNode(withTox: (Boolean, Boolean) => (ToxCore => Option[DhtNode]) => Option[DhtNode]): DhtNode = {
    DhtNodeSelector.selectedNode match {
      case Some(node) => node
      case None =>
        logger.info("Looking for a working bootstrap node")

        DhtNodeSelector.selectedNode = ToxCoreTestBase.nodeCandidates find { node =>
          logger.info(s"Trying to establish a TCP connection to ${node.ipv4}")

          (for {
            node <- tryConnect(node)
            node <- tryBootstrap(withTox, node, udpEnabled = true)
            node <- tryBootstrap(withTox, node, udpEnabled = false)
          } yield node).isDefined
        }

        assume(DhtNodeSelector.selectedNode.nonEmpty, "No viable nodes for bootstrap found; cannot test")
        DhtNodeSelector.selectedNode.get
    }
  }

  def node: DhtNode = findNode(ToxCoreImplFactory.withToxUnit[Option[DhtNode]])

} 
Example 157
Source File: ToxCoreTestBase.scala    From jvm-toxcore-c   with GNU General Public License v3.0 5 votes vote down vote up
package im.tox.tox4j

import java.io.IOException
import java.net.{ InetAddress, Socket }
import java.util.Random

import org.jetbrains.annotations.NotNull
import org.scalatest.Assertions

object ToxCoreTestBase extends Assertions {

  private[tox4j] val nodeCandidates = Seq(
    new DhtNode("tox.initramfs.io", "tox.initramfs.io", 33445, "3F0A45A268367C1BEA652F258C85F4A66DA76BCAA667A49E770BCC4917AB6A25"),
    new DhtNode("tox.verdict.gg", null, 33445, "1C5293AEF2114717547B39DA8EA6F1E331E5E358B35F9B6B5F19317911C5F976")
  )

  @NotNull def randomBytes(length: Int): Array[Byte] = {
    val array = new Array[Byte](length)
    new Random().nextBytes(array)
    array
  }

  @NotNull
  def readablePublicKey(@NotNull id: Array[Byte]): String = {
    val str = new StringBuilder
    id foreach { c => str.append(f"$c%02X") }
    str.toString()
  }

  @NotNull
  def parsePublicKey(@NotNull id: String): Array[Byte] = {
    val publicKey = new Array[Byte](id.length / 2)
    publicKey.indices foreach { i =>
      publicKey(i) =
        ((fromHexDigit(id.charAt(i * 2)) << 4) +
          fromHexDigit(id.charAt(i * 2 + 1))).toByte
    }
    publicKey
  }

  private def fromHexDigit(c: Char): Byte = {
    val digit =
      if (false) { 0 }
      else if ('0' to '9' contains c) { c - '0' }
      else if ('A' to 'F' contains c) { c - 'A' + 10 }
      else if ('a' to 'f' contains c) { c - 'a' + 10 }
      else { throw new IllegalArgumentException(s"Non-hex digit character: $c") }
    digit.toByte
  }

  @SuppressWarnings(Array("org.wartremover.warts.Equals"))
  private def hasConnection(ip: String, port: Int): Option[String] = {
    var socket: Socket = null
    try {
      socket = new Socket(InetAddress.getByName(ip), port)
      if (socket.getInputStream == null) {
        Some("Socket input stream is null")
      } else {
        None
      }
    } catch {
      case e: IOException =>
        Some(s"A network connection can't be established to $ip:$port: ${e.getMessage}")
    } finally {
      if (socket != null) {
        socket.close()
      }
    }
  }

  def checkIPv4: Option[String] = {
    hasConnection("8.8.8.8", 53)
  }

  def checkIPv6: Option[String] = {
    hasConnection("2001:4860:4860::8888", 53)
  }

  protected[tox4j] def assumeIPv4(): Unit = {
    assume(checkIPv4.isEmpty)
  }

  protected[tox4j] def assumeIPv6(): Unit = {
    assume(checkIPv6.isEmpty)
  }

} 
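A round-trip sketch for the hex-key helpers (not part of the original file):

object KeyRoundTrip extends App {
  val hexKey = "3F0A45A268367C1BEA652F258C85F4A66DA76BCAA667A49E770BCC4917AB6A25"
  val raw = ToxCoreTestBase.parsePublicKey(hexKey)
  // 64 hex digits decode to 32 raw bytes, and re-encoding restores the original string.
  println(raw.length)                                       // 32
  println(ToxCoreTestBase.readablePublicKey(raw) == hexKey) // true
}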
Example 158
Source File: BindAddress.scala    From zipkin-mesos-framework   with Apache License 2.0 5 votes vote down vote up
package net.elodina.mesos.zipkin.utils

import java.net.{Inet4Address, InetAddress, NetworkInterface}
import java.util
import scala.collection.JavaConversions._

class BindAddress(s: String) {
  private var _source: String = null
  private var _value: String = null

  def source: String = _source

  def value: String = _value

  parse()

  def parse() = {
    val idx = s.indexOf(":")
    if (idx != -1) {
      _source = s.substring(0, idx)
      _value = s.substring(idx + 1)
    } else {
      _value = s
    }

    if (source != null && source != "if")
      throw new IllegalArgumentException(s)
  }

  def resolve(): String = {
    _source match {
      case null => resolveAddress(_value)
      case "if" => resolveInterfaceAddress(_value)
      case _ => throw new IllegalStateException("Failed to resolve " + s)
    }
  }

  def resolveAddress(addressOrMask: String): String = {
    if (!addressOrMask.endsWith("*")) return addressOrMask
    val prefix = addressOrMask.substring(0, addressOrMask.length - 1)

    for (ni <- NetworkInterface.getNetworkInterfaces) {
      val address = ni.getInetAddresses.find(_.getHostAddress.startsWith(prefix)).orNull
      if (address != null) return address.getHostAddress
    }

    throw new IllegalStateException("Failed to resolve " + s)
  }

  def resolveInterfaceAddress(name: String): String = {
    val ni = NetworkInterface.getNetworkInterfaces.find(_.getName == name).orNull
    if (ni == null) throw new IllegalStateException("Failed to resolve " + s)

    val addresses: util.Enumeration[InetAddress] = ni.getInetAddresses
    val address = addresses.find(_.isInstanceOf[Inet4Address]).orNull
    if (address != null) return address.getHostAddress

    throw new IllegalStateException("Failed to resolve " + s)
  }

  override def hashCode(): Int = 31 * _source.hashCode + _value.hashCode

  override def equals(o: scala.Any): Boolean = {
    if (o == null || o.getClass != this.getClass) return false
    val address = o.asInstanceOf[BindAddress]
    _source == address._source && _value == address._value
  }

  override def toString: String = s
} 
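The accepted formats are a literal address, a prefix wildcard, or if:<interface name>. A usage sketch (the concrete addresses and the interface name are assumptions):

// Literal address: returned as-is.
println(new BindAddress("192.168.0.10").resolve())
// Prefix wildcard: first interface address starting with the prefix.
println(new BindAddress("192.168.*").resolve())
// Interface lookup: first IPv4 address of the named interface.
println(new BindAddress("if:eth0").resolve())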
Example 159
Source File: PortProvider.scala    From kafka-testing   with Apache License 2.0 5 votes vote down vote up
package com.landoop.kafka.testing

import java.net.{InetAddress, ServerSocket}

object PortProvider {
  def appy(count: Int): Vector[Int] = {
    (1 to count).map { _ =>
      val serverSocket = new ServerSocket(0, 0, InetAddress.getLocalHost)
      val port = serverSocket.getLocalPort
      serverSocket.close()
      port
    }.toVector
  }

  def one: Int = appy(1).head
} 
Example 160
Source File: KeyValueRDD.scala    From couchbase-spark-connector   with Apache License 2.0 5 votes vote down vote up
package com.couchbase.spark.rdd


import java.net.InetAddress
import java.util.zip.CRC32

import com.couchbase.client.core.config.CouchbaseBucketConfig
import com.couchbase.client.core.message.cluster.{GetClusterConfigRequest, GetClusterConfigResponse}
import com.couchbase.client.java.document.Document
import com.couchbase.spark.Logging
import com.couchbase.spark.connection.{CouchbaseConfig, CouchbaseConnection, KeyValueAccessor}
import org.apache.spark.rdd.RDD
import org.apache.spark.{Partition, SparkContext, TaskContext}

import scala.reflect.ClassTag
import rx.lang.scala.JavaConversions._

import scala.concurrent.duration.Duration

class KeyValuePartition(id: Int, docIds: Seq[String], loc: Option[InetAddress]) extends Partition {
  override def index: Int = id
  def ids: Seq[String] = docIds
  def location: Option[InetAddress] = loc
  override def toString = s"KeyValuePartition($id, $docIds, $loc)"
}

class KeyValueRDD[D <: Document[_]]
  (@transient private val sc: SparkContext, ids: Seq[String], bname: String = null,
   timeout: Option[Duration] = None)
  (implicit ct: ClassTag[D])
  extends RDD[D](sc, Nil) {

  private val cbConfig = CouchbaseConfig(sc.getConf)
  private val bucketName = Option(bname).getOrElse(cbConfig.buckets.head.name)

  override def compute(split: Partition, context: TaskContext): Iterator[D] = {
    val p = split.asInstanceOf[KeyValuePartition]
    new KeyValueAccessor[D](cbConfig, p.ids, bucketName, timeout).compute()
  }

  override protected def getPartitions: Array[Partition] = {
    val core = CouchbaseConnection().bucket(cbConfig, bucketName).core()

    val req = new GetClusterConfigRequest()
    val config = toScalaObservable(core.send[GetClusterConfigResponse](req))
      .map(c => {
        logWarning(c.config().bucketConfigs().toString)
        logWarning(bucketName)
        c.config().bucketConfig(bucketName)
      })
      .toBlocking
      .single

    val parts = config match {
      case bucketConfig: CouchbaseBucketConfig =>
        val numPartitions = bucketConfig.numberOfPartitions()
        var partitionIndex = 0
        ids.groupBy(id => {
          val crc32 = new CRC32()
          crc32.update(id.getBytes("UTF-8"))
          val rv = (crc32.getValue >> 16) & 0x7fff
          rv.toInt & numPartitions - 1
        }).map(grouped => {
          val hostname = Some(
            bucketConfig.nodeAtIndex(bucketConfig.nodeIndexForMaster(grouped._1, false)).hostname()
          )
          val currentIdx = partitionIndex
          partitionIndex += 1
          new KeyValuePartition(currentIdx, grouped._2,
            Some(InetAddress.getByName(hostname.get)))
        }).toArray
      case _ =>
        logWarning("Memcached preferred locations currently not supported.")
        Array(new KeyValuePartition(0, ids, None))
    }

    parts.asInstanceOf[Array[Partition]]
  }

  override protected def getPreferredLocations(split: Partition): Seq[String] = {
    val p = split.asInstanceOf[KeyValuePartition]
    if (p.location.isDefined) {
      Seq(p.location.get.getHostName, p.location.get.getHostAddress)
    } else {
      Nil
    }
  }

} 
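The grouping in getPartitions mirrors Couchbase's key-to-vBucket mapping: CRC32 of the key, bits 16-30 of the checksum, then reduced modulo the partition count (a power of two, typically 1024). The same computation in isolation, as a sketch not taken from the original file:

import java.util.zip.CRC32

// numPartitions must be a power of two for the mask to act as a modulo; 1024 is the usual vBucket count.
def vbucketFor(id: String, numPartitions: Int = 1024): Int = {
  val crc32 = new CRC32()
  crc32.update(id.getBytes("UTF-8"))
  val rv = (crc32.getValue >> 16) & 0x7fff
  rv.toInt & (numPartitions - 1)
}

println(vbucketFor("airline_10123"))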
Example 161
Source File: SubdocLookupRDD.scala    From couchbase-spark-connector   with Apache License 2.0 5 votes vote down vote up
package com.couchbase.spark.rdd

import java.net.InetAddress
import java.util.zip.CRC32

import com.couchbase.client.core.config.CouchbaseBucketConfig
import com.couchbase.client.core.message.cluster.{GetClusterConfigRequest, GetClusterConfigResponse}
import com.couchbase.spark.Logging
import com.couchbase.spark.connection._
import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.rdd.RDD
import rx.lang.scala.JavaConversions._

import scala.concurrent.duration.Duration


class SubdocLookupPartition(id: Int, specs: Seq[SubdocLookupSpec], loc: Option[InetAddress])
  extends Partition {
  override def index: Int = id
  def ids: Seq[SubdocLookupSpec] = specs
  def location: Option[InetAddress] = loc
  override def toString = s"SubdocLookupPartition($id, $ids, $loc)"
}


class SubdocLookupRDD(@transient private val sc: SparkContext, specs: Seq[SubdocLookupSpec],
                      bname: String = null, timeout: Option[Duration] = None)
  extends RDD[SubdocLookupResult](sc, Nil) {

  private val cbConfig = CouchbaseConfig(sc.getConf)
  private val bucketName = Option(bname).getOrElse(cbConfig.buckets.head.name)


  override def compute(split: Partition, context: TaskContext): Iterator[SubdocLookupResult] = {
    val p = split.asInstanceOf[SubdocLookupPartition]
    new SubdocLookupAccessor(cbConfig, p.ids, bucketName, timeout).compute()
  }

  override protected def getPartitions: Array[Partition] = {
    val core = CouchbaseConnection().bucket(cbConfig, bucketName).core()

    val req = new GetClusterConfigRequest()
    val config = toScalaObservable(core.send[GetClusterConfigResponse](req))
      .map(c => {
        logWarning(c.config().bucketConfigs().toString)
        logWarning(bucketName)
        c.config().bucketConfig(bucketName)
      })
      .toBlocking
      .single

    val parts = config match {
      case bucketConfig: CouchbaseBucketConfig =>
        val numPartitions = bucketConfig.numberOfPartitions()
        var partitionIndex = 0
        specs.groupBy(spec => {
          val crc32 = new CRC32()
          crc32.update(spec.id.getBytes("UTF-8"))
          val rv = (crc32.getValue >> 16) & 0x7fff
          rv.toInt & numPartitions - 1
        }).map(grouped => {
          val hostname = Some(
            bucketConfig.nodeAtIndex(bucketConfig.nodeIndexForMaster(grouped._1, false)).hostname()
          )
          val currentIdx = partitionIndex
          partitionIndex += 1
          new SubdocLookupPartition(currentIdx, grouped._2,
            Some(InetAddress.getByName(hostname.get)))
        }).toArray
      case _ =>
        logWarning("Memcached preferred locations currently not supported.")
        Array(new SubdocLookupPartition(0, specs, None))
    }

    parts.asInstanceOf[Array[Partition]]
  }

  override protected def getPreferredLocations(split: Partition): Seq[String] = {
    val p = split.asInstanceOf[SubdocLookupPartition]
    if (p.location.isDefined) {
      Seq(p.location.get.getHostName, p.location.get.getHostAddress)
    } else {
      Nil
    }
  }

} 
Example 162
Source File: SubdocMutateRDD.scala    From couchbase-spark-connector   with Apache License 2.0 5 votes vote down vote up
package com.couchbase.spark.rdd

import java.net.InetAddress
import java.util.zip.CRC32

import com.couchbase.client.core.config.CouchbaseBucketConfig
import com.couchbase.client.core.message.cluster.{GetClusterConfigRequest, GetClusterConfigResponse}
import com.couchbase.spark.connection._
import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.rdd.RDD
import rx.lang.scala.JavaConversions._

import scala.concurrent.duration.Duration

class SubdocMutationPartition(id: Int, specs: Seq[SubdocMutationSpec], loc: Option[InetAddress])
  extends Partition {
  override def index: Int = id
  def ids: Seq[SubdocMutationSpec] = specs
  def location: Option[InetAddress] = loc
  override def toString = s"SubdocMutatePartition($id, $ids, $loc)"
}

class SubdocMutateRDD(@transient private val sc: SparkContext, specs: Seq[SubdocMutationSpec],
                      bname: String = null, timeout: Option[Duration] = None)
  extends RDD[SubdocMutationResult](sc, Nil) {

  private val cbConfig = CouchbaseConfig(sc.getConf)
  private val bucketName = Option(bname).getOrElse(cbConfig.buckets.head.name)


  override def compute(split: Partition, context: TaskContext): Iterator[SubdocMutationResult] = {
    val p = split.asInstanceOf[SubdocMutationPartition]
    new SubdocMutationAccessor(cbConfig, p.ids, bucketName, timeout).compute()
  }

  override protected def getPartitions: Array[Partition] = {
    val core = CouchbaseConnection().bucket(cbConfig, bucketName).core()

    val req = new GetClusterConfigRequest()
    val config = toScalaObservable(core.send[GetClusterConfigResponse](req))
      .map(c => {
        logWarning(c.config().bucketConfigs().toString)
        logWarning(bucketName)
        c.config().bucketConfig(bucketName)
      })
      .toBlocking
      .single

    val parts = config match {
      case bucketConfig: CouchbaseBucketConfig =>
        val numPartitions = bucketConfig.numberOfPartitions()
        var partitionIndex = 0
        specs.groupBy(spec => {
          val crc32 = new CRC32()
          crc32.update(spec.id.getBytes("UTF-8"))
          val rv = (crc32.getValue >> 16) & 0x7fff
          rv.toInt & numPartitions - 1
        }).map(grouped => {
          val hostname = Some(
            bucketConfig.nodeAtIndex(bucketConfig.nodeIndexForMaster(grouped._1, false)).hostname()
          )
          val currentIdx = partitionIndex
          partitionIndex += 1
          new SubdocMutationPartition(currentIdx, grouped._2,
            Some(InetAddress.getByName(hostname.get)))
        }).toArray
      case _ =>
        logWarning("Memcached preferred locations currently not supported.")
        Array(new SubdocMutationPartition(0, specs, None))
    }

    parts.asInstanceOf[Array[Partition]]
  }

  override protected def getPreferredLocations(split: Partition): Seq[String] = {
    val p = split.asInstanceOf[SubdocMutationPartition]
    if (p.location.isDefined) {
      Seq(p.location.get.getHostName, p.location.get.getHostAddress)
    } else {
      Nil
    }
  }

} 
Example 163
Source File: BlacklistSpecification.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.network

import java.net.{InetAddress, InetSocketAddress}

import com.typesafe.config.ConfigFactory
import com.wavesplatform.settings.NetworkSettings
import net.ceedubs.ficus.Ficus._
import org.scalatest.{FeatureSpec, GivenWhenThen}

class BlacklistSpecification extends FeatureSpec with GivenWhenThen {
  private val config = ConfigFactory.parseString("""waves.network {
      |  known-peers = []
      |  file = null
      |  black-list-residence-time: 1s
      |}""".stripMargin).withFallback(ConfigFactory.load()).resolve()

  private val networkSettings = config.as[NetworkSettings]("waves.network")

  info("As a Peer")
  info("I want to blacklist other peers for certain time")
  info("So I can give them another chance after")

  feature("Blacklist") {
    scenario("Peer blacklist another peer") {

      Given("Peer database is empty")
      val peerDatabase = new PeerDatabaseImpl(networkSettings)

      def isBlacklisted(address: InetSocketAddress) = peerDatabase.blacklistedHosts.contains(address.getAddress)

      assert(peerDatabase.knownPeers.isEmpty)
      assert(peerDatabase.blacklistedHosts.isEmpty)

      When("Peer adds another peer to knownPeers")
      val address = new InetSocketAddress(InetAddress.getByName("localhost"), 1234)
      peerDatabase.touch(address)
      assert(peerDatabase.knownPeers.contains(address))
      assert(!isBlacklisted(address))

      And("Peer blacklists another peer")
      peerDatabase.blacklist(address.getAddress, "")
      assert(isBlacklisted(address))
      assert(!peerDatabase.knownPeers.contains(address))

      And("Peer waits for some time")
      Thread.sleep(networkSettings.blackListResidenceTime.toMillis + 500)

      Then("Another peer disappear from blacklist")
      assert(!isBlacklisted(address))

      And("Another peer became known")
      assert(peerDatabase.knownPeers.contains(address))
    }
  }
} 
Example 164
Source File: Time.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.utils

import java.net.{InetAddress, SocketTimeoutException}

import monix.eval.Task
import monix.execution.ExecutionModel
import monix.execution.schedulers.SchedulerService
import org.apache.commons.net.ntp.NTPUDPClient

import scala.concurrent.duration.DurationInt

trait Time {
  def correctedTime(): Long

  def getTimestamp(): Long
}

class NTP(ntpServer: String) extends Time with ScorexLogging with AutoCloseable {

  log.info("Initializing time")

  private val offsetPanicThreshold = 1000000L
  private val ExpirationTimeout    = 60.seconds
  private val RetryDelay           = 10.seconds
  private val ResponseTimeout      = 10.seconds

  private implicit val scheduler: SchedulerService =
    Schedulers.singleThread(name = "time-impl", reporter = log.error("Error in NTP", _), ExecutionModel.AlwaysAsyncExecution)

  private val client = new NTPUDPClient()
  client.setDefaultTimeout(ResponseTimeout.toMillis.toInt)

  @volatile private var offset = 0L
  private val updateTask: Task[Unit] = {
    def newOffsetTask: Task[Option[(InetAddress, java.lang.Long)]] = Task {
      try {
        client.open()
        val info = client.getTime(InetAddress.getByName(ntpServer))
        info.computeDetails()
        Option(info.getOffset).map { offset =>
          val r = if (Math.abs(offset) > offsetPanicThreshold) throw new Exception("Offset is suspiciously large") else offset
          (info.getAddress, r)
        }
      } catch {
        case _: SocketTimeoutException =>
          None
        case t: Throwable =>
          log.warn("Problems with NTP: ", t)
          None
      } finally {
        client.close()
      }
    }

    newOffsetTask.flatMap {
      case None if !scheduler.isShutdown => updateTask.delayExecution(RetryDelay)
      case Some((server, newOffset)) if !scheduler.isShutdown =>
        log.trace(s"Adjusting time with $newOffset milliseconds, source: ${server.getHostAddress}.")
        offset = newOffset
        updateTask.delayExecution(ExpirationTimeout)
      case _ => Task.unit
    }
  }

  def correctedTime(): Long = System.currentTimeMillis() + offset

  private var txTime: Long = 0

  def getTimestamp(): Long = {
    txTime = Math.max(correctedTime(), txTime + 1)
    txTime
  }

  private val taskHandle = updateTask.runAsyncLogErr

  override def close(): Unit = {
    log.info("Shutting down Time")
    taskHandle.cancel()
    scheduler.shutdown()
  }
} 
Example 165
Source File: Elasticsearch23InputFormatSpec.scala    From flink-elasticsearch-source-connector   with Apache License 2.0 5 votes vote down vote up
package com.mnubo.flink.streaming.connectors.elasticsearch

import java.net.InetAddress

import com.mnubo.flink.streaming.connectors._
import org.elasticsearch.client.transport.TransportClient
import org.elasticsearch.cluster.health.ClusterHealthStatus
import org.elasticsearch.common.transport.InetSocketTransportAddress

class Elasticsearch23InputFormatSpec extends ElasticsearchInputFormatSpec {
  protected override def elasticSearchVersion = "2.3.3"

  private def createClientInternal(host: String = es.host, port: Int = es.esTransportPort) =
    TransportClient
      .builder()
      .build()
      .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName(host), port))

  protected override def isClusterGreen(host: String, port: Int) =
    using(createClientInternal(host, port))(c => c.admin.cluster.prepareHealth().get.getStatus == ClusterHealthStatus.GREEN)

  protected override def createClient: ESClient = new ESClient {
    private val internalClient = createClientInternal()

    override def createIndex(indexName: String, docType: String, mapping: String) =
      internalClient
        .admin
        .indices
        .prepareCreate(indexName)
        .addMapping(docType, mapping)
        .get

    override def flush(indexName: String) =
      internalClient
        .admin
        .indices
        .prepareFlush(indexName)
        .setForce(true)
        .get


    override def index(indexName: String, docType: String, doc: String) =
      internalClient
        .prepareIndex(indexName, docType)
        .setSource(doc)
        .get

    override def close() =
      internalClient.close()
  }
} 
Example 166
Source File: DockerUtils.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.util

import java.net.{Inet4Address, InetAddress, NetworkInterface}

import scala.collection.JavaConverters._
import scala.sys.process._
import scala.util.Try

private[spark] object DockerUtils {

  def getDockerIp(): String = {
    
    def findFromDockerMachine(): Option[String] = {
      sys.env.get("DOCKER_MACHINE_NAME").flatMap { name =>
        Try(Seq("/bin/bash", "-c", s"docker-machine ip $name 2>/dev/null").!!.trim).toOption
      }
    }
    sys.env.get("DOCKER_IP")
      .orElse(findFromDockerMachine())
      .orElse(Try(Seq("/bin/bash", "-c", "boot2docker ip 2>/dev/null").!!.trim).toOption)
      .getOrElse {
        // This block of code is based on Utils.findLocalInetAddress(), but is modified to blacklist
        // certain interfaces.
        val address = InetAddress.getLocalHost
        // Address resolves to something like 127.0.1.1, which happens on Debian; try to find
        // a better address using the local network interfaces
        // getNetworkInterfaces returns ifs in reverse order compared to ifconfig output order
        // on unix-like system. On windows, it returns in index order.
        // It's more proper to pick ip address following system output order.
        val blackListedIFs = Seq(
          "vboxnet0",  // Mac
          "docker0"    // Linux
        )
        val activeNetworkIFs = NetworkInterface.getNetworkInterfaces.asScala.toSeq.filter { i =>
          !blackListedIFs.contains(i.getName)
        }
        val reOrderedNetworkIFs = activeNetworkIFs.reverse
        for (ni <- reOrderedNetworkIFs) {
          val addresses = ni.getInetAddresses.asScala
            .filterNot(addr => addr.isLinkLocalAddress || addr.isLoopbackAddress).toSeq
          if (addresses.nonEmpty) {
            val addr = addresses.find(_.isInstanceOf[Inet4Address]).getOrElse(addresses.head)
            // because of Inet6Address.toHostName may add interface at the end if it knows about it
            val strippedAddress = InetAddress.getByAddress(addr.getAddress)
            return strippedAddress.getHostAddress
          }
        }
        address.getHostAddress
      }
  }
} 
Example 167
Source File: Main.scala    From ForestFlow   with Apache License 2.0 5 votes vote down vote up
package ai.forestflow.event.subscribers

import java.net.InetAddress

import akka.Done
import akka.actor.ActorSystem
import akka.actor.CoordinatedShutdown.PhaseBeforeActorSystemTerminate
import akka.cluster.Cluster
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import com.typesafe.config.Config
import com.typesafe.scalalogging.StrictLogging

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}

object Main extends StrictLogging {
  def main(args: Array[String]): Unit = {
    import ai.forestflow.startup.ActorSystemStartup._

    preStartup(typeSafeConfig)

    logger.info(s"Started system: [$system], cluster.selfAddress = ${cluster.selfAddress}")

    shutdown.addTask(PhaseBeforeActorSystemTerminate, "main.cleanup") { () => cleanup(typeSafeConfig) }

    bootstrapCluster(system, cluster)

    logger.info(s"Sharding lease owner for this node will be set to: ${cluster.selfAddress.hostPort}")

    // Start application after self member joined the cluster (Up)
    cluster.registerOnMemberUp({
      logger.info(s"Cluster Member is up: ${cluster.selfMember.toString()}")
      postStartup
    })

  }

  private def bootstrapCluster(system: ActorSystem, cluster: Cluster): Unit = {
    // Akka Management hosts the HTTP routes used by bootstrap
    AkkaManagement(system).start()

    // Starting the bootstrap process needs to be done explicitly
    ClusterBootstrap(system).start()

    system.log.info(s"Akka Management hostname from InetAddress.getLocalHost.getHostAddress is: ${InetAddress.getLocalHost.getHostAddress}")
  }

  private def preStartup(config: Config): Unit = {

  }

  private def postStartup(implicit system: ActorSystem, config: Config): Unit = {
    // Kafka Prediction Logger setup
    import system.log

    val basic_topic = Try(config.getString("application.kafka-prediction-logger.basic-topic")).toOption
    val gp_topic = Try(config.getString("application.kafka-prediction-logger.graphpipe-topic")).toOption

    if (basic_topic.isDefined || gp_topic.isDefined){
      log.info(s"Setting up Kafka prediction logging with basic_topic: $basic_topic graphpipe_topic: $gp_topic")
      val predictionLogger = system.actorOf(PredictionLogger.props(basic_topic, gp_topic))
    }
  }

  private def cleanup(config: Config)(implicit executionContext: ExecutionContext) = Future {
    Done
  }
} 
Example 168
Source File: JsonSerializersSpec.scala    From eclair   with Apache License 2.0 5 votes vote down vote up
package fr.acinq.eclair.api

import java.net.InetAddress
import java.util.UUID

import fr.acinq.bitcoin.{ByteVector32, OutPoint, Transaction}
import fr.acinq.eclair._
import fr.acinq.eclair.payment.{PaymentRequest, PaymentSettlingOnChain}
import fr.acinq.eclair.transactions.{IncomingHtlc, OutgoingHtlc}
import fr.acinq.eclair.wire._
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
import scodec.bits._

class JsonSerializersSpec extends AnyFunSuite with Matchers {

  test("deserialize Map[OutPoint, ByteVector]") {
    val output1 = OutPoint(ByteVector32(hex"11418a2d282a40461966e4f578e1fdf633ad15c1b7fb3e771d14361127233be1"), 0)
    val output2 = OutPoint(ByteVector32(hex"3d62bd4f71dc63798418e59efbc7532380c900b5e79db3a5521374b161dd0e33"), 1)
    val map = Map(
      output1 -> hex"dead",
      output2 -> hex"beef"
    )

    // it won't work with the default key serializer
    val error = intercept[org.json4s.MappingException] {
      JsonSupport.serialization.write(map)(org.json4s.DefaultFormats)
    }
    assert(error.msg.contains("Do not know how to serialize key of type class fr.acinq.bitcoin.OutPoint."))

    // but it works with our custom key serializer
    val json = JsonSupport.serialization.write(map)(org.json4s.DefaultFormats + new ByteVectorSerializer + new OutPointKeySerializer)
    assert(json === s"""{"${output1.txid}:0":"dead","${output2.txid}:1":"beef"}""")
  }

  test("NodeAddress serialization") {
    val ipv4 = NodeAddress.fromParts("10.0.0.1", 8888).get
    val ipv6LocalHost = NodeAddress.fromParts(InetAddress.getByAddress(Array(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)).getHostAddress, 9735).get
    val tor2 = Tor2("aaaqeayeaudaocaj", 7777)
    val tor3 = Tor3("aaaqeayeaudaocajbifqydiob4ibceqtcqkrmfyydenbwha5dypsaijc", 9999)

    JsonSupport.serialization.write(ipv4)(org.json4s.DefaultFormats + new NodeAddressSerializer) shouldBe s""""10.0.0.1:8888""""
    JsonSupport.serialization.write(ipv6LocalHost)(org.json4s.DefaultFormats + new NodeAddressSerializer) shouldBe s""""[0:0:0:0:0:0:0:1]:9735""""
    JsonSupport.serialization.write(tor2)(org.json4s.DefaultFormats + new NodeAddressSerializer) shouldBe s""""aaaqeayeaudaocaj.onion:7777""""
    JsonSupport.serialization.write(tor3)(org.json4s.DefaultFormats + new NodeAddressSerializer) shouldBe s""""aaaqeayeaudaocajbifqydiob4ibceqtcqkrmfyydenbwha5dypsaijc.onion:9999""""
  }

  test("DirectedHtlc serialization") {
    val add = UpdateAddHtlc(
      channelId = ByteVector32(hex"345b2b05ec046ffe0c14d3b61838c79980713ad1cf8ae7a45c172ce90c9c0b9f"),
      id = 926,
      amountMsat = 12365.msat,
      paymentHash = ByteVector32(hex"9fcd45bbaa09c60c991ac0425704163c3f3d2d683c789fa409455b9c97792692"),
      cltvExpiry = CltvExpiry(621500),
      onionRoutingPacket = OnionRoutingPacket(
        version = 0,
        publicKey = hex"0270685ca81a8e4d4d01beec5781f4cc924684072ae52c507f8ebe9daf0caaab7b",
        payload = hex"3c7a66997c681a3de1bae56438abeee4fc50a16554725a430ade1dc8db6bdd76704d45c6151c4051d710cf487e63f8cbe9f5e537e6e518d7998f11c40277d551bee036d227a421f344c0185b4533660d200acbc4f4aa6148e29f813bb537e950a79ba961b80ccaa6ad808cb88f858ee73f8b1f129d3214d194f76fc011c46e18c2caec0fcb69715e79cb381e449e5be20281e0aaa92defa089c98b7e29c22181f2d1af9f5fe2a37ede4ac3163b123aa72318201b1128b17053c381e7bf111620dfb7ea3dcc5c28cafdd9cb7bb1a4202b64199aa02af145c563ba1b9f10288203ce2666f486aacb2ee385dc0b67915864c95174dfaac1e0ea195329d1741cd1febb4b49b33f84e0d10a5ec8ee0a1a94f73abf081ec69bb863edfeb46d24ce424b025ac3f593a7ba9419ec9b17fb39f0fbeac80e0898f95b6709cbc95f7c097e22e3a6ca0efbc947bbcd4a9077f6bd9daba25b18bb16179fca9d9cb2ce49fc7cbd2de589237926cacb87ea60e2cc60a90b47575517921b5529b8a95823dd0c3d02a7747d74c4ca927ba6b70c06c1c1ef27e14d371e8dd8f5d9380a65b08ae1e6384f9b3575c5d7278de22ce80e63612a27f3b3f45dbe32ee855185293c719e5a7203a682a08fd810c46fa12b67e61349831f8fae3f558090ea988e1a22ec877b790ea09169055529247c4dd597857aad74eaeb3a5879e96453e681e213f2796ed704d620509f34f91d9d16f881fd397e2836a0a4d2f1bcd230067f7acb5381a2b17e8c5135e38c4d258afbe4f69ac7ad39b789e99686ee926b3ad31b98993673313b7b18a4faaea238d8055824fde7339a791fc7777ef28cc4a1a5d177b3c3882ced4921c6cd85ae82e1fe13fe680ae432a9918ce37b15f88d4d18fb16b69e5369d18c204aaf7ee49b830bf2328380e6ad96e8f6a9e01bc2c97ffbaa402d5406dc7b83c6eb5d515ffc3bea8c42cf299c9e2bea693515246c6ff859d33ba6c4da4c4c1706e0c6b4a574e927f02eb92b7d56722cff80c3e6f6b98d1c84cb576abdcc27a6bc7b110fc2ac5fead57f05ad854c3331ce1ff94c0dc97303540ee797d71566497af09f20e3554d467528e1fed8e69438171072fe2deca3979a8f5ec9043b9bc4da921b095c29dc0294148c1b7001dafda4c48600d1194f745e6d0689c561bf19d20758c4d25fac64d81780607a4106e220ef546fc4026af7b9da8defb2fe3c21d48798ac67c794fb40aabe44618a8911673466be06808c6f54a772b87bcfafb4d120a9bebffb8051bf24bb332eaa769cf175c1aadb0186f8946dc32513fd81fe1a61bfb860886bdd070359a43e06e74607d300bd2e01a3f1ee900c4039e8db742170228db61ef0c77724c49d1573144564a80cc1ebc0449b34f84be35187ceba3fbc2facf5ad1f1e15945e3c6c236579aca7bc97e4cc76a3310022693b64008562b254a7d11c0086813e551c4817bbb72a1d6fbfc84d326ce973651200f80aa8ab0976c53c390249ca8e7e5ec21b80e70c3e0205983d313b28a5d1b9d9149501e05d3257c8ae88c6308e9e00feeab19121d1032a582e68ca1f9f64a1fd91cb5d8613b985fd4be22a4d5c14a132c20811a75ee3cc61de0b3fbc3254d61995d086603032269888b942ec0971ad26ea4b8df1746c5ec1de904ddeb5045abc0a6ede9d6a199ed0782cb69efa3a4dc00747553dbef12fb8299ca364b4cdf2ac3eba03b0d8b273684116bba4458b5717bc4aca5406901173a89b3643ccc076f22779fccf1ad69981e24eef18c711a2d58dfe5834b41f9e7166d54dc8628e754baaca1cbb7db8256f88ebc889de6078ba83a1af14a4",
        hmac = ByteVector32(hex"9442626f72c475963dbddf8a57ab2cef3013eb3d6a5e8afbea9e631dac4481f5")
      )
    )

    val expectedIn = """{"direction":"IN","add":{"channelId":"345b2b05ec046ffe0c14d3b61838c79980713ad1cf8ae7a45c172ce90c9c0b9f","id":926,"amountMsat":12365,"paymentHash":"9fcd45bbaa09c60c991ac0425704163c3f3d2d683c789fa409455b9c97792692","cltvExpiry":621500,"onionRoutingPacket":{"version":0,"publicKey":"0270685ca81a8e4d4d01beec5781f4cc924684072ae52c507f8ebe9daf0caaab7b","payload":"3c7a66997c681a3de1bae56438abeee4fc50a16554725a430ade1dc8db6bdd76704d45c6151c4051d710cf487e63f8cbe9f5e537e6e518d7998f11c40277d551bee036d227a421f344c0185b4533660d200acbc4f4aa6148e29f813bb537e950a79ba961b80ccaa6ad808cb88f858ee73f8b1f129d3214d194f76fc011c46e18c2caec0fcb69715e79cb381e449e5be20281e0aaa92defa089c98b7e29c22181f2d1af9f5fe2a37ede4ac3163b123aa72318201b1128b17053c381e7bf111620dfb7ea3dcc5c28cafdd9cb7bb1a4202b64199aa02af145c563ba1b9f10288203ce2666f486aacb2ee385dc0b67915864c95174dfaac1e0ea195329d1741cd1febb4b49b33f84e0d10a5ec8ee0a1a94f73abf081ec69bb863edfeb46d24ce424b025ac3f593a7ba9419ec9b17fb39f0fbeac80e0898f95b6709cbc95f7c097e22e3a6ca0efbc947bbcd4a9077f6bd9daba25b18bb16179fca9d9cb2ce49fc7cbd2de589237926cacb87ea60e2cc60a90b47575517921b5529b8a95823dd0c3d02a7747d74c4ca927ba6b70c06c1c1ef27e14d371e8dd8f5d9380a65b08ae1e6384f9b3575c5d7278de22ce80e63612a27f3b3f45dbe32ee855185293c719e5a7203a682a08fd810c46fa12b67e61349831f8fae3f558090ea988e1a22ec877b790ea09169055529247c4dd597857aad74eaeb3a5879e96453e681e213f2796ed704d620509f34f91d9d16f881fd397e2836a0a4d2f1bcd230067f7acb5381a2b17e8c5135e38c4d258afbe4f69ac7ad39b789e99686ee926b3ad31b98993673313b7b18a4faaea238d8055824fde7339a791fc7777ef28cc4a1a5d177b3c3882ced4921c6cd85ae82e1fe13fe680ae432a9918ce37b15f88d4d18fb16b69e5369d18c204aaf7ee49b830bf2328380e6ad96e8f6a9e01bc2c97ffbaa402d5406dc7b83c6eb5d515ffc3bea8c42cf299c9e2bea693515246c6ff859d33ba6c4da4c4c1706e0c6b4a574e927f02eb92b7d56722cff80c3e6f6b98d1c84cb576abdcc27a6bc7b110fc2ac5fead57f05ad854c3331ce1ff94c0dc97303540ee797d71566497af09f20e3554d467528e1fed8e69438171072fe2deca3979a8f5ec9043b9bc4da921b095c29dc0294148c1b7001dafda4c48600d1194f745e6d0689c561bf19d20758c4d25fac64d81780607a4106e220ef546fc4026af7b9da8defb2fe3c21d48798ac67c794fb40aabe44618a8911673466be06808c6f54a772b87bcfafb4d120a9bebffb8051bf24bb332eaa769cf175c1aadb0186f8946dc32513fd81fe1a61bfb860886bdd070359a43e06e74607d300bd2e01a3f1ee900c4039e8db742170228db61ef0c77724c49d1573144564a80cc1ebc0449b34f84be35187ceba3fbc2facf5ad1f1e15945e3c6c236579aca7bc97e4cc76a3310022693b64008562b254a7d11c0086813e551c4817bbb72a1d6fbfc84d326ce973651200f80aa8ab0976c53c390249ca8e7e5ec21b80e70c3e0205983d313b28a5d1b9d9149501e05d3257c8ae88c6308e9e00feeab19121d1032a582e68ca1f9f64a1fd91cb5d8613b985fd4be22a4d5c14a132c20811a75ee3cc61de0b3fbc3254d61995d086603032269888b942ec0971ad26ea4b8df1746c5ec1de904ddeb5045abc0a6ede9d6a199ed0782cb69efa3a4dc00747553dbef12fb8299ca364b4cdf2ac3eba03b0d8b273684116bba4458b5717bc4aca5406901173a89b3643ccc076f22779fccf1ad69981e24eef18c711a2d58dfe5834b41f9e7166d54dc8628e754baaca1cbb7db8256f88ebc889de6078ba83a1af14a4","hmac":"9442626f72c475963dbddf8a57ab2cef3013eb3d6a5e8afbea9e631dac4481f5"}}}"""

    JsonSupport.serialization.write(IncomingHtlc(add))(org.json4s.DefaultFormats + new DirectedHtlcSerializer) shouldBe expectedIn
    JsonSupport.serialization.write(OutgoingHtlc(add))(org.json4s.DefaultFormats + new DirectedHtlcSerializer) shouldBe expectedIn.replace("IN", "OUT")
  }

  test("Payment Request") {
    val ref = "lnbcrt50n1p0fm9cdpp5al3wvsfkc6p7fxy89eu8gm4aww9mseu9syrcqtpa4mvx42qelkwqdq9v9ekgxqrrss9qypqsqsp5wl2t45v0hj4lgud0zjxcnjccd29ts0p2kh4vpw75vnhyyzyjtjtqarpvqg33asgh3z5ghfuvhvtf39xtnu9e7aqczpgxa9quwsxkd9rnwmx06pve9awgeewxqh90dqgrhzgsqc09ek6uejr93z8puafm6gsqgrk0hy"
    val pr = PaymentRequest.read(ref)
    JsonSupport.serialization.write(pr)(JsonSupport.formats) shouldBe """{"prefix":"lnbcrt","timestamp":1587386125,"nodeId":"03b207771ddba774e318970e9972da2491ff8e54f777ad0528b6526773730248a0","serialized":"lnbcrt50n1p0fm9cdpp5al3wvsfkc6p7fxy89eu8gm4aww9mseu9syrcqtpa4mvx42qelkwqdq9v9ekgxqrrss9qypqsqsp5wl2t45v0hj4lgud0zjxcnjccd29ts0p2kh4vpw75vnhyyzyjtjtqarpvqg33asgh3z5ghfuvhvtf39xtnu9e7aqczpgxa9quwsxkd9rnwmx06pve9awgeewxqh90dqgrhzgsqc09ek6uejr93z8puafm6gsqgrk0hy","description":"asd","paymentHash":"efe2e64136c683e498872e78746ebd738bb867858107802c3daed86aa819fd9c","expiry":3600,"amount":5000,"features":{"activated":[{"name":"var_onion_optin","support":"optional"},{"name":"payment_secret","support":"optional"}],"unknown":[]}}"""
  }

  test("type hints") {
    val e1 = PaymentSettlingOnChain(UUID.randomUUID, 42 msat, randomBytes32)
    assert(JsonSupport.serialization.writePretty(e1)(JsonSupport.formats).contains("\"type\" : \"payment-settling-onchain\""))
  }

  test("transaction serializer") {
    val tx = Transaction.read("0200000001c8a8934fb38a44b969528252bc37be66ee166c7897c57384d1e561449e110c93010000006b483045022100dc6c50f445ed53d2fb41067fdcb25686fe79492d90e6e5db43235726ace247210220773d35228af0800c257970bee9cf75175d75217de09a8ecd83521befd040c4ca012102082b751372fe7e3b012534afe0bb8d1f2f09c724b1a10a813ce704e5b9c217ccfdffffff0247ba2300000000001976a914f97a7641228e6b17d4b0b08252ae75bd62a95fe788ace3de24000000000017a914a9fefd4b9a9282a1d7a17d2f14ac7d1eb88141d287f7d50800")
    assert(JsonSupport.serialization.write(tx)(JsonSupport.formats) == "{\"txid\":\"3ef63b5d297c9dcf93f33b45b9f102733c36e8ef61da1ccf2bc132a10584be18\",\"tx\":\"0200000001c8a8934fb38a44b969528252bc37be66ee166c7897c57384d1e561449e110c93010000006b483045022100dc6c50f445ed53d2fb41067fdcb25686fe79492d90e6e5db43235726ace247210220773d35228af0800c257970bee9cf75175d75217de09a8ecd83521befd040c4ca012102082b751372fe7e3b012534afe0bb8d1f2f09c724b1a10a813ce704e5b9c217ccfdffffff0247ba2300000000001976a914f97a7641228e6b17d4b0b08252ae75bd62a95fe788ace3de24000000000017a914a9fefd4b9a9282a1d7a17d2f14ac7d1eb88141d287f7d50800\"}")
  }
} 
Example 169
Source File: Config.scala    From docspell   with GNU General Public License v3.0 5 votes vote down vote up
package docspell.restserver

import java.net.InetAddress

import docspell.backend.auth.Login
import docspell.backend.{Config => BackendConfig}
import docspell.common._
import docspell.ftssolr.SolrConfig

case class Config(
    appName: String,
    appId: Ident,
    baseUrl: LenientUri,
    bind: Config.Bind,
    backend: BackendConfig,
    auth: Login.Config,
    integrationEndpoint: Config.IntegrationEndpoint,
    maxItemPageSize: Int,
    fullTextSearch: Config.FullTextSearch
)

object Config {

  case class Bind(address: String, port: Int)

  case class IntegrationEndpoint(
      enabled: Boolean,
      priority: Priority,
      allowedIps: IntegrationEndpoint.AllowedIps,
      httpBasic: IntegrationEndpoint.HttpBasic,
      httpHeader: IntegrationEndpoint.HttpHeader
  )

  object IntegrationEndpoint {
    case class HttpBasic(enabled: Boolean, realm: String, user: String, password: String)
    case class HttpHeader(enabled: Boolean, headerName: String, headerValue: String)
    case class AllowedIps(enabled: Boolean, ips: Set[String]) {

      def containsAddress(inet: InetAddress): Boolean = {
        val ip           = inet.getHostAddress
        lazy val ipParts = ip.split('.')

        def checkSingle(pattern: String): Boolean =
          pattern == ip || (inet.isLoopbackAddress && pattern == "127.0.0.1") || (pattern
            .split('.')
            .zip(ipParts)
            .foldLeft(true) {
              case (r, (a, b)) =>
                r && (a == "*" || a == b)
            })

        ips.exists(checkSingle)
      }
    }
  }

  case class FullTextSearch(enabled: Boolean, recreateKey: Ident, solr: SolrConfig)

  object FullTextSearch {}

} 
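containsAddress above compares a dotted-quad address against each configured pattern segment by segment, with "*" acting as a wildcard for a whole segment. A self-contained sketch of the same idea using only java.net; the AllowedIpsSketch name and matches helper are illustrative and not part of docspell:

import java.net.InetAddress

object AllowedIpsSketch {
  // "*" in a pattern segment matches any value in the corresponding address segment.
  def matches(pattern: String, inet: InetAddress): Boolean = {
    val ip = inet.getHostAddress
    pattern == ip ||
      (inet.isLoopbackAddress && pattern == "127.0.0.1") ||
      pattern.split('.').zip(ip.split('.')).forall { case (a, b) => a == "*" || a == b }
  }

  def main(args: Array[String]): Unit = {
    val addr = InetAddress.getByName("192.168.1.42") // literal IP, so no DNS lookup
    println(matches("192.168.1.*", addr)) // true
    println(matches("10.0.0.*", addr))    // false
  }
}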
Example 170
Source File: ConsulDiscoverySpec.scala    From akka-management   with Apache License 2.0 5 votes vote down vote up
package akka.cluster.bootstrap.discovery

import java.net.InetAddress

import akka.actor.ActorSystem
import akka.discovery.ServiceDiscovery.ResolvedTarget
import akka.discovery.consul.ConsulServiceDiscovery
import akka.testkit.TestKitBase
import com.google.common.net.HostAndPort
import com.orbitz.consul.Consul
import com.orbitz.consul.model.catalog.ImmutableCatalogRegistration
import com.orbitz.consul.model.health.ImmutableService
import com.pszymczyk.consul.{ ConsulProcess, ConsulStarterBuilder }
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike }

import scala.concurrent.duration._

class ConsulDiscoverySpec extends WordSpecLike with Matchers with BeforeAndAfterAll with TestKitBase with ScalaFutures {

  private val consul: ConsulProcess = ConsulStarterBuilder.consulStarter().withHttpPort(8500).build().start()

  "Consul Discovery" should {
    "work for defaults" in {
      val consulAgent =
        Consul.builder().withHostAndPort(HostAndPort.fromParts(consul.getAddress, consul.getHttpPort)).build()
      consulAgent
        .catalogClient()
        .register(
          ImmutableCatalogRegistration
            .builder()
            .service(
              ImmutableService
                .builder()
                .addTags(s"system:${system.name}", "akka-management-port:1234")
                .address("127.0.0.1")
                .id("test")
                .service("test")
                .port(1235)
                .build()
            )
            .node("testNode")
            .address("localhost")
            .build()
        )

      val lookupService = new ConsulServiceDiscovery(system)
      val resolved = lookupService.lookup("test", 10.seconds).futureValue
      resolved.addresses should contain(
        ResolvedTarget(
          host = "127.0.0.1",
          port = Some(1234),
          address = Some(InetAddress.getByName("127.0.0.1"))
        )
      )
    }
  }

  override def afterAll(): Unit = {
    super.afterAll()
    consul.close()
  }

  override implicit lazy val system: ActorSystem = ActorSystem("test")

  implicit override val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = scaled(Span(30, Seconds)), interval = scaled(Span(50, Millis)))

} 
Example 171
Source File: EcsApiDemoApp.scala    From akka-management   with Apache License 2.0 5 votes vote down vote up
package akka.cluster.bootstrap

import java.net.InetAddress

import akka.actor.ActorSystem
import akka.discovery.awsapi.ecs.AsyncEcsDiscovery
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import com.typesafe.config.ConfigFactory

object EcsApiDemoApp {

  def main(args: Array[String]): Unit = {
    val privateAddress = getPrivateAddressOrExit
    val config = ConfigFactory
      .systemProperties()
      .withFallback(
        ConfigFactory.parseString(s"""
             |akka {
             |  actor.provider = "cluster"
             |  management {
             |    cluster.bootstrap.contact-point.fallback-port = 8558
             |    http.hostname = "${privateAddress.getHostAddress}"
             |  }
             |  discovery.method = aws-api-ecs-async
             |  remote.netty.tcp.hostname = "${privateAddress.getHostAddress}"
             |}
           """.stripMargin)
      )
    val system = ActorSystem("ecsBootstrapDemoApp", config)
    AkkaManagement(system).start()
    ClusterBootstrap(system).start()
  }

  private[this] def getPrivateAddressOrExit: InetAddress =
    AsyncEcsDiscovery.getContainerAddress match {
      case Left(error) =>
        System.err.println(s"$error Halting.")
        sys.exit(1)

      case Right(value) =>
        value
    }

} 
Example 172
Source File: AkkaManagementSettings.scala    From akka-management   with Apache License 2.0 5 votes vote down vote up
package akka.management

import java.net.InetAddress
import java.util.Optional

import scala.collection.JavaConverters._
import scala.collection.immutable
import scala.compat.java8.OptionConverters._

import akka.annotation.InternalApi
import com.typesafe.config.Config

final class AkkaManagementSettings(val config: Config) {
  private val managementConfig = config.getConfig("akka.management")

  object Http {
    private val cc = managementConfig.getConfig("http")

    val Hostname: String = {
      val hostname = cc.getString("hostname")
      if (hostname == "<hostname>") InetAddress.getLocalHost.getHostAddress
      else if (hostname.trim() == "") InetAddress.getLocalHost.getHostAddress
      else hostname
    }

    val Port: Int = {
      val p = cc.getInt("port")
      require(0 to 65535 contains p, s"akka.management.http.port must be 0 through 65535 (was ${p})")
      p
    }

    val EffectiveBindHostname: String = cc.getString("bind-hostname") match {
      case ""    => Hostname
      case value => value
    }

    val EffectiveBindPort: Int = cc.getString("bind-port") match {
      case "" => Port
      case value =>
        val p = value.toInt
        require(0 to 65535 contains p, s"akka.management.http.bind-port must be 0 through 65535 (was ${p})")
        p
    }

    val BasePath: Option[String] =
      Option(cc.getString("base-path")).flatMap(it => if (it.trim == "") None else Some(it))

    val RouteProviders: immutable.Seq[NamedRouteProvider] = {
      def validFQCN(value: Any) = {
        value != null &&
        value != "null" &&
        value.toString.trim.nonEmpty
      }

      cc.getConfig("routes")
        .root
        .unwrapped
        .asScala
        .collect {
          case (name, value) if validFQCN(value) => NamedRouteProvider(name, value.toString)
        }
        .toList
    }

    val RouteProvidersReadOnly: Boolean = cc.getBoolean("route-providers-read-only")
  }

}
@InternalApi private[akka] object AkkaManagementSettings {

  implicit class HasDefined(val config: Config) {
    def hasDefined(key: String): Boolean =
      config.hasPath(key) &&
      config.getString(key).trim.nonEmpty &&
      config.getString(key) != s"<$key>"

    def optDefinedValue(key: String): Option[String] =
      if (hasDefined(key)) Some(config.getString(key)) else None

    def optValue(key: String): Option[String] =
      config.getString(key) match {
        case ""    => None
        case other => Some(other)
      }
  }
}

final case class NamedRouteProvider(name: String, fullyQualifiedClassName: String) 
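Http.Hostname above falls back to InetAddress.getLocalHost.getHostAddress when the configured value is the placeholder "<hostname>" or blank. A small sketch of that fallback rule, assuming the Typesafe Config library on the classpath; the HostnameFallbackSketch object is illustrative:

import java.net.InetAddress
import com.typesafe.config.ConfigFactory

object HostnameFallbackSketch {
  def main(args: Array[String]): Unit = {
    // Both the literal placeholder "<hostname>" and a blank value fall back to the
    // local host address, mirroring the Hostname resolution in the example above.
    val config = ConfigFactory.parseString("akka.management.http.hostname = \"<hostname>\"")
    val configured = config.getString("akka.management.http.hostname")
    val hostname =
      if (configured == "<hostname>" || configured.trim.isEmpty)
        InetAddress.getLocalHost.getHostAddress
      else configured
    println(s"Effective akka.management.http.hostname: $hostname")
  }
}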
Example 173
Source File: AsyncEcsDiscovery.scala    From akka-management   with Apache License 2.0 5 votes vote down vote up
package akka.discovery.awsapi.ecs

import java.net.{ InetAddress, NetworkInterface }

import scala.collection.JavaConverters._

import akka.annotation.ApiMayChange

@ApiMayChange
object AsyncEcsDiscovery {

  // InetAddress.getLocalHost.getHostAddress throws an exception when running
  // in awsvpc mode because the container name cannot be resolved.
  // ECS provides a metadata file
  // (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/container-metadata.html)
  // that we ought to be able to use instead to find our IP address, but the
  // metadata file does not get set when running on Fargate. So this is our
  // only option for determining what the canonical Akka and akka-management
  // hostname values should be set to.
  def getContainerAddress: Either[String, InetAddress] =
    NetworkInterface.getNetworkInterfaces.asScala
      .flatMap(_.getInetAddresses.asScala)
      .filterNot(_.isLoopbackAddress)
      .filter(_.isSiteLocalAddress)
      .toList match {
      case List(value) =>
        Right(value)

      case other =>
        Left(s"Exactly one private address must be configured (found: $other).")
    }
} 
Example 174
Source File: MarathonApiServiceDiscoverySpec.scala    From akka-management   with Apache License 2.0 5 votes vote down vote up
package akka.discovery.marathon

import java.net.InetAddress

import akka.discovery.ServiceDiscovery.ResolvedTarget
import org.scalatest.{ Matchers, WordSpec }
import spray.json._
import scala.io.Source

class MarathonApiServiceDiscoverySpec extends WordSpec with Matchers {
  "targets" should {
    "calculate the correct list of resolved targets" in {
      val data = resourceAsString("apps.json")

      val appList = JsonFormat.appListFormat.read(data.parseJson)

      MarathonApiServiceDiscovery.targets(appList, "management") shouldBe List(
        ResolvedTarget(
          host = "192.168.65.60",
          port = Some(23236),
          address = Option(InetAddress.getByName("192.168.65.60"))),
        ResolvedTarget(
          host = "192.168.65.111",
          port = Some(6850),
          address = Option(InetAddress.getByName("192.168.65.111")))
      )
    }
    "calculate the correct list of resolved targets for docker" in {
      val data = resourceAsString("docker-app.json")

      val appList = JsonFormat.appListFormat.read(data.parseJson)

      MarathonApiServiceDiscovery.targets(appList, "akkamgmthttp") shouldBe List(
        ResolvedTarget(
          host = "10.121.48.204",
          port = Some(29480),
          address = Option(InetAddress.getByName("10.121.48.204"))),
        ResolvedTarget(
          host = "10.121.48.204",
          port = Some(10136),
          address = Option(InetAddress.getByName("10.121.48.204")))
      )
    }
  }

  private def resourceAsString(name: String): String =
    Source.fromInputStream(getClass.getClassLoader.getResourceAsStream(name)).mkString
} 
Example 175
Source File: MarathonApiServiceDiscovery.scala    From akka-management   with Apache License 2.0 5 votes vote down vote up
package akka.discovery.marathon

import java.net.InetAddress

import akka.actor.ActorSystem
import akka.discovery._
import akka.http.scaladsl._
import akka.http.scaladsl.model._
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.ActorMaterializer
import scala.collection.immutable.Seq
import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
import scala.util.Try

import AppList._
import JsonFormat._
import akka.annotation.ApiMayChange
import akka.discovery.ServiceDiscovery.{ Resolved, ResolvedTarget }
import akka.event.Logging

@ApiMayChange
object MarathonApiServiceDiscovery {

  // The targets(appList, portName) helper used by the lookup below was elided in this excerpt.
}
@ApiMayChange
class MarathonApiServiceDiscovery(system: ActorSystem) extends ServiceDiscovery {
  import MarathonApiServiceDiscovery._
  import system.dispatcher

  private val log = Logging(system, getClass)

  private val http = Http()(system)

  private val settings = Settings(system)

  private implicit val mat: ActorMaterializer = ActorMaterializer()(system)

  override def lookup(lookup: Lookup, resolveTimeout: FiniteDuration): Future[Resolved] = {
    val uri =
      Uri(settings.appApiUrl).withQuery(
        Uri.Query(
          "embed" -> "apps.tasks",
          "embed" -> "apps.deployments",
          "label" -> settings.appLabelQuery.format(lookup.serviceName)))

    val request = HttpRequest(uri = uri)

    log.info("Requesting seed nodes by: {}", request.uri)

    val portName = lookup.portName match {
      case Some(name) => name
      case None       => settings.appPortName
    }

    for {
      response <- http.singleRequest(request)

      entity <- response.entity.toStrict(resolveTimeout)

      appList <- {
        log.debug("Marathon API entity: [{}]", entity.data.utf8String)
        val unmarshalled = Unmarshal(entity).to[AppList]

        unmarshalled.failed.foreach { _ =>
          log.error(
            "Failed to unmarshal Marathon API response status [{}], entity: [{}], uri: [{}]",
            response.status.value,
            entity.data.utf8String,
            uri)
        }
        unmarshalled
      }

    } yield Resolved(lookup.serviceName, targets(appList, portName))
  }

} 
Example 176
Source File: MessageChunkHeader.scala    From SparkCore   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.network.nio

import java.net.{InetAddress, InetSocketAddress}
import java.nio.ByteBuffer

private[nio] class MessageChunkHeader(
    val typ: Long,
    val id: Int,
    val totalSize: Int,
    val chunkSize: Int,
    val other: Int,
    val hasError: Boolean,
    val securityNeg: Int,
    val address: InetSocketAddress) {
  lazy val buffer = {
    // No need to change this, at 'use' time, we do a reverse lookup of the hostname.
    // Refer to network.Connection
    val ip = address.getAddress.getAddress()
    val port = address.getPort()
    ByteBuffer.
      allocate(MessageChunkHeader.HEADER_SIZE).
      putLong(typ).
      putInt(id).
      putInt(totalSize).
      putInt(chunkSize).
      putInt(other).
      put(if (hasError) 1.asInstanceOf[Byte] else 0.asInstanceOf[Byte]).
      putInt(securityNeg).
      putInt(ip.size).
      put(ip).
      putInt(port).
      position(MessageChunkHeader.HEADER_SIZE).
      flip.asInstanceOf[ByteBuffer]
  }

  override def toString = "" + this.getClass.getSimpleName + ":" + id + " of type " + typ +
      " and sizes " + totalSize + " / " + chunkSize + " bytes, securityNeg: " + securityNeg

}


private[nio] object MessageChunkHeader {
  val HEADER_SIZE = 45

  def create(buffer: ByteBuffer): MessageChunkHeader = {
    if (buffer.remaining != HEADER_SIZE) {
      throw new IllegalArgumentException("Cannot convert buffer data to Message")
    }
    val typ = buffer.getLong()
    val id = buffer.getInt()
    val totalSize = buffer.getInt()
    val chunkSize = buffer.getInt()
    val other = buffer.getInt()
    val hasError = buffer.get() != 0
    val securityNeg = buffer.getInt()
    val ipSize = buffer.getInt()
    val ipBytes = new Array[Byte](ipSize)
    buffer.get(ipBytes)
    val ip = InetAddress.getByAddress(ipBytes)
    val port = buffer.getInt()
    new MessageChunkHeader(typ, id, totalSize, chunkSize, other, hasError, securityNeg,
      new InetSocketAddress(ip, port))
  }
} 
Example 177
Source File: LauncherBackend.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.launcher

import java.net.{InetAddress, Socket}

import org.apache.spark.SPARK_VERSION
import org.apache.spark.launcher.LauncherProtocol._
import org.apache.spark.util.{ThreadUtils, Utils}


  protected def onDisconnected() : Unit = { }

  private def fireStopRequest(): Unit = {
    val thread = LauncherBackend.threadFactory.newThread(new Runnable() {
      override def run(): Unit = Utils.tryLogNonFatalError {
        onStopRequest()
      }
    })
    thread.start()
  }

  private class BackendConnection(s: Socket) extends LauncherConnection(s) {

    override protected def handle(m: Message): Unit = m match {
      case _: Stop =>
        fireStopRequest()

      case _ =>
        throw new IllegalArgumentException(s"Unexpected message type: ${m.getClass().getName()}")
    }

    override def close(): Unit = {
      try {
        super.close()
      } finally {
        onDisconnected()
        _isConnected = false
      }
    }

  }

}

private object LauncherBackend {

  val threadFactory = ThreadUtils.namedThreadFactory("LauncherBackend")

} 
Example 178
Source File: DatadogMetricReporter.scala    From bandar-log   with Apache License 2.0 5 votes vote down vote up
package com.aol.one.dwh.bandarlog.reporters

import java.net.InetAddress
import java.util.concurrent.TimeUnit

import com.aol.one.dwh.infra.config.{DatadogConfig, ReportConfig}
import com.codahale.metrics.MetricRegistry
import org.coursera.metrics.datadog.DatadogReporter
import org.coursera.metrics.datadog.transport.UdpTransport

import scala.collection.JavaConverters._


class DatadogMetricReporter(config: DatadogConfig, tags: List[String], metricRegistry: MetricRegistry, reportConf: ReportConfig)
  extends MetricReporter {

  private lazy val datadogReporter = {
    val udpTransport = new UdpTransport.Builder().build()

    DatadogReporter.
      forRegistry(metricRegistry).
      withTags(tags.asJava).
      withTransport(udpTransport).
      withHost(config.host.getOrElse(InetAddress.getLocalHost.getHostName)).
      build()
  }

  override def start(): Unit = {
    datadogReporter.start(reportConf.interval, TimeUnit.SECONDS)
  }

  override def stop(): Unit = {
    datadogReporter.stop()
  }

} 
Example 179
Source File: LagomDevModeServiceDiscoverySpec.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.devmode.internal.registry

import java.net.InetAddress
import java.net.URI

import akka.actor.ActorSystem
import akka.discovery.ServiceDiscovery.Resolved
import akka.discovery.ServiceDiscovery.ResolvedTarget
import akka.testkit.TestKit
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.ScalaFutures._

import scala.collection.immutable
import scala.concurrent.Future
import scala.concurrent.duration._
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

class LagomDevModeServiceDiscoverySpec
    extends TestKit(ActorSystem("LagomDevModeSimpleServiceDiscoverySpec"))
    with AnyWordSpecLike
    with Matchers
    with BeforeAndAfterAll {
  private val client = new StaticServiceRegistryClient(
    Map(
      "test-service"              -> List(URI.create("http://localhost:8080")),
      "test-service-without-port" -> List(URI.create("http://localhost"))
    )
  )

  protected override def afterAll(): Unit = {
    shutdown(verifySystemShutdown = true)
  }

  private val discovery = LagomDevModeServiceDiscovery(system)
  discovery.setServiceRegistryClient(client)

  "DevModeSimpleServiceDiscoverySpec" should {
    "resolve services in the registry" in {
      val expected =
        Resolved("test-service", List(ResolvedTarget("localhost", Some(8080), Some(InetAddress.getLocalHost))))
      discovery.lookup("test-service", 100.milliseconds).futureValue shouldBe expected
    }

    "allow missing ports" in {
      val expected =
        Resolved("test-service-without-port", List(ResolvedTarget("localhost", None, Some(InetAddress.getLocalHost))))
      discovery.lookup("test-service-without-port", 100.milliseconds).futureValue shouldBe expected
    }
  }
}

private class StaticServiceRegistryClient(registrations: Map[String, List[URI]]) extends ServiceRegistryClient {
  override def locateAll(serviceName: String, portName: Option[String]): Future[immutable.Seq[URI]] =
    Future.successful(registrations.getOrElse(serviceName, Nil))
} 
Example 180
Source File: LagomDevModeServiceDiscovery.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.devmode.internal.registry

import java.net.InetAddress
import java.net.URI

import akka.actor.ActorSystem
import akka.discovery.ServiceDiscovery._
import akka.discovery.Discovery
import akka.discovery.Lookup
import akka.discovery.ServiceDiscovery

import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.Promise
import scala.concurrent.duration.FiniteDuration

private[lagom] class LagomDevModeServiceDiscovery(system: ActorSystem) extends ServiceDiscovery {
  private val clientPromise = Promise[ServiceRegistryClient]

  private implicit val ec: ExecutionContext = system.dispatcher

  def setServiceRegistryClient(client: ServiceRegistryClient): Unit = clientPromise.success(client)

  override def lookup(lookup: Lookup, resolveTimeout: FiniteDuration): Future[Resolved] =
    for {
      client <- clientPromise.future
      uris   <- client.locateAll(lookup.serviceName, lookup.portName)
    } yield Resolved(lookup.serviceName, uris.map(toResolvedTarget))

  private def toResolvedTarget(uri: URI) =
    ResolvedTarget(
      uri.getHost,
      optionalPort(uri.getPort),
      // we don't have the InetAddress, but instead of using None
      // we default to localhost as such we can use it for Akka Cluster Bootstrap eventually
      address = Some(InetAddress.getLocalHost)
    )

  private def optionalPort(port: Int): Option[Int] = if (port < 0) None else Some(port)
}

private[lagom] object LagomDevModeServiceDiscovery {
  def apply(system: ActorSystem): LagomDevModeServiceDiscovery =
    Discovery(system)
      .loadServiceDiscovery("lagom-dev-mode")
      .asInstanceOf[LagomDevModeServiceDiscovery]
} 
Example 181
Source File: P2PGenerator.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.testkit.core.gen.p2p

import org.scalacheck.Gen
import org.bitcoins.core.p2p.NetworkIpAddress
import org.bitcoins.testkit.core.gen.NumberGenerator
import org.bitcoins.core.p2p.ServiceIdentifier
import java.net.InetAddress
import org.bitcoins.core.p2p.NetworkPayload

object P2PGenerator {

  
  def message: Gen[NetworkPayload] =
    Gen.oneOf(ControlMessageGenerator.controlMessage,
              DataMessageGenerator.dataMessage)

  def inetAddress: Gen[InetAddress] = {
    def ipRangeNum = Gen.choose(0, 255)
    for {
      first <- ipRangeNum
      second <- ipRangeNum
      third <- ipRangeNum
      fourth <- ipRangeNum
    } yield {
      // as long as we don't pass in a host name no IO is performed
      // https://stackoverflow.com/questions/5571744/java-convert-a-string-representing-an-ip-to-inetaddress
      InetAddress.getByName(s"$first.$second.$third.$fourth")
    }

  }

  def networkIpAddress: Gen[NetworkIpAddress] = {
    for {
      time <- NumberGenerator.uInt32s
      services <- serviceIdentifier
      address <- inetAddress
      port <- Gen.choose(1025, 64000)
    } yield NetworkIpAddress(time, services, address, port)
  }

  def serviceIdentifier: Gen[ServiceIdentifier] = {
    for {
      num <- NumberGenerator.uInt64
    } yield ServiceIdentifier(num)
  }
} 
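The inetAddress generator above builds a dotted-quad string so that InetAddress.getByName never triggers a DNS lookup. A hedged usage sketch of the same technique, assuming ScalaCheck on the classpath (the generator is inlined here rather than calling P2PGenerator directly):

import java.net.InetAddress
import org.scalacheck.Gen

object InetAddressGenSketch {
  // Same technique as P2PGenerator.inetAddress: a literal IPv4 string avoids any DNS resolution.
  val inetAddressGen: Gen[InetAddress] = for {
    a <- Gen.choose(0, 255)
    b <- Gen.choose(0, 255)
    c <- Gen.choose(0, 255)
    d <- Gen.choose(0, 255)
  } yield InetAddress.getByName(s"$a.$b.$c.$d")

  def main(args: Array[String]): Unit =
    println(inetAddressGen.sample) // e.g. Some(/203.0.113.7)
}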
Example 182
Source File: RawNetworkIpAddressSerializer.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.core.serializers.p2p

import java.net.InetAddress

import org.bitcoins.core.number.UInt32
import org.bitcoins.core.p2p._
import org.bitcoins.core.serializers.RawBitcoinSerializer
import org.bitcoins.core.util.BitcoinSLogger
import scodec.bits.ByteVector


trait RawNetworkIpAddressSerializer
    extends RawBitcoinSerializer[NetworkIpAddress]
    with BitcoinSLogger {

  def read(bytes: ByteVector): NetworkIpAddress = {
    val time = UInt32(bytes.take(4).reverse)
    val services = ServiceIdentifier(bytes.slice(4, 12))
    val ipBytes = bytes.slice(12, 28)
    val ipAddress = InetAddress.getByAddress(ipBytes.toArray)
    val port = bytes.slice(28, 30).toInt(signed = false)
    NetworkIpAddress(time, services, ipAddress, port)
  }

  def write(networkIpAddress: NetworkIpAddress): ByteVector = {
    val time = networkIpAddress.time.bytes.reverse
    val services = networkIpAddress.services.bytes
    val ipAddress = NetworkIpAddress.writeAddress(networkIpAddress.address)
    // uint16s are only 4 hex characters
    // cannot do fromShort,
    val port = ByteVector.fromInt(networkIpAddress.port, size = 2)
    time ++ services ++ ipAddress ++ port
  }

}

object RawNetworkIpAddressSerializer extends RawNetworkIpAddressSerializer 
Example 183
Source File: RawVersionMessageSerializer.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.core.serializers.p2p.messages

import java.net.InetAddress

import org.bitcoins.core.number.{Int32, Int64, UInt32, UInt64}
import org.bitcoins.core.protocol.CompactSizeUInt
import org.bitcoins.core.serializers.RawBitcoinSerializer
import org.bitcoins.core.util.BitcoinSLogger
import org.bitcoins.core.p2p._
import scodec.bits.ByteVector


trait RawVersionMessageSerializer
    extends RawBitcoinSerializer[VersionMessage]
    with BitcoinSLogger {

  def read(bytes: ByteVector): VersionMessage = {
    val version = ProtocolVersion(bytes.take(4))

    val services = ServiceIdentifier(bytes.slice(4, 12))

    val timestamp = Int64(bytes.slice(12, 20).reverse)

    val addressReceiveServices = ServiceIdentifier(bytes.slice(20, 28))

    val addressReceiveIpAddress =
      InetAddress.getByAddress(bytes.slice(28, 44).toArray)

    val addressReceivePort = UInt32(bytes.slice(44, 46)).toInt

    val addressTransServices = ServiceIdentifier(bytes.slice(46, 54))

    val addressTransIpAddress =
      InetAddress.getByAddress(bytes.slice(54, 70).toArray)

    val addressTransPort = UInt32(bytes.slice(70, 72)).toInt

    val nonce = UInt64(bytes.slice(72, 80))

    val userAgentSize =
      CompactSizeUInt.parseCompactSizeUInt(bytes.slice(80, bytes.size))

    val userAgentBytesStartIndex = 80 + userAgentSize.byteSize.toInt

    val userAgentBytes = bytes.slice(
      userAgentBytesStartIndex,
      userAgentBytesStartIndex + userAgentSize.num.toInt)

    val userAgent = userAgentBytes.toArray.map(_.toChar).mkString

    val startHeightStartIndex =
      userAgentBytesStartIndex + userAgentSize.num.toInt

    val startHeight = Int32(
      bytes.slice(startHeightStartIndex, startHeightStartIndex + 4).reverse)

    val relay = bytes(startHeightStartIndex + 4) != 0

    VersionMessage(
      version = version,
      services = services,
      timestamp = timestamp,
      addressReceiveServices = addressReceiveServices,
      addressReceiveIpAddress = addressReceiveIpAddress,
      addressReceivePort = addressReceivePort,
      addressTransServices = addressTransServices,
      addressTransIpAddress = addressTransIpAddress,
      addressTransPort = addressTransPort,
      nonce = nonce,
      userAgent = userAgent,
      startHeight = startHeight,
      relay = relay
    )
  }

  def write(versionMessage: VersionMessage): ByteVector = {
    versionMessage.version.bytes ++
      versionMessage.services.bytes ++
      versionMessage.timestamp.bytes.reverse ++
      versionMessage.addressReceiveServices.bytes ++
      NetworkIpAddress.writeAddress(versionMessage.addressReceiveIpAddress) ++
      //encode hex returns 8 characters, but we only need the last 4 since port number is a uint16
      //check for precision loss here?
      ByteVector.fromShort(versionMessage.addressReceivePort.toShort) ++
      versionMessage.addressTransServices.bytes ++
      NetworkIpAddress.writeAddress(versionMessage.addressTransIpAddress) ++
      //encode hex returns 8 characters, but we only need the last 4 since port number is a uint16
      //check for precision loss here?
      ByteVector.fromShort(versionMessage.addressTransPort.toShort) ++
      versionMessage.nonce.bytes ++
      versionMessage.userAgentSize.bytes ++
      ByteVector(versionMessage.userAgent.getBytes) ++
      versionMessage.startHeight.bytes.reverse ++
      (if (versionMessage.relay) ByteVector.fromByte(1.toByte)
       else ByteVector.fromByte(0.toByte))
  }

}

object RawVersionMessageSerializer extends RawVersionMessageSerializer 
Example 184
Source File: NetworkIpAddress.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.core.p2p

import java.net.{InetAddress, InetSocketAddress}

import org.bitcoins.core.number.UInt32
import org.bitcoins.core.serializers.p2p._
import org.bitcoins.crypto.{Factory, NetworkElement}
import scodec.bits._

// The NetworkIpAddress trait definition was elided in this excerpt; its companion object follows.
object NetworkIpAddress {
  def writeAddress(iNetAddress: InetAddress): ByteVector = {
    if (iNetAddress.getAddress.size == 4) {
      //this means we need to convert the IPv4 address to an IPv6 address
      //first we have an 80 bit prefix of zeros
      val zeroBytes = ByteVector.fill(10)(0)
      //the next 16 bits are ones
      val oneBytes = hex"ffff"

      val prefix: ByteVector = zeroBytes ++ oneBytes
      val addr = prefix ++ ByteVector(iNetAddress.getAddress)
      addr
    } else {
      ByteVector(iNetAddress.getAddress)
    }
  }

  private case class NetworkIpAddressImpl(
      time: UInt32,
      services: ServiceIdentifier,
      address: InetAddress,
      port: Int)
      extends NetworkIpAddress

  def apply(
      time: UInt32,
      services: ServiceIdentifier,
      address: InetAddress,
      port: Int): NetworkIpAddress = {
    NetworkIpAddressImpl(time, services, address, port)
  }

  def fromBytes(bytes: ByteVector): NetworkIpAddress =
    RawNetworkIpAddressSerializer.read(bytes)

  def fromInetSocketAddress(
      socket: InetSocketAddress,
      services: ServiceIdentifier): NetworkIpAddress = {
    //TODO: this might be wrong, read this time documentation above
    val timestamp = UInt32(System.currentTimeMillis() / 1000)

    NetworkIpAddress(
      time = timestamp,
      services = services,
      address = socket.getAddress,
      port = socket.getPort
    )
  }
} 
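writeAddress above widens a 4-byte IPv4 address into the 16-byte IPv4-mapped IPv6 form used on the wire: an 80-bit zero prefix, sixteen one bits, then the IPv4 bytes. A standalone sketch of the same mapping using only the JDK, with a plain Array[Byte] standing in for scodec's ByteVector:

import java.net.InetAddress

object Ipv4MappedSketch {
  // 10 zero bytes, then 0xff 0xff, then the 4 IPv4 bytes: the 16-byte
  // IPv4-mapped IPv6 layout produced by NetworkIpAddress.writeAddress above.
  def toIpv6Mapped(address: InetAddress): Array[Byte] = {
    val raw = address.getAddress
    if (raw.length == 4) Array.fill[Byte](10)(0) ++ Array(0xff.toByte, 0xff.toByte) ++ raw
    else raw // IPv6 addresses are already 16 bytes
  }

  def main(args: Array[String]): Unit = {
    val mapped = toIpv6Mapped(InetAddress.getByName("127.0.0.1"))
    println(mapped.map(b => f"${b & 0xff}%02x").mkString) // 00000000000000000000ffff7f000001
  }
}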
Example 185
Source File: VersionMessageTest.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.core.p2p

import java.net.InetAddress
import java.time.Instant

import org.bitcoins.core.config.MainNet
import org.bitcoins.core.number.{Int32, UInt64}
import org.bitcoins.testkit.core.gen.p2p.ControlMessageGenerator
import org.bitcoins.testkit.util.BitcoinSUnitTest
import scodec.bits._

class VersionMessageTest extends BitcoinSUnitTest {

  it must "have serialization symmetry" in {
    forAll(ControlMessageGenerator.versionMessage) { versionMessage =>
      assert(VersionMessage(versionMessage.hex) == versionMessage)
    }
  }

  it must "have a meaningful toString message" in {
    forAll(ControlMessageGenerator.versionMessage) { version =>
      assert(version.toString.length < 350 + version.userAgent.length())
    }
  }

  "VersionMessage" must "create a new version message to be sent to another node on the network" in {
    val versionMessage = VersionMessage(MainNet, InetAddress.getLocalHost)
    assert(versionMessage.addressReceiveServices.nodeNone)
    versionMessage.addressReceiveIpAddress must be(InetAddress.getLocalHost)
    versionMessage.addressReceivePort must be(MainNet.port)

    assert(versionMessage.addressTransServices.nodeNetwork)
    versionMessage.addressTransIpAddress must be(InetAddress.getLocalHost)
    versionMessage.addressTransPort must be(MainNet.port)

    versionMessage.nonce must be(UInt64.zero)
    versionMessage.startHeight must be(Int32.zero)
    versionMessage.timestamp.toLong must be(Instant.now().toEpochMilli +- 1000)
  }

  it must "correctly deduce service flags" in {
    // extracted from log dump of local bitcoind running 0.17.0.1
    val msgBytes =
      hex"7f1101000d040000000000004ea1035d0000000000000000000000000000000000000000000000000000000000000d04000000000000000000000000000000000000000000000000fa562b93b3113e02122f5361746f7368693a302e31372e302e312f6800000001"
    val versionMessage = VersionMessage.fromBytes(msgBytes)

    assert(versionMessage.services.nodeNetwork)
    assert(!versionMessage.services.nodeGetUtxo)
    assert(versionMessage.services.nodeBloom)
    assert(versionMessage.services.nodeWitness)
    assert(!versionMessage.services.nodeXthin)
    assert(versionMessage.services.nodeNetworkLimited)
    assert(!versionMessage.services.nodeNone)
  }
} 
Example 186
Source File: ExasolConfiguration.scala    From spark-exasol-connector   with Apache License 2.0 5 votes vote down vote up
package com.exasol.spark.util

import java.net.InetAddress

import scala.util.matching.Regex


final case class ExasolConfiguration(
  host: String,
  port: Int,
  username: String,
  password: String,
  max_nodes: Int,
  create_table: Boolean,
  batch_size: Int
)

object ExasolConfiguration {

  val IPv4_DIGITS: String = "(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
  val IPv4_REGEX: Regex = raw"""^$IPv4_DIGITS\.$IPv4_DIGITS\.$IPv4_DIGITS\.$IPv4_DIGITS$$""".r

  def getLocalHost(): String = InetAddress.getLocalHost.getHostAddress

  def checkHost(host: String): String = host match {
    case IPv4_REGEX(_*) => host
    case _ =>
      throw new IllegalArgumentException(
        "The host value should be an ip address of the first Exasol data node!"
      )
  }

  @SuppressWarnings(
    Array("org.wartremover.warts.Overloading", "org.danielnixon.extrawarts.StringOpsPartial")
  )
  def apply(opts: Map[String, String]): ExasolConfiguration =
    ExasolConfiguration(
      host = checkHost(opts.getOrElse("host", getLocalHost())),
      port = opts.getOrElse("port", "8888").toInt,
      username = opts.getOrElse("username", "sys"),
      password = opts.getOrElse("password", "exasol"),
      max_nodes = opts.getOrElse("max_nodes", "200").toInt,
      create_table = opts.getOrElse("create_table", "false").toBoolean,
      batch_size = opts.getOrElse("batch_size", "1000").toInt
    )

} 
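A brief usage sketch of the factory above, assuming the ExasolConfiguration object from this example is on the classpath; keys that are left out fall back to the defaults shown, and the host must already be an IPv4 literal:

import com.exasol.spark.util.ExasolConfiguration

object ExasolConfigurationUsage {
  def main(args: Array[String]): Unit = {
    val conf = ExasolConfiguration(Map(
      "host"     -> "10.0.0.11",
      "username" -> "sys",
      "password" -> "secret"
    ))
    println(conf.port)       // 8888 (default)
    println(conf.batch_size) // 1000 (default)
    // A non-IPv4 host such as "exasol-node-1" would make checkHost throw IllegalArgumentException.
  }
}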
Example 187
Source File: VersionMessageTest.scala    From bitcoin-s-spv-node   with MIT License 5 votes vote down vote up
package org.bitcoins.spvnode.messages.control

import java.net.InetAddress

import org.bitcoins.core.config.MainNet
import org.bitcoins.core.number.{Int32, UInt64}
import org.joda.time.DateTime
import org.scalatest.{FlatSpec, MustMatchers}


class VersionMessageTest extends FlatSpec with MustMatchers {

  "VersionMessage" must "create a new version message to be sent to another node on the network" in {
    val versionMessage = VersionMessage(MainNet, InetAddress.getLocalHost)
    versionMessage.addressReceiveServices must be (UnnamedService)
    versionMessage.addressReceiveIpAddress must be (InetAddress.getLocalHost)
    versionMessage.addressReceivePort must be (MainNet.port)

    versionMessage.addressTransServices must be (NodeNetwork)
    versionMessage.addressTransIpAddress must be (InetAddress.getLocalHost)
    versionMessage.addressTransPort must be (MainNet.port)

    versionMessage.nonce must be (UInt64.zero)
    versionMessage.startHeight must be (Int32.zero)
    versionMessage.timestamp.underlying must be (DateTime.now.getMillis +- 1000)
  }
} 
Example 188
Source File: RawNetworkIpAddressSerializer.scala    From bitcoin-s-spv-node   with MIT License 5 votes vote down vote up
package org.bitcoins.spvnode.serializers.messages.control

import java.net.InetAddress

import org.bitcoins.core.number.UInt32
import org.bitcoins.core.serializers.RawBitcoinSerializer
import org.bitcoins.core.util.{BitcoinSLogger, BitcoinSUtil, NumberUtil}
import org.bitcoins.spvnode.messages.control.ServiceIdentifier
import org.bitcoins.spvnode.util.{BitcoinSpvNodeUtil, NetworkIpAddress}


trait RawNetworkIpAddressSerializer extends RawBitcoinSerializer[NetworkIpAddress] with BitcoinSLogger {

  def read(bytes : List[Byte]) : NetworkIpAddress = {
    val time = UInt32(bytes.take(4).reverse)
    val services = ServiceIdentifier(bytes.slice(4,12))
    val ipBytes = bytes.slice(12,28)
    val ipAddress = InetAddress.getByAddress(ipBytes.toArray)
    val port = NumberUtil.toLong(bytes.slice(28,30)).toInt
    NetworkIpAddress(time,services,ipAddress,port)
  }

  def write(networkIpAddress: NetworkIpAddress) : String = {
    val time = BitcoinSUtil.flipEndianness(networkIpAddress.time.bytes)
    val services = networkIpAddress.services.hex
    val ipAddress = BitcoinSpvNodeUtil.writeAddress(networkIpAddress.address)
    //uint16s are only 4 hex characters
    val port = BitcoinSUtil.encodeHex(networkIpAddress.port).slice(4,8)
    time + services + ipAddress + port
  }


}

object RawNetworkIpAddressSerializer extends RawNetworkIpAddressSerializer 
Example 189
Source File: HostAddressResolver.scala    From seahorse   with Apache License 2.0 5 votes vote down vote up
package ai.deepsense.workflowexecutor.executor

import java.net.{Inet6Address, InetAddress, NetworkInterface}

import scala.util.Try

import ai.deepsense.commons.utils.Logging

object HostAddressResolver extends Logging {

  def findHostAddress(): InetAddress = {
    import collection.JavaConversions._
    Try {
      val interfaces = NetworkInterface.getNetworkInterfaces.toIterable
      interfaces.flatMap { n =>
        n.getInetAddresses.toIterable.filter {
          address =>
            !address.isInstanceOf[Inet6Address] &&
            !address.isLoopbackAddress &&
            !address.isSiteLocalAddress &&
            !address.isLinkLocalAddress &&
            !address.isAnyLocalAddress &&
            !address.isMulticastAddress &&
            !(address.getHostAddress == "255.255.255.255")
        }
      }
    }.get.headOption.getOrElse(InetAddress.getByName("127.0.0.1"))
  }
} 
Example 190
Source File: LauncherBackend.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.launcher

import java.net.{InetAddress, Socket}

import org.apache.spark.{SPARK_VERSION, SparkConf}
import org.apache.spark.launcher.LauncherProtocol._
import org.apache.spark.util.{ThreadUtils, Utils}


  protected def onDisconnected() : Unit = { }

  private def fireStopRequest(): Unit = {
    val thread = LauncherBackend.threadFactory.newThread(new Runnable() {
      override def run(): Unit = Utils.tryLogNonFatalError {
        onStopRequest()
      }
    })
    thread.start()
  }

  private class BackendConnection(s: Socket) extends LauncherConnection(s) {

    override protected def handle(m: Message): Unit = m match {
      case _: Stop =>
        fireStopRequest()

      case _ =>
        throw new IllegalArgumentException(s"Unexpected message type: ${m.getClass().getName()}")
    }

    override def close(): Unit = {
      try {
        super.close()
      } finally {
        onDisconnected()
        _isConnected = false
      }
    }

  }

}

private object LauncherBackend {

  val threadFactory = ThreadUtils.namedThreadFactory("LauncherBackend")

} 
Example 191
Source File: PythonGatewayServer.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.api.python

import java.io.{DataOutputStream, File, FileOutputStream}
import java.net.InetAddress
import java.nio.charset.StandardCharsets.UTF_8
import java.nio.file.Files

import py4j.GatewayServer

import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils


private[spark] object PythonGatewayServer extends Logging {
  initializeLogIfNecessary(true)

  def main(args: Array[String]): Unit = {
    val secret = Utils.createSecret(new SparkConf())

    // Start a GatewayServer on an ephemeral port. Make sure the callback client is configured
    // with the same secret, in case the app needs callbacks from the JVM to the underlying
    // python processes.
    val localhost = InetAddress.getLoopbackAddress()
    val gatewayServer: GatewayServer = new GatewayServer.GatewayServerBuilder()
      .authToken(secret)
      .javaPort(0)
      .javaAddress(localhost)
      .callbackClient(GatewayServer.DEFAULT_PYTHON_PORT, localhost, secret)
      .build()

    gatewayServer.start()
    val boundPort: Int = gatewayServer.getListeningPort
    if (boundPort == -1) {
      logError("GatewayServer failed to bind; exiting")
      System.exit(1)
    } else {
      logDebug(s"Started PythonGatewayServer on port $boundPort")
    }

    // Communicate the connection information back to the python process by writing the
    // information in the requested file. This needs to match the read side in java_gateway.py.
    val connectionInfoPath = new File(sys.env("_PYSPARK_DRIVER_CONN_INFO_PATH"))
    val tmpPath = Files.createTempFile(connectionInfoPath.getParentFile().toPath(),
      "connection", ".info").toFile()

    val dos = new DataOutputStream(new FileOutputStream(tmpPath))
    dos.writeInt(boundPort)

    val secretBytes = secret.getBytes(UTF_8)
    dos.writeInt(secretBytes.length)
    dos.write(secretBytes, 0, secretBytes.length)
    dos.close()

    if (!tmpPath.renameTo(connectionInfoPath)) {
      logError(s"Unable to write connection information to $connectionInfoPath.")
      System.exit(1)
    }

    // Exit on EOF or broken pipe to ensure that this process dies when the Python driver dies:
    while (System.in.read() != -1) {
      // Do nothing
    }
    logDebug("Exiting due to broken pipe from Python driver")
    System.exit(0)
  }
} 
Example 192
Source File: DockerUtils.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.util

import java.net.{Inet4Address, InetAddress, NetworkInterface}

import scala.collection.JavaConverters._
import scala.sys.process._
import scala.util.Try

private[spark] object DockerUtils {

  def getDockerIp(): String = {
    
    def findFromDockerMachine(): Option[String] = {
      sys.env.get("DOCKER_MACHINE_NAME").flatMap { name =>
        Try(Seq("/bin/bash", "-c", s"docker-machine ip $name 2>/dev/null").!!.trim).toOption
      }
    }
    sys.env.get("DOCKER_IP")
      .orElse(findFromDockerMachine())
      .orElse(Try(Seq("/bin/bash", "-c", "boot2docker ip 2>/dev/null").!!.trim).toOption)
      .getOrElse {
        // This block of code is based on Utils.findLocalInetAddress(), but is modified to blacklist
        // certain interfaces.
        val address = InetAddress.getLocalHost
        // Address resolves to something like 127.0.1.1, which happens on Debian; try to find
        // a better address using the local network interfaces
        // getNetworkInterfaces returns ifs in reverse order compared to ifconfig output order
        // on unix-like system. On windows, it returns in index order.
        // It's more proper to pick ip address following system output order.
        val blackListedIFs = Seq(
          "vboxnet0",  // Mac
          "docker0"    // Linux
        )
        val activeNetworkIFs = NetworkInterface.getNetworkInterfaces.asScala.toSeq.filter { i =>
          !blackListedIFs.contains(i.getName)
        }
        val reOrderedNetworkIFs = activeNetworkIFs.reverse
        for (ni <- reOrderedNetworkIFs) {
          val addresses = ni.getInetAddresses.asScala
            .filterNot(addr => addr.isLinkLocalAddress || addr.isLoopbackAddress).toSeq
          if (addresses.nonEmpty) {
            val addr = addresses.find(_.isInstanceOf[Inet4Address]).getOrElse(addresses.head)
            // because of Inet6Address.toHostName may add interface at the end if it knows about it
            val strippedAddress = InetAddress.getByAddress(addr.getAddress)
            return strippedAddress.getHostAddress
          }
        }
        address.getHostAddress
      }
  }
} 
Example 193
Source File: Connectors.scala    From scylla-migrator   with Apache License 2.0 5 votes vote down vote up
package com.scylladb.migrator

import java.net.InetAddress

import com.datastax.spark.connector.cql.{
  CassandraConnector,
  CassandraConnectorConf,
  NoAuthConf,
  PasswordAuthConf
}
import com.scylladb.migrator.config.{ Credentials, SourceSettings, TargetSettings }
import org.apache.spark.SparkConf

object Connectors {
  def sourceConnector(sparkConf: SparkConf, sourceSettings: SourceSettings.Cassandra) =
    new CassandraConnector(
      CassandraConnectorConf(sparkConf).copy(
        hosts = Set(InetAddress.getByName(sourceSettings.host)),
        port  = sourceSettings.port,
        authConf = sourceSettings.credentials match {
          case None                                  => NoAuthConf
          case Some(Credentials(username, password)) => PasswordAuthConf(username, password)
        },
        maxConnectionsPerExecutor = sourceSettings.connections,
        queryRetryCount           = -1
      )
    )

  def targetConnector(sparkConf: SparkConf, targetSettings: TargetSettings) =
    new CassandraConnector(
      CassandraConnectorConf(sparkConf).copy(
        hosts = Set(InetAddress.getByName(targetSettings.host)),
        port  = targetSettings.port,
        authConf = targetSettings.credentials match {
          case None                                  => NoAuthConf
          case Some(Credentials(username, password)) => PasswordAuthConf(username, password)
        },
        maxConnectionsPerExecutor = targetSettings.connections,
        queryRetryCount           = -1
      )
    )
} 
Example 194
Source File: AkkaDiscoveryNameResolver.scala    From akka-grpc   with Apache License 2.0 5 votes vote down vote up
package akka.grpc.internal

import java.net.{ InetAddress, InetSocketAddress, UnknownHostException }

import akka.discovery.ServiceDiscovery.ResolvedTarget
import akka.discovery.{ Lookup, ServiceDiscovery }
import akka.grpc.GrpcClientSettings
import io.grpc.{ Attributes, EquivalentAddressGroup, NameResolver, Status }
import io.grpc.NameResolver.Listener

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ ExecutionContext, Promise }
import scala.util.{ Failure, Success }

class AkkaDiscoveryNameResolver(
    discovery: ServiceDiscovery,
    defaultPort: Int,
    serviceName: String,
    portName: Option[String],
    protocol: Option[String],
    resolveTimeout: FiniteDuration)(implicit val ec: ExecutionContext)
    extends NameResolver {
  override def getServiceAuthority: String = serviceName

  val listener: Promise[Listener] = Promise()

  override def start(l: Listener): Unit = {
    listener.trySuccess(l)
    lookup(l)
  }

  override def refresh(): Unit =
    listener.future.onComplete {
      case Success(l) => lookup(l)
      case Failure(_) => // We never fail this promise
    }

  def lookup(listener: Listener): Unit = {
    discovery.lookup(Lookup(serviceName, portName, protocol), resolveTimeout).onComplete {
      case Success(result) =>
        try {
          listener.onAddresses(addresses(result.addresses), Attributes.EMPTY)
        } catch {
          case e: UnknownHostException =>
            // TODO at least log
            listener.onError(Status.UNKNOWN.withDescription(e.getMessage))
        }
      case Failure(e) =>
        // TODO at least log
        listener.onError(Status.UNKNOWN.withDescription(e.getMessage))
    }
  }

  @throws[UnknownHostException]
  private def addresses(addresses: Seq[ResolvedTarget]) = {
    import scala.collection.JavaConverters._
    addresses
      .map(target => {
        val port = target.port.getOrElse(defaultPort)
        val address = target.address.getOrElse(InetAddress.getByName(target.host))
        new EquivalentAddressGroup(new InetSocketAddress(address, port))
      })
      .asJava
  }

  override def shutdown(): Unit = ()
}

object AkkaDiscoveryNameResolver {
  def apply(settings: GrpcClientSettings)(implicit ec: ExecutionContext): AkkaDiscoveryNameResolver =
    new AkkaDiscoveryNameResolver(
      settings.serviceDiscovery,
      settings.defaultPort,
      settings.serviceName,
      settings.servicePortName,
      settings.serviceProtocol,
      settings.resolveTimeout)
} 
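Wiring the resolver into a client normally goes through GrpcClientSettings; a minimal sketch, assuming the usingServiceDiscovery factory is available in this akka-grpc version and that an Akka Discovery mechanism is configured ("echo-service" is a hypothetical service name):

import akka.actor.ActorSystem
import akka.grpc.GrpcClientSettings
import akka.grpc.internal.AkkaDiscoveryNameResolver

object ResolverSketch {
  def main(args: Array[String]): Unit = {
    implicit val system: ActorSystem = ActorSystem("resolver-sketch")
    import system.dispatcher

    // The settings carry the ServiceDiscovery instance and service name used by the resolver.
    val settings = GrpcClientSettings.usingServiceDiscovery("echo-service")
    val resolver = AkkaDiscoveryNameResolver(settings)
    println(resolver.getServiceAuthority)
  }
}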
Example 195
Source File: NetUtils.scala    From mist   with Apache License 2.0 5 votes vote down vote up
package io.hydrosphere.mist.utils

import java.net.{Inet4Address, InetAddress, NetworkInterface}

import scala.collection.JavaConverters._

object NetUtils {

  
  def findLocalInetAddress(): InetAddress = {
    val address = InetAddress.getLocalHost
    if (address.isLoopbackAddress) {
      val activeNetworkIFs = NetworkInterface.getNetworkInterfaces.asScala.toList.reverse
      for (ni <- activeNetworkIFs) {
        val addresses = ni.getInetAddresses.asScala.toList
          .filterNot(addr => addr.isLinkLocalAddress || addr.isLoopbackAddress)
        if (addresses.nonEmpty) {
          val addr = addresses.find(_.isInstanceOf[Inet4Address]).getOrElse(addresses.head)
          val strippedAddress = InetAddress.getByAddress(addr.getAddress)
          return strippedAddress
        }
      }
    }
    address
  }
} 
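A minimal usage sketch for the helper above (the object name is hypothetical; it assumes the mist utils module is on the classpath):

import io.hydrosphere.mist.utils.NetUtils

object LocalAddressSketch extends App {
  // Prefers a non-loopback, non-link-local IPv4 address; falls back to InetAddress.getLocalHost.
  val addr = NetUtils.findLocalInetAddress()
  println(s"Local address: ${addr.getHostAddress}")
}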
Example 196
Source File: JsonEncoderSpec.scala    From logback-json-logger   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.play.logging
import java.io.{PrintWriter, StringWriter}
import java.net.InetAddress

import ch.qos.logback.classic.Level
import ch.qos.logback.classic.spi.{ILoggingEvent, ThrowableProxy}
import ch.qos.logback.core.ContextBase
import org.apache.commons.lang3.time.FastDateFormat
import org.mockito.Mockito.when
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import org.scalatestplus.mockito.MockitoSugar
import play.api.libs.json.{JsLookupResult, Json}

import scala.collection.JavaConverters._

class JsonEncoderSpec extends AnyWordSpec with Matchers with MockitoSugar {

  "Json-encoded message" should {
    "contain all required fields" in {

      val jsonEncoder = new JsonEncoder()
      val event       = mock[ILoggingEvent]

      when(event.getTimeStamp).thenReturn(1)
      when(event.getLevel).thenReturn(Level.INFO)
      when(event.getThreadName).thenReturn("my-thread")
      when(event.getFormattedMessage).thenReturn("my-message")
      when(event.getLoggerName).thenReturn("logger-name")
      when(event.getMDCPropertyMap).thenReturn(Map("myMdcProperty" -> "myMdcValue").asJava)

      val testException = new Exception("test-exception")
      val stringWriter  = new StringWriter()
      testException.printStackTrace(new PrintWriter(stringWriter))
      when(event.getThrowableProxy).thenReturn(new ThrowableProxy(testException))

      jsonEncoder.setContext {
        val ctx = new ContextBase()
        ctx.putProperty("myKey", "myValue")
        ctx
      }

      val result       = new String(jsonEncoder.encode(event), "UTF-8")
      val resultAsJson = Json.parse(result)

      (resultAsJson \ "app").asString           shouldBe "my-app-name"
      (resultAsJson \ "hostname").asString      shouldBe InetAddress.getLocalHost.getHostName
      (resultAsJson \ "timestamp").asString     shouldBe FastDateFormat.getInstance("yyyy-MM-dd HH:mm:ss.SSSZZ").format(1)
      (resultAsJson \ "message").asString       shouldBe "my-message"
      (resultAsJson \ "exception").asString     should include("test-exception")
      (resultAsJson \ "exception").asString     should include("java.lang.Exception")
      (resultAsJson \ "exception").asString     should include(stringWriter.toString)
      (resultAsJson \ "logger").asString        shouldBe "logger-name"
      (resultAsJson \ "thread").asString        shouldBe "my-thread"
      (resultAsJson \ "level").asString         shouldBe "INFO"
      (resultAsJson \ "mykey").asString         shouldBe "myValue"
      (resultAsJson \ "mymdcproperty").asString shouldBe "myMdcValue"

    }
  }

  implicit class JsLookupResultOps(jsLookupResult: JsLookupResult) {
    def asString: String = jsLookupResult.get.as[String]
  }

} 
Example 197
Source File: MessageChunkHeader.scala    From spark1.52   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.network.nio

import java.net.{InetAddress, InetSocketAddress}
import java.nio.ByteBuffer

private[nio] class MessageChunkHeader(
    val typ: Long,
    val id: Int,
    val totalSize: Int,
    val chunkSize: Int,
    val other: Int,
    val hasError: Boolean,
    val securityNeg: Int,
    val address: InetSocketAddress) {
  lazy val buffer = {
    // No need to change this, at 'use' time, we do a reverse lookup of the hostname.
    // Refer to network.Connection
    val ip = address.getAddress.getAddress()
    val port = address.getPort()
    ByteBuffer.
      allocate(MessageChunkHeader.HEADER_SIZE).
      putLong(typ).
      putInt(id).
      putInt(totalSize).
      putInt(chunkSize).
      putInt(other).
      put(if (hasError) 1.asInstanceOf[Byte] else 0.asInstanceOf[Byte]).
      putInt(securityNeg).
      putInt(ip.size).
      put(ip).
      putInt(port).
      position(MessageChunkHeader.HEADER_SIZE).
      flip.asInstanceOf[ByteBuffer]
  }

  override def toString: String = {
    "" + this.getClass.getSimpleName + ":" + id + " of type " + typ +
      " and sizes " + totalSize + " / " + chunkSize + " bytes, securityNeg: " + securityNeg
  }

}


private[nio] object MessageChunkHeader {
  val HEADER_SIZE = 45

  def create(buffer: ByteBuffer): MessageChunkHeader = {
    if (buffer.remaining != HEADER_SIZE) {
      throw new IllegalArgumentException("Cannot convert buffer data to Message")
    }
    val typ = buffer.getLong()
    val id = buffer.getInt()
    val totalSize = buffer.getInt()
    val chunkSize = buffer.getInt()
    val other = buffer.getInt()
    val hasError = buffer.get() != 0
    val securityNeg = buffer.getInt()
    val ipSize = buffer.getInt()
    val ipBytes = new Array[Byte](ipSize)
    buffer.get(ipBytes)
    val ip = InetAddress.getByAddress(ipBytes)
    val port = buffer.getInt()
    new MessageChunkHeader(typ, id, totalSize, chunkSize, other, hasError, securityNeg,
      new InetSocketAddress(ip, port))
  }
} 
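A round-trip sketch of the fixed 45-byte layout above; the classes are private[nio], so this only compiles inside the org.apache.spark.network.nio package, and all field values are arbitrary:

package org.apache.spark.network.nio

import java.net.{InetAddress, InetSocketAddress}

object MessageChunkHeaderRoundTrip {
  def main(args: Array[String]): Unit = {
    val original = new MessageChunkHeader(typ = 1L, id = 42, totalSize = 1024, chunkSize = 512,
      other = 0, hasError = false, securityNeg = 0,
      address = new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 7077))
    // The lazy buffer is already flipped, so it can be handed straight to create().
    val decoded = MessageChunkHeader.create(original.buffer)
    println(decoded)
  }
}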
Example 198
Source File: ExamplesTest.scala    From json-schema-codegen   with Apache License 2.0 5 votes vote down vote up
import java.net.{Inet6Address, InetAddress, Inet4Address, URI}
import java.util.Date

import argonaut.Argonaut._
import argonaut._
import org.scalatest.{FlatSpec, Matchers}
import product.vox.shop._


class ExamplesTest extends FlatSpec with Matchers {


  "AdditionalPropertiesJson" should "encode and decode" in {
    import additional.Codecs._
    test(additional.Properties("bvalue", Some(Map("p1" -> additional.PropertiesAdditional(1)))))
  }

  "AdditionalPropertiesOnlyJson" should "encode and decode" in {
    import additional.properties.Codecs._
    test(additional.properties.Only(Some(Map("p1" -> additional.properties.OnlyAdditional(1)))))
  }

  "EnumStrings" should "encode and decode" in {
    import Codecs._
    test(Strings.a)
    test(Strings.b)
  }

  "EnumIntegers" should "encode and decode" in {
    import Codecs._
    test(Integers.v10)
    test(Integers.v20)
  }

  "Formats" should "encode and decode" in {
    import Codecs._
    test(Formats(
      new URI("http://uri/address"),
      InetAddress.getByName("127.0.0.1").asInstanceOf[Inet4Address],
      InetAddress.getByName("FE80:0000:0000:0000:0202:B3FF:FE1E:8329").asInstanceOf[Inet6Address],
      new Date()
    ))
  }

  "Product" should "decode from string and encode to string" in {
    import product.vox.shop.Codecs._
    val js = """{"name":"Recharge Cards (5 PIN)","prices":[{"cost":0.0187,"currency":"USD","moq":200000}],"eid":"iso-card-5-pin","description":"<p>ISO card, 5 PINs, printed 4 colour front and back</p>\n<p>Every card option shown below meets Tier 1 operator quality standards, at a competitive pricing including freight to your country that’s always openly visible, with streamlined fulfillment and support included, creating what we believe is the best overall value at the lowest total cost of ownership in the industry.</p>\n<p>Material:        Cardboard 300 GSM, UV varnish both sides</p>\n<p>Scratch panel:   Silver/Black Ink with black overprint</p> \n<p>Individually plastic wrapped in chain of 50 cards</p>\n<p>Small boxes of 500 cards, Master Carton of 5000 cards</p>\n<p>Alternate names: Scratch cards, RCV, top-up cards</p>\n","properties":[{"name":"Overscratch Protection","options":[{"name":"No protection"},{"name":"Protective measures against over scratching","prices":[{"cost":0.0253,"currency":"USD","moq":200000},{"cost":0.021,"currency":"USD","moq":500000},{"cost":0.02,"currency":"USD","moq":1000000},{"cost":0.0188,"currency":"USD","moq":5000000,"leadtime":21},{"cost":0.0173,"currency":"USD","moq":10000000},{"cost":0.0171,"currency":"USD","moq":50000000,"leadtime":28}]}]},{"name":"Payment terms","options":[{"name":"Payment on shipment readiness"},{"name":"Net 30 (subject to approval)"}]},{"name":"Order Timing","options":[{"name":"Ship order when ready"},{"name":"Pre-order for shipment in 3 months"}]}],"client":"112","sample":{"price":{"cost":250,"currency":"USD"}},"category":"recharge_cards","leadtime":14,"imageUrl":["https://d2w2n7dk76p3lq.cloudfront.net/product_image/recharge_cards/iso-5pin.png"],"types":[{"name":"Recharge Cards (5 PIN)","prices":[{"cost":0.0187,"currency":"USD","moq":200000},{"cost":0.0175,"currency":"USD","moq":500000},{"cost":0.0162,"currency":"USD","moq":1000000},{"cost":0.0153,"currency":"USD","moq":5000000,"leadtime":21},{"cost":0.0138,"currency":"USD","moq":10000000,"leadtime":28},{"cost":0.0137,"currency":"USD","moq":50000000,"leadtime":28}]}],"presentation":1000}"""
    val po = js.decodeValidation[Product]
    println(po)
    po.isSuccess shouldBe true
    test(po.toOption.get)
  }


  def test[T: CodecJson](value: T) = {
    val json = value.asJson
    println(json)
    json.jdecode[T] shouldBe DecodeResult.ok(value)
  }
} 
Example 199
Source File: HostAddressResolver.scala    From seahorse   with Apache License 2.0 5 votes vote down vote up
package ai.deepsense.sessionmanager.service

import java.net.{Inet6Address, InetAddress, NetworkInterface}

import scala.util.Try

import ai.deepsense.commons.utils.Logging

object HostAddressResolver extends Logging {

  def getHostAddress(): String = {
    val address = if (System.getenv("MANAGED_BY") == "TAP") {
      logger.info("Getting host address from environment")
      getHostAddressFromEnv()
    } else {
      logger.info("Getting host address from network interface")
      getHostAddressFromInterface()
    }
    logger.info(s"Host address: $address")
    address
  }

  private def getHostAddressFromInterface(): String = {
    import collection.JavaConversions._
    Try {
      val interfaces = NetworkInterface.getNetworkInterfaces.toIterable
      interfaces.flatMap { n =>
        n.getInetAddresses.toIterable.filter {
          address =>
            !address.isInstanceOf[Inet6Address] &&
              !address.isLoopbackAddress &&
              !address.isLinkLocalAddress &&
              !address.isAnyLocalAddress &&
              !address.isMulticastAddress &&
              !(address.getHostAddress == "255.255.255.255")
        }
      }
    }.get.headOption.getOrElse(InetAddress.getByName("127.0.0.1")).getHostAddress
  }

  private def getHostAddressFromEnv(): String = {
    val hostname = System.getenv("HOSTNAME")
    val podId = hostname.toUpperCase.split("-").head
    val clusterIp = System.getenv(s"${podId}_SERVICE_HOST")
    clusterIp
  }
} 
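Resolving the address is a single call; a minimal sketch (the object name is hypothetical):

import ai.deepsense.sessionmanager.service.HostAddressResolver

object HostAddressSketch extends App {
  // Reads the TAP environment variables when MANAGED_BY=TAP, otherwise scans network interfaces.
  println(HostAddressResolver.getHostAddress())
}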
Example 200
Source File: RawVersionMessageSerializer.scala    From bitcoin-s-spv-node   with MIT License 5 votes vote down vote up
package org.bitcoins.spvnode.serializers.messages.control

import java.net.InetAddress

import org.bitcoins.core.number.{Int32, Int64, UInt32, UInt64}
import org.bitcoins.core.protocol.CompactSizeUInt
import org.bitcoins.core.serializers.RawBitcoinSerializer
import org.bitcoins.core.util.{BitcoinSLogger, BitcoinSUtil}
import org.bitcoins.spvnode.messages.VersionMessage
import org.bitcoins.spvnode.messages.control.{ServiceIdentifier, VersionMessage}
import org.bitcoins.spvnode.util.BitcoinSpvNodeUtil
import org.bitcoins.spvnode.versions.ProtocolVersion


trait RawVersionMessageSerializer extends RawBitcoinSerializer[VersionMessage] with BitcoinSLogger {

  def read(bytes : List[Byte]) : VersionMessage = {
    val version = ProtocolVersion(bytes.take(4))
    val services = ServiceIdentifier(bytes.slice(4,12))
    val timestamp = Int64(bytes.slice(12,20).reverse)
    val addressReceiveServices = ServiceIdentifier(bytes.slice(20,28))
    val addressReceiveIpAddress = InetAddress.getByAddress(bytes.slice(28,44).toArray)
    val addressReceivePort = UInt32(bytes.slice(44,46)).underlying.toInt
    val addressTransServices = ServiceIdentifier(bytes.slice(46,54))
    val addressTransIpAddress = InetAddress.getByAddress(bytes.slice(54,70).toArray)
    val addressTransPort = UInt32(bytes.slice(70,72)).underlying.toInt
    val nonce = UInt64(bytes.slice(72,80))
    val userAgentSize = CompactSizeUInt.parseCompactSizeUInt(bytes.slice(80,bytes.size))
    val userAgentBytesStartIndex = 80 + userAgentSize.size.toInt
    val userAgentBytes = bytes.slice(userAgentBytesStartIndex, userAgentBytesStartIndex + userAgentSize.num.toInt)
    val userAgent = userAgentBytes.map(_.toChar).mkString
    val startHeightStartIndex = (userAgentBytesStartIndex + userAgentSize.num.toInt).toInt
    val startHeight = Int32(bytes.slice(startHeightStartIndex, startHeightStartIndex + 4).reverse)
    val relay = bytes(startHeightStartIndex + 4) != 0

    VersionMessage(version,services,timestamp, addressReceiveServices, addressReceiveIpAddress,
      addressReceivePort, addressTransServices, addressTransIpAddress, addressTransPort,
      nonce, userAgent, startHeight, relay)
  }

  def write(versionMessage: VersionMessage) : String = {
    versionMessage.version.hex + versionMessage.services.hex +
      BitcoinSUtil.flipEndianness(versionMessage.timestamp.hex) +
      versionMessage.addressReceiveServices.hex +
      BitcoinSpvNodeUtil.writeAddress(versionMessage.addressReceiveIpAddress) +
      //encode hex returns 8 characters, but we only need the last 4 since port number is a uint16
      BitcoinSUtil.encodeHex(versionMessage.addressReceivePort).slice(4,8) +
      versionMessage.addressTransServices.hex +
      BitcoinSpvNodeUtil.writeAddress(versionMessage.addressTransIpAddress) +
      //encode hex returns 8 characters, but we only need the last 4 since port number is a uint16
      BitcoinSUtil.encodeHex(versionMessage.addressTransPort).slice(4,8) +
      versionMessage.nonce.hex +
      versionMessage.userAgentSize.hex +
      BitcoinSUtil.encodeHex(versionMessage.userAgent.getBytes) +
      BitcoinSUtil.flipEndianness(versionMessage.startHeight.hex) +
      (if (versionMessage.relay) "01" else "00")
  }

}

object RawVersionMessageSerializer extends RawVersionMessageSerializer